text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def to_jd(year, month, day):
'''Convert a Positivist date to Julian day count.'''
legal_date(year, month, day)
gyear = year + YEAR_EPOCH - 1
return (
gregorian.EPOCH - 1 + (365 * (gyear - 1)) +
floor((gyear - 1) / 4) + (-floor((gyear - 1) / 100)) +
floor((gyear - 1) / 400) + (month - 1) * 28 + day
) | [
"def",
"to_jd",
"(",
"year",
",",
"month",
",",
"day",
")",
":",
"legal_date",
"(",
"year",
",",
"month",
",",
"day",
")",
"gyear",
"=",
"year",
"+",
"YEAR_EPOCH",
"-",
"1",
"return",
"(",
"gregorian",
".",
"EPOCH",
"-",
"1",
"+",
"(",
"365",
"*"... | 33.6 | 19.6 |
def discover_json(self):
"""Discovers the JSON format and registers it if available.
To speed up JSON parsing and composing, install `simplejson`::
pip install simplejson
The standard library module `json` will be used by default.
"""
try:
import simplejson as json
except ImportError:
import json
self.register('json', json.loads, json.dumps) | [
"def",
"discover_json",
"(",
"self",
")",
":",
"try",
":",
"import",
"simplejson",
"as",
"json",
"except",
"ImportError",
":",
"import",
"json",
"self",
".",
"register",
"(",
"'json'",
",",
"json",
".",
"loads",
",",
"json",
".",
"dumps",
")"
] | 30.357143 | 19.5 |
def install_os_snaps(snaps, refresh=False):
"""Install OpenStack snaps from channel and with mode
@param snaps: Dictionary of snaps with channels and modes of the form:
{'snap_name': {'channel': 'snap_channel',
'mode': 'snap_mode'}}
Where channel is a snapstore channel and mode is --classic, --devmode
or --jailmode.
@param post_snap_install: Callback function to run after snaps have been
installed
"""
def _ensure_flag(flag):
if flag.startswith('--'):
return flag
return '--{}'.format(flag)
if refresh:
for snap in snaps.keys():
snap_refresh(snap,
_ensure_flag(snaps[snap]['channel']),
_ensure_flag(snaps[snap]['mode']))
else:
for snap in snaps.keys():
snap_install(snap,
_ensure_flag(snaps[snap]['channel']),
_ensure_flag(snaps[snap]['mode'])) | [
"def",
"install_os_snaps",
"(",
"snaps",
",",
"refresh",
"=",
"False",
")",
":",
"def",
"_ensure_flag",
"(",
"flag",
")",
":",
"if",
"flag",
".",
"startswith",
"(",
"'--'",
")",
":",
"return",
"flag",
"return",
"'--{}'",
".",
"format",
"(",
"flag",
")"... | 35.962963 | 18.62963 |
def add_status_line(self, label):
"""Add a status bar line to the table.
This function returns the status bar and it can be modified
from this return value.
"""
status_line = StatusBar(label,
self._sep_start, self._sep_end,
self._fill_char)
self._lines.append(status_line)
return status_line | [
"def",
"add_status_line",
"(",
"self",
",",
"label",
")",
":",
"status_line",
"=",
"StatusBar",
"(",
"label",
",",
"self",
".",
"_sep_start",
",",
"self",
".",
"_sep_end",
",",
"self",
".",
"_fill_char",
")",
"self",
".",
"_lines",
".",
"append",
"(",
... | 36.545455 | 11.909091 |
def do_region(self, x, y, w, h):
"""Apply region selection."""
if (x is None):
self.logger.debug("region: full (nop)")
else:
self.logger.debug("region: (%d,%d,%d,%d)" % (x, y, w, h))
self.image = self.image.crop((x, y, x + w, y + h))
self.width = w
self.height = h | [
"def",
"do_region",
"(",
"self",
",",
"x",
",",
"y",
",",
"w",
",",
"h",
")",
":",
"if",
"(",
"x",
"is",
"None",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"region: full (nop)\"",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
... | 37.777778 | 15.666667 |
def _wait_new_conf(self):
"""Ask the daemon to drop its configuration and wait for a new one
This overrides the default method from GenericInterface
:return: None
"""
with self.app.conf_lock:
logger.warning("My master Arbiter wants me to wait for a new configuration.")
self.app.cur_conf = {} | [
"def",
"_wait_new_conf",
"(",
"self",
")",
":",
"with",
"self",
".",
"app",
".",
"conf_lock",
":",
"logger",
".",
"warning",
"(",
"\"My master Arbiter wants me to wait for a new configuration.\"",
")",
"self",
".",
"app",
".",
"cur_conf",
"=",
"{",
"}"
] | 34.9 | 20 |
def handle_purge(environ, start_response):
"""
Handle a PURGE request.
"""
from utils import is_valid_security, get_cached_files
from settings import DEBUG
server = environ['SERVER_NAME']
try:
request_uri = get_path(environ)
path_and_query = request_uri.lstrip("/")
query_string = environ.get('QUERY_STRING', '')
if is_valid_security('PURGE', query_string):
cached_files = get_cached_files(path_and_query, server)
for i in cached_files:
try:
os.remove(i)
except OSError as e:
return do_500(environ, start_response, e.message)
start_response("204 No Content", [])
return []
else:
return do_405(environ, start_response)
except Http404 as e:
return do_404(environ, start_response, e.message, DEBUG) | [
"def",
"handle_purge",
"(",
"environ",
",",
"start_response",
")",
":",
"from",
"utils",
"import",
"is_valid_security",
",",
"get_cached_files",
"from",
"settings",
"import",
"DEBUG",
"server",
"=",
"environ",
"[",
"'SERVER_NAME'",
"]",
"try",
":",
"request_uri",
... | 36.833333 | 13 |
def check_manifest ():
"""Snatched from roundup.sf.net.
Check that the files listed in the MANIFEST are present when the
source is unpacked."""
try:
f = open('MANIFEST')
except Exception:
print('\n*** SOURCE WARNING: The MANIFEST file is missing!')
return
try:
manifest = [l.strip() for l in f.readlines() if not l.startswith('#')]
finally:
f.close()
err = [line for line in manifest if not os.path.exists(line)]
if err:
n = len(manifest)
print('\n*** SOURCE WARNING: There are files missing (%d/%d found)!' %
(n - len(err), n))
print('\nMissing: '.join(err)) | [
"def",
"check_manifest",
"(",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"'MANIFEST'",
")",
"except",
"Exception",
":",
"print",
"(",
"'\\n*** SOURCE WARNING: The MANIFEST file is missing!'",
")",
"return",
"try",
":",
"manifest",
"=",
"[",
"l",
".",
"strip"... | 34.368421 | 21.210526 |
def send_batches(self, batch_list):
"""Sends a list of batches to the validator.
Args:
batch_list (:obj:`BatchList`): the list of batches
Returns:
dict: the json result data, as a dict
"""
if isinstance(batch_list, BaseMessage):
batch_list = batch_list.SerializeToString()
return self._post('/batches', batch_list) | [
"def",
"send_batches",
"(",
"self",
",",
"batch_list",
")",
":",
"if",
"isinstance",
"(",
"batch_list",
",",
"BaseMessage",
")",
":",
"batch_list",
"=",
"batch_list",
".",
"SerializeToString",
"(",
")",
"return",
"self",
".",
"_post",
"(",
"'/batches'",
",",... | 29.923077 | 18.307692 |
def knock_out(self):
"""Knockout gene by marking it as non-functional and setting all
associated reactions bounds to zero.
The change is reverted upon exit if executed within the model as
context.
"""
self.functional = False
for reaction in self.reactions:
if not reaction.functional:
reaction.bounds = (0, 0) | [
"def",
"knock_out",
"(",
"self",
")",
":",
"self",
".",
"functional",
"=",
"False",
"for",
"reaction",
"in",
"self",
".",
"reactions",
":",
"if",
"not",
"reaction",
".",
"functional",
":",
"reaction",
".",
"bounds",
"=",
"(",
"0",
",",
"0",
")"
] | 34.909091 | 11.909091 |
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._holder._concat_same_type(
[blk.values for blk in to_concat])
placement = placement or slice(0, len(values), 1)
return self.make_block_same_class(values, ndim=self.ndim,
placement=placement) | [
"def",
"concat_same_type",
"(",
"self",
",",
"to_concat",
",",
"placement",
"=",
"None",
")",
":",
"values",
"=",
"self",
".",
"_holder",
".",
"_concat_same_type",
"(",
"[",
"blk",
".",
"values",
"for",
"blk",
"in",
"to_concat",
"]",
")",
"placement",
"=... | 45.888889 | 12.333333 |
def bounding_box_from_annotation(source=None, padding=None, **kwargs):
"""bounding_box_from_annotation(source, padding, **kwargs) -> bounding_box
Creates a bounding box from the given parameters, which are, in general, annotations read using :py:func:`bob.ip.facedetect.read_annotation_file`.
Different kinds of annotations are supported, given by the ``source`` keyword:
* ``direct`` : bounding boxes are directly specified by keyword arguments ``topleft`` and ``bottomright``
* ``eyes`` : the left and right eyes are specified by keyword arguments ``leye`` and ``reye``
* ``left-profile`` : the left eye and the mouth are specified by keyword arguments ``eye`` and ``mouth``
* ``right-profile`` : the right eye and the mouth are specified by keyword arguments ``eye`` and ``mouth``
* ``ellipse`` : the face ellipse as well as face angle and axis radius is provided by keyword arguments ``center``, ``angle`` and ``axis_radius``
If a ``source`` is specified, the according keywords must be given as well.
Otherwise, the source is estimated from the given keyword parameters if possible.
If 'topleft' and 'bottomright' are given (i.e., the 'direct' source), they are taken as is.
Note that the 'bottomright' is NOT included in the bounding box.
Please assure that the aspect ratio of the bounding box is 6:5 (height : width).
For source 'ellipse', the bounding box is computed to capture the whole ellipse, even if it is rotated.
For other sources (i.e., 'eyes'), the center of the two given positions is computed, and the ``padding`` is applied, which is relative to the distance between the two given points.
If ``padding`` is ``None`` (the default) the default_paddings of this source are used instead.
These padding is required to keep an aspect ratio of 6:5.
**Parameters:**
``source`` : str or ``None``
The type of annotations present in the list of keyword arguments, see above.
``padding`` : {'top':float, 'bottom':float, 'left':float, 'right':float}
This padding is added to the center between the given points, to define the top left and bottom right positions in the bounding box; values are relative to the distance between the two given points; ignored for some of the ``source``\s
``kwargs`` : key=value
Further keyword arguments specifying the annotations.
**Returns:**
bounding_box : :py:class:`BoundingBox`
The bounding box that was estimated from the given annotations.
"""
if source is None:
# try to estimate the source
for s,k in available_sources.items():
# check if the according keyword arguments are given
if k[0] in kwargs and k[1] in kwargs:
# check if we already assigned a source before
if source is not None:
raise ValueError("The given list of keywords (%s) is ambiguous. Please specify a source" % kwargs)
# assign source
source = s
# check if a source could be estimated from the keywords
if source is None:
raise ValueError("The given list of keywords (%s) could not be interpreted" % kwargs)
assert source in available_sources
# use default padding if not specified
if padding is None:
padding = default_paddings[source]
keys = available_sources[source]
if source == 'ellipse':
# compute the tight bounding box for the ellipse
angle = kwargs['angle']
axis = kwargs['axis_radius']
center = kwargs['center']
dx = abs(math.cos(angle) * axis[0]) + abs(math.sin(angle) * axis[1])
dy = abs(math.sin(angle) * axis[0]) + abs(math.cos(angle) * axis[1])
top = center[0] - dy
bottom = center[0] + dy
left = center[1] - dx
right = center[1] + dx
elif padding is None:
# There is no padding to be applied -> take nodes as they are
top = kwargs[keys[0]][0]
bottom = kwargs[keys[1]][0]
left = kwargs[keys[0]][1]
right = kwargs[keys[1]][1]
else:
# apply padding
pos_0 = kwargs[keys[0]]
pos_1 = kwargs[keys[1]]
tb_center = float(pos_0[0] + pos_1[0]) / 2.
lr_center = float(pos_0[1] + pos_1[1]) / 2.
distance = math.sqrt((pos_0[0] - pos_1[0])**2 + (pos_0[1] - pos_1[1])**2)
top = tb_center + padding['top'] * distance
bottom = tb_center + padding['bottom'] * distance
left = lr_center + padding['left'] * distance
right = lr_center + padding['right'] * distance
return BoundingBox((top, left), (bottom - top, right - left)) | [
"def",
"bounding_box_from_annotation",
"(",
"source",
"=",
"None",
",",
"padding",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"source",
"is",
"None",
":",
"# try to estimate the source",
"for",
"s",
",",
"k",
"in",
"available_sources",
".",
"items... | 45.673684 | 30.905263 |
def auto_thaw(vault_client, opt):
"""Will thaw into a temporary location"""
icefile = opt.thaw_from
if not os.path.exists(icefile):
raise aomi.exceptions.IceFile("%s missing" % icefile)
thaw(vault_client, icefile, opt)
return opt | [
"def",
"auto_thaw",
"(",
"vault_client",
",",
"opt",
")",
":",
"icefile",
"=",
"opt",
".",
"thaw_from",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"icefile",
")",
":",
"raise",
"aomi",
".",
"exceptions",
".",
"IceFile",
"(",
"\"%s missing\"",
... | 31.375 | 14.5 |
def transition(value, maximum, start, end):
""" Transition between two values.
:param value: Current iteration.
:param maximum: Maximum number of iterations.
:param start: Start value.
:param end: End value.
:returns: Transitional value.
"""
return round(start + (end - start) * value / maximum, 2) | [
"def",
"transition",
"(",
"value",
",",
"maximum",
",",
"start",
",",
"end",
")",
":",
"return",
"round",
"(",
"start",
"+",
"(",
"end",
"-",
"start",
")",
"*",
"value",
"/",
"maximum",
",",
"2",
")"
] | 32.2 | 10.7 |
def _write_config(self):
'''Write the parameters to a file for PhantomJS to read.'''
param_dict = {
'url': self._params.url,
'snapshot_paths': self._params.snapshot_paths,
'wait_time': self._params.wait_time,
'num_scrolls': self._params.num_scrolls,
'smart_scroll': self._params.smart_scroll,
'snapshot': self._params.snapshot,
'viewport_width': self._params.viewport_size[0],
'viewport_height': self._params.viewport_size[1],
'paper_width': self._params.paper_size[0],
'paper_height': self._params.paper_size[1],
'custom_headers': self._params.custom_headers,
'page_settings': self._params.page_settings,
}
if self._params.event_log_filename:
param_dict['event_log_filename'] = \
os.path.abspath(self._params.event_log_filename)
if self._params.action_log_filename:
param_dict['action_log_filename'] = \
os.path.abspath(self._params.action_log_filename)
config_text = json.dumps(param_dict)
self._config_file.write(config_text.encode('utf-8'))
# Close it so the phantomjs process can read it on Windows
self._config_file.close() | [
"def",
"_write_config",
"(",
"self",
")",
":",
"param_dict",
"=",
"{",
"'url'",
":",
"self",
".",
"_params",
".",
"url",
",",
"'snapshot_paths'",
":",
"self",
".",
"_params",
".",
"snapshot_paths",
",",
"'wait_time'",
":",
"self",
".",
"_params",
".",
"w... | 41.16129 | 18.967742 |
def _substitute(self, str):
"""
Substitute words in the string, according to the specified reflections,
e.g. "I'm" -> "you are"
:type str: str
:param str: The string to be mapped
:rtype: str
"""
if not self.attr.get("substitute",True):return str
return self._regex.sub(lambda mo:
self._reflections[mo.string[mo.start():mo.end()]],
str.lower()) | [
"def",
"_substitute",
"(",
"self",
",",
"str",
")",
":",
"if",
"not",
"self",
".",
"attr",
".",
"get",
"(",
"\"substitute\"",
",",
"True",
")",
":",
"return",
"str",
"return",
"self",
".",
"_regex",
".",
"sub",
"(",
"lambda",
"mo",
":",
"self",
"."... | 33.846154 | 15.076923 |
def infer(self, data, initial_proposal=None, full_output=False,**kwargs):
"""
Infer the model parameters, given the data.
auto_convergence=True,
walkers=100, burn=2000, sample=2000, minimum_sample=2000,
convergence_check_frequency=1000, a=2.0, threads=1,
"""
# Apply data masks now so we don't have to do it on the fly.
data, pixels_affected = self._apply_data_mask(data)
# Any channels / parameters to ignore?
matched_channels, missing_channels, ignore_parameters \
= self._match_channels_to_data(data)
parameters = [p for p in self.parameters if p not in ignore_parameters]
#parameters = list(set(self.parameters).difference(ignore_parameters))
logger.debug("Inferring {0} parameters: {1}".format(len(parameters),
", ".join(parameters)))
# What sampling behaviour will we have?
# - Auto-convergence:
# + Sample for `minimum_sample` (default 2000, 200 walkers)
# + Calculate the maximum exponential autocorrelation time for
# all parameters
# + For the rest of the chain, calculate the autocorrelation time
# + Ensure that the number of samples we have is more than
# `effectively_independent_samples` (default 100) times.
# - Specified convergence:
# + Burn for `burn` (default 2000) steps
# + Sample for `sample` (default 2000) steps
kwd = {
"auto_convergence": False, # TODO CHANGE ME
"walkers": 100,
"burn": 2000,
"sample": 2000,
# The minimum_sample, n_tau_exp_as_burn_in, minimum_eis are only
# used if auto_convergence is turned on.
"minimum_sample": 2000,
"maximum_sample": 100000,
"n_tau_exp_as_burn_in": 3,
"minimum_effective_independent_samples": 100,
"check_convergence_frequency": 1000,
"a": 2.0,
"threads": 1
}
# Update from the model, then update from any keyword arguments given.
kwd.update(self._configuration.get("infer", {}).copy())
kwd.update(**kwargs)
# Make some checks.
if kwd["walkers"] % 2 > 0 or kwd["walkers"] < 2 * len(parameters):
raise ValueError("the number of walkers must be an even number and "
"be at least twice the number of model parameters")
check_keywords = ["threads", "a"]
if kwd["auto_convergence"]:
logger.info("Convergence will be estimated automatically.")
check_keywords += ["minimum_sample", "check_convergence_frequency",
"minimum_effective_independent_samples", "n_tau_exp_as_burn_in",
"maximum_sample"]
else:
check_keywords += ["burn", "sample"]
logger.warn("No convergence checks will be done!")
logger.info("Burning for {0} steps and sampling for {1} with {2} "\
"walkers".format(kwd["burn"], kwd["sample"], kwd["walkers"]))
for keyword in check_keywords:
if kwd[keyword] < 1:
raise ValueError("keyword {} must be a positive value".format(
keyword))
# Check for non-standard proposal scales.
if kwd["a"] != 2.0:
logger.warn("Using proposal scale of {0:.2f}".format(kwd["a"]))
# If no initial proposal given, estimate the model parameters.
if initial_proposal is None:
initial_proposal = self.estimate(data)
# Initial proposal could be:
# - an array (N_walkers, N_dimensions)
# - a dictionary containing key/value pairs for the dimensions
if isinstance(initial_proposal, dict):
wavelengths_required = []
for channel, spectrum in zip(matched_channels, data):
if channel is None: continue
z = initial_proposal.get("z",
initial_proposal.get("z_{}".format(channel), 0))
wavelengths_required.append(
[spectrum.disp[0] * (1 - z), spectrum.disp[-1] * (1 - z)])
closest_point = [initial_proposal[p] \
for p in self.grid_points.dtype.names]
subset_bounds = self._initialise_approximator(
closest_point=closest_point,
wavelengths_required=wavelengths_required, force=True, **kwargs)
initial_proposal = self._initial_proposal_distribution(
parameters, initial_proposal, kwd["walkers"])
elif isinstance(initial_proposal, np.ndarray):
initial_proposal = np.atleast_2d(initial_proposal)
if initial_proposal.shape != (kwd["walkers"], len(parameters)):
raise ValueError("initial proposal must be an array of shape "\
"(N_parameters, N_walkers) ({0}, {1})".format(kwd["walkers"],
len(parameters)))
# Prepare the convolution functions.
self._create_convolution_functions(matched_channels, data, parameters)
# Create the sampler.
logger.info("Creating sampler with {0} walkers and {1} threads".format(
kwd["walkers"], kwd["threads"]))
debug = kwargs.get("debug", False)
sampler = emcee.EnsembleSampler(kwd["walkers"], len(parameters),
inference.ln_probability, a=kwd["a"], threads=kwd["threads"],
args=(parameters, self, data, debug),
kwargs={"matched_channels": matched_channels})
# Regardless of whether we automatically check for convergence or not,
# we will still need to burn in for some minimum amount of time.
if kwd["auto_convergence"]:
# Sample for `minimum_sample` period.
descr, iterations = "", kwd["minimum_sample"]
else:
# Sample for `burn` period
descr, iterations = "burn-in", kwd["burn"]
# Start sampling.
t_init = time()
acceptance_fractions = []
progress_bar = kwargs.get("__show_progress_bar", True)
sampler, init_acceptance_fractions, pos, lnprob, rstate, init_elapsed \
= self._sample(sampler, initial_proposal, iterations, descr=descr,
parameters=parameters, __show_progress_bar=progress_bar)
acceptance_fractions.append(init_acceptance_fractions)
# If we don't have to check for convergence, it's easy:
if not kwd["auto_convergence"]:
# Save the chain and log probabilities before we reset the chain.
burn, sample = kwd["burn"], kwd["sample"]
converged = None # we don't know!
burn_chains = sampler.chain
burn_ln_probabilities = sampler.lnprobability
# Reset the chain.
logger.debug("Resetting chain...")
sampler.reset()
# Sample the posterior.
sampler, prod_acceptance_fractions, pos, lnprob, rstate, t_elapsed \
= self._sample(sampler, pos, kwd["sample"], lnprob0=lnprob,
rstate0=rstate, descr="production", parameters=parameters,
__show_progress_bar=progress_bar)
production_chains = sampler.chain
production_ln_probabilities = sampler.lnprobability
acceptance_fractions.append(prod_acceptance_fractions)
else:
# Start checking for convergence at a frequency
# of check_convergence_frequency
last_state = [pos, lnprob, rstate]
converged, total_steps = False, 0 + iterations
min_eis_required = kwd["minimum_effective_independent_samples"]
while not converged and kwd["maximum_sample"] > total_steps:
# Check for convergence.
# Estimate the exponential autocorrelation time.
try:
tau_exp, rho, rho_max_fit \
= utils.estimate_tau_exp(sampler.chain)
except:
logger.exception("Exception occurred when trying to "
"estimate the exponential autocorrelation time:")
logger.info("To recover, we are temporarily setting tau_exp"
" to {0}".format(total_steps))
tau_exp = total_steps
logger.info("Estimated tau_exp at {0} is {1:.0f}".format(
total_steps, tau_exp))
# Grab everything n_tau_exp_as_burn_in times that.
burn = int(np.ceil(tau_exp)) * kwd["n_tau_exp_as_burn_in"]
sample = sampler.chain.shape[1] - burn
if 1 > sample:
logger.info("Sampler has not converged because {0}x the "
"estimated exponential autocorrelation time of {1:.0f}"
" is step {2}, and we are only at step {3}".format(
kwd["n_tau_exp_as_burn_in"], tau_exp, burn,
total_steps))
else:
# Calculate the integrated autocorrelation time in the
# remaining sample, for every parameter.
tau_int = utils.estimate_tau_int(sampler.chain[:, burn:])
# Calculate the effective number of independent samples in
# each parameter.
num_effective = (kwd["walkers"] * sample)/(2*tau_int)
logger.info("Effective number of independent samples in "
"each parameter:")
for parameter, n_eis in zip(parameters, num_effective):
logger.info("\t{0}: {1:.0f}".format(parameter, n_eis))
if num_effective.min() > min_eis_required:
# Converged.
converged = True
logger.info("Convergence achieved ({0:.0f} > {1:.0f})"\
.format(num_effective.min() > min_eis_required))
# Separate the samples into burn and production..
burn_chains = sampler.chain[:, :burn, :]
burn_ln_probabilities = sampler.lnprobability[:burn]
production_chains = sampler.chain[:, burn:, :]
production_ln_probabilities = sampler.lnprobability[burn:]
break
else:
# Nope.
logger.info("Sampler has not converged because it did "
"not meet the minimum number of effective "
"independent samples ({0:.0f})".format(kwd["n"]))
# Keep sampling.
iterations = kwd["check_convergence_frequency"]
logger.info("Trying for another {0} steps".format(iterations))
pos, lnprob, rstate = last_state
sampler, af, pos, lnprob, rstate, t_elapsed = self._sample(
sampler, pos, iterations, lnprob0=lnprob, rstate0=rstate,
descr="", parameters=parameters,
__show_progress_bar=progress_bar)
total_steps += iterations
acceptance_fractions.append(af)
last_state.extend(pos, lnprob, rstate)
del last_state[:3]
if not converged:
logger.warn("Maximum number of samples ({:.0f}) reached without"
"convergence!".format(kwd["maximum_sample"]))
logger.info("Total time elapsed: {0} seconds".format(time() - t_init))
if sampler.pool:
sampler.pool.close()
sampler.pool.join()
# Stack burn and production information together.
chains = np.hstack([burn_chains, production_chains])
lnprobability = np.hstack([
burn_ln_probabilities, production_ln_probabilities])
acceptance_fractions = np.hstack(acceptance_fractions)
chi_sq, dof, model_fluxes = self._chi_sq(dict(zip(parameters,
[np.percentile(chains[:, burn:, i], 50)
for i in range(len(parameters))])), data)
# Convert velocity scales.
symbol, scale, units = self._preferred_redshift_scale
labels = [] + parameters
scales = np.ones(len(parameters))
if symbol != "z":
for i, parameter in enumerate(parameters):
if parameter == "z" or parameter.startswith("z_"):
chains[:, :, i] *= scale
scales[i] = scale
if "_" in parameter:
labels[i] = "_".join([symbol, parameter.split("_")[1:]])
else:
labels[i] = symbol
logger.debug("Scaled {0} (now {1}) to units of {2}".format(
parameter, labels[i], units))
# Calculate MAP values and associated uncertainties.
theta = OrderedDict()
for i, label in enumerate(labels):
l, c, u = np.percentile(chains[:, burn:, i], [16, 50, 84])
theta[label] = (c, u-c, l-c)
# Re-arrange the chains to be in the same order as the model parameters.
indices = np.array([parameters.index(p) \
for p in self.parameters if p in parameters])
chains = chains[:, :, indices]
# Remove the convolution functions.
if not kwargs.get("__keep_convolution_functions", False):
self._destroy_convolution_functions()
if full_output:
metadata = {
"burn": burn,
"walkers": kwd["walkers"],
"sample": sample,
"parameters": labels,
"scales": scales,
"chi_sq": chi_sq,
"dof": dof
}
return (theta, chains, lnprobability, acceptance_fractions, sampler,
metadata)
return theta | [
"def",
"infer",
"(",
"self",
",",
"data",
",",
"initial_proposal",
"=",
"None",
",",
"full_output",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# Apply data masks now so we don't have to do it on the fly.",
"data",
",",
"pixels_affected",
"=",
"self",
".",
... | 43.625786 | 22.745283 |
def list_active_vms(cwd=None):
'''
Return a list of machine names for active virtual machine on the host,
which are defined in the Vagrantfile at the indicated path.
CLI Example:
.. code-block:: bash
salt '*' vagrant.list_active_vms cwd=/projects/project_1
'''
vms = []
cmd = 'vagrant status'
reply = __salt__['cmd.shell'](cmd, cwd=cwd)
log.info('--->\n%s', reply)
for line in reply.split('\n'): # build a list of the text reply
tokens = line.strip().split()
if len(tokens) > 1:
if tokens[1] == 'running':
vms.append(tokens[0])
return vms | [
"def",
"list_active_vms",
"(",
"cwd",
"=",
"None",
")",
":",
"vms",
"=",
"[",
"]",
"cmd",
"=",
"'vagrant status'",
"reply",
"=",
"__salt__",
"[",
"'cmd.shell'",
"]",
"(",
"cmd",
",",
"cwd",
"=",
"cwd",
")",
"log",
".",
"info",
"(",
"'--->\\n%s'",
","... | 29.666667 | 21.47619 |
def redraw_current_line(self):
""" Redraw the highlighted line """
if self.no_streams:
return
row = self.pads[self.current_pad].getyx()[0]
s = self.filtered_streams[row]
pad = self.pads['streams']
pad.move(row, 0)
pad.clrtoeol()
pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
pad.chgat(curses.A_REVERSE)
pad.move(row, 0)
self.refresh_current_pad() | [
"def",
"redraw_current_line",
"(",
"self",
")",
":",
"if",
"self",
".",
"no_streams",
":",
"return",
"row",
"=",
"self",
".",
"pads",
"[",
"self",
".",
"current_pad",
"]",
".",
"getyx",
"(",
")",
"[",
"0",
"]",
"s",
"=",
"self",
".",
"filtered_stream... | 34.846154 | 12.153846 |
def cmd_wp_movemulti(self, args):
'''handle wp move of multiple waypoints'''
if len(args) < 3:
print("usage: wp movemulti WPNUM WPSTART WPEND <rotation>")
return
idx = int(args[0])
if idx < 1 or idx > self.wploader.count():
print("Invalid wp number %u" % idx)
return
wpstart = int(args[1])
if wpstart < 1 or wpstart > self.wploader.count():
print("Invalid wp number %u" % wpstart)
return
wpend = int(args[2])
if wpend < 1 or wpend > self.wploader.count():
print("Invalid wp number %u" % wpend)
return
if idx < wpstart or idx > wpend:
print("WPNUM must be between WPSTART and WPEND")
return
# optional rotation about center point
if len(args) > 3:
rotation = float(args[3])
else:
rotation = 0
try:
latlon = self.module('map').click_position
except Exception:
print("No map available")
return
if latlon is None:
print("No map click position available")
return
wp = self.wploader.wp(idx)
if not self.wploader.is_location_command(wp.command):
print("WP must be a location command")
return
(lat, lon) = latlon
distance = mp_util.gps_distance(wp.x, wp.y, lat, lon)
bearing = mp_util.gps_bearing(wp.x, wp.y, lat, lon)
for wpnum in range(wpstart, wpend+1):
wp = self.wploader.wp(wpnum)
if not self.wploader.is_location_command(wp.command):
continue
(newlat, newlon) = mp_util.gps_newpos(wp.x, wp.y, bearing, distance)
if wpnum != idx and rotation != 0:
# add in rotation
d2 = mp_util.gps_distance(lat, lon, newlat, newlon)
b2 = mp_util.gps_bearing(lat, lon, newlat, newlon)
(newlat, newlon) = mp_util.gps_newpos(lat, lon, b2+rotation, d2)
if getattr(self.console, 'ElevationMap', None) is not None and wp.frame != mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT:
alt1 = self.console.ElevationMap.GetElevation(newlat, newlon)
alt2 = self.console.ElevationMap.GetElevation(wp.x, wp.y)
if alt1 is not None and alt2 is not None:
wp.z += alt1 - alt2
wp.x = newlat
wp.y = newlon
wp.target_system = self.target_system
wp.target_component = self.target_component
self.wploader.set(wp, wpnum)
self.loading_waypoints = True
self.loading_waypoint_lasttime = time.time()
self.master.mav.mission_write_partial_list_send(self.target_system,
self.target_component,
wpstart, wpend+1)
print("Moved WPs %u:%u to %f, %f rotation=%.1f" % (wpstart, wpend, lat, lon, rotation)) | [
"def",
"cmd_wp_movemulti",
"(",
"self",
",",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"<",
"3",
":",
"print",
"(",
"\"usage: wp movemulti WPNUM WPSTART WPEND <rotation>\"",
")",
"return",
"idx",
"=",
"int",
"(",
"args",
"[",
"0",
"]",
")",
"if",
... | 41.930556 | 19.791667 |
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self | [
"def",
"skew_y",
"(",
"self",
",",
"y",
")",
":",
"self",
".",
"root",
".",
"set",
"(",
"\"transform\"",
",",
"\"%s skewY(%f)\"",
"%",
"(",
"self",
".",
"root",
".",
"get",
"(",
"\"transform\"",
")",
"or",
"''",
",",
"y",
")",
")",
"return",
"self"... | 28.545455 | 16.272727 |
def enable(step: 'projects.ProjectStep'):
"""
Create a print equivalent function that also writes the output to the
project page. The write_through is enabled so that the TextIOWrapper
immediately writes all of its input data directly to the underlying
BytesIO buffer. This is needed so that we can safely access the buffer
data in a multi-threaded environment to display updates while the buffer
is being written to.
:param step:
"""
# Prevent anything unusual from causing buffer issues
restore_default_configuration()
stdout_interceptor = RedirectBuffer(sys.stdout)
sys.stdout = stdout_interceptor
step.report.stdout_interceptor = stdout_interceptor
stderr_interceptor = RedirectBuffer(sys.stderr)
sys.stderr = stderr_interceptor
step.report.stderr_interceptor = stderr_interceptor
stdout_interceptor.active = True
stderr_interceptor.active = True | [
"def",
"enable",
"(",
"step",
":",
"'projects.ProjectStep'",
")",
":",
"# Prevent anything unusual from causing buffer issues",
"restore_default_configuration",
"(",
")",
"stdout_interceptor",
"=",
"RedirectBuffer",
"(",
"sys",
".",
"stdout",
")",
"sys",
".",
"stdout",
... | 36.28 | 19.96 |
def classify_segmented_recording(recording, result_format=None):
    """Classify a recording that is known to contain a single symbol.

    Parameters
    ----------
    recording : string
        The recording in JSON format

    Returns
    -------
    list of dictionaries
        Each dictionary contains the keys 'symbol' and 'probability'. The
        list is sorted descending by probability.
    """
    global single_symbol_classifier
    # Lazily build the module-level classifier on first use and cache it.
    classifier = single_symbol_classifier
    if classifier is None:
        classifier = SingleClassificer()
        single_symbol_classifier = classifier
    return classifier.predict(recording, result_format)
"def",
"classify_segmented_recording",
"(",
"recording",
",",
"result_format",
"=",
"None",
")",
":",
"global",
"single_symbol_classifier",
"if",
"single_symbol_classifier",
"is",
"None",
":",
"single_symbol_classifier",
"=",
"SingleClassificer",
"(",
")",
"return",
"si... | 32.722222 | 19 |
def have_graph(self, graph):
    """Return whether I have a graph by this name."""
    packed = self.pack(graph)
    first_row = self.sql('graphs_named', packed).fetchone()
    return bool(first_row[0])
"def",
"have_graph",
"(",
"self",
",",
"graph",
")",
":",
"graph",
"=",
"self",
".",
"pack",
"(",
"graph",
")",
"return",
"bool",
"(",
"self",
".",
"sql",
"(",
"'graphs_named'",
",",
"graph",
")",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
")"
] | 45.75 | 11.5 |
def _build_model_factories(store):
    """Generate factories to construct objects from schemata.

    :param store: mapping of schema name -> entry dict containing a
        ``'schema'`` key with the JSON schema.
    :return: dict mapping schema name -> warmongo model factory. Entries
        whose schema is missing or rejected by warmongo are logged at
        ``critical`` level and omitted from the result.
    """
    result = {}
    for schemaname in store:
        try:
            schema = store[schemaname]['schema']
        except KeyError:
            schemata_log("No schema found for ", schemaname, lvl=critical, exc=True)
            # Fix: the original fell through with schema=None and attempted
            # warmongo.model_factory(None), producing a second, misleading
            # error log. Skip straight to the next schema instead.
            continue
        try:
            result[schemaname] = warmongo.model_factory(schema)
        except Exception:
            # The exception details are captured by exc=True; the bound
            # name `e` in the original was unused.
            schemata_log("Could not create factory for schema ", schemaname,
                         schema, lvl=critical, exc=True)
    return result
"def",
"_build_model_factories",
"(",
"store",
")",
":",
"result",
"=",
"{",
"}",
"for",
"schemaname",
"in",
"store",
":",
"schema",
"=",
"None",
"try",
":",
"schema",
"=",
"store",
"[",
"schemaname",
"]",
"[",
"'schema'",
"]",
"except",
"KeyError",
":",... | 27.95 | 27.5 |
def remove_timex3s(self, list_timex_ids):
    """
    Removes a list of timex elements from the layer.

    @type list_timex_ids: list (of strings)
    @param list_timex_ids: list of timex identifiers to be removed
    """
    nodes_to_remove = set()
    for timex in self:
        if timex.get_id() in list_timex_ids:
            nodes_to_remove.add(timex.get_node())
            # For removing the previous comment node as well (expected the
            # layer will look like the term layer, where each element is
            # preceded by an XML comment node).
            prv = timex.get_node().getprevious()
            if prv is not None:
                nodes_to_remove.add(prv)
    # Remove after the scan so the tree is not mutated while iterating it.
    for node in nodes_to_remove:
        self.node.remove(node)
"def",
"remove_timex3s",
"(",
"self",
",",
"list_timex_ids",
")",
":",
"nodes_to_remove",
"=",
"set",
"(",
")",
"for",
"timex",
"in",
"self",
":",
"if",
"timex",
".",
"get_id",
"(",
")",
"in",
"list_timex_ids",
":",
"nodes_to_remove",
".",
"add",
"(",
"t... | 40 | 12.588235 |
def createsnippet(self, project_id, title, file_name, code, visibility_level=0):
    """
    Creates an snippet.

    :param project_id: project id to create the snippet under
    :param title: title of the snippet
    :param file_name: filename for the snippet
    :param code: content of the snippet
    :param visibility_level: snippets can be either private (0), internal (10)
        or public (20); any other value is silently ignored
    :return: the created snippet (dict) on success, False on failure
    """
    payload = {
        'id': project_id,
        'title': title,
        'file_name': file_name,
        'code': code,
    }
    # Only forward recognised visibility levels to the API.
    if visibility_level in (0, 10, 20):
        payload['visibility_level'] = visibility_level
    response = requests.post(
        '{0}/{1}/snippets'.format(self.projects_url, project_id),
        data=payload, verify=self.verify_ssl, auth=self.auth,
        headers=self.headers, timeout=self.timeout)
    if response.status_code != 201:
        return False
    return response.json()
"def",
"createsnippet",
"(",
"self",
",",
"project_id",
",",
"title",
",",
"file_name",
",",
"code",
",",
"visibility_level",
"=",
"0",
")",
":",
"data",
"=",
"{",
"'id'",
":",
"project_id",
",",
"'title'",
":",
"title",
",",
"'file_name'",
":",
"file_na... | 40.5 | 22.416667 |
def windowed_run_count(da, window, dim='time'):
    """Return the number of consecutive true values in array for runs at least as long as given duration.

    Parameters
    ----------
    da: N-dimensional Xarray data array (boolean)
      Input data array
    window : int
      Minimum run length.
    dim : Xarray dimension (default = 'time')
      Dimension along which to calculate consecutive run

    Returns
    -------
    out : N-dimensional xarray data array (int)
      Total number of true values part of a consecutive runs of at least `window` long.
    """
    run_lengths = rle(da, dim=dim)
    # Zero out runs shorter than `window`, then total the surviving lengths.
    return run_lengths.where(run_lengths >= window, 0).sum(dim=dim)
"def",
"windowed_run_count",
"(",
"da",
",",
"window",
",",
"dim",
"=",
"'time'",
")",
":",
"d",
"=",
"rle",
"(",
"da",
",",
"dim",
"=",
"dim",
")",
"out",
"=",
"d",
".",
"where",
"(",
"d",
">=",
"window",
",",
"0",
")",
".",
"sum",
"(",
"dim... | 33 | 19.952381 |
def _get_encoder_method(stream_type):
"""A function to get the python type to device cloud type converter function.
:param stream_type: The streams data type
:return: A function that when called with the python object will return the serializable
type for sending to the cloud. If there is no function for the given type, or the `stream_type`
is `None` the returned function will simply return the object unchanged.
"""
if stream_type is not None:
return DSTREAM_TYPE_MAP.get(stream_type.upper(), (lambda x: x, lambda x: x))[1]
else:
return lambda x: x | [
"def",
"_get_encoder_method",
"(",
"stream_type",
")",
":",
"if",
"stream_type",
"is",
"not",
"None",
":",
"return",
"DSTREAM_TYPE_MAP",
".",
"get",
"(",
"stream_type",
".",
"upper",
"(",
")",
",",
"(",
"lambda",
"x",
":",
"x",
",",
"lambda",
"x",
":",
... | 49.166667 | 24.666667 |
def start(track_file,
          twitter_api_key,
          twitter_api_secret,
          twitter_access_token,
          twitter_access_token_secret,
          poll_interval=15,
          unfiltered=False,
          languages=None,
          debug=False,
          outfile=None):
    """Start the stream and block in its polling loop.

    :param track_file: path to the file of tracking terms handed to the
        term checker (presumably re-read each poll -- confirm in
        BasicFileTermChecker)
    :param twitter_api_key: Twitter application API key
    :param twitter_api_secret: Twitter application API secret
    :param twitter_access_token: OAuth access token
    :param twitter_access_token_secret: OAuth access token secret
    :param poll_interval: seconds between polls of the stream loop
    :param unfiltered: passed through to DynamicTwitterStream
    :param languages: optional language filter passed to the stream
    :param debug: when True, attach an extra debug listener
    :param outfile: destination handed to construct_listener
    """
    listener = construct_listener(outfile)
    checker = BasicFileTermChecker(track_file, listener)
    auth = get_tweepy_auth(twitter_api_key,
                           twitter_api_secret,
                           twitter_access_token,
                           twitter_access_token_secret)
    stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
    # Register shutdown hooks before optionally adding the debug listener.
    set_terminate_listeners(stream)
    if debug:
        set_debug_listener(stream)
    begin_stream_loop(stream, poll_interval)
"def",
"start",
"(",
"track_file",
",",
"twitter_api_key",
",",
"twitter_api_secret",
",",
"twitter_access_token",
",",
"twitter_access_token_secret",
",",
"poll_interval",
"=",
"15",
",",
"unfiltered",
"=",
"False",
",",
"languages",
"=",
"None",
",",
"debug",
"=... | 31.346154 | 17.076923 |
def fromurl(url):
    """ Parse patch from an URL, return False
        if an error occured. Note that this also
        can throw urlopen() exceptions.
    """
    patchset = PatchSet(urllib_request.urlopen(url))
    return patchset if patchset.errors == 0 else False
"def",
"fromurl",
"(",
"url",
")",
":",
"ps",
"=",
"PatchSet",
"(",
"urllib_request",
".",
"urlopen",
"(",
"url",
")",
")",
"if",
"ps",
".",
"errors",
"==",
"0",
":",
"return",
"ps",
"return",
"False"
] | 26.777778 | 12.333333 |
def shift_christmas_boxing_days(self, year):
    """ When Christmas and/or Boxing Day falls on a weekend, it is rolled
    forward to the next weekday.
    """
    christmas = date(year, 12, 25)
    boxing_day = date(year, 12, 26)
    boxing_day_label = "{} Shift".format(self.boxing_day_label)
    weekend = self.get_weekend_days()
    shifts = []
    if christmas.weekday() in weekend:
        # Christmas on a weekend pushes both holidays forward; Boxing Day
        # lands on the working day right after the shifted Christmas.
        moved = self.find_following_working_day(christmas)
        shifts.append((moved, "Christmas Shift"))
        shifts.append((moved + timedelta(days=1), boxing_day_label))
    elif boxing_day.weekday() in weekend:
        moved = self.find_following_working_day(boxing_day)
        shifts.append((moved, boxing_day_label))
    return shifts
"def",
"shift_christmas_boxing_days",
"(",
"self",
",",
"year",
")",
":",
"christmas",
"=",
"date",
"(",
"year",
",",
"12",
",",
"25",
")",
"boxing_day",
"=",
"date",
"(",
"year",
",",
"12",
",",
"26",
")",
"boxing_day_label",
"=",
"\"{} Shift\"",
".",
... | 48.875 | 13.5 |
def reply_to(self) -> Optional[Sequence[AddressHeader]]:
    """The ``Reply-To`` header."""
    try:
        value = self[b'reply-to']
    except KeyError:
        # Header absent from the message.
        return None
    return cast(Sequence[AddressHeader], value)
"def",
"reply_to",
"(",
"self",
")",
"->",
"Optional",
"[",
"Sequence",
"[",
"AddressHeader",
"]",
"]",
":",
"try",
":",
"return",
"cast",
"(",
"Sequence",
"[",
"AddressHeader",
"]",
",",
"self",
"[",
"b'reply-to'",
"]",
")",
"except",
"KeyError",
":",
... | 36.666667 | 17.333333 |
def logged_in():
    """
    Method called by Strava (redirect) that includes parameters.
    - state
    - code
    - error
    """
    error = request.args.get('error')
    state = request.args.get('state')  # NOTE(review): read but currently unused
    if error:
        return render_template('login_error.html', error=error)
    else:
        code = request.args.get('code')
        client = Client()
        # Exchange the OAuth authorization code for an access token.
        access_token = client.exchange_code_for_token(client_id=app.config['STRAVA_CLIENT_ID'],
                                                      client_secret=app.config['STRAVA_CLIENT_SECRET'],
                                                      code=code)
        # Probably here you'd want to store this somewhere -- e.g. in a database.
        strava_athlete = client.get_athlete()
        return render_template('login_results.html', athlete=strava_athlete, access_token=access_token)
"def",
"logged_in",
"(",
")",
":",
"error",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'error'",
")",
"state",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'state'",
")",
"if",
"error",
":",
"return",
"render_template",
"(",
"'login_error.html'",
... | 40 | 25.238095 |
def factorize(self, show_progress=False, compute_w=True, compute_h=True,
              compute_err=True, robust_cluster=3, niter=1, robust_nselect=-1):
    """ Factorize s.t. WH = data

    Parameters
    ----------
    show_progress : bool
        print some extra information to stdout.
        False, default
    compute_h : bool
        iteratively update values for H.
        True, default
    compute_w : bool
        iteratively update values for W.
        default, True
    compute_err : bool
        compute Frobenius norm |data-WH| after each update and store
        it to .ferr[k].
    robust_cluster : int, optional
        set the number of clusters for robust map selection.
        3, default
    robust_nselect : int, optional
        set the number of samples to consider for robust map
        selection.
        -1, default (automatically determine suitable number)

    Updated Values
    --------------
    .W : updated values for W.
    .H : updated values for H.
    .ferr : Frobenius norm |data-WH|.
    """
    self._robust_cluster = robust_cluster
    self._robust_nselect = robust_nselect
    if self._robust_nselect == -1:
        # Heuristic: sample count grows with the log of the column count.
        self._robust_nselect = np.round(np.log(self.data.shape[1])*2)
    # Delegate the actual factorization to the AA base implementation.
    # NOTE(review): the caller-supplied `niter` argument is ignored here --
    # niter=1 is always passed to AA.factorize. Confirm this is intentional.
    AA.factorize(self, niter=1, show_progress=show_progress,
                 compute_w=compute_w, compute_h=compute_h,
                 compute_err=compute_err)
"def",
"factorize",
"(",
"self",
",",
"show_progress",
"=",
"False",
",",
"compute_w",
"=",
"True",
",",
"compute_h",
"=",
"True",
",",
"compute_err",
"=",
"True",
",",
"robust_cluster",
"=",
"3",
",",
"niter",
"=",
"1",
",",
"robust_nselect",
"=",
"-",
... | 40.02439 | 16.292683 |
def reduce_number(num):
    """Reduces the string representation of a number.

    If the number is of the format n.00..., returns n.

    If the decimal portion of the number has a repeating decimal, followed
    by up to two trailing numbers, such as 0.3333333 or 0.343434346, it
    will return just one instance of the repeating decimals: 0.3 or 0.34.
    """
    integer_part, _, decimal_part = str(num).partition(".")
    # No fractional digits (or a bare ".0") means the value is integral.
    if not decimal_part or decimal_part == "0":
        return int(integer_part)
    match = _REPEATING_NUMBER_TRIM_RE.search(decimal_part)
    if not match:
        return num
    from_index = match.start()
    if from_index == 0 and match.group(2) == "0":
        # The whole fraction is a repeating zero: the value is integral.
        return int(integer_part)
    # Keep the non-repeating prefix plus one instance of the repeat group.
    return Decimal(integer_part + "." + decimal_part[:from_index] + match.group(2))
"def",
"reduce_number",
"(",
"num",
")",
":",
"parts",
"=",
"str",
"(",
"num",
")",
".",
"split",
"(",
"\".\"",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
"or",
"parts",
"[",
"1",
"]",
"==",
"\"0\"",
":",
"return",
"int",
"(",
"parts",
"["... | 21.852941 | 26.323529 |
def cartesian_to_spherical_azimuthal(x, y):
    """ Calculates the azimuthal angle in spherical coordinates from Cartesian
    coordinates. The azimuthal angle is in [0,2*pi].

    Parameters
    ----------
    x : {numpy.array, float}
        X-coordinate.
    y : {numpy.array, float}
        Y-coordinate.

    Returns
    -------
    phi : {numpy.array, float}
        The azimuthal angle.
    """
    # arctan2 yields angles in (-pi, pi]; promote a plain int y to float,
    # then wrap negative angles into [0, 2*pi).
    if isinstance(y, int):
        y = float(y)
    return numpy.arctan2(y, x) % (2 * numpy.pi)
"def",
"cartesian_to_spherical_azimuthal",
"(",
"x",
",",
"y",
")",
":",
"y",
"=",
"float",
"(",
"y",
")",
"if",
"isinstance",
"(",
"y",
",",
"int",
")",
"else",
"y",
"phi",
"=",
"numpy",
".",
"arctan2",
"(",
"y",
",",
"x",
")",
"return",
"phi",
... | 25.842105 | 16.526316 |
def simplify_graph(graph):
    """
    strips out everything but connectivity

    Args:
        graph (nx.Graph):

    Returns:
        nx.Graph: new_graph -- same class as the input, nodes relabeled to
            dense integers (sorted order of the originals), all node and
            edge attributes dropped.

    CommandLine:
        python3 -m utool.util_graph simplify_graph --show
        python2 -m utool.util_graph simplify_graph --show

        python2 -c "import networkx as nx; print(nx.__version__)"
        python3 -c "import networkx as nx; print(nx.__version__)"

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> graph = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'),
        >>>                     ('a', 'd'), ('b', 'd'), ('c', 'e'),
        >>>                     ('d', 'e'), ('c', 'e'), ('c', 'd')])
        >>> new_graph = simplify_graph(graph)
        >>> result = ut.repr2(list(new_graph.edges()))
        >>> #adj_list = sorted(list(nx.generate_adjlist(new_graph)))
        >>> #result = ut.repr2(adj_list)
        >>> print(result)
        [(0, 1), (0, 2), (0, 3), (0, 4), (1, 3), (2, 3), (2, 4), (3, 4)]

        ['0 1 2 3 4', '1 3 4', '2 4', '3', '4 3']
    """
    import utool as ut
    # Map each node to its index in sorted order -> dense integer labels.
    nodes = sorted(list(graph.nodes()))
    node_lookup = ut.make_index_lookup(nodes)
    if graph.is_multigraph():
        edges = list(graph.edges(keys=True))
    else:
        edges = list(graph.edges())
    new_nodes = ut.take(node_lookup, nodes)
    if graph.is_multigraph():
        # Preserve multigraph edge keys; attributes become empty dicts.
        new_edges = [(node_lookup[e[0]], node_lookup[e[1]], e[2], {}) for e in edges]
    else:
        new_edges = [(node_lookup[e[0]], node_lookup[e[1]]) for e in edges]
    # Rebuild with the same graph class (Graph/DiGraph/Multi variants).
    cls = graph.__class__
    new_graph = cls()
    new_graph.add_nodes_from(new_nodes)
    new_graph.add_edges_from(new_edges)
    return new_graph
"def",
"simplify_graph",
"(",
"graph",
")",
":",
"import",
"utool",
"as",
"ut",
"nodes",
"=",
"sorted",
"(",
"list",
"(",
"graph",
".",
"nodes",
"(",
")",
")",
")",
"node_lookup",
"=",
"ut",
".",
"make_index_lookup",
"(",
"nodes",
")",
"if",
"graph",
... | 33.294118 | 19.45098 |
def get_collection(self, qs, view_kwargs):
    """Retrieve a collection of objects through sqlalchemy

    :param QueryStringManager qs: a querystring manager to retrieve information from url
    :param dict view_kwargs: kwargs from the resource view
    :return tuple: the number of object and the list of objects
    """
    self.before_get_collection(qs, view_kwargs)
    query = self.query(view_kwargs)
    # Apply filtering and sorting before counting so the total reflects
    # the filtered result set.
    if qs.filters:
        query = self.filter_query(query, qs.filters, self.model)
    if qs.sorting:
        query = self.sort_query(query, qs.sorting)
    # Count prior to pagination: callers need the full (unpaged) total.
    object_count = query.count()
    if getattr(self, 'eagerload_includes', True):
        query = self.eagerload_includes(query, qs)
    query = self.paginate_query(query, qs.pagination)
    collection = query.all()
    collection = self.after_get_collection(collection, qs, view_kwargs)
    return object_count, collection
"def",
"get_collection",
"(",
"self",
",",
"qs",
",",
"view_kwargs",
")",
":",
"self",
".",
"before_get_collection",
"(",
"qs",
",",
"view_kwargs",
")",
"query",
"=",
"self",
".",
"query",
"(",
"view_kwargs",
")",
"if",
"qs",
".",
"filters",
":",
"query"... | 32.344828 | 23.62069 |
def expose(rule, **options):
    """Decorator to add an url rule to a function.

    A single rule is stored as a ``(rule, options)`` tuple; a list or tuple
    of rules is appended as-is.
    """
    def decorator(f):
        if not hasattr(f, "urls"):
            f.urls = []
        new_rules = list(rule) if isinstance(rule, (list, tuple)) else [(rule, options)]
        f.urls.extend(new_rules)
        return f
    return decorator
"def",
"expose",
"(",
"rule",
",",
"*",
"*",
"options",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"if",
"not",
"hasattr",
"(",
"f",
",",
"\"urls\"",
")",
":",
"f",
".",
"urls",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"rule",
",",
"(",
... | 27.25 | 11.583333 |
def _initialize_attributes(model_class, name, bases, attrs):
    """Initialize the attributes of the model.

    Collects every ``Attribute`` descriptor found in the class namespace
    into ``model_class._attributes`` and back-fills each descriptor's
    ``name`` from its attribute key when not set explicitly.

    NOTE(review): uses ``dict.iteritems()``, so this code is Python 2 only.
    """
    model_class._attributes = {}
    for k, v in attrs.iteritems():
        if isinstance(v, Attribute):
            model_class._attributes[k] = v
            v.name = v.name or k
"def",
"_initialize_attributes",
"(",
"model_class",
",",
"name",
",",
"bases",
",",
"attrs",
")",
":",
"model_class",
".",
"_attributes",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"attrs",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"v"... | 40.714286 | 6.857143 |
def active(self):
    """
    Return the currently active :class:`~opentracing.Scope` which
    can be used to access the currently active
    :attr:`Scope.span`.

    :return: the :class:`~opentracing.Scope` that is active,
        or ``None`` if not available.
    """
    context = self._get_context()
    if context:
        return context.active
    # No tornado context available: defer to the base implementation.
    return super(TornadoScopeManager, self).active
"def",
"active",
"(",
"self",
")",
":",
"context",
"=",
"self",
".",
"_get_context",
"(",
")",
"if",
"not",
"context",
":",
"return",
"super",
"(",
"TornadoScopeManager",
",",
"self",
")",
".",
"active",
"return",
"context",
".",
"active"
] | 29.133333 | 17.933333 |
def _process_pong(self):
"""
Process PONG sent by server.
"""
if len(self._pongs) > 0:
future = self._pongs.pop(0)
future.set_result(True)
self._pongs_received += 1
self._pings_outstanding -= 1 | [
"def",
"_process_pong",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"_pongs",
")",
">",
"0",
":",
"future",
"=",
"self",
".",
"_pongs",
".",
"pop",
"(",
"0",
")",
"future",
".",
"set_result",
"(",
"True",
")",
"self",
".",
"_pongs_receive... | 29.444444 | 4.111111 |
def maybe_put_task(self):
    """Enqueue the next task, if there are any waiting."""
    try:
        task = next(self.tasks)
    except StopIteration:
        # Task iterator exhausted: nothing to enqueue.
        return
    log.debug('Putting %s on queue', task)
    self.task_queue.put(task)
"def",
"maybe_put_task",
"(",
"self",
")",
":",
"try",
":",
"task",
"=",
"next",
"(",
"self",
".",
"tasks",
")",
"except",
"StopIteration",
":",
"pass",
"else",
":",
"log",
".",
"debug",
"(",
"'Putting %s on queue'",
",",
"task",
")",
"self",
".",
"tas... | 31 | 13.666667 |
def buildMaskImage(rootname, bitvalue, output, extname='DQ', extver=1):
    """ Builds mask image from rootname's DQ array.

    If there is no valid 'DQ' array in image, then return
    an empty string.

    :param rootname: name of the input image whose DQ extension is read
    :param bitvalue: bit mask handed to ``buildMask`` to build the mask array
    :param output: filename for the output mask image
    :param extname: name of the data-quality extension (default 'DQ')
    :param extver: version of the extension to read (default 1)
    :return: the name of the mask image written out, or None on failure
    """
    # If no bitvalue is set or rootname given, assume no mask is desired
    # However, this name would be useful as the output mask from
    # other processing, such as MultiDrizzle, so return it anyway.
    #if bitvalue == None or rootname == None:
    #    return None

    # build output name
    maskname = output

    # If an old version of the maskfile was present, remove it and rebuild it.
    if fileutil.findFile(maskname):
        fileutil.removeFile(maskname)

    # Open input file with DQ array
    fdq = fileutil.openImage(rootname, mode='readonly', memmap=False)
    try:
        _extn = fileutil.findExtname(fdq, extname, extver=extver)
        if _extn is not None:
            # Read in DQ array
            dqarr = fdq[_extn].data
        else:
            dqarr = None

        # For the case where there is no DQ array,
        # create a mask image of all ones.
        if dqarr is None:
            # We need to get the dimensions of the output DQ array
            # Since the DQ array is non-existent, look for the SCI extension
            _sci_extn = fileutil.findExtname(fdq, 'SCI', extver=extver)
            if _sci_extn is not None:
                _shape = fdq[_sci_extn].data.shape
                dqarr = np.zeros(_shape, dtype=np.uint16)
            else:
                raise Exception

        # Build mask array from DQ array
        maskarr = buildMask(dqarr, bitvalue)

        # Write out the mask file as simple FITS file
        fmask = fits.open(maskname, mode='append', memmap=False)
        maskhdu = fits.PrimaryHDU(data=maskarr)
        fmask.append(maskhdu)

        # Close files
        fmask.close()
        del fmask
        # Return the name of the mask image written out
        return maskname
    except Exception:
        # Fix: the original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit; keep the best-effort behavior for real errors only.
        # Safeguard against leaving behind an incomplete file.
        if fileutil.findFile(maskname):
            os.remove(maskname)
        _errstr = "\nWarning: Problem creating MASK file for "+rootname+".\n"
        #raise IOError, _errstr
        print(_errstr)
        return None
    finally:
        # The original closed fdq separately on both paths; a single
        # `finally` guarantees the input file is always released.
        fdq.close()
        del fdq
"def",
"buildMaskImage",
"(",
"rootname",
",",
"bitvalue",
",",
"output",
",",
"extname",
"=",
"'DQ'",
",",
"extver",
"=",
"1",
")",
":",
"# If no bitvalue is set or rootname given, assume no mask is desired",
"# However, this name would be useful as the output mask from",
"#... | 34.318182 | 19.787879 |
def comments(self):  # pylint: disable=E0202
    """Return forest of comments, with top-level comments as tree roots.

    May contain instances of MoreComment objects. To easily replace these
    objects with Comment objects, use the replace_more_comments method then
    fetch this attribute. Use comment replies to walk down the tree. To get
    an unnested, flat list of comments from this attribute use
    helpers.flatten_tree.
    """
    # Lazily fetch the comment forest on first access. The assignment to
    # `self.comments` presumably routes through a companion property setter
    # that populates self._comments -- confirm in the enclosing class.
    if self._comments is None:
        self.comments = Submission.from_url(  # pylint: disable=W0212
            self.reddit_session, self._api_link, comments_only=True)
    return self._comments
"def",
"comments",
"(",
"self",
")",
":",
"# pylint: disable=E0202",
"if",
"self",
".",
"_comments",
"is",
"None",
":",
"self",
".",
"comments",
"=",
"Submission",
".",
"from_url",
"(",
"# pylint: disable=W0212",
"self",
".",
"reddit_session",
",",
"self",
"."... | 47.785714 | 22.714286 |
def p_return_expr(p):
    """ statement : RETURN expr
    """
    # PLY grammar action: validates that a RETURN with a value appears in a
    # FUNCTION (not a SUB) and that the value's type matches the function's
    # declared return type; emits a typecast RETURN sentence on success.
    if not FUNCTION_LEVEL:  # At least one level deep inside a FUNCTION/SUB
        syntax_error(p.lineno(1), 'Syntax Error: Returning value out of FUNCTION')
        p[0] = None
        return

    if FUNCTION_LEVEL[-1].kind is None:  # This function was not correctly declared.
        p[0] = None
        return

    if FUNCTION_LEVEL[-1].kind != KIND.function:
        # SUBs have no return value.
        syntax_error(p.lineno(1), 'Syntax Error: SUBs cannot return a value')
        p[0] = None
        return

    if is_numeric(p[2]) and FUNCTION_LEVEL[-1].type_ == TYPE.string:
        syntax_error(p.lineno(2), 'Type Error: Function must return a string, not a numeric value')
        p[0] = None
        return

    if not is_numeric(p[2]) and FUNCTION_LEVEL[-1].type_ != TYPE.string:
        syntax_error(p.lineno(2), 'Type Error: Function must return a numeric value, not a string')
        p[0] = None
        return

    # Coerce the returned expression to the function's declared type.
    p[0] = make_sentence('RETURN', FUNCTION_LEVEL[-1],
                         make_typecast(FUNCTION_LEVEL[-1].type_, p[2],
                                       p.lineno(1)))
"def",
"p_return_expr",
"(",
"p",
")",
":",
"if",
"not",
"FUNCTION_LEVEL",
":",
"# At less one level",
"syntax_error",
"(",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"'Syntax Error: Returning value out of FUNCTION'",
")",
"p",
"[",
"0",
"]",
"=",
"None",
"return... | 35.866667 | 27.533333 |
def delete(filething):
    """ delete(filething)

    Arguments:
        filething (filething)
    Raises:
        mutagen.MutagenError

    Remove tags from a file.
    """
    theora = OggTheora(filething)
    # Parsing consumed the stream; rewind before rewriting in place.
    filething.fileobj.seek(0)
    theora.delete(filething)
"def",
"delete",
"(",
"filething",
")",
":",
"t",
"=",
"OggTheora",
"(",
"filething",
")",
"filething",
".",
"fileobj",
".",
"seek",
"(",
"0",
")",
"t",
".",
"delete",
"(",
"filething",
")"
] | 17.428571 | 19.142857 |
def pdf(self, f, y, Y_metadata=None):
    """
    Evaluates the link function link(f) then computes the likelihood (pdf)
    using it.

    .. math:
        p(y|\\lambda(f))

    :param f: latent variables f
    :type f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: Y_metadata which is not used in student t distribution - not used
    :returns: likelihood evaluated for this point
    :rtype: float
    """
    # An identity link needs no transformation of f before evaluation.
    if not isinstance(self.gp_link, link_functions.Identity):
        f = self.gp_link.transf(f)
    return self.pdf_link(f, y, Y_metadata=Y_metadata)
"def",
"pdf",
"(",
"self",
",",
"f",
",",
"y",
",",
"Y_metadata",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"gp_link",
",",
"link_functions",
".",
"Identity",
")",
":",
"return",
"self",
".",
"pdf_link",
"(",
"f",
",",
"y",
",",
... | 35.9 | 20.3 |
def list_tokens(opts):
    '''
    List all tokens in the store.

    :param opts: Salt master config options
    :returns: List of dicts (tokens)
    '''
    tokens = []
    # Every file anywhere under token_dir is treated as a token.
    for _dirpath, _dirnames, filenames in salt.utils.path.os_walk(opts['token_dir']):
        tokens.extend(filenames)
    return tokens
"def",
"list_tokens",
"(",
"opts",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"(",
"dirpath",
",",
"dirnames",
",",
"filenames",
")",
"in",
"salt",
".",
"utils",
".",
"path",
".",
"os_walk",
"(",
"opts",
"[",
"'token_dir'",
"]",
")",
":",
"for",
"token... | 26.583333 | 21.416667 |
def cwtmorlet(points, width):
    """complex morlet wavelet function compatible with scipy.signal.cwt

    Parameters: points: int
                    Number of points in `vector`.
                width: scalar
                    Width parameter of wavelet.
                    Equals (sample rate / fundamental frequency of wavelet)
    Returns:    `vector`: complex-valued ndarray of shape (points,)
    """
    omega = 5.0
    # Scale factor derived from the requested width and fixed omega.
    scaling = points / (2.0 * omega * width)
    return wavelets.morlet(points, omega, scaling, complete=True)
"def",
"cwtmorlet",
"(",
"points",
",",
"width",
")",
":",
"omega",
"=",
"5.0",
"s",
"=",
"points",
"/",
"(",
"2.0",
"*",
"omega",
"*",
"width",
")",
"return",
"wavelets",
".",
"morlet",
"(",
"points",
",",
"omega",
",",
"s",
",",
"complete",
"=",
... | 42.5 | 13 |
def main():
    '''main routine

    Reads resource group and VMSS name from argv, loads Azure credentials
    from azurermconfig.json, and prints each VM instance's update/fault
    domain placement.
    '''
    # process arguments
    if len(sys.argv) < 3:
        usage()
    rgname = sys.argv[1]
    vmss_name = sys.argv[2]

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermonfig.json in current folder')
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    # Authenticate and fetch the per-VM instance views for the scale set.
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    instanceviewlist = azurerm.list_vmss_vm_instance_view(access_token, subscription_id, rgname,
                                                          vmss_name)

    # Print instance id plus update domain (UD) and fault domain (FD).
    for vmi in instanceviewlist['value']:
        instance_id = vmi['instanceId']
        upgrade_domain = vmi['properties']['instanceView']['platformUpdateDomain']
        fault_domain = vmi['properties']['instanceView']['platformFaultDomain']
        print('Instance ID: ' + instance_id + ', UD: ' + str(upgrade_domain) + ', FD: '
              + str(fault_domain))
"def",
"main",
"(",
")",
":",
"# process arguments",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"<",
"3",
":",
"usage",
"(",
")",
"rgname",
"=",
"sys",
".",
"argv",
"[",
"1",
"]",
"vmss_name",
"=",
"sys",
".",
"argv",
"[",
"2",
"]",
"# Load Azure... | 36 | 23.625 |
def get_adjustments(self,
                    zero_qtr_data,
                    requested_qtr_data,
                    last_per_qtr,
                    dates,
                    assets,
                    columns,
                    **kwargs):
    """
    Creates an AdjustedArray from the given estimates data for the given
    dates.

    Parameters
    ----------
    zero_qtr_data : pd.DataFrame
        The 'time zero' data for each calendar date per sid.
    requested_qtr_data : pd.DataFrame
        The requested quarter data for each calendar date per sid.
    last_per_qtr : pd.DataFrame
        A DataFrame with a column MultiIndex of [self.estimates.columns,
        normalized_quarters, sid] that allows easily getting the timeline
        of estimates for a particular sid for a particular quarter.
    dates : pd.DatetimeIndex
        The calendar dates for which estimates data is requested.
    assets : pd.Int64Index
        An index of all the assets from the raw data.
    columns : list of BoundColumn
        The columns for which adjustments need to be calculated.
    kwargs :
        Additional keyword arguments that should be forwarded to
        `get_adjustments_for_sid` and to be used in computing adjustments
        for each sid.

    Returns
    -------
    col_to_all_adjustments : dict[int -> AdjustedArray]
        A dictionary of all adjustments that should be applied.
    """
    zero_qtr_data.sort_index(inplace=True)
    # Here we want to get the LAST record from each group of records
    # corresponding to a single quarter. This is to ensure that we select
    # the most up-to-date event date in case the event date changes.
    quarter_shifts = zero_qtr_data.groupby(
        level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
    ).nth(-1)
    col_to_all_adjustments = {}
    # Map each sid to its column index within the output arrays.
    sid_to_idx = dict(zip(assets, range(len(assets))))
    # get_adjustments_for_sid mutates col_to_all_adjustments in place,
    # once per sid group; the groupby return value is discarded.
    quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
        self.get_adjustments_for_sid,
        dates,
        requested_qtr_data,
        last_per_qtr,
        sid_to_idx,
        columns,
        col_to_all_adjustments,
        **kwargs
    )
    return col_to_all_adjustments
"def",
"get_adjustments",
"(",
"self",
",",
"zero_qtr_data",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"dates",
",",
"assets",
",",
"columns",
",",
"*",
"*",
"kwargs",
")",
":",
"zero_qtr_data",
".",
"sort_index",
"(",
"inplace",
"=",
"True",
")",... | 38.666667 | 18.7 |
def _get_config_file_in_folder(cls, path):
"""Look for a configuration file in `path`.
If exists return its full path, otherwise None.
"""
if os.path.isfile(path):
path = os.path.dirname(path)
for fn in cls.PROJECT_CONFIG_FILES:
config = RawConfigParser()
full_path = os.path.join(path, fn)
if config.read(full_path) and cls._get_section_name(config):
return full_path | [
"def",
"_get_config_file_in_folder",
"(",
"cls",
",",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"for",
"fn",
"in",
"cls",
".",
"PROJECT_CONFIG_FI... | 33 | 14 |
def _parse_settings_bond_1(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond1
    (active-backup bonding).

    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.

    :param opts: user-supplied bonding options for the interface
    :param iface: interface name (used only for error/log messages)
    :param bond_def: mapping of default values for missing options
    :return: dict of validated bond settings with mode fixed to '1'
    '''
    bond = {'mode': '1'}

    # Integer-valued timing options: fall back to the defaults when absent,
    # raise with a helpful message when not parseable as an integer.
    for binding in ['miimon', 'downdelay', 'updelay']:
        if binding in opts:
            try:
                int(opts[binding])
                bond.update({binding: opts[binding]})
            except Exception:
                _raise_error_iface(iface, binding, ['integer'])
        else:
            _log_default_iface(iface, binding, bond_def[binding])
            bond.update({binding: bond_def[binding]})

    # use_carrier accepts the module's truthy/falsy spellings and is
    # normalized to '1'/'0'.
    if 'use_carrier' in opts:
        if opts['use_carrier'] in _CONFIG_TRUE:
            bond.update({'use_carrier': '1'})
        elif opts['use_carrier'] in _CONFIG_FALSE:
            bond.update({'use_carrier': '0'})
        else:
            valid = _CONFIG_TRUE + _CONFIG_FALSE
            _raise_error_iface(iface, 'use_carrier', valid)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond.update({'use_carrier': bond_def['use_carrier']})

    # Optional primary slave; passed through unvalidated.
    if 'primary' in opts:
        bond.update({'primary': opts['primary']})
    return bond
"def",
"_parse_settings_bond_1",
"(",
"opts",
",",
"iface",
",",
"bond_def",
")",
":",
"bond",
"=",
"{",
"'mode'",
":",
"'1'",
"}",
"for",
"binding",
"in",
"[",
"'miimon'",
",",
"'downdelay'",
",",
"'updelay'",
"]",
":",
"if",
"binding",
"in",
"opts",
... | 34.054054 | 20.972973 |
def create_prj(self, atypes=None, deps=None):
    """Create and return a new project

    :param atypes: add the given atypes to the project
    :type atypes: list | None
    :param deps: add the given departmetns to the project
    :type deps: list | None
    :returns: The created project or None
    :rtype: None | :class:`jukeboxcore.djadapter.models.Project`
    :raises: None
    """
    # Modal dialog; dialog.project is None when the user cancels.
    dialog = ProjectCreatorDialog(parent=self)
    dialog.exec_()
    prj = dialog.project
    if prj and atypes:
        for at in atypes:
            at.projects.add(prj)
            at.save()
    if prj and deps:
        for dep in deps:
            dep.projects.add(prj)
            dep.save()
    if prj:
        # Mirror the new project into the tree model shown in the UI.
        prjdata = djitemdata.ProjectItemData(prj)
        treemodel.TreeItem(prjdata, self.prjs_model.root)
    return prj
"def",
"create_prj",
"(",
"self",
",",
"atypes",
"=",
"None",
",",
"deps",
"=",
"None",
")",
":",
"dialog",
"=",
"ProjectCreatorDialog",
"(",
"parent",
"=",
"self",
")",
"dialog",
".",
"exec_",
"(",
")",
"prj",
"=",
"dialog",
".",
"project",
"if",
"p... | 34.346154 | 13.923077 |
def comparator(self, x, y):
    '''
    simple comparator method

    Orders entries by the position of their element name (the part of
    x[0]/y[0] before the first '-') within self.stable_names; names that
    are not found compare as position 0.
    '''
    pos_x = 0
    pos_y = 0
    name_x = x[0].split('-')[0]
    name_y = y[0].split('-')[0]
    # Scan the full list (no break) so the last match wins, matching the
    # original behavior.
    for idx, stable in enumerate(self.stable_names):
        if stable == name_x:
            pos_x = idx
        if stable == name_y:
            pos_y = idx
    if pos_x > pos_y:
        return 1
    if pos_x < pos_y:
        return -1
    return 0
"def",
"comparator",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"indX",
"=",
"0",
"indY",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"stable_names",
")",
")",
":",
"if",
"self",
".",
"stable_names",
"[",
"i",
"]",
"==",
... | 22.05 | 22.25 |
def detach(self):
    """
    Detach from parent.

    @return: This element removed from its parent's
        child list and I{parent}=I{None}
    @rtype: L{Element}
    """
    parent = self.parent
    if parent is not None:
        # Guard against inconsistent trees where we are not listed.
        if self in parent.children:
            parent.children.remove(self)
        self.parent = None
    return self
"def",
"detach",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent",
"is",
"not",
"None",
":",
"if",
"self",
"in",
"self",
".",
"parent",
".",
"children",
":",
"self",
".",
"parent",
".",
"children",
".",
"remove",
"(",
"self",
")",
"self",
".",
... | 30.666667 | 9.833333 |
def play(state):
""" Play sound for a given state.
:param state: a State value.
"""
filename = None
if state == SoundService.State.welcome:
filename = "pad_glow_welcome1.wav"
elif state == SoundService.State.goodbye:
filename = "pad_glow_power_off.wav"
elif state == SoundService.State.hotword_detected:
filename = "pad_soft_on.wav"
elif state == SoundService.State.asr_text_captured:
filename = "pad_soft_off.wav"
elif state == SoundService.State.error:
filename = "music_marimba_error_chord_2x.wav"
if filename is not None:
AudioPlayer.play_async("{}/{}".format(ABS_SOUND_DIR, filename)) | [
"def",
"play",
"(",
"state",
")",
":",
"filename",
"=",
"None",
"if",
"state",
"==",
"SoundService",
".",
"State",
".",
"welcome",
":",
"filename",
"=",
"\"pad_glow_welcome1.wav\"",
"elif",
"state",
"==",
"SoundService",
".",
"State",
".",
"goodbye",
":",
... | 38.157895 | 13.631579 |
def prepare_adiabatic_limit(slh, k=None):
"""Prepare the adiabatic elimination on an SLH object
Args:
slh: The SLH object to take the limit for
k: The scaling parameter $k \rightarrow \infty$. The default is a
positive symbol 'k'
Returns:
tuple: The objects ``Y, A, B, F, G, N``
necessary to compute the limiting system.
"""
if k is None:
k = symbols('k', positive=True)
Ld = slh.L.dag()
LdL = (Ld * slh.L)[0, 0]
K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()
N = slh.S.dag()
B, A, Y = K.series_expand(k, 0, 2)
G, F = Ld.series_expand(k, 0, 1)
return Y, A, B, F, G, N | [
"def",
"prepare_adiabatic_limit",
"(",
"slh",
",",
"k",
"=",
"None",
")",
":",
"if",
"k",
"is",
"None",
":",
"k",
"=",
"symbols",
"(",
"'k'",
",",
"positive",
"=",
"True",
")",
"Ld",
"=",
"slh",
".",
"L",
".",
"dag",
"(",
")",
"LdL",
"=",
"(",
... | 29.818182 | 16.363636 |
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
:param must_create:
"""
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
obj.save() | [
"def",
"save",
"(",
"self",
",",
"must_create",
"=",
"False",
")",
":",
"if",
"self",
".",
"session_key",
"is",
"None",
":",
"return",
"self",
".",
"create",
"(",
")",
"data",
"=",
"self",
".",
"_get_session",
"(",
"no_load",
"=",
"must_create",
")",
... | 36.285714 | 17.285714 |
def create_combination(list_of_sentences):
"""Generates all possible pair combinations for the input list of sentences.
For example:
input = ["paraphrase1", "paraphrase2", "paraphrase3"]
output = [("paraphrase1", "paraphrase2"),
("paraphrase1", "paraphrase3"),
("paraphrase2", "paraphrase3")]
Args:
list_of_sentences: the list of input sentences.
Returns:
the list of all possible sentence pairs.
"""
num_sentences = len(list_of_sentences) - 1
combinations = []
for i, _ in enumerate(list_of_sentences):
if i == num_sentences:
break
num_pairs = num_sentences - i
populated = num_pairs * [list_of_sentences[i]]
zipped = list(zip(populated, list_of_sentences[i + 1:]))
combinations += zipped
return combinations | [
"def",
"create_combination",
"(",
"list_of_sentences",
")",
":",
"num_sentences",
"=",
"len",
"(",
"list_of_sentences",
")",
"-",
"1",
"combinations",
"=",
"[",
"]",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"list_of_sentences",
")",
":",
"if",
"i",
"=... | 29.538462 | 16.576923 |
def register_service(self, short_name, long_name, allow_duplicate=True):
"""Register a new service with the service manager.
Args:
short_name (string): A unique short name for this service that functions
as an id
long_name (string): A user facing name for this service
allow_duplicate (boolean): Don't throw an error if this service is already
registered. This is important if the service is preregistered for example.
Raises:
ArgumentError: if the short_name is already taken
"""
self._loop.run_coroutine(self._client.register_service(short_name, long_name, allow_duplicate)) | [
"def",
"register_service",
"(",
"self",
",",
"short_name",
",",
"long_name",
",",
"allow_duplicate",
"=",
"True",
")",
":",
"self",
".",
"_loop",
".",
"run_coroutine",
"(",
"self",
".",
"_client",
".",
"register_service",
"(",
"short_name",
",",
"long_name",
... | 49 | 30.857143 |
def load_paired_notebook(notebook, fmt, nb_file, log):
"""Update the notebook with the inputs and outputs of the most recent paired files"""
formats = notebook.metadata.get('jupytext', {}).get('formats')
if not formats:
raise ValueError("'{}' is not a paired notebook".format(nb_file))
max_mtime_inputs = None
max_mtime_outputs = None
latest_inputs = None
latest_outputs = None
for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats):
if not os.path.isfile(alt_path):
continue
info = os.lstat(alt_path)
if not max_mtime_inputs or info.st_mtime > max_mtime_inputs:
max_mtime_inputs = info.st_mtime
latest_inputs, input_fmt = alt_path, alt_fmt
if alt_path.endswith('.ipynb'):
if not max_mtime_outputs or info.st_mtime > max_mtime_outputs:
max_mtime_outputs = info.st_mtime
latest_outputs = alt_path
if latest_outputs and latest_outputs != latest_inputs:
log("[jupytext] Loading input cells from '{}'".format(latest_inputs))
inputs = notebook if latest_inputs == nb_file else readf(latest_inputs, input_fmt)
check_file_version(inputs, latest_inputs, latest_outputs)
log("[jupytext] Loading output cells from '{}'".format(latest_outputs))
outputs = notebook if latest_outputs == nb_file else readf(latest_outputs)
combine_inputs_with_outputs(inputs, outputs, input_fmt)
return inputs, latest_inputs, latest_outputs
log("[jupytext] Loading notebook from '{}'".format(latest_inputs))
if latest_inputs != nb_file:
notebook = readf(latest_inputs, input_fmt)
return notebook, latest_inputs, latest_outputs | [
"def",
"load_paired_notebook",
"(",
"notebook",
",",
"fmt",
",",
"nb_file",
",",
"log",
")",
":",
"formats",
"=",
"notebook",
".",
"metadata",
".",
"get",
"(",
"'jupytext'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'formats'",
")",
"if",
"not",
"formats",... | 46 | 21.594595 |
def _get_files():
"""General script to download file from online sources. Each remote file
should be specified in the list of dict REMOTE_FILES. Each entry in
REMOTE_FILES contains:
filename : the filename which is used by the test scripts
cached : the filename which is stored in the cache directory
url : the remote url. If str, it's a direct download. If it's a list, it's
run as a command.
zipped : if None, then the file is not zipped. If True, it extracts all
the files (but be careful about the folder name). If str, it extracts only
that specific file.
Returns
-------
returncode : int
code to send to shell (TODO: make sure you get 1 with exceptions)
"""
for remote in REMOTE_FILES:
final_file = DATA_PATH / remote['filename']
if not final_file.exists():
temp_file = DOWNLOADS_PATH / remote['cached']
if not temp_file.exists():
if remote['url'] is None:
print('missing URL, please contact developers')
return 1
elif isinstance(remote['url'], list):
print('Running: ' + ' '.join(remote['url']))
run(remote['url'])
else:
print('Downloading from ' + remote['url'])
_urlretrieve(remote['url'], temp_file)
if remote['zipped'] is None:
print('Copying ' + str(temp_file) + ' to ' + str(final_file))
copyfile(str(temp_file), str(final_file)) # or maybe symlink
elif remote['zipped'] is True: # explicit testing
with ZipFile(str(temp_file)) as zf:
print('Extracting all files in ' + remote['cached'] + ':\n\t' + '\n\t'.join(zf.namelist()))
zf.extractall(path=str(DATA_PATH))
else:
print('Extracting file ' + remote['zipped'] + ' to ' +
str(final_file))
with ZipFile(str(temp_file)) as zf:
extracted = Path(
zf.extract(remote['zipped'], path=str(DOWNLOADS_PATH)))
extracted.rename(final_file)
return 0 | [
"def",
"_get_files",
"(",
")",
":",
"for",
"remote",
"in",
"REMOTE_FILES",
":",
"final_file",
"=",
"DATA_PATH",
"/",
"remote",
"[",
"'filename'",
"]",
"if",
"not",
"final_file",
".",
"exists",
"(",
")",
":",
"temp_file",
"=",
"DOWNLOADS_PATH",
"/",
"remote... | 40.37037 | 23.12963 |
def parse(name, **kwargs):
""" Parse a C/C++ file
"""
idx = clang.cindex.Index.create()
assert os.path.exists(name)
tu = idx.parse(name, **kwargs)
return _ensure_parse_valid(tu) | [
"def",
"parse",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"idx",
"=",
"clang",
".",
"cindex",
".",
"Index",
".",
"create",
"(",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"name",
")",
"tu",
"=",
"idx",
".",
"parse",
"(",
"name"... | 27.857143 | 5.428571 |
def _generate_notebook_by_difficulty_body(notebook_object, dict_by_difficulty):
"""
Internal function that is used for generation of the page where notebooks are organized by
difficulty level.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the body will be created.
dict_by_difficulty : dict
Global Dictionary that groups Notebooks names/files by difficulty level.
"""
difficulty_keys = list(dict_by_difficulty.keys())
difficulty_keys.sort()
for difficulty in difficulty_keys:
markdown_cell = STAR_TABLE_HEADER
markdown_cell = _set_star_value(markdown_cell, int(difficulty))
for notebook_file in dict_by_difficulty[str(difficulty)]:
split_path = notebook_file.split("/")
notebook_type = split_path[-2]
notebook_name = split_path[-1].split("&")[0]
notebook_title = split_path[-1].split("&")[1]
markdown_cell += "\n\t<tr>\n\t\t<td width='20%' class='header_image_color_" + \
str(NOTEBOOK_KEYS[notebook_type]) + "'><img " \
"src='../../images/icons/" + notebook_type.title() +\
".png' width='15%'>\n\t\t</td>"
markdown_cell += "\n\t\t<td width='60%' class='center_cell open_cell_light'>" + \
notebook_title + "\n\t\t</td>"
markdown_cell += "\n\t\t<td width='20%' class='center_cell'>\n\t\t\t<a href='" \
"../" + notebook_type.title() + "/" + notebook_name + \
"'><div class='file_icon'></div></a>\n\t\t</td>\n\t</tr>"
markdown_cell += "</table>"
# ==================== Insertion of HTML table in a new Notebook cell ======================
notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell)) | [
"def",
"_generate_notebook_by_difficulty_body",
"(",
"notebook_object",
",",
"dict_by_difficulty",
")",
":",
"difficulty_keys",
"=",
"list",
"(",
"dict_by_difficulty",
".",
"keys",
"(",
")",
")",
"difficulty_keys",
".",
"sort",
"(",
")",
"for",
"difficulty",
"in",
... | 47.425 | 27.675 |
def log_request(
self, request: str, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a request.
Args:
request: The JSON-RPC request string.
trim_log_values: Log an abbreviated version of the request.
"""
return log_(request, request_log, "info", trim=trim_log_values, **kwargs) | [
"def",
"log_request",
"(",
"self",
",",
"request",
":",
"str",
",",
"trim_log_values",
":",
"bool",
"=",
"False",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"None",
":",
"return",
"log_",
"(",
"request",
",",
"request_log",
",",
"\"info\"",
",",
... | 32.727273 | 22.545455 |
def update_path(self):
"""
Tries to update the $PATH automatically.
"""
if WINDOWS:
return self.add_to_windows_path()
# Updating any profile we can on UNIX systems
export_string = self.get_export_string()
addition = "\n{}\n".format(export_string)
updated = []
profiles = self.get_unix_profiles()
for profile in profiles:
if not os.path.exists(profile):
continue
with open(profile, "r") as f:
content = f.read()
if addition not in content:
with open(profile, "a") as f:
f.write(addition)
updated.append(os.path.relpath(profile, HOME)) | [
"def",
"update_path",
"(",
"self",
")",
":",
"if",
"WINDOWS",
":",
"return",
"self",
".",
"add_to_windows_path",
"(",
")",
"# Updating any profile we can on UNIX systems",
"export_string",
"=",
"self",
".",
"get_export_string",
"(",
")",
"addition",
"=",
"\"\\n{}\\n... | 27.923077 | 15.769231 |
def distinct_column_values_at_locus(
self,
column,
feature,
contig,
position,
end=None,
strand=None):
"""
Gather all the distinct values for a property/column at some specified
locus.
Parameters
----------
column : str
Which property are we getting the values of.
feature : str
Which type of entry (e.g. transcript, exon, gene) is the property
associated with?
contig : str
Chromosome or unplaced contig name
position : int
Chromosomal position
end : int, optional
End position of a range, if unspecified assume we're only looking
at the single given position.
strand : str, optional
Either the positive ('+') or negative strand ('-'). If unspecified
then check for values on either strand.
"""
return self.column_values_at_locus(
column,
feature,
contig,
position,
end=end,
strand=strand,
distinct=True,
sorted=True) | [
"def",
"distinct_column_values_at_locus",
"(",
"self",
",",
"column",
",",
"feature",
",",
"contig",
",",
"position",
",",
"end",
"=",
"None",
",",
"strand",
"=",
"None",
")",
":",
"return",
"self",
".",
"column_values_at_locus",
"(",
"column",
",",
"feature... | 26.636364 | 20.545455 |
def normpath(path):
"""
Normalize ``path``, collapsing redundant separators and up-level refs.
"""
scheme, netloc, path_ = parse(path)
return unparse(scheme, netloc, os.path.normpath(path_)) | [
"def",
"normpath",
"(",
"path",
")",
":",
"scheme",
",",
"netloc",
",",
"path_",
"=",
"parse",
"(",
"path",
")",
"return",
"unparse",
"(",
"scheme",
",",
"netloc",
",",
"os",
".",
"path",
".",
"normpath",
"(",
"path_",
")",
")"
] | 34.166667 | 12.5 |
def cmd_changealt(self, args):
'''change target altitude'''
if len(args) < 1:
print("usage: changealt <relaltitude>")
return
relalt = float(args[0])
self.master.mav.mission_item_send(self.settings.target_system,
self.settings.target_component,
0,
3,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
3, 1, 0, 0, 0, 0,
0, 0, relalt)
print("Sent change altitude command for %.1f meters" % relalt) | [
"def",
"cmd_changealt",
"(",
"self",
",",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"<",
"1",
":",
"print",
"(",
"\"usage: changealt <relaltitude>\"",
")",
"return",
"relalt",
"=",
"float",
"(",
"args",
"[",
"0",
"]",
")",
"self",
".",
"master",... | 48.928571 | 17.5 |
def read_fits_spec(filename, ext=1, wave_col='WAVELENGTH', flux_col='FLUX',
wave_unit=u.AA, flux_unit=units.FLAM):
"""Read FITS spectrum.
Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2``
keywords, respectively, from data table (not primary) header.
If these keywords are not present, units are taken from
``wave_unit`` and ``flux_unit`` instead.
Parameters
----------
filename : str or file pointer
Spectrum file name or pointer.
ext: int
FITS extension with table data. Default is 1.
wave_col, flux_col : str
Wavelength and flux column names (case-insensitive).
wave_unit, flux_unit : str or `~astropy.units.core.Unit`
Wavelength and flux units, which default to Angstrom and FLAM,
respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2``
keywords are not present in table (not primary) header.
Returns
-------
header : dict
Primary header only. Extension header is discarded.
wavelengths, fluxes : `~astropy.units.quantity.Quantity`
Wavelength and flux of the spectrum.
"""
fs = fits.open(filename)
header = dict(fs[str('PRIMARY')].header)
wave_dat = fs[ext].data.field(wave_col).copy()
flux_dat = fs[ext].data.field(flux_col).copy()
fits_wave_unit = fs[ext].header.get('TUNIT1')
fits_flux_unit = fs[ext].header.get('TUNIT2')
if fits_wave_unit is not None:
try:
wave_unit = units.validate_unit(fits_wave_unit)
except (exceptions.SynphotError, ValueError) as e: # pragma: no cover
warnings.warn(
'{0} from FITS header is not valid wavelength unit, using '
'{1}: {2}'.format(fits_wave_unit, wave_unit, e),
AstropyUserWarning)
if fits_flux_unit is not None:
try:
flux_unit = units.validate_unit(fits_flux_unit)
except (exceptions.SynphotError, ValueError) as e: # pragma: no cover
warnings.warn(
'{0} from FITS header is not valid flux unit, using '
'{1}: {2}'.format(fits_flux_unit, flux_unit, e),
AstropyUserWarning)
wave_unit = units.validate_unit(wave_unit)
flux_unit = units.validate_unit(flux_unit)
wavelengths = wave_dat * wave_unit
fluxes = flux_dat * flux_unit
if isinstance(filename, str):
fs.close()
return header, wavelengths, fluxes | [
"def",
"read_fits_spec",
"(",
"filename",
",",
"ext",
"=",
"1",
",",
"wave_col",
"=",
"'WAVELENGTH'",
",",
"flux_col",
"=",
"'FLUX'",
",",
"wave_unit",
"=",
"u",
".",
"AA",
",",
"flux_unit",
"=",
"units",
".",
"FLAM",
")",
":",
"fs",
"=",
"fits",
"."... | 34.913043 | 21.652174 |
def publish_topology_description_changed(self, previous_description,
new_description, topology_id):
"""Publish a TopologyDescriptionChangedEvent to all topology listeners.
:Parameters:
- `previous_description`: The previous topology description.
- `new_description`: The new topology description.
- `topology_id`: A unique identifier for the topology this server
is a part of.
"""
event = TopologyDescriptionChangedEvent(previous_description,
new_description, topology_id)
for subscriber in self.__topology_listeners:
try:
subscriber.description_changed(event)
except Exception:
_handle_exception() | [
"def",
"publish_topology_description_changed",
"(",
"self",
",",
"previous_description",
",",
"new_description",
",",
"topology_id",
")",
":",
"event",
"=",
"TopologyDescriptionChangedEvent",
"(",
"previous_description",
",",
"new_description",
",",
"topology_id",
")",
"f... | 47.647059 | 20.705882 |
def _appendComponent(self, baseGlyph, transformation=None, identifier=None, **kwargs):
"""
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
"""
pointPen = self.getPointPen()
pointPen.addComponent(baseGlyph, transformation=transformation, identifier=identifier)
return self.components[-1] | [
"def",
"_appendComponent",
"(",
"self",
",",
"baseGlyph",
",",
"transformation",
"=",
"None",
",",
"identifier",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"pointPen",
"=",
"self",
".",
"getPointPen",
"(",
")",
"pointPen",
".",
"addComponent",
"(",
... | 38 | 17.75 |
def _labeled_uniform_sample(self, sample_size):
"""sample labeled entries uniformly"""
labeled_entries = self.dataset.get_labeled_entries()
samples = [labeled_entries[
self.random_state_.randint(0, len(labeled_entries))
]for _ in range(sample_size)]
return Dataset(*zip(*samples)) | [
"def",
"_labeled_uniform_sample",
"(",
"self",
",",
"sample_size",
")",
":",
"labeled_entries",
"=",
"self",
".",
"dataset",
".",
"get_labeled_entries",
"(",
")",
"samples",
"=",
"[",
"labeled_entries",
"[",
"self",
".",
"random_state_",
".",
"randint",
"(",
"... | 46.571429 | 8.571429 |
def envelope(component, **kwargs):
"""
Create parameters for an envelope (usually will be attached to two stars solRad
that they can share a common-envelope)
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_component`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
params = []
params += [FloatParameter(qualifier='abun', value=kwargs.get('abun', 0.), default_unit=u.dimensionless_unscaled, description='Metallicity')] # TODO: correct units??? check if log or not? (logabun = 0)
# params += [FloatParameter(qualifier='gravb_bol', value=kwargs.get('gravb_bol', 0.32), default_unit=u.dimensionless_unscaled, limits=(0.0,1.0), description='Bolometric gravity brightening')]
# params += [FloatParameter(qualifier='frac_refl_bol', value=kwargs.get('frac_refl_bol', 0.0), default_unit=u.dimensionless_unscaled, limits=(0.0,1.0), description='ratio of incident bolometric light that is used for reflection (heating without redistribution)')]
# params += [FloatParameter(qualifier='frac_heat_bol', value=kwargs.get('frac_heat_bol', 0.0), default_unit=u.dimensionless_unscaled, limits=(0.0,1.0), description='ratio of incident bolometric light that is used for heating')]
# params += [FloatParameter(qualifier='frac_scatt_bol', value=kwargs.get('frac_scatt_bol', 0.0), default_unit=u.dimensionless_unscaled, limits=(0.0,1.0), description='ratio of bolometric incident light that is scattered')]
# params += [FloatParameter(qualifier='frac_lost_bol', value=kwargs.get('frac_lost_bol', 1.0), default_unit=u.dimensionless_unscaled, limits=(0.0, 1.0), description='ratio of incident bolometric light that is lost/ignored')]
params += [FloatParameter(qualifier='fillout_factor', value=kwargs.get('fillout_factor', 0.5), default_unit=u.dimensionless_unscaled, limits=(0.0,1.0), description='Fillout-factor of the envelope')]
params += [FloatParameter(qualifier='pot', value=kwargs.get('pot', 3.5), default_unit=u.dimensionless_unscaled, limits=(0.0,None), description='Potential of the envelope (from the primary component\s reference)')]
params += [FloatParameter(qualifier='pot_min', value=kwargs.get('pot_min', 3.5), default_unit=u.dimensionless_unscaled, limits=(0.0,None), description='Critical (minimum) value of the potential to remain a contact')]
params += [FloatParameter(qualifier='pot_max', value=kwargs.get('pot_max', 3.5), default_unit=u.dimensionless_unscaled, limits=(0.0,None), description='Critical (maximum) value of the potential to remain a contact')]
# params += [FloatParameter(qualifier='intens_coeff1', value=kwargs.get('intens_coeff1', 1.0), default_unit=u.dimensionless_unscaled, description='')]
# params += [FloatParameter(qualifier='intens_coeff2', value=kwargs.get('intens_coeff2', 1.0), default_unit=u.dimensionless_unscaled, description='')]
# params += [FloatParameter(qualifier='intens_coeff3', value=kwargs.get('intens_coeff3', 1.0), default_unit=u.dimensionless_unscaled, description='')]
# params += [FloatParameter(qualifier='intens_coeff4', value=kwargs.get('intens_coeff4', 1.0), default_unit=u.dimensionless_unscaled, description='')]
# params += [FloatParameter(qualifier='intens_coeff5', value=kwargs.get('intens_coeff5', 1.0), default_unit=u.dimensionless_unscaled, description='')]
# params += [ChoiceParameter(qualifier='ld_func_bol', value=kwargs.get('ld_func_bol', 'logarithmic'), choices=_ld_func_choices_no_interp, description='Bolometric limb darkening model')]
# params += [FloatArrayParameter(qualifier='ld_coeffs_bol', value=kwargs.get('ld_coeffs_bol', [0.5, 0.5]), default_unit=u.dimensionless_unscaled, description='Bolometric limb darkening coefficients')]
constraints = []
# constraints handled by set hierarchy:
# potential_contact_min/max
# requiv_contact_min/max
return ParameterSet(params), constraints | [
"def",
"envelope",
"(",
"component",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"[",
"]",
"params",
"+=",
"[",
"FloatParameter",
"(",
"qualifier",
"=",
"'abun'",
",",
"value",
"=",
"kwargs",
".",
"get",
"(",
"'abun'",
",",
"0.",
")",
",",
"d... | 92.522727 | 77.295455 |
def swipe_by_percent(self, start_x, start_y, end_x, end_y, duration=1000):
"""
Swipe from one percent of the screen to another percent, for an optional duration.
Normal swipe fails to scale for different screen resolutions, this can be avoided using percent.
Args:
- start_x - x-percent at which to start
- start_y - y-percent at which to start
- end_x - x-percent distance from start_x at which to stop
- end_y - y-percent distance from start_y at which to stop
- duration - (optional) time to take the swipe, in ms.
Usage:
| Swipe By Percent | 90 | 50 | 10 | 50 | # Swipes screen from right to left. |
_*NOTE: *_
This also considers swipe acts different between iOS and Android.
New in AppiumLibrary 1.4.5
"""
width = self.get_window_width()
height = self.get_window_height()
x_start = float(start_x) / 100 * width
x_end = float(end_x) / 100 * width
y_start = float(start_y) / 100 * height
y_end = float(end_y) / 100 * height
x_offset = x_end - x_start
y_offset = y_end - y_start
platform = self._get_platform()
if platform == 'android':
self.swipe(x_start, y_start, x_end, y_end, duration)
else:
self.swipe(x_start, y_start, x_offset, y_offset, duration) | [
"def",
"swipe_by_percent",
"(",
"self",
",",
"start_x",
",",
"start_y",
",",
"end_x",
",",
"end_y",
",",
"duration",
"=",
"1000",
")",
":",
"width",
"=",
"self",
".",
"get_window_width",
"(",
")",
"height",
"=",
"self",
".",
"get_window_height",
"(",
")"... | 42.363636 | 20.727273 |
def _CreateUserIdentifier(identifier_type=None, value=None):
"""Creates a user identifier from the specified type and value.
Args:
identifier_type: a str specifying the type of user identifier.
value: a str value of the identifier; to be hashed using SHA-256 if needed.
Returns:
A dict specifying a user identifier, with a value hashed using SHA-256 if
needed.
"""
if identifier_type in _HASHED_IDENTIFIER_TYPES:
# If the user identifier type is a hashed type, normalize and hash the
# value.
value = hashlib.sha256(value.strip().lower()).hexdigest()
user_identifier = {
'userIdentifierType': identifier_type,
'value': value
}
return user_identifier | [
"def",
"_CreateUserIdentifier",
"(",
"identifier_type",
"=",
"None",
",",
"value",
"=",
"None",
")",
":",
"if",
"identifier_type",
"in",
"_HASHED_IDENTIFIER_TYPES",
":",
"# If the user identifier type is a hashed type, normalize and hash the",
"# value.",
"value",
"=",
"has... | 31.363636 | 25.454545 |
def write(context):
"""Starts a new article"""
config = context.obj
title = click.prompt('Title')
author = click.prompt('Author', default=config.get('DEFAULT_AUTHOR'))
slug = slugify(title)
creation_date = datetime.now()
basename = '{:%Y-%m-%d}_{}.md'.format(creation_date, slug)
meta = (
('Title', title),
('Date', '{:%Y-%m-%d %H:%M}:00'.format(creation_date)),
('Modified', '{:%Y-%m-%d %H:%M}:00'.format(creation_date)),
('Author', author),
)
file_content = ''
for key, value in meta:
file_content += '{}: {}\n'.format(key, value)
file_content += '\n\n'
file_content += 'Text...\n\n'
file_content += '\n\n'
file_content += 'Text...\n\n'
os.makedirs(config['CONTENT_DIR'], exist_ok=True)
path = os.path.join(config['CONTENT_DIR'], basename)
with click.open_file(path, 'w') as f:
f.write(file_content)
click.echo(path)
click.launch(path) | [
"def",
"write",
"(",
"context",
")",
":",
"config",
"=",
"context",
".",
"obj",
"title",
"=",
"click",
".",
"prompt",
"(",
"'Title'",
")",
"author",
"=",
"click",
".",
"prompt",
"(",
"'Author'",
",",
"default",
"=",
"config",
".",
"get",
"(",
"'DEFAU... | 30.030303 | 20.939394 |
def geo_contains(left, right):
"""
Check if the first geometry contains the second one
Parameters
----------
left : geometry
right : geometry
Returns
-------
contains : bool scalar
"""
op = ops.GeoContains(left, right)
return op.to_expr() | [
"def",
"geo_contains",
"(",
"left",
",",
"right",
")",
":",
"op",
"=",
"ops",
".",
"GeoContains",
"(",
"left",
",",
"right",
")",
"return",
"op",
".",
"to_expr",
"(",
")"
] | 18.266667 | 19.333333 |
def process_request(self, request, credential=None):
"""
Process a KMIP request message.
This routine is the main driver of the KmipEngine. It breaks apart and
processes the request header, handles any message errors that may
result, and then passes the set of request batch items on for
processing. This routine is thread-safe, allowing multiple client
connections to use the same KmipEngine.
Args:
request (RequestMessage): The request message containing the batch
items to be processed.
credential (string): Identifying information about the client
obtained from the client certificate. Optional, defaults to
None.
Returns:
ResponseMessage: The response containing all of the results from
the request batch items.
"""
self._client_identity = [None, None]
header = request.request_header
# Process the protocol version
self._set_protocol_version(header.protocol_version)
# Process the maximum response size
max_response_size = None
if header.maximum_response_size:
max_response_size = header.maximum_response_size.value
# Process the time stamp
now = int(time.time())
if header.time_stamp:
then = header.time_stamp.value
if (now >= then) and ((now - then) < 60):
self._logger.info("Received request at time: {0}".format(
time.strftime(
"%Y-%m-%d %H:%M:%S",
time.gmtime(then)
)
))
else:
if now < then:
self._logger.warning(
"Received request with future timestamp. Received "
"timestamp: {0}, Current timestamp: {1}".format(
then,
now
)
)
raise exceptions.InvalidMessage(
"Future request rejected by server."
)
else:
self._logger.warning(
"Received request with old timestamp. Possible "
"replay attack. Received timestamp: {0}, Current "
"timestamp: {1}".format(then, now)
)
raise exceptions.InvalidMessage(
"Stale request rejected by server."
)
else:
self._logger.info("Received request at time: {0}".format(
time.strftime(
"%Y-%m-%d %H:%M:%S",
time.gmtime(now)
)
))
# Process the asynchronous indicator
self.is_asynchronous = False
if header.asynchronous_indicator is not None:
self.is_asynchronous = header.asynchronous_indicator.value
if self.is_asynchronous:
raise exceptions.InvalidMessage(
"Asynchronous operations are not supported."
)
# Process the authentication credentials
if header.authentication:
if header.authentication.credentials:
auth_credentials = header.authentication.credentials[0]
else:
auth_credentials = None
else:
auth_credentials = None
self._verify_credential(auth_credentials, credential)
# Process the batch error continuation option
batch_error_option = enums.BatchErrorContinuationOption.STOP
if header.batch_error_cont_option is not None:
batch_error_option = header.batch_error_cont_option.value
if batch_error_option == enums.BatchErrorContinuationOption.UNDO:
raise exceptions.InvalidMessage(
"Undo option for batch handling is not supported."
)
# Process the batch order option
batch_order_option = False
if header.batch_order_option:
batch_order_option = header.batch_order_option.value
response_batch = self._process_batch(
request.batch_items,
batch_error_option,
batch_order_option
)
response = self._build_response(
header.protocol_version,
response_batch
)
return response, max_response_size, header.protocol_version | [
"def",
"process_request",
"(",
"self",
",",
"request",
",",
"credential",
"=",
"None",
")",
":",
"self",
".",
"_client_identity",
"=",
"[",
"None",
",",
"None",
"]",
"header",
"=",
"request",
".",
"request_header",
"# Process the protocol version",
"self",
"."... | 36.352459 | 19.352459 |
def identify_protocol(method, value):
# type: (str, Union[str, RequestType]) -> str
"""
Loop through protocols, import the protocol module and try to identify the id or request.
"""
for protocol_name in PROTOCOLS:
protocol = importlib.import_module(f"federation.protocols.{protocol_name}.protocol")
if getattr(protocol, f"identify_{method}")(value):
return protocol
else:
raise NoSuitableProtocolFoundError() | [
"def",
"identify_protocol",
"(",
"method",
",",
"value",
")",
":",
"# type: (str, Union[str, RequestType]) -> str",
"for",
"protocol_name",
"in",
"PROTOCOLS",
":",
"protocol",
"=",
"importlib",
".",
"import_module",
"(",
"f\"federation.protocols.{protocol_name}.protocol\"",
... | 41.636364 | 17.090909 |
def pack(self):
"""
Packs the field value into a byte string so it can be sent to the
server.
:param structure: The message structure class object
:return: A byte string of the packed field's value
"""
value = self._get_calculated_value(self.value)
packed_value = self._pack_value(value)
size = self._get_calculated_size(self.size, packed_value)
if len(packed_value) != size:
raise ValueError("Invalid packed data length for field %s of %d "
"does not fit field size of %d"
% (self.name, len(packed_value), size))
return packed_value | [
"def",
"pack",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"_get_calculated_value",
"(",
"self",
".",
"value",
")",
"packed_value",
"=",
"self",
".",
"_pack_value",
"(",
"value",
")",
"size",
"=",
"self",
".",
"_get_calculated_size",
"(",
"self",
"... | 39.823529 | 20.411765 |
def prune_urls(url_set, start_url, allowed_list, ignored_list):
    """Prunes URLs that should be ignored."""
    kept = set()
    for candidate in url_set:
        # Keep only URLs under an allowed prefix...
        if not any(candidate.startswith(prefix) for prefix in allowed_list):
            continue
        # ...that are not under an ignored prefix...
        if any(candidate.startswith(prefix) for prefix in ignored_list):
            continue
        # ...and whose file extension is not on the ignore list.
        _stem, suffix = (candidate.rsplit('.', 1) + [''])[:2]
        if suffix.lower() in IGNORE_SUFFIXES:
            continue
        kept.add(candidate)
    return kept
"def",
"prune_urls",
"(",
"url_set",
",",
"start_url",
",",
"allowed_list",
",",
"ignored_list",
")",
":",
"result",
"=",
"set",
"(",
")",
"for",
"url",
"in",
"url_set",
":",
"allowed",
"=",
"False",
"for",
"allow_url",
"in",
"allowed_list",
":",
"if",
"... | 23.333333 | 19.966667 |
def show_messages(self):
    """Show all messages."""
    # Collect the text of every message, static first, then print once.
    parts = []
    if self.static_message is not None:
        parts.append(self.static_message.to_text())
    parts.extend(message.to_text() for message in self.dynamic_messages)
    print(''.join(parts))
"def",
"show_messages",
"(",
"self",
")",
":",
"string",
"=",
"''",
"if",
"self",
".",
"static_message",
"is",
"not",
"None",
":",
"string",
"+=",
"self",
".",
"static_message",
".",
"to_text",
"(",
")",
"for",
"message",
"in",
"self",
".",
"dynamic_mess... | 27.4 | 15.6 |
def console_user(username=False):
    '''
    Get the UID or username of the current console user.

    :param bool username: Whether to return the username of the console
        user instead of the UID. Defaults to False.
    :return: The UID (int) of the console user, or the username (str)
        when ``username`` is True.
    :rtype: int or str
    Raises:
        CommandExecutionError: If we fail to get the UID.
    CLI Example:
    .. code-block:: bash
        import salt.utils.mac_service
        salt.utils.mac_service.console_user()
    '''
    try:
        # The console user owns /dev/console; stat index 4 is st_uid.
        uid = os.stat('/dev/console')[4]
    except (OSError, IndexError):
        # Should never happen, but fail loudly if it does.
        raise CommandExecutionError('Failed to get a UID for the console user.')
    if not username:
        return uid
    return pwd.getpwuid(uid)[0]
"def",
"console_user",
"(",
"username",
"=",
"False",
")",
":",
"try",
":",
"# returns the 'st_uid' stat from the /dev/console file.",
"uid",
"=",
"os",
".",
"stat",
"(",
"'/dev/console'",
")",
"[",
"4",
"]",
"except",
"(",
"OSError",
",",
"IndexError",
")",
"... | 27.28125 | 24.71875 |
def _run_checks(self):
'''basic sanity checks for the file name (and others if needed) before
attempting parsing.
'''
if self.recipe is not None:
# Does the recipe provided exist?
if not os.path.exists(self.recipe):
bot.error("Cannot find %s, is the path correct?" %self.recipe)
sys.exit(1)
# Ensure we carry fullpath
self.recipe = os.path.abspath(self.recipe) | [
"def",
"_run_checks",
"(",
"self",
")",
":",
"if",
"self",
".",
"recipe",
"is",
"not",
"None",
":",
"# Does the recipe provided exist?",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"recipe",
")",
":",
"bot",
".",
"error",
"(",
"\"C... | 35.769231 | 19.923077 |
def get_labels(input_dir):
    """Get a list of labels from preprocessed output dir."""
    latest_dir = _get_latest_data_dir(input_dir)
    labels_path = os.path.join(latest_dir, 'labels')
    # One label per line; drop any trailing whitespace/newline first.
    with file_io.FileIO(labels_path, 'r') as handle:
        contents = handle.read()
    return contents.rstrip().split('\n')
"def",
"get_labels",
"(",
"input_dir",
")",
":",
"data_dir",
"=",
"_get_latest_data_dir",
"(",
"input_dir",
")",
"labels_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'labels'",
")",
"with",
"file_io",
".",
"FileIO",
"(",
"labels_file",
... | 39.714286 | 8.285714 |
def get_file_extension_type(filename):
    """
    Return the name of the extension group the file belongs to.
    :param filename: file name whose extension is examined
    :return: str -- the matching group name, or "OTHER"
    """
    extension = get_file_extension(filename)
    if extension:
        for group_name, group_extensions in EXTENSIONS.items():
            if extension in group_extensions:
                return group_name
    return "OTHER"
"def",
"get_file_extension_type",
"(",
"filename",
")",
":",
"ext",
"=",
"get_file_extension",
"(",
"filename",
")",
"if",
"ext",
":",
"for",
"name",
",",
"group",
"in",
"EXTENSIONS",
".",
"items",
"(",
")",
":",
"if",
"ext",
"in",
"group",
":",
"return"... | 24.916667 | 11.083333 |
def cors(origins,
         methods=['HEAD', 'OPTIONS', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE'],
         headers=['Accept', 'Accept-Language', 'Content-Language', 'Content-Type', 'X-Requested-With'],
         max_age=None):
    """
    Adds CORS headers to the decorated view function.
    :param origins: Allowed origins (see below)
    :param methods: A list of allowed HTTP methods
    :param headers: A list of allowed HTTP headers
    :param max_age: Duration in seconds for which the CORS response may be cached
    The :obj:`origins` parameter may be one of:
    1. A callable that receives the origin as a parameter.
    2. A list of origins.
    3. ``*``, indicating that this resource is accessible by any origin.
    Example use::
        from flask import Flask, Response
        from coaster.views import cors
        app = Flask(__name__)
        @app.route('/any')
        @cors('*')
        def any_origin():
            return Response()
        @app.route('/static', methods=['GET', 'POST'])
        @cors(['https://hasgeek.com'], methods=['GET'], headers=['Content-Type', 'X-Requested-With'],
            max_age=3600)
        def static_list():
            return Response()
        def check_origin(origin):
            # check if origin should be allowed
            return True
        @app.route('/callable')
        @cors(check_origin)
        def callable_function():
            return Response()

    NOTE(review): ``methods`` and ``headers`` use mutable list defaults;
    they are only read here, never mutated, so this is safe as written.
    """
    def inner(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Origin header is absent for same-origin / non-CORS requests.
            origin = request.headers.get('Origin')
            if request.method not in methods:
                abort(405)
            # Accept the origin if the spec is the '*' wildcard, a collection
            # containing it, or a callable that approves it; reject otherwise.
            if origins == '*':
                pass
            elif is_collection(origins) and origin in origins:
                pass
            elif callable(origins) and origins(origin):
                pass
            else:
                abort(403)
            if request.method == 'OPTIONS':
                # pre-flight request
                resp = Response()
            else:
                result = f(*args, **kwargs)
                resp = make_response(result) if not isinstance(result,
                    (Response, WerkzeugResponse, current_app.response_class)) else result
            resp.headers['Access-Control-Allow-Origin'] = origin if origin else ''
            resp.headers['Access-Control-Allow-Methods'] = ', '.join(methods)
            resp.headers['Access-Control-Allow-Headers'] = ', '.join(headers)
            if max_age:
                resp.headers['Access-Control-Max-Age'] = str(max_age)
            # Add 'Origin' to the Vary header since response will vary by origin
            if 'Vary' in resp.headers:
                vary_values = [item.strip() for item in resp.headers['Vary'].split(',')]
                if 'Origin' not in vary_values:
                    vary_values.append('Origin')
                resp.headers['Vary'] = ', '.join(vary_values)
            else:
                resp.headers['Vary'] = 'Origin'
            return resp
        return wrapper
    return inner
"def",
"cors",
"(",
"origins",
",",
"methods",
"=",
"[",
"'HEAD'",
",",
"'OPTIONS'",
",",
"'GET'",
",",
"'POST'",
",",
"'PUT'",
",",
"'PATCH'",
",",
"'DELETE'",
"]",
",",
"headers",
"=",
"[",
"'Accept'",
",",
"'Accept-Language'",
",",
"'Content-Language'",... | 34.686047 | 21.593023 |
def ntoreturn(self):
    """Extract ntoreturn counter if available (lazy)."""
    # Counters are computed at most once, on first access.
    if self._counters_calculated:
        return self._ntoreturn
    self._counters_calculated = True
    self._extract_counters()
    return self._ntoreturn
"def",
"ntoreturn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_counters_calculated",
":",
"self",
".",
"_counters_calculated",
"=",
"True",
"self",
".",
"_extract_counters",
"(",
")",
"return",
"self",
".",
"_ntoreturn"
] | 33 | 11.285714 |
def list_machine_group(self, project_name, offset=0, size=100):
    """List machine group names in a project.
    An unsuccessful operation raises a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type offset: int
    :param offset: the offset of all group name
    :type size: int
    :param size: the max return names count, -1 means all
    :return: ListMachineGroupResponse
    :raise: LogException
    """
    size_int = int(size)
    # A size of -1 (or anything beyond one page) is served by repeated
    # paged calls through the list_more helper.
    if size_int == -1 or size_int > MAX_LIST_PAGING_SIZE:
        return list_more(self.list_machine_group, int(offset), size_int, MAX_LIST_PAGING_SIZE, project_name)
    params = {'offset': str(offset), 'size': str(size)}
    (resp, header) = self._send("GET", project_name, None, "/machinegroups", params, {})
    return ListMachineGroupResponse(resp, header)
"def",
"list_machine_group",
"(",
"self",
",",
"project_name",
",",
"offset",
"=",
"0",
",",
"size",
"=",
"100",
")",
":",
"# need to use extended method to get more\r",
"if",
"int",
"(",
"size",
")",
"==",
"-",
"1",
"or",
"int",
"(",
"size",
")",
">",
"... | 35.827586 | 21.275862 |
def set_meta_rdf(self, rdf, fmt='n3'):
    """Set the metadata for this Point in rdf fmt
    """
    client = self._client
    # Issue the meta-set request and block until it completes (or raises).
    request = client._request_point_meta_set(self._type, self.__lid, self.__pid, rdf, fmt=fmt)
    client._wait_and_except_if_failed(request)
"def",
"set_meta_rdf",
"(",
"self",
",",
"rdf",
",",
"fmt",
"=",
"'n3'",
")",
":",
"evt",
"=",
"self",
".",
"_client",
".",
"_request_point_meta_set",
"(",
"self",
".",
"_type",
",",
"self",
".",
"__lid",
",",
"self",
".",
"__pid",
",",
"rdf",
",",
... | 50.8 | 14.8 |
def saturation(self, value):
    """Volume of water to volume of voids.

    Setter: validates the proposed saturation against the value implied
    by the current unit weights (when they are available) and recomputes
    dependent quantities, rolling back on inconsistency.

    :param value: new saturation ratio; cleaned to float, ignored if None
    :raises ModelError: if the value conflicts with the computed
        saturation, or if the model recompute fails
    """
    value = clean_float(value)
    if value is None:
        return
    try:
        # Cross-check the new saturation against the value implied by the
        # current moist/dry unit weights; the arithmetic raises TypeError
        # when any of those weights is still unset (None), in which case
        # the consistency check is skipped.
        unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
        unit_moisture_volume = unit_moisture_weight / self._pw
        saturation = unit_moisture_volume / self._calc_unit_void_volume()
        if saturation is not None and not ct.isclose(saturation, value, rel_tol=self._tolerance):
            raise ModelError("New saturation (%.3f) is inconsistent "
                             "with calculated value (%.3f)" % (value, saturation))
    except TypeError:
        pass
    # Apply the new value, then roll back if the recompute detects an
    # inconsistency elsewhere in the model.
    old_value = self.saturation
    self._saturation = value
    try:
        self.recompute_all_weights_and_void()
        self._add_to_stack("saturation", value)
    except ModelError as e:
        self._saturation = old_value
        raise ModelError(e)
"def",
"saturation",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"clean_float",
"(",
"value",
")",
"if",
"value",
"is",
"None",
":",
"return",
"try",
":",
"unit_moisture_weight",
"=",
"self",
".",
"unit_moist_weight",
"-",
"self",
".",
"unit_dry_wei... | 44.090909 | 20.181818 |
def _find_conflicts_within_selection_set(
    context,  # type: ValidationContext
    cached_fields_and_fragment_names,  # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
    compared_fragments,  # type: PairSet
    parent_type,  # type: Union[GraphQLInterfaceType, GraphQLObjectType, None]
    selection_set,  # type: SelectionSet
):
    # type: (...) -> List[Tuple[Tuple[str, str], List[Node], List[Node]]]
    """Find all conflicts found "within" a selection set, including those found via spreading in fragments.
    Called when visiting each SelectionSet in the GraphQL Document.

    Returns the list of conflict tuples accumulated by the
    _collect_conflicts_* helpers (see the return type comment above).
    """
    conflicts = []  # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
    field_map, fragment_names = _get_fields_and_fragments_names(
        context, cached_fields_and_fragment_names, parent_type, selection_set
    )
    # (A) Find all conflicts "within" the fields of this selection set.
    # Note: this is the *only place* `collect_conflicts_within` is called.
    _collect_conflicts_within(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragments,
        field_map,
    )
    # (B) Then collect conflicts between these fields and those represented by
    # each spread fragment name found.
    for i, fragment_name in enumerate(fragment_names):
        _collect_conflicts_between_fields_and_fragment(
            context,
            conflicts,
            cached_fields_and_fragment_names,
            compared_fragments,
            False,
            field_map,
            fragment_name,
        )
        # (C) Then compare this fragment with all other fragments found in this
        # selection set to collect conflicts within fragments spread together.
        # This compares each item in the list of fragment names to every other item
        # in that same list (except for itself).
        for other_fragment_name in fragment_names[i + 1 :]:
            _collect_conflicts_between_fragments(
                context,
                conflicts,
                cached_fields_and_fragment_names,
                compared_fragments,
                False,
                fragment_name,
                other_fragment_name,
            )
    return conflicts
"def",
"_find_conflicts_within_selection_set",
"(",
"context",
",",
"# type: ValidationContext",
"cached_fields_and_fragment_names",
",",
"# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]",
"compared_fra... | 40.625 | 23.160714 |
def _key(self, block, name):
    """
    Resolves `name` to a key, in the following form:
    KeyValueStore.Key(
        scope=field.scope,
        user_id=student_id,
        block_scope_id=block_id,
        field_name=name,
        block_family=block.entry_point,
    )
    """
    field = self._getfield(block, name)
    if field.scope in (Scope.children, Scope.parent):
        # children/parent scopes are always keyed per-usage, never per-user
        block_id = block.scope_ids.usage_id
        user_id = None
    else:
        # Map the field's block scope onto the matching scope-id attribute.
        block_scope = field.scope.block
        if block_scope == BlockScope.ALL:
            block_id = None
        elif block_scope == BlockScope.USAGE:
            block_id = block.scope_ids.usage_id
        elif block_scope == BlockScope.DEFINITION:
            block_id = block.scope_ids.def_id
        elif block_scope == BlockScope.TYPE:
            block_id = block.scope_ids.block_type
        # NOTE(review): there is no final else -- an unrecognized block
        # scope would leave block_id unbound and raise NameError below.
        if field.scope.user == UserScope.ONE:
            user_id = block.scope_ids.user_id
        else:
            user_id = None
    key = KeyValueStore.Key(
        scope=field.scope,
        user_id=user_id,
        block_scope_id=block_id,
        field_name=name,
        block_family=block.entry_point,
    )
    return key
"def",
"_key",
"(",
"self",
",",
"block",
",",
"name",
")",
":",
"field",
"=",
"self",
".",
"_getfield",
"(",
"block",
",",
"name",
")",
"if",
"field",
".",
"scope",
"in",
"(",
"Scope",
".",
"children",
",",
"Scope",
".",
"parent",
")",
":",
"blo... | 31.390244 | 13.97561 |
def add_waveform(self, waveform):
    """
    Add a waveform to the plot.
    :param waveform: the waveform to be added
    :type waveform: :class:`~aeneas.plotter.PlotWaveform`
    :raises: TypeError: if ``waveform`` is not an instance of :class:`~aeneas.plotter.PlotWaveform`
    """
    is_valid = isinstance(waveform, PlotWaveform)
    if not is_valid:
        # log_exc is expected to raise TypeError here (raise flag is True)
        self.log_exc(u"waveform must be an instance of PlotWaveform", None, True, TypeError)
    self.waveform = waveform
    self.log(u"Added waveform")
"def",
"add_waveform",
"(",
"self",
",",
"waveform",
")",
":",
"if",
"not",
"isinstance",
"(",
"waveform",
",",
"PlotWaveform",
")",
":",
"self",
".",
"log_exc",
"(",
"u\"waveform must be an instance of PlotWaveform\"",
",",
"None",
",",
"True",
",",
"TypeError"... | 43.083333 | 18.75 |
def make_sgf(
    move_history,
    result_string,
    ruleset="Chinese",
    komi=7.5,
    white_name=PROGRAM_IDENTIFIER,
    black_name=PROGRAM_IDENTIFIER,
    comments=[]
    ):
    """Turn a game into SGF.
    Doesn't handle handicap games or positions with incomplete history.
    Args:
        move_history: iterable of PlayerMoves
        result_string: "B+R", "W+0.5", etc.
        comments: iterable of string/None. Will be zipped with move_history.

    NOTE(review): the mutable default ``comments=[]`` is only iterated,
    never mutated, so it is safe as written; changing it would also alter
    the ``locals()`` mapping consumed below.
    """
    # Every local name (including the parameters and the seemingly unused
    # `boardsize` and `result`) feeds SGF_TEMPLATE.format(**locals()), so
    # names here must match the template's placeholders -- do not rename.
    boardsize = go.N
    game_moves = ''.join(translate_sgf_move(*z)
                         for z in itertools.zip_longest(move_history, comments))
    result = result_string
    return SGF_TEMPLATE.format(**locals())
"def",
"make_sgf",
"(",
"move_history",
",",
"result_string",
",",
"ruleset",
"=",
"\"Chinese\"",
",",
"komi",
"=",
"7.5",
",",
"white_name",
"=",
"PROGRAM_IDENTIFIER",
",",
"black_name",
"=",
"PROGRAM_IDENTIFIER",
",",
"comments",
"=",
"[",
"]",
")",
":",
"... | 28.782609 | 20.043478 |
def update(self, rid, data, raise_on_error=True):
    """Write updated cache data to the DataStore.
    Args:
        rid (str): The record identifier.
        data (dict): The record data.
        raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.
    Returns:
        object : Python request response.
    """
    # Stamp the payload with the current time (epoch) before storing.
    now_epoch = self._dt_to_epoch(datetime.now())
    payload = {'cache-date': now_epoch, 'cache-data': data}
    return self.ds.put(rid, payload, raise_on_error)
"def",
"update",
"(",
"self",
",",
"rid",
",",
"data",
",",
"raise_on_error",
"=",
"True",
")",
":",
"cache_data",
"=",
"{",
"'cache-date'",
":",
"self",
".",
"_dt_to_epoch",
"(",
"datetime",
".",
"now",
"(",
")",
")",
",",
"'cache-data'",
":",
"data",... | 39.692308 | 21.076923 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.