code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def __readimzmlmeta(self):
    """
    This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
    the .imzML file.
    This method reads only a subset of the available meta information and may be extended in the future. The keys
    are named similarly to the imzML names. Currently supported keys: "max dimension x", "max dimension y",
    "pixel size x", "pixel size y", "matrix solution concentration", "wavelength", "focus diameter x",
    "focus diameter y", "pulse energy", "pulse duration", "attenuation".
    If a key is not found in the XML tree, it will not be in the dict either.
    :return d:
        dict containing above mentioned meta data
    :rtype:
        dict
    :raises Warning:
        if an xml attribute has a number format different from the imzML specification
    """
    d = {}

    def collect(parent_elem, params, accessions):
        # Look up each supported cvParam under parent_elem and store the
        # converted value in d. Missing keys are simply skipped (previously
        # a `break` aborted the whole section on the first missing key,
        # contradicting the docstring contract above).
        if parent_elem is None:
            # Section absent from this file; nothing to read.
            return
        for (name, T), (acc, attr) in zip(params, accessions):
            elem = parent_elem.find('.//%scvParam[@accession="%s"]' % (self.sl, acc))
            if elem is None:
                continue
            try:
                d[name] = T(elem.attrib[attr])
            except ValueError:
                warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))

    scan_settings_list_elem = self.root.find('%sscanSettingsList' % self.sl)
    instrument_config_list_elem = self.root.find('%sinstrumentConfigurationList' % self.sl)
    # (name, converter) pairs, aligned index-by-index with the accession list.
    supportedparams1 = [("max count of pixels x", int), ("max count of pixels y", int),
                        ("max dimension x", int), ("max dimension y", int), ("pixel size x", float),
                        ("pixel size y", float), ("matrix solution concentration", float)]
    supportedparams2 = [("wavelength", float),
                        ("focus diameter x", float), ("focus diameter y", float), ("pulse energy", float),
                        ("pulse duration", float), ("attenuation", float)]
    supportedaccessions1 = [("IMS:1000042", "value"), ("IMS:1000043", "value"),
                            ("IMS:1000044", "value"), ("IMS:1000045", "value"),
                            ("IMS:1000046", "value"), ("IMS:1000047", "value"), ("MS:1000835", "value")]
    supportedaccessions2 = [("MS:1000843", "value"), ("MS:1000844", "value"),
                            ("MS:1000845", "value"), ("MS:1000846", "value"), ("MS:1000847", "value"),
                            ("MS:1000848", "value")]
    collect(scan_settings_list_elem, supportedparams1, supportedaccessions1)
    collect(instrument_config_list_elem, supportedparams2, supportedaccessions2)
    return d
|
This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
the .imzML file.
This method reads only a subset of the available meta information and may be extended in the future. The keys
are named similarly to the imzML names. Currently supported keys: "max dimension x", "max dimension y",
"pixel size x", "pixel size y", "matrix solution concentration", "wavelength", "focus diameter x",
"focus diameter y", "pulse energy", "pulse duration", "attenuation".
If a key is not found in the XML tree, it will not be in the dict either.
:return d:
dict containing above mentioned meta data
:rtype:
dict
:raises Warning:
if an xml attribute has a number format different from the imzML specification
|
def extract_tree(self, labels, without, suppress_unifurcations=True):
    '''Helper function for ``extract_tree_*`` functions.

    Copies this tree, keeping only the leaves selected via ``labels``
    (kept or excluded depending on ``without``) plus all their ancestors.

    Args:
        labels: iterable of leaf labels, or None to select every leaf.
        without: if truthy, keep leaves whose label is NOT in labels;
            otherwise keep leaves whose label IS in labels.
        suppress_unifurcations (bool): suppress unifurcations in the result.

    Returns:
        Tree: the extracted copy.
    '''
    if not isinstance(suppress_unifurcations, bool):
        raise TypeError("suppress_unifurcations must be a bool")
    if labels is not None and not isinstance(labels, set):
        try:
            labels = set(labels)
        except TypeError as e:
            # narrowed from a bare ``except``: only a non-iterable (or
            # unhashable-element) argument should be reported this way
            raise TypeError("labels must be iterable") from e
    keep = set()
    for node in self.traverse_leaves():
        if labels is None or (without and str(node) not in labels) or (not without and str(node) in labels):
            keep.add(node)
    # also keep every ancestor of each selected leaf
    for node in list(keep):
        for a in node.traverse_ancestors(include_self=False):
            keep.add(a)
    out = Tree(); out.root.label = self.root.label; out.root.edge_length = self.root.edge_length
    # breadth-first copy of the kept subgraph; the two queues walk the old
    # and new trees in lockstep
    q_old = deque(); q_old.append(self.root)
    q_new = deque(); q_new.append(out.root)
    while len(q_old) != 0:
        n_old = q_old.popleft(); n_new = q_new.popleft()
        for c_old in n_old.children:
            if c_old in keep:
                c_new = Node(label=str(c_old), edge_length=c_old.edge_length); n_new.add_child(c_new)
                q_old.append(c_old); q_new.append(c_new)
    if suppress_unifurcations:
        out.suppress_unifurcations()
    return out
|
Helper function for ``extract_tree_*`` functions
|
def rehearse(self, docs, sgd=None, losses=None, config=None):
    """Make a "rehearsal" update to the models in the pipeline, to prevent
    forgetting. Rehearsal updates run an initial copy of the model over some
    data, and update the model so its current predictions are more like the
    initial ones. This is useful for keeping a pre-trained model on-track,
    even if you're updating it with a smaller set of examples.

    docs (iterable): A batch of `Doc` objects (raw strings are converted
        via `make_doc`).
    sgd (callable): An optimizer. Defaults to the pipeline's optimizer.
    losses (dict): Optional dictionary updated by each component's rehearse.
    config (dict): Optional per-component keyword arguments, keyed by
        component name.
    RETURNS (dict): Results from the update (the `losses` dict), or None
        when `docs` is empty.

    EXAMPLE:
        >>> raw_text_batches = minibatch(raw_texts)
        >>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
        >>>     docs, golds = zip(*train_docs)
        >>>     nlp.update(docs, golds)
        >>>     raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
        >>>     nlp.rehearse(raw_batch)
    """
    # Materialize first: the emptiness check below would fail (or consume
    # input) for generator arguments, and the docstring promises "iterable".
    docs = list(docs)
    if len(docs) == 0:
        return
    if sgd is None:
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    # Accept raw strings alongside Doc objects.
    for i, doc in enumerate(docs):
        if isinstance(doc, basestring_):
            docs[i] = self.make_doc(doc)
    pipes = list(self.pipeline)
    # Shuffle so no single component always rehearses first.
    random.shuffle(pipes)
    if config is None:
        config = {}
    grads = {}

    def get_grads(W, dW, key=None):
        # Collect gradients rather than applying them immediately; each
        # component's gradients are applied together after its rehearsal.
        grads[key] = (W, dW)

    # Components may read hyper-parameters off the object they are handed
    # as `sgd`, so mirror them on the collector.
    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    for name, proc in pipes:
        if not hasattr(proc, "rehearse"):
            continue
        grads = {}
        proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
    return losses
|
Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pre-trained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
>>> docs, golds = zip(*train_docs)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
|
def results(cls, function, group=None):
    """Return the benchmark results recorded for *function* within *group*
    as a numpy array.
    """
    group_results = cls._results[group]
    return numpy.array(group_results[function])
|
Returns a numpy nparray representing the benchmark results of a function
in a group.
|
def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):
  """The VAN decoder.

  Args:
    x: The analogy information to decode.
    skip_connections: The encoder layers which can be used as skip connections.
    output_shape: The shape of the desired output image.
    first_depth: The depth of the first layer of the van image encoder.
    hparams: The python hparams.
  Returns:
    The decoded image prediction.
  """
  # (filters, stride, apply layer-norm after the dropout) for each of the
  # six transposed-conv stages, in order.
  layer_specs = [
      (first_depth * 4, 2, True),
      (first_depth * 4, 1, False),
      (first_depth * 2, 1, True),
      (first_depth * 2, 2, False),
      (first_depth, 1, True),
      (output_shape[3] + 1, 2, False),
  ]
  with tf.variable_scope('van_dec'):
    dec = x
    for filters, stride, use_norm in layer_specs:
      dec = tf.layers.conv2d_transpose(
          dec, filters, 3, padding='same', activation=tf.nn.relu,
          strides=stride)
      dec = tf.nn.dropout(dec, hparams.van_keep_prob)
      if use_norm:
        dec = tf.contrib.layers.layer_norm(dec)
    # Final projection: 3 image channels plus one mask channel.
    out_mask = tf.layers.conv2d_transpose(
        dec, output_shape[3] + 1, 3, strides=1, padding='same', activation=None)
    mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4])
    out = out_mask[:, :, :, :3]
  # Blend the prediction with the first skip connection via the learned mask.
  return out * mask + skip_connections[0] * (1 - mask)
|
The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns:
The decoded image prediction.
|
def process_request(self, request):
    """
    The middleware entry point, invoked for every incoming request.

    Returns None (letting the request through) when the resource is not
    protected per ``is_resource_protected``, or when it is protected but
    ``deny_access_condition`` does not hold. Otherwise returns the result
    of ``deny_access``, which implements the failure behaviour.
    """
    if not self.is_resource_protected(request):
        return None
    if not self.deny_access_condition(request):
        return None
    return self.deny_access(request)
|
The actual middleware method, called on all incoming requests.
This default implementation will ignore the middleware (return None) if the
conditions specified in is_resource_protected aren't met. If they are, it then
tests to see if the user should be denied access via the deny_access_condition
method, and calls deny_access (which implements failure behaviour) if so.
|
def critical_angle(pressure, u, v, heights, stormu, stormv):
    r"""Calculate the critical angle.

    The critical angle is the angle between the 10m storm-relative inflow vector
    and the 10m-500m shear vector. A critical angle near 90 degrees indicates
    that a storm in this environment on the indicated storm motion vector
    is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
    showed that significantly tornadic supercells tend to occur in environments
    with critical angles near 90 degrees.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Pressures from sounding.
    u : `pint.Quantity`
        U-component of sounding winds.
    v : `pint.Quantity`
        V-component of sounding winds.
    heights : `pint.Quantity`
        Heights from sounding.
    stormu : `pint.Quantity`
        U-component of storm motion.
    stormv : `pint.Quantity`
        V-component of storm motion.

    Returns
    -------
    `pint.Quantity`
        critical angle in degrees
    """
    # Convert everything to m/s
    u = u.to('m/s')
    v = v.to('m/s')
    stormu = stormu.to('m/s')
    stormv = stormv.to('m/s')
    # Sort the profile by decreasing pressure (surface first).
    # BUG FIX: argsort the data and reverse the resulting *index* array.
    # The previous np.argsort(pressure[::-1]) produced indices into the
    # reversed array and then applied them to the unreversed arrays,
    # misordering any profile that was not already sorted.
    sort_inds = np.argsort(pressure)[::-1]
    pressure = pressure[sort_inds]
    heights = heights[sort_inds]
    u = u[sort_inds]
    v = v[sort_inds]
    # Calculate sfc-500m shear vector
    shr5 = bulk_shear(pressure, u, v, heights=heights, depth=500 * units('meter'))
    # Make everything relative to the sfc wind orientation
    umn = stormu - u[0]
    vmn = stormv - v[0]
    vshr = np.asarray([shr5[0].magnitude, shr5[1].magnitude])
    vsm = np.asarray([umn.magnitude, vmn.magnitude])
    # Angle from the normalized dot product of shear and storm-relative wind
    angle_c = np.dot(vshr, vsm) / (np.linalg.norm(vshr) * np.linalg.norm(vsm))
    critical_angle = np.arccos(angle_c) * units('radian')
    return critical_angle.to('degrees')
|
r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding.
u : `pint.Quantity`
U-component of sounding winds.
v : `pint.Quantity`
V-component of sounding winds.
heights : `pint.Quantity`
Heights from sounding.
stormu : `pint.Quantity`
U-component of storm motion.
stormv : `pint.Quantity`
V-component of storm motion.
Returns
-------
`pint.Quantity`
critical angle in degrees
|
def ccmod_setcoef(k):
    """Set the coefficient maps for the ccmod stage.

    The only parameter is the slice index `k` and there are no return
    values; all inputs and outputs are from and to global variables
    (the shared ``mp_*`` arrays).
    """
    # Set working coefficient maps for ccmod step and compute DFT of
    # coefficient maps Z and Z^T S.
    # rfftn of the current coefficient slice over the spatial axes
    # (mp_cri.Nv / mp_cri.axisN come from the shared problem geometry).
    mp_Zf[k] = sl.rfftn(mp_Z_Y[k], mp_cri.Nv, mp_cri.axisN)
    # Elementwise conj(Zf) * Sf, i.e. Z^T S in the DFT domain.
    mp_ZSf[k] = np.conj(mp_Zf[k]) * mp_Sf[k]
|
Set the coefficient maps for the ccmod stage. The only parameter is
the slice index `k` and there are no return values; all inputs and
outputs are from and to global variables.
|
def usage(asked_for=0):
    '''Exit with a usage string, used for bad argument or with -h.

    Returns FSQ_SUCCESS when help was explicitly requested, FSQ_FAIL_PERM
    otherwise; prints to stdout or stderr accordingly.
    '''
    prog = os.path.basename(_PROG)
    if asked_for:
        exit = fsq.const('FSQ_SUCCESS')
        f = sys.stdout
    else:
        exit = fsq.const('FSQ_FAIL_PERM')
        f = sys.stderr
    shout('{0} [opts] src_queue trg_queue host item_id [item_id [...]]'.format(
        prog), f)
    if asked_for:
        # extended help: protocol / lock / trigger variants
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] '
              '[-i|--ignore-listener] <proto>://<host>:<port>/url'.format(prog), f)
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger]'
              '[-i|--ignore-listener] unix://var/sock/foo.sock'.format(prog), f)
        shout(' src_queue trg_queue host_queue item [item [...]]', f)
    return exit
|
Exit with a usage string, used for bad argument or with -h
|
def ack(self, message, subscription_id=None, **kwargs):
    """Acknowledge receipt of a message. This only makes sense when the
    'acknowledgement' flag was set for the relevant subscription.

    :param message: ID of the message to be acknowledged, OR a dictionary
                    containing a field 'message-id'.
    :param subscription_id: ID of the associated subscription. Optional when
                            a dictionary is passed as first parameter and
                            that dictionary contains field 'subscription'.
    :param **kwargs: Further parameters for the transport layer. For example
        transaction: Transaction ID if acknowledgement should be part of
        a transaction
    """
    if not isinstance(message, dict):
        message_id = message
    else:
        message_id = message.get("message-id")
        # fall back to the subscription carried inside the message dict
        subscription_id = subscription_id or message.get("subscription")
    if not message_id:
        raise workflows.Error("Cannot acknowledge message without message ID")
    if not subscription_id:
        raise workflows.Error("Cannot acknowledge message without subscription ID")
    self.log.debug(
        "Acknowledging message %s on subscription %s", message_id, subscription_id
    )
    # delegate to the transport-specific implementation
    self._ack(message_id, subscription_id, **kwargs)
|
Acknowledge receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be acknowledged, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if acknowledgement should be part of
a transaction
|
def get_md5(string):
    """Return the hexadecimal MD5 digest of *string*.

    :param string: str or bytes; str input is UTF-8 encoded first, since
        hashlib digests operate on bytes (passing str raised TypeError
        before this fix).
    :return: 32-character lowercase hex digest.
    """
    try:
        hasher = hashlib.md5()
    except ValueError:
        # FIPS-enabled builds refuse a plain md5(); fall back to the
        # non-security constructor. NOTE(review): the camelCase keyword
        # matches the RedHat FIPS patch this fallback appears to target —
        # confirm against the deployment's Python before renaming it.
        hasher = hashlib.new('md5', usedForSecurity=False)
    if isinstance(string, str):
        string = string.encode('utf-8')
    hasher.update(string)
    return hasher.hexdigest()
|
Get a string's MD5
|
def copy_images(images, source, target):
    """
    Copy images to converted topology.

    :param images: Images to copy (absolute paths, or paths relative to source)
    :param source: Old Topology Directory
    :param target: Target topology files directory
    :return: True when an image cannot be found, otherwise False
    :rtype: bool
    """
    image_err = False
    if not images:
        return image_err
    images_dir = os.path.join(target, 'images')
    # exist_ok: re-running the conversion against an existing target must
    # not crash on a pre-existing images directory.
    os.makedirs(images_dir, exist_ok=True)
    for image in images:
        if os.path.isabs(image):
            old_image_file = image
        else:
            old_image_file = os.path.join(source, image)
        new_image_file = os.path.join(images_dir, os.path.basename(image))
        if os.path.isfile(os.path.abspath(old_image_file)):
            shutil.copy(old_image_file, new_image_file)
        else:
            image_err = True
            # lazy %-formatting: let the logging module interpolate
            logging.error('Unable to find %s', old_image_file)
    return image_err
|
Copy images to converted topology
:param images: Images to copy
:param source: Old Topology Directory
:param target: Target topology files directory
:return: True when an image cannot be found, otherwise false
:rtype: bool
|
def refresh(self, item):
    """
    Forces a refresh of a cached item.

    :param item: Client name.
    :type item: unicode | str
    :return: The freshly constructed cache entry.
    :rtype: DockerHostItemCache.item_class
    """
    # Rebuild the entry from a fresh client and store it back in the cache.
    fresh_client = self._clients[item].get_client()
    refreshed = self.item_class(fresh_client)
    self[item] = refreshed
    return refreshed
|
Forces a refresh of a cached item.
:param item: Client name.
:type item: unicode | str
:return: Items in the cache.
:rtype: DockerHostItemCache.item_class
|
def on_resize(width, height):
    """Setup 3D projection for the new window size.

    :param width: new viewport width in pixels.
    :param height: new viewport height in pixels.
    """
    # Map normalized device coordinates to the full window.
    glViewport(0, 0, width, height)
    # Rebuild the projection matrix: 30-degree vertical field of view,
    # window aspect ratio, near/far clip planes at 0.1 / 1000.
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(30, 1.0*width/height, 0.1, 1000.0)
    # Switch back to (and reset) the modelview matrix for subsequent drawing.
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
|
Setup 3D projection
|
def _get_arguments_for_execution(self, function_name, serialized_args):
    """Retrieve the arguments for the remote function.

    This retrieves the values for the arguments to the remote function that
    were passed in as object IDs. Arguments that were passed by value are
    not changed. This is called by the worker that is executing the remote
    function.

    Args:
        function_name (str): The name of the remote function whose
            arguments are being retrieved.
        serialized_args (List): The arguments to the function. These are
            either strings representing serialized objects passed by value
            or they are ray.ObjectIDs.

    Returns:
        The retrieved arguments in addition to the arguments that were
        passed by value.

    Raises:
        RayError: This exception is raised if a task that
            created one of the arguments failed.
    """
    resolved = []
    for arg in serialized_args:
        if not isinstance(arg, ObjectID):
            # passed by value; keep as-is
            resolved.append(arg)
            continue
        # fetch the value from the local object store
        value = self.get_object([arg])[0]
        if isinstance(value, RayError):
            # the task that produced this argument failed; propagate
            raise value
        resolved.append(value)
    return resolved
|
Retrieve the arguments for the remote function.
This retrieves the values for the arguments to the remote function that
were passed in as object IDs. Arguments that were passed by value are
not changed. This is called by the worker that is executing the remote
function.
Args:
function_name (str): The name of the remote function whose
arguments are being retrieved.
serialized_args (List): The arguments to the function. These are
either strings representing serialized objects passed by value
or they are ray.ObjectIDs.
Returns:
The retrieved arguments in addition to the arguments that were
passed by value.
Raises:
RayError: This exception is raised if a task that
created one of the arguments failed.
|
def spline_base1d(length, nr_knots=20, spline_order=5, marginal=None):
    """Computes a 1D spline basis.

    Input:
        length: int
            length of each basis
        nr_knots: int
            Number of knots, i.e. number of basis functions.
        spline_order: int
            Order of the splines.
        marginal: array, optional
            Estimate of the marginal distribution of the input to be fitted.
            If given, it is used to determine the positioning of knots, each
            knot will cover the same amount of probability mass. If not given,
            knots are equally spaced.
    """
    # Knot placement: equally spaced by default, probability-mass-equalized
    # when a marginal distribution is supplied.
    if marginal is None:
        knots = augknt(np.linspace(0, length + 1, nr_knots), spline_order)
    else:
        knots = knots_from_marginal(marginal, nr_knots, spline_order)
    eval_points = np.arange(1, length + 1).astype(float)
    basis = spcol(eval_points, knots, spline_order)
    return basis, knots
|
Computes a 1D spline basis
Input:
length: int
length of each basis
nr_knots: int
Number of knots, i.e. number of basis functions.
spline_order: int
Order of the splines.
marginal: array, optional
Estimate of the marginal distribution of the input to be fitted.
If given, it is used to determine the positioning of knots, each
knot will cover the same amount of probability mass. If not given,
knots are equally spaced.
|
def add(self, url: str, anything: Any) -> None:
    """
    Register a URL pattern into the routes for later matching.

    It's possible to attach any kind of object to the pattern for later
    retrieving — a dict with methods and callbacks, for example.
    Anything really. Registration order does not matter: adding a URL
    first or last makes no difference.

    Segments beginning with ``:`` are variables; segments beginning with
    ``:*`` are match-anything variables.

    :param url: URL
    :param anything: Literally anything.
    """
    url = normalize_url(url)
    parts = url.split('/')
    curr_partial_routes = self._routes
    curr_key_parts = []  # names of the variable segments, in path order
    for part in parts:
        if part.startswith(':*'):
            # match-anything variable: record its name, descend through
            # the generic VAR_ANY trie node
            curr_key_parts.append(part[2:])
            part = self._VAR_ANY_NODE
            # NOTE(review): a :* route switches max depth to the custom
            # value, but it is overwritten by the max() update below —
            # presumably _max_depth_custom dominates; confirm intent.
            self._max_depth = self._max_depth_custom
        elif part.startswith(':'):
            # plain variable segment
            curr_key_parts.append(part[1:])
            part = self._VAR_NODE
        # descend one trie level per segment, creating nodes as needed
        curr_partial_routes = (curr_partial_routes
                               .setdefault(part, {}))
    # terminal marker holding the payload and the variable-segment names
    curr_partial_routes[self._ROUTE_NODE] = _Route(
        key_parts=curr_key_parts,
        anything=anything)
    self._max_depth = max(self._max_depth, depth_of(parts))
|
Register a URL pattern into\
the routes for later matching.
It's possible to attach any kind of\
object to the pattern for later\
retrieving. A dict with methods and callbacks,\
for example. Anything really.
Registration order does not matter.\
Adding a URL first or last makes no difference.
:param url: URL
:param anything: Literally anything.
|
def write_stream(stream, holders, defaultsection=None):
    """Very simple writing in ini format.

    The simple stringification of each value in each Holder is printed,
    and no escaping is performed. (This is most relevant for multiline
    values or ones containing pound signs.) `None` values are skipped.

    Arguments:
      stream
        A text stream to write to.
      holders
        An iterable of objects to write. Their fields will be
        written as sections.
      defaultsection=None
        Section name to use if a holder doesn't contain a
        `section` field.
    """
    first = True
    for holder in holders:
        if not first:
            # blank separator line between consecutive sections
            print('', file=stream)
        section = holder.get('section', defaultsection)
        if section is None:
            raise ValueError('cannot determine section name for item <%s>' % holder)
        print('[%s]' % section, file=stream)
        for key in sorted(k for k in holder.__dict__ if k != 'section'):
            value = holder.get(key)
            if value is not None:
                print('%s = %s' % (key, value), file=stream)
        first = False
|
Very simple writing in ini format. The simple stringification of each value
in each Holder is printed, and no escaping is performed. (This is most
relevant for multiline values or ones containing pound signs.) `None` values are
skipped.
Arguments:
stream
A text stream to write to.
holders
An iterable of objects to write. Their fields will be
written as sections.
defaultsection=None
Section name to use if a holder doesn't contain a
`section` field.
|
def precess_coordinates(ra, dec,
                        epoch_one, epoch_two,
                        jd=None,
                        mu_ra=0.0,
                        mu_dec=0.0,
                        outscalar=False):
    '''Precesses target coordinates `ra`, `dec` from `epoch_one` to `epoch_two`.

    This takes into account the jd of the observations, as well as the proper
    motion of the target mu_ra, mu_dec. Adapted from J. D. Hartman's
    VARTOOLS/converttime.c [coordprecess].

    Parameters
    ----------
    ra,dec : float
        The equatorial coordinates of the object at `epoch_one` to precess in
        decimal degrees.
    epoch_one : float
        Origin epoch to precess from to target epoch. This is a float, like:
        1985.0, 2000.0, etc.
    epoch_two : float
        Target epoch to precess from origin epoch. This is a float, like:
        2000.0, 2018.0, etc.
    jd : float
        The full Julian date to use along with the propermotions in `mu_ra`, and
        `mu_dec` to handle proper motion along with the coordinate frame
        precession. If one of `jd`, `mu_ra`, or `mu_dec` is missing, the proper
        motion will not be used to calculate the final precessed coordinates.
    mu_ra,mu_dec : float
        The proper motion in mas/yr in right ascension and declination. If these
        are provided along with `jd`, the total proper motion of the object will
        be taken into account to calculate the final precessed coordinates.
    outscalar : bool
        If True, converts the output coordinates from one-element np.arrays to
        scalars.

    Returns
    -------
    precessed_ra, precessed_dec : float
        A tuple of precessed equatorial coordinates in decimal degrees at
        `epoch_two` taking into account proper motion if `jd`, `mu_ra`, and
        `mu_dec` are provided.
    '''
    raproc, decproc = np.radians(ra), np.radians(dec)
    # Apply proper motion first, if all of jd, mu_ra, mu_dec are available.
    if ((mu_ra != 0.0) and (mu_dec != 0.0) and jd):
        jd_epoch_one = JD2000 + (epoch_one - epoch_two)*365.25
        raproc = (
            raproc +
            (jd - jd_epoch_one)*mu_ra*MAS_P_YR_TO_RAD_P_DAY/np.cos(decproc)
        )
        decproc = decproc + (jd - jd_epoch_one)*mu_dec*MAS_P_YR_TO_RAD_P_DAY
    ca = np.cos(raproc)
    cd = np.cos(decproc)
    sa = np.sin(raproc)
    sd = np.sin(decproc)
    if epoch_one != epoch_two:
        # IAU 1976 precession angles (zeta = a, z = b, theta = c) as
        # polynomials in t1 (centuries/10 between epochs) and t2 (from J2000).
        t1 = 1.0e-3 * (epoch_two - epoch_one)
        t2 = 1.0e-3 * (epoch_one - 2000.0)
        a = ( t1*ARCSEC_TO_RADIANS * (23062.181 + t2*(139.656 + 0.0139*t2) +
                                      t1*(30.188 - 0.344*t2+17.998*t1)) )
        b = t1*t1*ARCSEC_TO_RADIANS*(79.280 + 0.410*t2 + 0.205*t1) + a
        # BUG FIX: the cubic term of theta is -41.833*t1 (an overall t1^3
        # term), mirroring the t1^2/t1^3 structure of `a` above; it was
        # previously written as -41.833*t2.
        c = (
            ARCSEC_TO_RADIANS*t1*(20043.109 - t2*(85.33 + 0.217*t2) +
                                  t1*(-42.665 - 0.217*t2 - 41.833*t1))
        )
        sina, sinb, sinc = np.sin(a), np.sin(b), np.sin(c)
        cosa, cosb, cosc = np.cos(a), np.cos(b), np.cos(c)
        # Rotation matrix composed from the three precession angles.
        precmatrix = np.matrix([[cosa*cosb*cosc - sina*sinb,
                                 sina*cosb + cosa*sinb*cosc,
                                 cosa*sinc],
                                [-cosa*sinb - sina*cosb*cosc,
                                 cosa*cosb - sina*sinb*cosc,
                                 -sina*sinc],
                                [-cosb*sinc,
                                 -sinb*sinc,
                                 cosc]])
        precmatrix = precmatrix.transpose()
        # Rotate the unit vector of the (proper-motion-corrected) position.
        x = (np.matrix([cd*ca, cd*sa, sd])).transpose()
        x2 = precmatrix * x
        outra = np.arctan2(x2[1],x2[0])
        outdec = np.arcsin(x2[2])
        outradeg = np.rad2deg(outra)
        outdecdeg = np.rad2deg(outdec)
        # normalize RA into [0, 360)
        if outradeg < 0.0:
            outradeg = outradeg + 360.0
        if outscalar:
            return float(outradeg), float(outdecdeg)
        else:
            return outradeg, outdecdeg
    else:
        # if the epochs are the same and no proper motion, this will be the same
        # as the input values. if the epochs are the same, but there IS proper
        # motion (and a given JD), then these will be perturbed from the input
        # values of ra, dec by the appropriate amount of motion
        return np.degrees(raproc), np.degrees(decproc)
|
Precesses target coordinates `ra`, `dec` from `epoch_one` to `epoch_two`.
This takes into account the jd of the observations, as well as the proper
motion of the target mu_ra, mu_dec. Adapted from J. D. Hartman's
VARTOOLS/converttime.c [coordprecess].
Parameters
----------
ra,dec : float
The equatorial coordinates of the object at `epoch_one` to precess in
decimal degrees.
epoch_one : float
Origin epoch to precess from to target epoch. This is a float, like:
1985.0, 2000.0, etc.
epoch_two : float
Target epoch to precess from origin epoch. This is a float, like:
2000.0, 2018.0, etc.
jd : float
The full Julian date to use along with the propermotions in `mu_ra`, and
`mu_dec` to handle proper motion along with the coordinate frame
precession. If one of `jd`, `mu_ra`, or `mu_dec` is missing, the proper
motion will not be used to calculate the final precessed coordinates.
mu_ra,mu_dec : float
The proper motion in mas/yr in right ascension and declination. If these
are provided along with `jd`, the total proper motion of the object will
be taken into account to calculate the final precessed coordinates.
outscalar : bool
If True, converts the output coordinates from one-element np.arrays to
scalars.
Returns
-------
precessed_ra, precessed_dec : float
A tuple of precessed equatorial coordinates in decimal degrees at
`epoch_two` taking into account proper motion if `jd`, `mu_ra`, and
`mu_dec` are provided.
|
def rr_history(self, ips):
    """Get the domains related to input ips.

    Args:
        ips: an enumerable of strings as ips
    Returns:
        An enumerable of resource records and features
    """
    # Fan the lookups out through the shared batched-GET helper.
    return self._multi_get('opendns-rr_history', u'dnsdb/ip/a/{0}.json', ips)
|
Get the domains related to input ips.
Args:
ips: an enumerable of strings as ips
Returns:
An enumerable of resource records and features
|
def install(zone, nodataset=False, brand_opts=None):
    '''
    Install the specified zone from the system.

    zone : string
        name of the zone
    nodataset : boolean
        do not create a ZFS file system
    brand_opts : string
        brand specific options to pass

    CLI Example:

    .. code-block:: bash

        salt '*' zoneadm.install dolores
        salt '*' zoneadm.install teddy True
    '''
    # Assemble the optional flags for the zoneadm command line.
    flags = ''
    if nodataset:
        flags += ' -x nodataset'
    if brand_opts:
        flags += ' {0}'.format(brand_opts)
    res = __salt__['cmd.run_all']('zoneadm -z {zone} install{flags}'.format(
        zone=zone,
        flags=flags,
    ))
    ret = {'status': res['retcode'] == 0}
    # success -> stdout, failure -> stderr; strip the zoneadm prefix and
    # drop the message entirely when it is empty
    message = (res['stdout'] if ret['status'] else res['stderr']).replace('zoneadm: ', '')
    if message != '':
        ret['message'] = message
    return ret
|
Install the specified zone from the system.
zone : string
name of the zone
nodataset : boolean
do not create a ZFS file system
brand_opts : string
brand specific options to pass
CLI Example:
.. code-block:: bash
salt '*' zoneadm.install dolores
salt '*' zoneadm.install teddy True
|
def user_choice(prompt, choices=("yes", "no"), default=None):
    """
    Prompts the user for confirmation. The default value, if any, is capitalized.

    :param prompt: Information to display to the user.
    :param choices: an iterable of possible choices.
    :param default: default choice, picked when the user enters nothing.
    :return: the user's choice
    """
    assert default is None or default in choices
    # Show the default capitalized among the options.
    display = ', '.join(c.title() if c == default else c for c in choices)
    answer = None
    while answer not in choices:
        raw = input(prompt + ' [' + display + ']: ')
        answer = raw.lower() if raw else default
    return answer
|
Prompts the user for confirmation. The default value, if any, is capitalized.
:param prompt: Information to display to the user.
:param choices: an iterable of possible choices.
:param default: default choice
:return: the user's choice
|
def execute_migrations(self, show_traceback=True):
    """
    Executes all pending migrations across all capable
    databases.

    :param show_traceback: forwarded to each migration execution so
        failures print a traceback.
    """
    all_migrations = get_pending_migrations(self.path, self.databases)
    if not len(all_migrations):
        sys.stdout.write("There are no migrations to apply.\n")
    # NOTE: Python 2 style dict iteration (``iteritems``) — this module
    # predates Python 3.
    for db, migrations in all_migrations.iteritems():
        connection = connections[db]
        # init connection
        cursor = connection.cursor()
        cursor.close()
        for migration in migrations:
            migration_path = self._get_migration_path(db, migration)
            # each migration runs inside its own transaction
            with Transactional():
                sys.stdout.write(
                    "Executing migration %r on %r...." %
                    (migration, db)
                )
                created_models = self._execute_migration(
                    db,
                    migration_path,
                    show_traceback=show_traceback
                )
                # notify listeners (e.g. permission/content-type creation)
                # about any models the migration created
                emit_post_sync_signal(
                    created_models=created_models,
                    verbosity=self.verbosity,
                    interactive=self.interactive,
                    db=db,
                )
                if self.load_initial_data:
                    sys.stdout.write(
                        "Running loaddata for initial_data fixtures on %r.\n" % db
                    )
                    call_command(
                        "loaddata",
                        "initial_data",
                        verbosity=self.verbosity,
                        database=db,
                    )
|
Executes all pending migrations across all capable
databases
|
def import_module(path):
    """
    Import a module given a dotted *path* in the form of ``.name(.name)*``,
    and return the last module (unlike ``__import__`` which just returns
    the first module).

    :param path: The dotted path to the module.
    :raises ImportError: if any component of the path cannot be resolved.
    """
    # __import__ returns the top-level package; walk the remaining
    # components down to the requested leaf module.
    module = __import__(path, locals={}, globals={})
    for component in path.split('.')[1:]:
        try:
            module = getattr(module, component)
        except AttributeError:
            raise ImportError('No module named %s' % path)
    return module
|
Import a module given a dotted *path* in the
form of ``.name(.name)*``, and returns the
last module (unlike ``__import__`` which just
returns the first module).
:param path: The dotted path to the module.
|
def all_cities():
    """
    Get a list of all Backpage city names.

    Returns:
        list of city names as Strings, sorted alphabetically
    """
    cities = []
    fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
    # newline='' is the csv-module-recommended way to open files; the old
    # 'rU' universal-newlines mode was removed in Python 3.11.
    with open(fname, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            # Skip blank lines, which csv.reader yields as empty rows.
            if row:
                cities.append(row[0])
    cities.sort()
    return cities
|
Get a list of all Backpage city names.
Returns:
list of city names as Strings
|
def sector_shift(self):
    """
    Property with the current sector size shift.  The actual sector size
    is ``2 ** sector_shift``; mini streams use the header's mini sector
    shift, regular streams the ordinary one.
    """
    header = self.source.header
    if self._is_mini:
        return header.mini_sector_shift
    return header.sector_shift
|
Property with current sector size shift. Actually sector size is
2 ** sector shift
|
def _extend_str(class_node, rvalue):
    """Function to extend builtin str/unicode class.

    Builds stub methods (each returning *rvalue* or a suitable literal)
    from a template class and grafts them onto *class_node*.
    """
    code = dedent(
        """
    class whatever(object):
        def join(self, iterable):
            return {rvalue}
        def replace(self, old, new, count=None):
            return {rvalue}
        def format(self, *args, **kwargs):
            return {rvalue}
        def encode(self, encoding='ascii', errors=None):
            return ''
        def decode(self, encoding='ascii', errors=None):
            return u''
        def capitalize(self):
            return {rvalue}
        def title(self):
            return {rvalue}
        def lower(self):
            return {rvalue}
        def upper(self):
            return {rvalue}
        def swapcase(self):
            return {rvalue}
        def index(self, sub, start=None, end=None):
            return 0
        def find(self, sub, start=None, end=None):
            return 0
        def count(self, sub, start=None, end=None):
            return 0
        def strip(self, chars=None):
            return {rvalue}
        def lstrip(self, chars=None):
            return {rvalue}
        def rstrip(self, chars=None):
            return {rvalue}
        def rjust(self, width, fillchar=None):
            return {rvalue}
        def center(self, width, fillchar=None):
            return {rvalue}
        def ljust(self, width, fillchar=None):
            return {rvalue}
    """
    )
    code = code.format(rvalue=rvalue)
    fake = AstroidBuilder(MANAGER).string_build(code)["whatever"]
    for method in fake.mymethods():
        # Re-parent each stub onto the class being extended.  (The
        # original assigned method.parent twice per iteration; the
        # redundant second assignment has been removed.)
        method.parent = class_node
        method.lineno = None
        method.col_offset = None
        if "__class__" in method.locals:
            method.locals["__class__"] = [class_node]
        class_node.locals[method.name] = [method]
|
function to extend builtin str/unicode class
|
def addcomment(accountable, body):
    """
    Add a comment to the given issue key, using ``body`` as the comment's
    body text, and render the created comment's selected fields as a table.
    """
    comment = accountable.issue_add_comment(body)
    headers = sorted(['author_name', 'body', 'updated'])
    # Values come out in the same (sorted-key) order as the headers.
    row = [value for key, value in sorted(comment.items()) if key in headers]
    print_table(SingleTable([headers, row]))
|
Add a comment to the given issue key. Accepts a body argument to be used
as the comment's body.
|
def insert_child(self, child_pid, index=-1):
    """Insert a new child into a PID concept.

    Argument 'index' can take the following values:
        0,1,2,... - insert child PID at the specified position
        -1 - insert the child PID at the last position
        None - insert child without order (no re-ordering is done)

    NOTE: If 'index' is specified, all sibling relations should
    have PIDRelation.index information.

    NOTE(review): despite the statement above, ``index=None`` is coerced
    to ``-1`` below, so the child is appended and all siblings are
    re-indexed anyway -- confirm which behavior is intended.

    :raises PIDRelationConsistencyError: if the relation already exists.
    """
    self._check_child_limits(child_pid)
    # Coerce None to -1 (append); see the review note in the docstring.
    if index is None:
        index = -1
    try:
        with db.session.begin_nested():
            # Accept anything that resolve_pid() can turn into a
            # PersistentIdentifier.
            if not isinstance(child_pid, PersistentIdentifier):
                child_pid = resolve_pid(child_pid)
            child_relations = self._resolved_pid.child_relations.filter(
                PIDRelation.relation_type == self.relation_type.id
            ).order_by(PIDRelation.index).all()
            relation_obj = PIDRelation.create(
                self._resolved_pid, child_pid, self.relation_type.id, None)
            if index == -1:
                child_relations.append(relation_obj)
            else:
                child_relations.insert(index, relation_obj)
            # Re-number every sibling so indexes stay dense and ordered.
            for idx, c in enumerate(child_relations):
                c.index = idx
    except IntegrityError:
        raise PIDRelationConsistencyError("PID Relation already exists.")
|
Insert a new child into a PID concept.
Argument 'index' can take the following values:
0,1,2,... - insert child PID at the specified position
-1 - insert the child PID at the last position
None - insert child without order (no re-ordering is done)
NOTE: If 'index' is specified, all sibling relations should
have PIDRelation.index information.
|
def uri(self):
    """Connection string to pass to `~pymongo.mongo_client.MongoClient`."""
    # Unix-domain socket paths must be percent-encoded inside the URI.
    if self._uds_path:
        host = quote_plus(self._uds_path)
    else:
        host = format_addr(self._address)
    base = 'mongodb://%s' % (host,)
    if self._ssl:
        return base + '/?ssl=true'
    return base
|
Connection string to pass to `~pymongo.mongo_client.MongoClient`.
|
def get_authors(self, language):
    """ Return the list of this task's authors, localized via gettext;
    an empty string when no author is set. """
    if not self._author:
        return ""
    return self.gettext(language, self._author)
|
Return the list of this task's authors
|
def get_last_scene_id(self, refresh=False):
    """Get last scene id.

    Refresh data from Vera when ``refresh`` is True, otherwise use the
    local cache.  A refresh is only needed when subscriptions are not in
    use.
    """
    if refresh:
        for value_name in ('LastSceneID', 'sl_CentralScene'):
            self.refresh_complex_value(value_name)
    # Prefer LastSceneID; fall back to the central-scene value.
    return (self.get_complex_value('LastSceneID') or
            self.get_complex_value('sl_CentralScene'))
|
Get last scene id.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
|
def copyVarStatesFrom(self, particleState, varNames):
    """Copy specific variables from particleState into this particle.

    Parameters:
    --------------------------------------------------------------
    particleState: dict produced by a particle's getState() method
    varNames: which variables to copy
    """
    # Flip to False if the variable should stop moving once its state
    # has been copied over.
    allowedToMove = True

    for varName, srcState in particleState['varStates'].items():
        if varName not in varNames:
            continue
        # Skip fields this particle does not permute over.
        if varName not in self.permuteVars:
            continue
        permuteVar = self.permuteVars[varName]
        # Deep-copy so we never alias the source particle's state, and
        # treat the copied position as the new best position.
        state = copy.deepcopy(srcState)
        state['_position'] = state['position']
        state['bestPosition'] = state['position']
        if not allowedToMove:
            state['velocity'] = 0
        permuteVar.setState(state)
        if allowedToMove:
            # Let the particle move in both directions from the best
            # position it found previously, starting with a velocity
            # that is a known fraction of the total distance.
            permuteVar.resetVelocity(self._rng)
|
Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy
|
def byte_to_channels(self, byte):
    """
    Decode a bitmask byte into the list of set channel numbers.

    :return: list(int)
    """
    # pylint: disable-msg=R0201
    assert isinstance(byte, int)
    assert 0 <= byte < 256
    # Bit N (LSB first) corresponds to channel N+1.
    return [offset + 1 for offset in range(8) if byte & (1 << offset)]
|
:return: list(int)
|
def index(path=None):
    """On all other routes, just return an example `curl` command."""
    payload = {
        "username": "soandso",
        "message": "Hello bot",
        "vars": {
            "name": "Soandso",
        }
    }
    # Render the sample request with the payload JSON spliced in.
    usage = r"""Usage: curl -i \
    -H "Content-Type: application/json" \
    -X POST -d '{}' \
    http://localhost:5000/reply""".format(json.dumps(payload))
    return Response(usage, mimetype="text/plain")
|
On all other routes, just return an example `curl` command.
|
def result_to_dict(raw_result):
    """
    Parse raw result from fetcher into a readable dictionary.

    Args:
        raw_result (dict) - raw data from `fetcher`
    Returns:
        dict - readable dictionary, keyed by channel id
    """
    parsed = {}
    for channel in raw_result:
        channel_id, channel_name, movies = channel[0], channel[1], channel[2]
        movie_entries = []
        for movie in movies:
            start = movie[2]
            duration = movie[3]
            movie_entries.append({
                'title': movie[1],
                'start_time': datetime.fromtimestamp(start),
                'end_time': datetime.fromtimestamp(start + duration),
                # Flag is set when the movie has a (nonzero) duration.
                'inf': bool(duration),
            })
        parsed[channel_id] = {
            'id': channel_id,
            'name': channel_name,
            'movies': movie_entries,
        }
    return parsed
|
Parse raw result from fetcher into readable dictionary
Args:
raw_result (dict) - raw data from `fetcher`
Returns:
dict - readable dictionary
|
def run(self) -> None:
    """
    Run callback for the created sock.

    Starts the server coroutine on the instance's event loop, keeps the
    loop alive until the liveness check finishes, then shuts down async
    generators and closes the loop.  No-op when no loop is attached.
    """
    loop = self.loop
    if loop is None:
        return
    # NOTE(review): ensure_future's ``loop`` argument was removed in
    # Python 3.10 -- confirm the supported interpreter range.
    server_task = asyncio.ensure_future(self._run(), loop=loop)  # type: ignore
    try:
        loop.run_until_complete(server_task)
        loop.run_until_complete(self._check_alive())
    finally:
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()
|
创建了 sock 的运行回调
|
def sbar(Ss):
    """
    Calculate average s, sigma from a list of "s" vectors.

    Returns a tuple (nf, sigma, avs): the number of degrees of freedom,
    the standard deviation, and the per-component averages.
    """
    if type(Ss) == list:
        Ss = np.array(Ss)
    npts = Ss.shape[0]
    comps = Ss.transpose()
    # Components 3-5 are augmented by half the sum of two of the first
    # three components.
    D = np.array([comps[0], comps[1], comps[2],
                  comps[3] + 0.5 * (comps[0] + comps[1]),
                  comps[4] + 0.5 * (comps[1] + comps[2]),
                  comps[5] + 0.5 * (comps[0] + comps[2])])
    avd = [np.average(D[j]) for j in range(6)]
    avs = [np.average(comps[j]) for j in range(6)]
    D = D.transpose()
    nf = (npts - 1) * 6  # number of degrees of freedom
    # Sum of squared deviations from the component means.
    s0 = np.sum((D - avd) ** 2)
    sigma = np.sqrt(s0 / float(nf))
    return nf, sigma, avs
|
calculate average s,sigma from list of "s"s.
|
def error_asymptotes(pca, **kwargs):
    """
    Plots asymptotic error bounds for the hyperbola on a stereonet.
    """
    ax = kwargs.pop("ax", current_axes())
    # Draw the upper and lower error bounds as plain lines.
    for sheet in ('upper', 'lower'):
        lon, lat = pca.plane_errors(sheet, n=1000)
        ax.plot(lon, lat, '-')
    # Overlay the nominal plane itself.
    ax.plane(*pca.strike_dip())
|
Plots asymptotic error bounds for
hyperbola on a stereonet.
|
def __get_constants(self):
    """
    Gets the constants from the class that acts as a namespace for
    constants and registers each of them as a replace pair.
    """
    constant_class = ConstantClass(self._constants_class_name, self._io)
    constant_class.reload()
    constants = constant_class.constants()
    for constant_name, constant_value in constants.items():
        self._add_replace_pair(constant_name, constant_value, True)
    message = 'Read {0} constants for substitution from <fso>{1}</fso>'
    self._io.text(message.format(len(constants), constant_class.file_name()))
|
Gets the constants from the class that acts like a namespace for constants and adds them to the replace pairs.
|
def console_print(con: tcod.console.Console, x: int, y: int, fmt: str) -> None:
    """Print a color formatted string on a console.

    Args:
        con (Console): Any Console instance.
        x (int): Character x position from the left.
        y (int): Character y position from the top.
        fmt (AnyStr): A unicode or bytes string optionally using color codes.

    .. deprecated:: 8.5
        Use :any:`Console.print_` instead.
    """
    # Thin wrapper over the C API: convert the Console object and the
    # format string to their C representations and delegate.
    lib.TCOD_console_printf(_console(con), x, y, _fmt(fmt))
|
Print a color formatted string on a console.
Args:
con (Console): Any Console instance.
x (int): Character x position from the left.
y (int): Character y position from the top.
fmt (AnyStr): A unicode or bytes string optionally using color codes.
.. deprecated:: 8.5
Use :any:`Console.print_` instead.
|
def _show_or_dump(self, dump=False, indent=3, lvl="", label_lvl="", first_call=True):  # noqa: E501
    """
    Internal method that shows or dumps a hierarchical view of a packet.
    Called by show.

    :param dump: determine if it prints or returns the string value
    :param int indent: the size of indentation for each layer
    :param str lvl: additional information about the layer lvl
    :param str label_lvl: additional information about the layer fields
    :param first_call: determine if the current function is the first
    :return: return a hierarchical view if dump, else print it
    """
    if dump:
        from scapy.themes import AnsiColorTheme
        ct = AnsiColorTheme()  # No color for dump output
    else:
        ct = conf.color_theme
    # Layer banner, e.g. "###[ <layer name> ]###".
    s = "%s%s %s %s \n" % (label_lvl,
                           ct.punct("###["),
                           ct.layer_name(self.name),
                           ct.punct("]###"))
    for f in self.fields_desc:
        # Skip conditional fields whose condition does not hold.
        if isinstance(f, ConditionalField) and not f._evalcond(self):
            continue
        # Emphasized fields use a distinct color pair.
        if isinstance(f, Emph) or f in conf.emph:
            ncol = ct.emph_field_name
            vcol = ct.emph_field_value
        else:
            ncol = ct.field_name
            vcol = ct.field_value
        fvalue = self.getfieldval(f.name)
        if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and isinstance(fvalue, list)):  # noqa: E501
            # Nested packet(s): recurse with a deeper label prefix.
            s += "%s \\%-10s\\\n" % (label_lvl + lvl, ncol(f.name))
            fvalue_gen = SetGen(fvalue, _iterpacket=0)
            for fvalue in fvalue_gen:
                s += fvalue._show_or_dump(dump=dump, indent=indent, label_lvl=label_lvl + lvl + " |", first_call=False)  # noqa: E501
        else:
            begn = "%s %-10s%s " % (label_lvl + lvl,
                                    ncol(f.name),
                                    ct.punct("="),)
            reprval = f.i2repr(self, fvalue)
            if isinstance(reprval, str):
                # Indent continuation lines of multi-line values so they
                # line up under the first line of the value.
                reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) +  # noqa: E501
                                                              len(lvl) +
                                                              len(f.name) +
                                                              4))
            s += "%s%s\n" % (begn, vcol(reprval))
    if self.payload:
        # Recurse into the payload with increased indentation.
        s += self.payload._show_or_dump(dump=dump, indent=indent, lvl=lvl + (" " * indent * self.show_indent), label_lvl=label_lvl, first_call=False)  # noqa: E501
    if first_call and not dump:
        print(s)
    else:
        return s
|
Internal method that shows or dumps a hierarchical view of a packet.
Called by show.
:param dump: determine if it prints or returns the string value
:param int indent: the size of indentation for each layer
:param str lvl: additional information about the layer lvl
:param str label_lvl: additional information about the layer fields
:param first_call: determine if the current function is the first
:return: return a hierarchical view if dump, else print it
|
def view_include(view_module, namespace=None, app_name=None):
    """
    Includes view in the url, works similar to django include function.
    Auto imports all class based views that are subclass of ``URLView`` and
    all functional views that have been decorated with ``url_view``.

    :param view_module: object of the module or string with importable path
    :param namespace: name of the namespaces, it will be guessed otherwise
    :param app_name: application name
    :return: result of urls.include
    """
    # since Django 1.8 patterns() are deprecated, list should be used instead
    # {priority: [views, ]}
    view_dict = defaultdict(list)
    if isinstance(view_module, six.string_types):
        view_module = importlib.import_module(view_module)
    # pylint:disable=unused-variable
    for member_name, member in inspect.getmembers(view_module):
        is_class_view = inspect.isclass(member) and issubclass(member, URLView)
        is_func_view = (inspect.isfunction(member) and
                        hasattr(member, 'urljects_view') and
                        member.urljects_view)
        if (is_class_view and member is not URLView) or is_func_view:
            view_dict[member.url_priority].append(
                url(member.url, member, name=member.url_name))
    # BUG FIX: the original ``list(*[...])`` raised TypeError as soon as
    # more than one priority bucket existed (list() accepts at most one
    # argument).  Flatten the buckets in priority order instead.
    view_patterns = [pattern
                     for priority in sorted(view_dict)
                     for pattern in view_dict[priority]]
    return urls.include(
        arg=view_patterns,
        namespace=namespace,
        app_name=app_name)
|
Includes view in the url, works similar to django include function.
Auto imports all class based views that are subclass of ``URLView`` and
all functional views that have been decorated with ``url_view``.
:param view_module: object of the module or string with importable path
:param namespace: name of the namespaces, it will be guessed otherwise
:param app_name: application name
:return: result of urls.include
|
def ParseOptions(cls, options, config_object, category=None, names=None):
    """Parses and validates arguments using the appropriate helpers.

    Args:
        options (argparse.Namespace): parser options.
        config_object (object): object to be configured by an argument helper.
        category (Optional[str]): category of helpers to apply to
            the group, such as storage, output, where None will apply the
            arguments to all helpers. The category can be used to add
            arguments to a specific group of registered helpers.
        names (Optional[list[str]]): names of argument helpers to apply,
            where None will apply the arguments to all helpers.
    """
    for helper_name, helper_class in cls._helper_classes.items():
        # Honor the optional category and name filters.
        if category and helper_class.CATEGORY != category:
            continue
        if names and helper_name not in names:
            continue
        try:
            helper_class.ParseOptions(options, config_object)
        except errors.BadConfigObject:
            # Skip helpers that reject this configuration object.
            pass
|
Parses and validates arguments using the appropriate helpers.
Args:
options (argparse.Namespace): parser options.
config_object (object): object to be configured by an argument helper.
category (Optional[str]): category of helpers to apply to
the group, such as storage, output, where None will apply the
arguments to all helpers. The category can be used to add arguments
to a specific group of registered helpers.
names (Optional[list[str]]): names of argument helpers to apply,
where None will apply the arguments to all helpers.
|
def update_check(self, entity, check, label=None, name=None, disabled=None,
                 metadata=None, monitoring_zones_poll=None, timeout=None,
                 period=None, target_alias=None, target_hostname=None,
                 target_receiver=None):
    """
    Updates an existing check with any of the parameters, delegating the
    work to the owning entity.
    """
    # Forward every optional field untouched.
    entity.update_check(
        check,
        label=label,
        name=name,
        disabled=disabled,
        metadata=metadata,
        monitoring_zones_poll=monitoring_zones_poll,
        timeout=timeout,
        period=period,
        target_alias=target_alias,
        target_hostname=target_hostname,
        target_receiver=target_receiver,
    )
|
Updates an existing check with any of the parameters.
|
def ucas_download_playlist(url, output_dir='.', merge=False, info_only=False, **kwargs):
    '''course page'''
    html = get_content(url)
    # Each match is the query path of one part of the course.
    part_paths = re.findall(r'(getplaytitle.do\?.+)"', html)
    assert part_paths, 'No part found!'
    for part_path in part_paths:
        part_url = 'http://v.ucas.ac.cn/course/' + part_path
        ucas_download(part_url, output_dir=output_dir, merge=merge,
                      info_only=info_only)
|
course page
|
def memoized_method(method=None, cache_factory=None):
    """Memoize a class's method.

    Works like `memoized`, except that the cache container is produced by
    `cache_factory`: a zero-argument callable returning the container used
    for that instance (a plain dict by default).

    Unlike `memoized`, the result cache is stored on the instance itself
    (as ``_<method>_cache``), so cached results are deallocated together
    with the instance.

    May be used bare (``@memoized_method``) or with arguments
    (``@memoized_method(cache_factory=...)``).
    """
    if method is None:
        # Called with arguments: return the actual decorator.
        return lambda f: memoized_method(f, cache_factory=cache_factory)

    factory = cache_factory or dict

    @wraps(method)
    def memoized_method_property(self):
        # Evaluated once per instance by memoized_property: build the
        # cache, publish it as "_<name>_cache", and hand back a bound,
        # memoizing callable.
        cache = factory()
        setattr(self, "_%s_cache" % (method.__name__,), cache)
        bound = partial(_memoized_call, partial(method, self), cache)
        bound.memoize_cache = cache
        return bound

    return memoized_property(memoized_method_property)
|
Memoize a class's method.
Arguments are similar to `memoized`, except that the cache container is
specified with `cache_factory`: a function called with no arguments to
create the caching container for the instance.
Note that, unlike `memoized`, the result cache will be stored on the
instance, so cached results will be deallocated along with the instance.
Example::
>>> class Person(object):
... def __init__(self, name):
... self._name = name
... @memoized_method
... def get_name(self):
... print("Calling get_name on %r" %(self._name, ))
... return self._name
>>> shazow = Person("shazow")
>>> shazow.get_name()
Calling get_name on 'shazow'
'shazow'
>>> shazow.get_name()
'shazow'
>>> shazow._get_name_cache
{((), ()): 'shazow'}
Example with a specific cache container::
>>> from unstdlib.standard.collections_ import RecentlyUsedContainer
>>> class Foo(object):
... @memoized_method(cache_factory=lambda: RecentlyUsedContainer(maxsize=2))
... def add(self, a, b):
... print("Calling add with %r and %r" %(a, b))
... return a + b
>>> foo = Foo()
>>> foo.add(1, 1)
Calling add with 1 and 1
2
>>> foo.add(1, 1)
2
>>> foo.add(2, 2)
Calling add with 2 and 2
4
>>> foo.add(3, 3)
Calling add with 3 and 3
6
>>> foo.add(1, 1)
Calling add with 1 and 1
2
|
def _handle_log_rotations(self):
''' Rotate each handler's log file if necessary '''
for h in self.capture_handlers:
if self._should_rotate_log(h):
self._rotate_log(h)
|
Rotate each handler's log file if necessary
|
def get(session, api_key, **kwargs):
    """
    Perform a request against the weather API.

    session - the ``requests`` module or a session created from it
    api_key - the API access key string
    rate - tariff, either `informers` or `forecast`
    lat, lon - latitude and longitude

    ```
    import yandex_weather_api
    import requests as req
    yandex_weather_api.get(req, "REPLACE_ME_WITH_A_KEY", lat=55.10, lon=60.10)
    ```
    """
    request_args, request_kwargs = validate_args(api_key, **kwargs)
    response = session.get(*request_args, **request_kwargs)
    return WeatherAnswer.validate(response.json())
|
Выполняет доступ к API.
session - модуль requests или сессия из него
api_key - строка ключа доступа к API
rate - тариф, может быть `informers` или `forecast`
lat, lon - широта и долгота
```
import yandex_weather_api
import requests as req
yandex_weather_api.get(req, "ЗАМЕНИ_МЕНЯ_КЛЮЧОМ", lat=55.10, lon=60.10)
```
|
def _check_conflict(cls, dirPath, name):
"""
Check whether the module of the given name conflicts with another module on the sys.path.
:param dirPath: the directory from which the module was originally loaded
:param name: the mpdule name
"""
old_sys_path = sys.path
try:
sys.path = [d for d in old_sys_path if os.path.realpath(d) != os.path.realpath(dirPath)]
try:
colliding_module = importlib.import_module(name)
except ImportError:
pass
else:
raise ResourceException(
"The user module '%s' collides with module '%s from '%s'." % (
name, colliding_module.__name__, colliding_module.__file__))
finally:
sys.path = old_sys_path
|
Check whether the module of the given name conflicts with another module on the sys.path.
:param dirPath: the directory from which the module was originally loaded
:param name: the module name
|
def get_netloc_and_auth(self, netloc, scheme):
    """
    This override allows the auth information to be passed to svn via the
    --username and --password options instead of via the URL.

    :param netloc: network location, possibly containing embedded
        credentials.
    :param scheme: URL scheme (e.g. 'ssh', 'http').
    :return: a (netloc, auth) pair, either from the base class or from
        split_auth_from_netloc.
    """
    if scheme == 'ssh':
        # The --username and --password options can't be used for
        # svn+ssh URLs, so keep the auth information in the URL.
        return super(Subversion, self).get_netloc_and_auth(
            netloc, scheme)
    # For other schemes, split the credentials out of the netloc so they
    # can be supplied as command-line options.
    return split_auth_from_netloc(netloc)
|
This override allows the auth information to be passed to svn via the
--username and --password options instead of via the URL.
|
def encode(i, *, width=-1):
    """Encodes a nonnegative integer into syncsafe format.

    When width > 0, then len(result) == width.
    When width < 0, then len(result) >= abs(width).
    """
    if i < 0:
        raise ValueError("value is negative")
    assert width != 0
    # Emit 7 bits per byte, least-significant group first.
    out = bytearray()
    while i:
        out.append(i & 127)
        i >>= 7
    if 0 < width < len(out):
        raise ValueError("Integer too large")
    # Pad with zero bytes up to the minimum width, then put the bytes
    # into big-endian order.
    minimum = abs(width)
    while len(out) < minimum:
        out.append(0)
    out.reverse()
    return out
|
Encodes a nonnegative integer into syncsafe format
When width > 0, then len(result) == width
When width < 0, then len(result) >= abs(width)
|
def readline(self, size=-1):
    '''Read and return one entire line.

    The newline at the end of the line is returned as part of the
    string, unless the file ends without a newline.  An empty string is
    returned if EOF is encountered immediately.  Newlines are delivered
    as a CR/LF pair (``\\r\\n``) because that is what the pseudotty
    device produces.

    If the ``size`` argument is 0 an empty string is returned.  Any
    other ``size`` value is ignored, which is not standard behavior for
    a file-like object. '''
    if size == 0:
        return self.string_type()
    # expect() consumes up to either a CR/LF pair or the delimiter
    # (EOF by default); the index tells us which one matched.
    matched = self.expect([self.crlf, self.delimiter])
    if matched == 0:
        # Keep the newline as part of the returned line.
        return self.before + self.crlf
    return self.before
|
This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object.
|
def verify_chunks(self, chunks):
    '''
    Verify the chunks in a list of low data structures, collecting every
    problem reported for each chunk.
    '''
    return [problem
            for chunk in chunks
            for problem in self.verify_data(chunk)]
|
Verify the chunks in a list of low data structures
|
def load_config(self, argv=None, aliases=None, flags=None):
    """Parse command line arguments and return as a Config object.

    Parameters
    ----------
    argv : optional, list
        If given, a list with the structure of sys.argv[1:] to parse
        arguments from. If not given, the instance's self.argv attribute
        (given at construction time) is used."""
    self.clear()
    # Fall back to the values captured at construction time.
    argv = self.argv if argv is None else argv
    aliases = self.aliases if aliases is None else aliases
    flags = self.flags if flags is None else flags
    self._create_parser(aliases, flags)
    self._parse_args(argv)
    self._convert_to_config()
    return self.config
|
Parse command line arguments and return as a Config object.
Parameters
----------
args : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used.
|
def p_rst(p):
    """ asm : RST expr
    """
    # NOTE: the docstring above is the PLY grammar rule for this
    # reduction and must not be altered.
    val = p[2].eval()
    # RST only accepts the eight fixed restart vector addresses.
    if val not in (0, 8, 16, 24, 32, 40, 48, 56):
        error(p.lineno(1), 'Invalid RST number %i' % val)
        p[0] = None
        return
    p[0] = Asm(p.lineno(1), 'RST %XH' % val)
|
asm : RST expr
|
def del_calc(db, job_id, user):
    """
    Delete a calculation and all associated outputs, if possible.

    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param job_id: job ID, can be an integer or a string
    :param user: username
    :returns: a dict with either a "success" or an "error" key
    """
    job_id = int(job_id)

    # Refuse to delete a calculation that other jobs depend on.
    dependent = db(
        'SELECT id FROM job WHERE hazard_calculation_id=?x', job_id)
    if dependent:
        return {"error": 'Cannot delete calculation %d: there '
                'are calculations '
                'dependent from it: %s' % (job_id, [j.id for j in dependent])}

    try:
        owner, path = db('SELECT user_name, ds_calc_dir FROM job WHERE id=?x',
                         job_id, one=True)
    except NotFound:
        return {"error": 'Cannot delete calculation %d:'
                ' ID does not exist' % job_id}

    # The DELETE is restricted to the requesting user; a zero rowcount
    # means the job belongs to somebody else.
    deleted = db('DELETE FROM job WHERE id=?x AND user_name=?x',
                 job_id, user).rowcount
    if not deleted:
        return {"error": 'Cannot delete calculation %d: it belongs to '
                '%s and you are %s' % (job_id, owner, user)}

    # try to delete datastore and associated file
    # path has typically the form /home/user/oqdata/calc_XXX
    fname = path + ".hdf5"
    try:
        os.remove(fname)
    except OSError as exc:  # permission error
        return {"error": 'Could not remove %s: %s' % (fname, exc)}
    return {"success": fname}
|
Delete a calculation and all associated outputs, if possible.
:param db: a :class:`openquake.server.dbapi.Db` instance
:param job_id: job ID, can be an integer or a string
:param user: username
:returns: None if everything went fine or an error message
|
def _build(self, inputs):
    """Connects the `TileByDim` module into the graph.

    Args:
        inputs: `Tensor` to tile.

    Returns:
        The tiled tensor.
    """
    rank = len(inputs.get_shape().as_list())
    # Start from a no-op multiple for every dimension, then overwrite
    # only the dimensions the user asked to tile.
    multiples = [1] * rank
    for dim, multiple in zip(self._dims, self._multiples):
        multiples[dim] = multiple
    return tf.tile(inputs, multiples=multiples)
|
Connects the `TileByDim` module into the graph.
Args:
inputs: `Tensor` to tile.
Returns:
The tiled tensor.
|
def combine_action_handlers(*handlers):
    """
    Combine the given action handlers into a single coroutine function
    that awaits each of them in order with the same arguments.
    """
    # Validate up front so misuse fails at composition time, not call time.
    for handler in handlers:
        if not (iscoroutinefunction(handler) or iscoroutine(handler)):
            raise ValueError("Provided handler is not a coroutine: %s" % handler)

    async def combined_handler(*args, **kwds):
        # Await each handler sequentially with the identical arguments.
        for handler in handlers:
            await handler(*args, **kwds)

    return combined_handler
|
This function combines the given action handlers into a single function
which will call all of them.
|
def unpack_binary(self, offset, length=False):
    """
    Returns raw binary data from the relative offset with the given length.

    Arguments:
    - `offset`: The relative offset from the start of the block.
    - `length`: The length of the binary blob. If zero/falsy, an empty
      byte string is returned.
    Throws:
    - `OverrunBufferException`
    """
    if not length:
        return bytes("".encode("ascii"))
    start = self._offset + offset
    try:
        (blob,) = struct.unpack_from("<{}s".format(length), self._buf, start)
        return bytes(blob)
    except struct.error:
        # Reading past the end of the backing buffer.
        raise OverrunBufferException(start, len(self._buf))
|
Returns raw binary data from the relative offset with the given length.
Arguments:
- `offset`: The relative offset from the start of the block.
- `length`: The length of the binary blob. If zero, the empty string
zero length is returned.
Throws:
- `OverrunBufferException`
|
def get_deffacts(self):
    """Return the existing deffacts sorted by the internal order"""
    deffacts = self._get_by_type(DefFacts)
    return sorted(deffacts, key=lambda deffact: deffact.order)
|
Return the existing deffacts sorted by the internal order
|
def finish(self):
    """
    Finishes the load job. Called automatically when the connection closes.

    :return: The exit code returned when applying rows to the table
    :raises TeradataPTError: if the job ends with a nonzero return code
    """
    # Idempotent: a second call just reports the recorded exit code.
    if self.finished:
        return self.exit_code
    # NOTE(review): checkpoint_status is never read -- checkpoint() seems
    # to be called only for its side effect; confirm.
    checkpoint_status = self.checkpoint()
    self.exit_code = self._exit_code()
    if self.exit_code != 0:
        raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
    # TODO(chris): should this happen every time?
    if self.applied_count > 0:
        # Close the acquisition phase and apply the loaded rows, then
        # re-check the exit code since application can fail too.
        self._end_acquisition()
        self._apply_rows()
        self.exit_code = self._exit_code()
        if self.exit_code != 0:
            raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
    self.finished = True
    return self.exit_code
|
Finishes the load job. Called automatically when the connection closes.
:return: The exit code returned when applying rows to the table
|
def defer_sync(self, func):
    """
    Arrange for `func()` to execute on :class:`Broker` thread, blocking the
    current thread until a result or exception is available.

    :returns:
        Return value of `func()`.
    """
    latch = Latch()

    def wrapper():
        # Deliver either the result or the exception instance through
        # the latch; the caller re-raises exceptions on its own thread.
        try:
            latch.put(func())
        except Exception:
            latch.put(sys.exc_info()[1])

    self.defer(wrapper)
    outcome = latch.get()
    if isinstance(outcome, Exception):
        raise outcome
    return outcome
|
Arrange for `func()` to execute on :class:`Broker` thread, blocking the
current thread until a result or exception is available.
:returns:
Return value of `func()`.
|
def master_callback(self, m, master):
    '''process mavlink message m on master, sending any messages to recipients

    Order matters here: specialised per-sysid forwarding short-circuits
    everything else; then logging, per-type bookkeeping, link health
    tracking, message handling, and finally forwarding to outputs and
    loaded modules.
    '''
    # see if it is handled by a specialised sysid connection
    sysid = m.get_srcSystem()
    mtype = m.get_type()
    if sysid in self.mpstate.sysid_outputs:
        self.mpstate.sysid_outputs[sysid].write(m.get_msgbuf())
        if mtype == "GLOBAL_POSITION_INT":
            for modname in 'map', 'asterix', 'NMEA', 'NMEA2':
                mod = self.module(modname)
                if mod is not None:
                    mod.set_secondary_vehicle_position(m)
        # handled by the sysid-specific connection; do not process further
        return
    if getattr(m, '_timestamp', None) is None:
        master.post_message(m)
    self.status.counters['MasterIn'][master.linknum] += 1
    if mtype == 'GLOBAL_POSITION_INT':
        # send GLOBAL_POSITION_INT to 2nd GCS for 2nd vehicle display
        for sysid in self.mpstate.sysid_outputs:
            self.mpstate.sysid_outputs[sysid].write(m.get_msgbuf())
        if self.mpstate.settings.fwdpos:
            for link in self.mpstate.mav_master:
                if link != master:
                    link.write(m.get_msgbuf())
    # and log them
    if mtype not in dataPackets and self.mpstate.logqueue:
        # put link number in bottom 2 bits, so we can analyse packet
        # delay in saved logs
        usec = self.get_usec()
        usec = (usec & ~3) | master.linknum
        self.mpstate.logqueue.put(bytearray(struct.pack('>Q', usec) + m.get_msgbuf()))
    # keep the last message of each type around
    self.status.msgs[mtype] = m
    if mtype not in self.status.msg_count:
        self.status.msg_count[mtype] = 0
    self.status.msg_count[mtype] += 1
    if m.get_srcComponent() == mavutil.mavlink.MAV_COMP_ID_GIMBAL and mtype == 'HEARTBEAT':
        # silence gimbal heartbeat packets for now
        return
    if getattr(m, 'time_boot_ms', None) is not None and self.settings.target_system == m.get_srcSystem():
        # update link_delayed attribute
        self.handle_msec_timestamp(m, master)
    if mtype in activityPackets:
        # any activity packet proves the link is alive again
        if master.linkerror:
            master.linkerror = False
            self.say("link %s OK" % (self.link_label(master)))
        self.status.last_message = time.time()
        master.last_message = self.status.last_message
    if master.link_delayed and self.mpstate.settings.checkdelay:
        # don't process delayed packets that cause double reporting
        if mtype in delayedPackets:
            return
    self.master_msg_handling(m, master)
    # don't pass along bad data
    if mtype != 'BAD_DATA':
        # pass messages along to listeners, except for REQUEST_DATA_STREAM, which
        # would lead a conflict in stream rate setting between mavproxy and the other
        # GCS
        if self.mpstate.settings.mavfwd_rate or mtype != 'REQUEST_DATA_STREAM':
            if mtype not in self.no_fwd_types:
                for r in self.mpstate.mav_outputs:
                    r.write(m.get_msgbuf())
        sysid = m.get_srcSystem()
        target_sysid = self.target_system
        # pass to modules
        for (mod,pm) in self.mpstate.modules:
            if not hasattr(mod, 'mavlink_packet'):
                continue
            if not mod.multi_vehicle and sysid != target_sysid:
                # only pass packets not from our target to modules that
                # have marked themselves as being multi-vehicle capable
                continue
            try:
                mod.mavlink_packet(m)
            except Exception as msg:
                # module failures must not kill the main loop; verbosity
                # of the report is controlled by the moddebug setting
                if self.mpstate.settings.moddebug == 1:
                    print(msg)
                elif self.mpstate.settings.moddebug > 1:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_exception(exc_type, exc_value, exc_traceback,
                                              limit=2, file=sys.stdout)
|
process mavlink message m on master, sending any messages to recipients
|
def _templated(fn):
"""
Return a function which applies ``str.format(**ctl)`` to all results of
``fn(ctl)``.
"""
@functools.wraps(fn)
def inner(ctl):
return [i.format(**ctl) for i in fn(ctl)]
return inner
|
Return a function which applies ``str.format(**ctl)`` to all results of
``fn(ctl)``.
|
def get_snippet(self, snippet_id, timeout=None):
    """Fetch a single Snippet from the API by its identifier."""
    endpoint = self.SNIPPET_ENDPOINT % (snippet_id)
    return self._api_request(endpoint, self.HTTP_GET, timeout=timeout)
|
API call to get a specific Snippet
|
def getlist(self, name: str, default: Any = None) -> List[Any]:
    """Fetch the value stored under *name* as the whole list, without any
    single-value unwrapping."""
    stored = super().get(name, default)
    return stored
|
Return the entire list
|
def accuracy(conf_matrix):
    """
    Compute classification accuracy from a nested confusion-matrix dict
    mapping true responses to {guess: count} dictionaries.
    Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
    """
    total = 0.0
    correct = 0.0
    for truth, guesses in conf_matrix.items():
        total += sum(guesses.values())
        # only the diagonal entry (guess == truth) counts as correct
        correct += guesses.get(truth, 0)
    return correct / total
|
Given a confusion matrix, returns the accuracy.
Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
|
def writeln(self, string='', *args, **kwargs):
    """Emit *string* followed by a newline, maintaining indentation state.

    When the line opens a block (ends with ``:``) the indent level for
    subsequent lines is incremented.
    """
    self.write(string + '\n', *args, **kwargs)
    self.on_new_line = True
    # Entering a new block: deepen indentation for the following lines
    if string.endswith(':'):
        self.current_indent += 1
    # A user-supplied newline resets the auto-added-line flag
    self.auto_added_line = False
|
Writes a string into the source code _and_ appends a new line,
applying indentation if required
|
def get_rms(self):
    """Gets the root mean square of the score.
    If this system is based on grades, the RMS of the output score
    is returned.
    return: (decimal) - the RMS of the entry scores
    *compliance: mandatory -- This method must be implemented.*
    """
    scores = np.asarray(self._entry_scores)
    return np.sqrt((scores ** 2).mean())
|
Gets the root mean square of the score.
If this system is based on grades, the RMS of the output score
is returned.
return: (decimal) - the median score
*compliance: mandatory -- This method must be implemented.*
|
def result_pretty(self, number_of_runs=0, time_str=None,
                  fbestever=None):
    """pretty print result.

    :param number_of_runs: number of restarts to mention in the report.
    :param time_str: optional elapsed-time string appended to each line.
    :param fbestever: best-ever f-value; defaults to ``self.best.f``.
    Returns ``self.result()``
    """
    if fbestever is None:
        fbestever = self.best.f
    # e.g. " after 3 restarts"; empty when number_of_runs == 0
    s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \
        % number_of_runs if number_of_runs else ''
    for k, v in self.stop().items():
        print('termination on %s=%s%s' % (k, str(v), s +
              (' (%s)' % time_str if time_str else '')))
    print('final/bestever f-value = %e %e' % (self.best.last.f,
                                              fbestever))
    # For small dimension print everything, otherwise only the first 8
    # coordinates followed by an ellipsis.
    if self.N < 9:
        print('incumbent solution: ' + str(list(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair))))
        print('std deviation: ' + str(list(self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales)))
    else:
        print('incumbent solution: %s ...]' % (str(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair)[:8])[:-1]))
        print('std deviations: %s ...]' % (str((self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales)[:8])[:-1]))
    return self.result()
|
pretty print result.
Returns ``self.result()``
|
def save(self, *args, **kwargs):
    """
    Create or update the current resource.

    Runs the pre-save hook, performs the save itself, then pipes the raw
    response through the post-save hook.
    :return: the processed save response (the new resource)
    """
    self._pre_save(*args, **kwargs)
    raw_response = self._save(*args, **kwargs)
    return self._post_save(raw_response, *args, **kwargs)
|
saves creates or updates current resource
returns new resource
|
def String(self, str):
    """Get an interned string from the reader, allows for example
    to speedup string name comparisons """
    # NOTE(review): the parameter shadows the builtin ``str``; kept as-is
    # because renaming would change the public keyword-argument name.
    ret = libxml2mod.xmlTextReaderConstString(self._o, str)
    return ret
|
Get an interned string from the reader, allows for example
to speedup string name comparisons
|
def delete_instance(self, instance_id, project_id=None):
    """
    Deletes an existing Cloud Spanner instance.
    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param project_id: Optional ID of the GCP project that owns the
        instance. When None or missing, the default project_id from the
        GCP connection is used.
    :type project_id: str
    :return: None
    """
    client = self._get_client(project_id=project_id)
    instance = client.instance(instance_id)
    try:
        instance.delete()
    except GoogleAPICallError as e:
        # Log before propagating so the failure is visible in task logs.
        self.log.error('An error occurred: %s. Exiting.', e.message)
        raise e
    return
|
Deletes an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
|
def unpack_shards(shards, stream_arn, session):
    """List[Dict] -> Dict[shard_id, Shard].
    Each Shards' parent/children are hooked up with the other Shards in the list.
    """
    if not shards:
        return {}
    # Tokens use the key "shard_id"; DescribeStream responses use "ShardId".
    if "ShardId" in shards[0]:
        shards = _translate_shards(shards)
    by_id = {}
    for token in shards:
        shard = Shard(
            stream_arn=stream_arn,
            shard_id=token["shard_id"],
            iterator_type=token.get("iterator_type"),
            sequence_number=token.get("sequence_number"),
            parent=token.get("parent"),
            session=session,
        )
        by_id[token["shard_id"]] = shard
    # Second pass: replace parent ids with Shard objects and link children.
    for shard in by_id.values():
        if shard.parent:
            parent = by_id[shard.parent]
            shard.parent = parent
            parent.children.append(shard)
    return by_id
|
List[Dict] -> Dict[shard_id, Shard].
Each Shards' parent/children are hooked up with the other Shards in the list.
|
def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):
    """Exports the table to GCS, blocking until the job completes.
    Args:
        destination: the destination URI(s). Can be a single URI or a list.
        format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
            (default 'csv').
        csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
        csv_header: for CSV exports, whether to include an initial header line. Default true.
        compress: whether to compress the data on export. Compression is not supported for
            AVRO format. Defaults to False.
    Returns:
        A Job object for the completed export Job if it was started successfully; else None.
    """
    job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
                             csv_header=csv_header, compress=compress)
    if job is None:
        return None
    job.wait()
    return job
|
Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None.
|
def delete(event, saltenv='base', test=None):
    '''
    Delete a reactor
    CLI Example:
    .. code-block:: bash
        salt-run reactor.delete 'salt/cloud/*/destroyed'
    '''
    # NOTE(review): saltenv and test are unused here — presumably kept for
    # runner-interface compatibility; confirm.
    # Listen on the master event bus so we can wait for the confirmation
    # event the reactor system fires once the delete completes.
    sevent = salt.utils.event.get_event(
        'master',
        __opts__['sock_dir'],
        __opts__['transport'],
        opts=__opts__,
        listen=True)
    master_key = salt.utils.master.get_master_key('root', __opts__)
    # Fire the delete request; the master key authenticates the runner.
    __jid_event__.fire_event({'event': event, 'key': master_key}, 'salt/reactors/manage/delete')
    # Block for up to 30 seconds waiting for completion.
    res = sevent.get_event(wait=30, tag='salt/reactors/manage/delete-complete')
    # NOTE(review): on timeout res is None and this raises TypeError —
    # confirm whether a timeout should be handled explicitly.
    return res['result']
|
Delete a reactor
CLI Example:
.. code-block:: bash
salt-run reactor.delete 'salt/cloud/*/destroyed'
|
def auth_recv(self):
    """
    Handle peer's IKE_AUTH response.

    Scans the payloads of the last received packet for the responder
    identity (IDr), the AUTH data and the ESP SA proposal, authenticates
    the peer against the second message of the exchange, derives ESP key
    material and installs the IPsec SAs.

    :raises IkeError: if the response lacks an IDr or AUTH payload.
    """
    id_r = auth_data = None
    for p in self.packets[-1].payloads:
        if p._type == payloads.Type.IDr:
            id_r = p
            logger.debug('Got responder ID: {}'.format(dump(bytes(p))))
        if p._type == payloads.Type.AUTH:
            auth_data = p._data
        if p._type == payloads.Type.SA:
            # Record the inbound ESP SPI chosen by the responder.
            logger.debug('ESP_SPIin: {}'.format(p.spi))
            self.esp_SPIin = p.spi
            for proposal in p.proposals:
                logger.debug("Proposal: {}".format(proposal.__dict__))
                logger.debug(proposal.spi)
    if id_r is None or auth_data is None:
        raise IkeError('IDr missing from IKE_AUTH response')
    message2 = bytes(self.packets[1])
    authenticated = self.authenticate_peer(auth_data, id_r, message2)
    # NOTE(review): assert is stripped under ``python -O`` — confirm an
    # explicit exception should be raised on authentication failure.
    assert authenticated
    # Derive four 32-byte ESP keys from SK_d and the exchange nonces.
    keymat = prfplus(self.SK_d, self.Ni + self.Nr, 4 * 32)
    (self.esp_ei,
     self.esp_ai,
     self.esp_er,
     self.esp_ar,
     ) = unpack("32s" * 4, keymat)
    # TODO: Figure out the names for the params, they _ARE_ in correct places, just the names migth mismatch.
    self.install_ipsec_sas()
|
Handle peer's IKE_AUTH response.
|
def connect_array(self, address, connection_key, connection_type, **kwargs):
    """Connect this array with another one.

    :param address: IP address or DNS name of the other array.
    :type address: str
    :param connection_key: Connection key of the other array.
    :type connection_key: str
    :param connection_type: Type(s) of connection desired.
    :type connection_type: list
    :param kwargs: Extra request parameters; see the REST API Guide on
        your array for the documentation of **POST array/connection**.
    :returns: A dictionary describing the connection to the other array.
    :rtype: ResponseDict

    .. note::
        Currently, the only type of connection is "replication".
    .. note::
        Requires use of REST API 1.2 or later.
    """
    payload = {
        "management_address": address,
        "connection_key": connection_key,
        "type": connection_type,
    }
    # Caller-supplied kwargs may override the defaults above.
    payload.update(kwargs)
    return self._request("POST", "array/connection", payload)
|
Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later.
|
def write_sources_list(url, codename, filename='ceph.list', mode=0o644):
    """Write a deb repository entry under /etc/apt/sources.list.d/."""
    entry = 'deb {url} {codename} main\n'.format(
        url=url,
        codename=codename,
    )
    destination = os.path.join('/etc/apt/sources.list.d', filename)
    write_file(destination, entry.encode('utf-8'), mode)
|
add deb repo to /etc/apt/sources.list.d/
|
def update_context(self,
                   context,
                   update_mask=None,
                   retry=google.api_core.gapic_v1.method.DEFAULT,
                   timeout=google.api_core.gapic_v1.method.DEFAULT,
                   metadata=None):
    """
    Updates the specified context.

    Example:
        >>> import dialogflow_v2
        >>> client = dialogflow_v2.ContextsClient()
        >>> response = client.update_context({})

    Args:
        context (Union[dict, ~google.cloud.dialogflow_v2.types.Context]):
            Required. The context to update. If a dict is provided, it must
            be of the same form as the ``Context`` protobuf message.
        update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]):
            Optional. The mask to control which fields get updated. If a
            dict is provided, it must be of the same form as the
            ``FieldMask`` protobuf message.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.dialogflow_v2.types.Context` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout handling the
    # first time this RPC is invoked.
    if 'update_context' not in self._inner_api_calls:
        method_config = self._method_configs['UpdateContext']
        self._inner_api_calls['update_context'] = \
            google.api_core.gapic_v1.method.wrap_method(
                self.transport.update_context,
                default_retry=method_config.retry,
                default_timeout=method_config.timeout,
                client_info=self._client_info,
            )
    request = context_pb2.UpdateContextRequest(
        context=context,
        update_mask=update_mask,
    )
    return self._inner_api_calls['update_context'](
        request, retry=retry, timeout=timeout, metadata=metadata)
|
Updates the specified context.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.ContextsClient()
>>>
>>> # TODO: Initialize ``context``:
>>> context = {}
>>>
>>> response = client.update_context(context)
Args:
context (Union[dict, ~google.cloud.dialogflow_v2.types.Context]): Required. The context to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Context`
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Context` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
def comunicar_certificado_icpbrasil(self, certificado):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.comunicar_certificado_icpbrasil`,
    delegating the call to the remote SAT service over HTTP.

    :return: A standard SAT response.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    resp = self._http_post('comunicarcertificadoicpbrasil',
            certificado=certificado)
    payload = resp.json()
    return RespostaSAT.comunicar_certificado_icpbrasil(payload.get('retorno'))
|
Sobrepõe :meth:`~satcfe.base.FuncoesSAT.comunicar_certificado_icpbrasil`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
|
def get_driver(self):
    '''
    Return the already-running Webdriver instance for the current channel,
    creating a new one if none exists yet.
    Returns:
        Webdriver - Selenium Webdriver instance.
    Usage::
        driver = WTF_WEBDRIVER_MANAGER.new_driver()
        driver.get("http://the-internet.herokuapp.com")
        same_driver = WTF_WEBDRIVER_MANAGER.get_driver()
        print(driver is same_driver) # True
    '''
    channel = self.__get_channel()
    existing = self.__get_driver_for_channel(channel)
    if existing is not None:
        return existing
    return self.new_driver()
|
Get an already running instance of Webdriver. If there is none, it will create one.
Returns:
Webdriver - Selenium Webdriver instance.
Usage::
driver = WTF_WEBDRIVER_MANAGER.new_driver()
driver.get("http://the-internet.herokuapp.com")
same_driver = WTF_WEBDRIVER_MANAGER.get_driver()
print(driver is same_driver) # True
|
def _child(details):
    """Child

    A private function to figure out which node type a data-point
    description maps to.

    Arguments:
        details {dict} -- A dictionary describing a data point
    Returns:
        _NodeInterface
    Raises:
        TypeError -- when *details* is not a list, dict, or string
    """
    # A list means the key has a fixed set of options
    if isinstance(details, list):
        return OptionsNode(details)
    if isinstance(details, dict):
        # Special markers decide the node flavour
        if '__array__' in details:
            return ArrayNode(details)
        if '__hash__' in details:
            return HashNode(details)
        if '__type__' in details:
            inner_type = details['__type__']
            # A dict/list type is a complex type: recurse into it
            if isinstance(inner_type, (dict, list)):
                return _child(inner_type)
            return Node(details)
        # No markers present: treat it as a parent of further nodes
        return Parent(details)
    # A bare string is used directly as the node type
    if isinstance(details, basestring):
        return Node(details)
    raise TypeError('details')
|
Child
A private function to figure out the child node type
Arguments:
details {dict} -- A dictionary describing a data point
Returns:
_NodeInterface
|
def _get_type(self, policy):
    """
    Returns the type of the given policy
    :param string or dict policy: Policy data
    :return PolicyTypes: Type of the given policy. None, if type could not be inferred
    """
    # Must handle intrinsic functions. Policy could be a primitive type or an intrinsic function
    # Managed policies are either string or an intrinsic function that resolves to a string
    if isinstance(policy, string_types) or is_instrinsic(policy):
        return PolicyTypes.MANAGED_POLICY
    # Policy statement is a dictionary with the key "Statement" in it
    if isinstance(policy, dict) and "Statement" in policy:
        return PolicyTypes.POLICY_STATEMENT
    # This could be a policy template then.
    # NOTE: the order of these checks matters — a dict with "Statement"
    # must never be classified as a template.
    if self._is_policy_template(policy):
        return PolicyTypes.POLICY_TEMPLATE
    # Nothing matches. Don't take opinions on how to handle it. Instead just set the appropriate type.
    return PolicyTypes.UNKNOWN
|
Returns the type of the given policy
:param string or dict policy: Policy data
:return PolicyTypes: Type of the given policy. None, if type could not be inferred
|
def _do_read_config(self, config_file, pommanipext):
    """Read configuration for a single job defined by section.

    Parses ``config_file`` (optionally merging an included config and a
    POM-manipulator extension section), validates the mandatory
    ``[common]`` section and collects the per-package sections.

    :param config_file: path of the configuration file to read.
    :param pommanipext: name of a POM manipulator extension section, or
        ''/'None' to skip it.
    :return: tuple ``(common_section, package_configs, pom_manipulator_config)``
    :raises IOError: if the config file cannot be read.
    :raises DuplicateSectionError: if an included file redefines a section.
    :raises NoSectionError: if the [common] section is missing.
    """
    parser = InterpolationConfigParser()
    dataset = parser.read(config_file)
    if config_file not in dataset:
        raise IOError("Config file %s not found." % config_file)
    if parser.has_option('common', 'include'):
        include = parser.get('common', 'include')
        # BUG FIX: the original compared with `include is not ""` — an
        # identity check against a string literal, which is not a reliable
        # emptiness test (and a SyntaxWarning on Python 3.8+).
        if include != "":
            sections_ = self.read_and_load(include)
            for section_ in sections_:
                if parser.has_section(section_):
                    raise DuplicateSectionError("The config section [%s] is existed in %s and include %s cfg file" % (section_, config_file, re.split("\\s+", include.strip())[1]))
            parser._sections.update(sections_)
    pom_manipulator_config = {}
    common_section = {}
    package_configs = {}
    if pommanipext and pommanipext != '' and pommanipext != 'None':  # TODO ref: remove none check, it is passed over cmd line in jenkins build
        parse_pom_manipulator_ext(pom_manipulator_config, parser, pommanipext)
    if not parser.has_section('common'):
        logging.error('Mandatory common section missing from configuration file.')
        raise NoSectionError('Mandatory common section missing from configuration file.')
    # Mandatory keys: a missing one raises from the parser.
    common_section['tag'] = parser.get('common', 'tag')
    common_section['target'] = parser.get('common', 'target')
    common_section['jobprefix'] = parser.get('common', 'jobprefix')
    common_section['jobciprefix'] = parser.get('common', 'jobciprefix')
    common_section['jobjdk'] = parser.get('common', 'jobjdk')
    # Optional keys: copied only when present.
    for optkey in ('mvnver', 'skiptests', 'base', 'citemplate',
                   'jenkinstemplate', 'product_name', 'include'):
        if parser.has_option('common', optkey):
            common_section[optkey] = parser.get('common', optkey)
    common_section['jobfailureemail'] = parser.get('common', 'jobfailureemail')
    config_dir = utils.get_dir(config_file)
    # Jira / shared configuration (same identity-comparison fix as above)
    if parser.has_option('common', 'shared_config') and parser.get('common', 'shared_config') != "":
        parse_shared_config(common_section, config_dir, parser)
    common_section['jobtimeout'] = parser.getint('common', 'jobtimeout')
    common_section['options'] = {}
    # If the configuration file has global properties insert these into the common properties map.
    # These may be overridden later by particular properties.
    if parser.has_option('common', 'globalproperties'):
        common_section['options']['properties'] = dict(x.strip().split('=') for x in parser.get('common', 'globalproperties').replace(",\n", ",").split(','))
    else:
        # Always ensure properties has a valid dictionary so code below doesn't need multiple checks.
        common_section['options']['properties'] = {}
    # The same for global profiles
    if parser.has_option('common', 'globalprofiles'):
        common_section['options']['profiles'] = [x.strip() for x in parser.get('common', 'globalprofiles').split(',')]
    else:
        # Always ensure profiles has a valid list so code below doesn't need multiple checks.
        common_section['options']['profiles'] = []
    config_path = os.path.dirname(config_file) or os.getcwd()
    logging.info("Configuration file is %s and path %s", os.path.basename(config_file), config_path)
    for section in parser.sections():
        config_type = self.read_config_type(parser, section)
        if section == 'common' or config_type == "bom-builder-meta":
            logging.debug('Skipping section due to meta-type %s', section)
            continue
        self._do_read_section(config_path, os.path.basename(config_file), package_configs, parser, section)
    return (common_section, package_configs, pom_manipulator_config)
|
Reads config for a single job defined by section.
|
def list_(bank):
    '''
    Lists entries stored in the specified bank.
    '''
    redis_server = _get_redis_server()
    bank_redis_key = _get_bank_redis_key(bank)
    try:
        members = redis_server.smembers(bank_redis_key)
    except (RedisConnectionError, RedisResponseError) as rerr:
        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
                                                                       rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
    # An empty / missing set is reported as an empty list.
    return list(members) if members else []
|
Lists entries stored in the specified bank.
|
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times
    Args:
        variables: The input variables for the creation of the range
        parent: The variable for which the values are being generated.
    Returns: A list of dictionaries mapping the parent to each value.
    """
    if not isinstance(variables, dict):
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
    if not variables.get("times"):
        raise ValueError(f"times is a required keyword for the repeat iterator.")
    repeat_count = int(variables["times"])
    # Remove the control keyword before expanding the remaining variables.
    del variables["times"]
    yield list(variable_matrix(variables, parent, "product")) * repeat_count
|
Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
|
async def debug_create_unit(self, unit_spawn_commands: List[List[Union[UnitTypeId, int, Point2, Point3]]]):
    """ Spawn units through the SC2 debug interface.

    Each command is a list ``[unit_type, amount, position, owner_id]``.

    Usage example (will spawn 1 marine in the center of the map for player ID 1):
    await self._client.debug_create_unit([[UnitTypeId.MARINE, 1, self._game_info.map_center, 1]]) """
    # Only the first command is validated; the remaining entries are
    # assumed to have the same shape.
    assert isinstance(unit_spawn_commands, list)
    assert unit_spawn_commands
    assert isinstance(unit_spawn_commands[0], list)
    assert len(unit_spawn_commands[0]) == 4
    assert isinstance(unit_spawn_commands[0][0], UnitTypeId)
    assert unit_spawn_commands[0][1] > 0  # careful, in realtime=True this function may create more units
    assert isinstance(unit_spawn_commands[0][2], (Point2, Point3))
    assert 1 <= unit_spawn_commands[0][3] <= 2
    # Build one DebugCreateUnit command per entry and send them in a
    # single RequestDebug call.
    await self._execute(
        debug=sc_pb.RequestDebug(
            debug=[
                debug_pb.DebugCommand(
                    create_unit=debug_pb.DebugCreateUnit(
                        unit_type=unit_type.value,
                        owner=owner_id,
                        pos=common_pb.Point2D(x=position.x, y=position.y),
                        quantity=amount_of_units,
                    )
                )
                for unit_type, amount_of_units, position, owner_id in unit_spawn_commands
            ]
        )
    )
|
Usage example (will spawn 1 marine in the center of the map for player ID 1):
await self._client.debug_create_unit([[UnitTypeId.MARINE, 1, self._game_info.map_center, 1]])
|
def get_xml_type(val):
    """Return the data-type name used for the XML ``type`` attribute.

    Checks go from most specific to least: concrete primitive type names
    first, then abstract categories (numbers, mappings, iterables), finally
    falling back to the value's own type name.
    """
    type_name = type(val).__name__
    if type_name in ('str', 'unicode'):
        return 'str'
    if type_name in ('int', 'long'):
        return 'int'
    if type_name == 'float':
        return 'float'
    if type_name == 'bool':
        return 'bool'
    if isinstance(val, numbers.Number):
        return 'number'
    if type_name == 'NoneType':
        return 'null'
    if isinstance(val, dict):
        return 'dict'
    # BUG FIX: collections.Iterable was removed in Python 3.10; use
    # collections.abc when available while keeping Python 2 compatibility.
    abc_module = getattr(collections, 'abc', collections)
    if isinstance(val, abc_module.Iterable):
        return 'list'
    return type_name
|
Returns the data type for the xml type attribute
|
def partition(pred, iterable):
    """Partition an iterable.
    Arguments
    ---------
    pred : function
        A function that takes an element of the iterable and returns
        a boolean indicating to which partition it belongs
    iterable : iterable
    Returns
    -------
    A two-tuple of lists: first the elements for which the predicate was
    False, second those for which it was True.
    Note that, unlike the recipe which returns generators, this version
    returns lists.
    """
    truthy = []
    falsy = []
    for item in iterable:
        target = truthy if pred(item) else falsy
        target.append(item)
    return falsy, truthy
|
Partition an iterable.
Arguments
---------
pred : function
A function that takes an element of the iterable and returns
a boolen indicating to which partition it belongs
iterable : iterable
Returns
-------
A two-tuple of lists with the first list containing the elements on which
the predicate indicated False and the second list containing the elements
on which the predicate indicated True.
Note that, unlike the recipe which returns generators, this version
returns lists.
|
def _push_tag_buffer(self, data):
    """Write a pending tag attribute from *data* to the stack."""
    if data.context & data.CX_QUOTED:
        # Quoted values record the quote character ahead of the value tokens.
        self._emit_first(tokens.TagAttrQuote(char=data.quoter))
        self._emit_all(self._pop())
    padding = data.padding_buffer
    self._emit_first(tokens.TagAttrStart(
        pad_first=padding["first"],
        pad_before_eq=padding["before_eq"],
        pad_after_eq=padding["after_eq"]))
    self._emit_all(self._pop())
    # Reset the padding buffer for the next attribute.
    for key in padding:
        padding[key] = ""
|
Write a pending tag attribute from *data* to the stack.
|
def calcinds(data, threshold, ignoret=None):
    """ Find indexes for data above (or below) given threshold.

    Args:
        data: dict with parallel 'time' and 'snrs' sequences.
        threshold: SNR cut. A non-negative threshold keeps snr > threshold;
            a negative threshold keeps snr < threshold.
        ignoret: optional list of (t0, t1) integer time windows to skip.

    Returns:
        List of indexes passing the threshold and outside all ignore windows.
    """
    inds = []
    for i in range(len(data['time'])):
        snr = data['snrs'][i]
        time = data['time'][i]
        # Positive thresholds select bright events, negative select dips;
        # the two original duplicated branches are merged here.
        if (threshold >= 0 and snr > threshold) or (threshold < 0 and snr < threshold):
            if ignoret:
                t_int = np.round(time).astype(int)
                # Windows that contain this (rounded) time sample.
                incl = [t0 for (t0, t1) in ignoret if t_int in range(t0, t1)]
                # BUG FIX: the original logged the comprehension variables
                # t0/t1 after the comprehension, which raises NameError in
                # Python 3 (comprehension variables do not leak).
                logger.debug('{} {}'.format(t_int, incl))
                if not incl:
                    inds.append(i)
            else:
                inds.append(i)
    return inds
|
Find indexes for data above (or below) given threshold.
|
def activities(self, name=None, pk=None, scope=None, **kwargs):
    # type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Activity]
    """Search for activities with optional name, pk and scope filter.
    If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
    refer to the documentation of the KE-chain API for additional query parameters.
    :param pk: id (primary key) of the activity to retrieve
    :type pk: basestring or None
    :param name: filter the activities by name
    :type name: basestring or None
    :param scope: filter by scope id
    :type scope: basestring or None
    :param kwargs: additional query parameters forwarded to the API request
    :return: list of :class:`models.Activity`
    :raises NotFoundError: If no `Activities` are found
    """
    request_params = {
        'id': pk,
        'name': name,
        'scope': scope
    }
    # update the fields query params
    # for 'kechain.core.wim >= 2.0.0' add additional API params
    if self.match_app_version(label='wim', version='>=2.0.0', default=False):
        request_params.update(API_EXTRA_PARAMS['activity'])
    if kwargs:
        request_params.update(**kwargs)
    response = self._request('GET', self._build_url('activities'), params=request_params)
    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise NotFoundError("Could not retrieve activities. Server responded with {}".format(str(response)))
    data = response.json()
    # for 'kechain.core.wim >= 2.0.0' we return Activity2, otherwise Activity1
    if self.match_app_version(label='wim', version='<2.0.0', default=True):
        # WIM1
        return [Activity(a, client=self) for a in data['results']]
    else:
        # WIM2
        return [Activity2(a, client=self) for a in data['results']]
|
Search for activities with optional name, pk and scope filter.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param pk: id (primary key) of the activity to retrieve
:type pk: basestring or None
:param name: filter the activities by name
:type name: basestring or None
:param scope: filter by scope id
:type scope: basestring or None
:return: list of :class:`models.Activity`
:raises NotFoundError: If no `Activities` are found
|
def drop(self, index=None, columns=None):
    """Remove row data for target index and columns.

    Args:
        index: Row labels to drop, or None to keep all rows.
        columns: Column labels to drop, or None to keep all columns.

    Returns:
        A new QueryCompiler without the dropped rows/columns.
    """
    if self._is_transposed:
        # In the transposed representation rows and columns swap roles,
        # so delegate to the un-transposed frame with swapped arguments.
        return self.transpose().drop(index=columns, columns=index).transpose()
    if index is None:
        new_data = self.data
        new_index = self.index
    else:
        # NOTE: default is None (not a mutable []) to avoid the shared
        # mutable-default-argument pitfall.
        def delitem(df, internal_indices=None):
            # internal_indices are positional indices local to a partition.
            if internal_indices is None:
                internal_indices = []
            return df.drop(index=df.index[internal_indices])

        numeric_indices = list(self.index.get_indexer_for(index))
        new_data = self.data.apply_func_to_select_indices(
            1, delitem, numeric_indices, keep_remaining=True
        )
        # We can't use self.index.drop with duplicate keys because in Pandas
        # it throws an error.
        new_index = self.index[~self.index.isin(index)]
    if columns is None:
        new_columns = self.columns
        new_dtypes = self.dtypes
    else:
        def delitem(df, internal_indices=None):
            if internal_indices is None:
                internal_indices = []
            return df.drop(columns=df.columns[internal_indices])

        numeric_indices = list(self.columns.get_indexer_for(columns))
        # Chain off new_data so a combined index+columns drop applies both.
        new_data = new_data.apply_func_to_select_indices(
            0, delitem, numeric_indices, keep_remaining=True
        )
        new_columns = self.columns[~self.columns.isin(columns)]
        new_dtypes = self.dtypes.drop(columns)
    return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
Remove row data for target index and columns.
Args:
index: Target index to drop.
columns: Target columns to drop.
Returns:
A new QueryCompiler.
|
def decrypt(self, ciphertext, encoder=encoding.RawEncoder):
    """
    Open a sealed box: recover the original plaintext from *ciphertext*,
    using the ephemeral public key enclosed in the ciphertext together
    with this SealedBox's private key.

    :param ciphertext: [:class:`bytes`] The encrypted message to decrypt
    :param encoder: The encoder used to decode the ciphertext.
    :return bytes: The original plaintext
    """
    decoded = encoder.decode(ciphertext)
    return nacl.bindings.crypto_box_seal_open(
        decoded,
        self._public_key,
        self._private_key,
    )
|
Decrypts the ciphertext using the ephemeral public key enclosed
in the ciphertext and the SealedBox private key, returning
the plaintext message.
:param ciphertext: [:class:`bytes`] The encrypted message to decrypt
:param encoder: The encoder used to decode the ciphertext.
:return bytes: The original plaintext
|
def _save(self):
    """Persist the current state of this AssessmentSection to the database.

    Should be called every time the question map changes.
    """
    collection = JSONClientValidated('assessment',
                                    collection='AssessmentSection',
                                    runtime=self._runtime)
    # NOTE(review): the original comment said "This is the first time", but
    # having an '_id' means the document was already persisted — this branch
    # updates the existing document in place.
    if '_id' in self._my_map:
        collection.save(self._my_map)
    else:
        # First save: insert, then re-read so _my_map carries the
        # server-generated '_id' for subsequent updates.
        insert_result = collection.insert_one(self._my_map)
        self._my_map = collection.find_one({'_id': insert_result.inserted_id})
|
Saves the current state of this AssessmentSection to database.
Should be called every time the question map changes.
|
def init_instance(self, key):
    """
    Ensure an (empty) instance entry exists for *key*.

    No-op when the key is already present.
    """
    with self._mor_lock:
        self._mor.setdefault(key, {})
|
Create an empty instance if it doesn't exist.
If the instance already exists, this is a noop.
|
def _find_longest_parent_path(path_set, path):
"""Finds the longest "parent-path" of 'path' in 'path_set'.
This function takes and returns "path-like" strings which are strings
made of strings separated by os.sep. No file access is performed here, so
these strings need not correspond to actual files in some file-system..
This function returns the longest ancestor path
For example, for path_set=["/foo/bar", "/foo", "/bar/foo"] and
path="/foo/bar/sub_dir", returns "/foo/bar".
Args:
path_set: set of path-like strings -- e.g. a list of strings separated by
os.sep. No actual disk-access is performed here, so these need not
correspond to actual files.
path: a path-like string.
Returns:
The element in path_set which is the longest parent directory of 'path'.
"""
# This could likely be more efficiently implemented with a trie
# data-structure, but we don't want to add an extra dependency for that.
while path not in path_set:
if not path:
return None
path = os.path.dirname(path)
return path
|
Finds the longest "parent-path" of 'path' in 'path_set'.
This function takes and returns "path-like" strings which are strings
made of strings separated by os.sep. No file access is performed here, so
these strings need not correspond to actual files in some file-system.
This function returns the longest ancestor path
For example, for path_set=["/foo/bar", "/foo", "/bar/foo"] and
path="/foo/bar/sub_dir", returns "/foo/bar".
Args:
path_set: set of path-like strings -- e.g. a list of strings separated by
os.sep. No actual disk-access is performed here, so these need not
correspond to actual files.
path: a path-like string.
Returns:
The element in path_set which is the longest parent directory of 'path'.
|
def load(self, value):
    """Reset this setting from a file-sourced value, keeping env > value precedence."""
    state = self.__dict__
    self.reset(
        value,
        validator=state.get('validator'),
        env=state.get('env'),
    )
|
enforce env > value when loading from file
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.