| code (string, 75–104k chars) | docstring (string, 1–46.9k chars) |
|---|---|
def set_meta_refresh_enabled(self, enabled):
    """
    *Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` and
    :attr:`~.Cluster.token_metadata_enabled` instead.
    Toggle all metadata refresh queries (both schema and node topology)
    at once. Disabling is useful to minimize refreshes during multiple
    changes; refresh must be enabled for the driver to become aware of
    any cluster topology changes or schema updates.
    """
    warn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0. Set "
         "Cluster.schema_metadata_enabled and Cluster.token_metadata_enabled instead.",
         DeprecationWarning)
    # one flag drives both kinds of metadata refresh
    self.schema_metadata_enabled = self.token_metadata_enabled = enabled
|
*Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` and :attr:`~.Cluster.token_metadata_enabled` instead
Sets a flag to enable (True) or disable (False) all metadata refresh queries.
This applies to both schema and node topology.
Disabling this is useful to minimize refreshes during multiple changes.
Meta refresh must be enabled for the driver to become aware of any cluster
topology changes or schema updates.
|
def pivot_wavelength_ee(bpass):
    """Compute pivot wavelength assuming equal-energy convention.
    `bpass` should have two properties, `resp` and `wlen`. The units of `wlen`
    can be anything, and `resp` need not be normalized in any particular way.
    Returns:
        The pivot wavelength, sqrt(int(resp) / int(resp / wlen**2)),
        in the same units as `wlen`.
    """
    # scipy.integrate.simps was deprecated and removed in modern SciPy;
    # fall back to it only when simpson is unavailable (old SciPy).
    try:
        from scipy.integrate import simpson
    except ImportError:
        from scipy.integrate import simps as simpson
    return np.sqrt(simpson(bpass.resp, x=bpass.wlen) /
                   simpson(bpass.resp / bpass.wlen**2, x=bpass.wlen))
|
Compute pivot wavelength assuming equal-energy convention.
`bpass` should have two properties, `resp` and `wlen`. The units of `wlen`
can be anything, and `resp` need not be normalized in any particular way.
|
def subdivide(self):
    r"""Split the surface into four sub-surfaces.
    The unit triangle (the domain of the surface) is split into four
    sub-triangles
    .. image:: ../../images/surface_subdivide1.png
       :align: center
    and the surface is then re-parameterized via the map to / from each
    sub-triangle and the unit triangle.
    For example, when a degree two surface is subdivided:
    .. image:: ../../images/surface_subdivide2.png
       :align: center
    .. doctest:: surface-subdivide
       :options: +NORMALIZE_WHITESPACE
       >>> nodes = np.asfortranarray([
       ...     [-1.0, 0.5, 2.0, 0.25, 2.0, 0.0],
       ...     [ 0.0, 0.5, 0.0, 1.75, 3.0, 4.0],
       ... ])
       >>> surface = bezier.Surface(nodes, degree=2)
       >>> _, sub_surface_b, _, _ = surface.subdivide()
       >>> sub_surface_b
       <Surface (degree=2, dimension=2)>
       >>> sub_surface_b.nodes
       array([[ 1.5 , 0.6875, -0.125 , 1.1875, 0.4375, 0.5 ],
              [ 2.5 , 2.3125, 1.875 , 1.3125, 1.3125, 0.25 ]])
    .. testcleanup:: surface-subdivide
       import make_images
       make_images.surface_subdivide1()
       make_images.surface_subdivide2(surface, sub_surface_b)
    Returns:
        Tuple[Surface, Surface, Surface, Surface]: The lower left, central,
        lower right and upper left sub-surfaces (in that order).
    """
    # the helper returns the four node arrays in lower-left, central,
    # lower-right, upper-left order; wrap each in a Surface without copying
    sub_nodes = _surface_helpers.subdivide_nodes(self._nodes, self._degree)
    return tuple(
        Surface(nodes, self._degree, _copy=False) for nodes in sub_nodes
    )
|
r"""Split the surface into four sub-surfaces.
Does so by taking the unit triangle (i.e. the domain
of the surface) and splitting it into four sub-triangles
.. image:: ../../images/surface_subdivide1.png
:align: center
Then the surface is re-parameterized via the map to / from the
given sub-triangles and the unit triangle.
For example, when a degree two surface is subdivided:
.. image:: ../../images/surface_subdivide2.png
:align: center
.. doctest:: surface-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-1.0, 0.5, 2.0, 0.25, 2.0, 0.0],
... [ 0.0, 0.5, 0.0, 1.75, 3.0, 4.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> _, sub_surface_b, _, _ = surface.subdivide()
>>> sub_surface_b
<Surface (degree=2, dimension=2)>
>>> sub_surface_b.nodes
array([[ 1.5 , 0.6875, -0.125 , 1.1875, 0.4375, 0.5 ],
[ 2.5 , 2.3125, 1.875 , 1.3125, 1.3125, 0.25 ]])
.. testcleanup:: surface-subdivide
import make_images
make_images.surface_subdivide1()
make_images.surface_subdivide2(surface, sub_surface_b)
Returns:
Tuple[Surface, Surface, Surface, Surface]: The lower left, central,
lower right and upper left sub-surfaces (in that order).
|
def uv_to_color(uv, image):
    """
    Look up colors on a texture image at the given UV coordinates.
    Parameters
    -------------
    uv : (n, 2) float
      UV coordinates on texture image
    image : PIL.Image
      Texture image
    Returns
    ----------
    colors : (n, 4) float
      RGBA color at each of the UV coordinates
    """
    if uv is None or image is None:
        return None
    coords = np.asanyarray(uv, dtype=np.float64)
    # map U to pixel columns and flipped V to pixel rows
    col = coords[:, 0] * (image.width - 1)
    row = (1.0 - coords[:, 1]) * (image.height - 1)
    # round to the nearest pixel and wrap out-of-range indices
    # in the manner of GL_REPEAT
    col = col.round().astype(np.int64) % image.width
    row = row.round().astype(np.int64) % image.height
    # convert to RGBA first so the channel count is fixed,
    # then gather the colors at the pixel locations
    rgba = np.asanyarray(image.convert('RGBA'))[row, col]
    # conversion to RGBA should have corrected shape
    assert rgba.ndim == 2 and rgba.shape[1] == 4
    return rgba
|
Get the color in a texture image.
Parameters
-------------
uv : (n, 2) float
UV coordinates on texture image
image : PIL.Image
Texture image
Returns
----------
colors : (n, 4) float
RGBA color at each of the UV coordinates
|
def coords_to_vec(lon, lat):
    """Convert longitude and latitude coordinates (degrees) to unit 3-vectors.
    Returns an array of shape (n, 3) whose rows are the directional
    cosines (v_x[i], v_y[i], v_z[i]) of each coordinate pair.
    """
    azimuth = np.radians(lon)
    # colatitude: angle measured from the +z pole
    polar = np.pi / 2 - np.radians(lat)
    sin_p = np.sin(polar)
    # stack the three component arrays as columns -> (n, 3)
    return np.vstack((
        sin_p * np.cos(azimuth),
        sin_p * np.sin(azimuth),
        np.cos(polar),
    )).T
|
Converts longitude and latitude coordinates to a unit 3-vector.
Returns array (n, 3) with v_x[i], v_y[i], v_z[i] = directional cosines.
|
def urls_old(self, protocol=Resource.Protocol.http):
    '''
    Build a list endpoint and a detail endpoint for every resource
    registered with this router.
    The url pattern is assembled from the router name (prefix) and the
    endpoint name the resource was registered under. Urls are generated
    through the constructor-passed url method or class.
    '''
    patterns = []
    for endpoint, resource_class in self._registry.items():
        resource_class.api_name = self.name
        resource_class.resource_name = endpoint
        base_path = '/%s/%s/' % (self.name, endpoint)
        base_name = '{}_{}'.format(self.name, endpoint).replace('/', '_')
        # append any nested resources the resource may have
        for route in resource_class.nested_routes(base_path):
            patterns.append(route._replace(
                handler=resource_class.wrap_handler(route.handler, protocol)))
        # append resource as list
        patterns.append(Route(
            path=base_path,
            handler=resource_class.as_list(protocol),
            methods=resource_class.route_methods(),
            name=base_name + '_list'
        ))
        # append resource as detail
        patterns.append(Route(
            path=base_path + '%s/' % resource_class.route_param('pk'),
            handler=resource_class.as_detail(protocol),
            methods=resource_class.route_methods(),
            name=base_name + '_detail'
        ))
    return patterns
|
Iterate through all resources registered with this router
and create a list endpoint and a detail endpoint for each one.
Uses the router name as prefix and endpoint name of the resource when registered, to assemble the url pattern.
Uses the constructor-passed url method or class for generating urls
|
def write_to_buffer(self, buf):
    """Serialize the context and write it to *buf*.
    The format is YAML when ``config.rxt_as_yaml`` is set, otherwise
    pretty-printed JSON.
    """
    doc = self.to_dict()
    payload = (dump_yaml(doc) if config.rxt_as_yaml
               else json.dumps(doc, indent=4, separators=(",", ": ")))
    buf.write(payload)
|
Save the context to a buffer.
|
def filter_entities(self, model, context=None):
    """
    Filter entities
    Runs filters on entity properties, changing them in place.
    :param model: object or dict
    :param context: object, dict or None
    :return: None
    """
    if model is None:
        return
    for name, prop in self.entities.items():
        current = self.get(model, name)
        result = prop.filter(
            value=current,
            model=model,
            context=context
        )
        # only write back when the filter actually changed the value
        if current != result:
            self.set(model, name, result)
        # nested schema filtering runs on the pre-filter value
        prop.filter_with_schema(
            model=current,
            context=context
        )
|
Filter entities
Runs filters on entity properties changing them in place.
:param model: object or dict
:param context: object, dict or None
:return: None
|
def get_common_properties(root):
    """Read common properties from root of ReSpecTh XML file.
    Args:
        root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
    Returns:
        properties (`dict`): Dictionary with common properties
    Raises:
        KeywordError: If composition units are unsupported or inconsistent,
            a property's units are dimensionally incompatible, or a property
            is not a supported common property.
    """
    properties = {}
    for elem in root.iterfind('commonProperties/property'):
        name = elem.attrib['name']
        if name == 'initial composition':
            # composition is a list of species entries plus a single 'kind'
            # (the amount units), which must be consistent across species
            properties['composition'] = {'species': [], 'kind': None}
            for child in elem.iter('component'):
                spec = {}
                spec['species-name'] = child.find('speciesLink').attrib['preferredKey']
                units = child.find('amount').attrib['units']
                # use InChI for unique species identifier (if present)
                try:
                    spec['InChI'] = child.find('speciesLink').attrib['InChI']
                except KeyError:
                    # TODO: add InChI validator/search
                    warn('Missing InChI for species ' + spec['species-name'])
                    pass
                # If mole or mass fraction, just set value
                if units in ['mole fraction', 'mass fraction', 'mole percent']:
                    spec['amount'] = [float(child.find('amount').text)]
                elif units == 'percent':
                    # assume this means mole percent
                    warn('Assuming percent in composition means mole percent')
                    spec['amount'] = [float(child.find('amount').text)]
                    units = 'mole percent'
                elif units == 'ppm':
                    # assume molar ppm, convert to mole fraction
                    warn('Assuming molar ppm in composition and converting to mole fraction')
                    spec['amount'] = [float(child.find('amount').text) * 1.e-6]
                    units = 'mole fraction'
                elif units == 'ppb':
                    # assume molar ppb, convert to mole fraction
                    warn('Assuming molar ppb in composition and converting to mole fraction')
                    spec['amount'] = [float(child.find('amount').text) * 1.e-9]
                    units = 'mole fraction'
                else:
                    raise KeywordError('Composition units need to be one of: mole fraction, '
                                       'mass fraction, mole percent, percent, ppm, or ppb.'
                                       )
                properties['composition']['species'].append(spec)
                # check consistency of composition type
                # (note: ppm/ppb were normalized to 'mole fraction' above)
                if properties['composition']['kind'] is None:
                    properties['composition']['kind'] = units
                elif properties['composition']['kind'] != units:
                    raise KeywordError('composition units ' + units +
                                       ' not consistent with ' +
                                       properties['composition']['kind']
                                       )
        elif name in datagroup_properties:
            # scalar property (e.g. temperature/pressure): validate its units
            # against the expected dimensionality via pint before storing
            field = name.replace(' ', '-')
            units = elem.attrib['units']
            if units == 'Torr':
                # pint spells this unit in lowercase
                units = 'torr'
            quantity = 1.0 * unit_registry(units)
            try:
                quantity.to(property_units[field])
            except pint.DimensionalityError:
                raise KeywordError('units incompatible for property ' + name)
            # store as a single "value units" string in a list
            properties[field] = [' '.join([elem.find('value').text, units])]
        else:
            raise KeywordError('Property ' + name + ' not supported as common property')
    return properties
|
Read common properties from root of ReSpecTh XML file.
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with common properties
|
def model_fn(hparams, seed):
    """Create a Keras model with the given hyperparameters.
    Args:
      hparams: A dict mapping hyperparameters in `HPARAMS` to values.
      seed: A hashable object to be used as a random seed (e.g., to
        construct dropout layers in the model).
    Returns:
      A compiled Keras model.
    """
    rng = random.Random(seed)
    layers = tf.keras.layers
    model = tf.keras.models.Sequential()
    model.add(layers.Input(INPUT_SHAPE))
    # add a trailing grayscale channel axis
    model.add(layers.Reshape(INPUT_SHAPE + (1,)))
    # convolutional stack; filter count doubles with each layer
    filters = 8
    for _ in range(hparams[HP_CONV_LAYERS]):
        model.add(layers.Conv2D(
            filters=filters,
            kernel_size=hparams[HP_CONV_KERNEL_SIZE],
            padding="same",
            activation="relu",
        ))
        model.add(layers.MaxPool2D(pool_size=2, padding="same"))
        filters *= 2
    model.add(layers.Flatten())
    model.add(layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))
    # fully connected stack; neuron count doubles with each layer
    neurons = 32
    for _ in range(hparams[HP_DENSE_LAYERS]):
        model.add(layers.Dense(neurons, activation="relu"))
        neurons *= 2
    # final output layer
    model.add(layers.Dense(OUTPUT_CLASSES, activation="softmax"))
    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=hparams[HP_OPTIMIZER],
        metrics=["accuracy"],
    )
    return model
|
Create a Keras model with the given hyperparameters.
Args:
hparams: A dict mapping hyperparameters in `HPARAMS` to values.
seed: A hashable object to be used as a random seed (e.g., to
construct dropout layers in the model).
Returns:
A compiled Keras model.
|
def model_page(self, request, app_label, model_name, rest_of_url=None):
    """
    Handles the model-specific functionality of the databrowse site,
    delegating to the appropriate ModelDatabrowse class.
    Raises Http404 when the model cannot be found or was never
    registered with this databrowse site.
    """
    try:
        model = get_model(app_label, model_name)
    except LookupError:
        model = None
    if model is None:
        raise http.Http404("App %r, model %r, not found." %
                           (app_label, model_name))
    try:
        databrowse_class = self.registry[model]
    except KeyError:
        raise http.Http404("This model exists but has not been registered "
                           "with databrowse.")
    # delegate the rest of the URL to the registered handler's root view
    return databrowse_class(model, self).root(request, rest_of_url)
|
Handles the model-specific functionality of the databrowse site,
delegating to the appropriate ModelDatabrowse class.
|
def OnChar(self, event):
    """Handle a wx character event on this numeric text control.
    Accepts digits, a single leading '-', and at most one '.' (only when
    the control's precision allows decimals). RETURN commits the entry;
    other printable characters are swallowed.
    """
    key = event.GetKeyCode()
    entry = wx.TextCtrl.GetValue(self).strip()
    pos = wx.TextCtrl.GetSelection(self)
    # really, the order here is important:
    # 1. return sends to ValidateEntry
    if key == wx.WXK_RETURN:
        if not self.is_valid:
            # revert the display to the bound value
            # NOTE(review): assumes self.format % set_float(...) yields a
            # displayable number — confirm against the class's setup
            wx.TextCtrl.SetValue(self, self.format % set_float(self.__bound_val))
        else:
            self.SetValue(entry)
        return
    # 2. other non-text characters are passed without change
    if (key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255):
        event.Skip()
        return
    # 3. check for multiple '.' and out of place '-' signs and ignore these
    # note that chr(key) will now work due to return at #2
    has_minus = '-' in entry
    ckey = chr(key)
    if ((ckey == '.' and (self.__prec == 0 or '.' in entry) ) or
        (ckey == '-' and (has_minus or pos[0] != 0)) or
        (ckey != '-' and has_minus and pos[0] == 0)):
        return
    # 4. allow digits, but not other characters
    if chr(key) in self.__digits:
        event.Skip()
|
on Character event
|
def get_docargs(self, args=None, prt=None):
    """Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
    parsed = self.objdoc.get_docargs(args, prt)
    # validate the parsed options before handing them back
    self._chk_docopts(parsed)
    return parsed
|
Pare down docopt. Return a minimal dictionary and a set containing runtime arg values.
|
def clean_virtualenv(self):
    """
    Empty our virtualenv so that new (or older) dependencies may be
    installed.
    Runs the bundled ``clean_virtualenv.sh`` script via
    ``user_run_script``; presumably ``rw_venv=True`` grants the script
    write access to the virtualenv — confirm in ``user_run_script``.
    """
    self.user_run_script(
        script=scripts.get_script_path('clean_virtualenv.sh'),
        args=[],
        rw_venv=True,
    )
|
Empty our virtualenv so that new (or older) dependencies may be
installed
|
def get_session(account_info):
    """Get a boto3 session, potentially a cross-account sts assumed one.
    Sessions are cached per account name; assumed sessions are
    automatically refreshed.
    """
    cache_key = '%s-session' % account_info['name']
    session = getattr(CONN_CACHE, cache_key, None)
    if session is None:
        role = account_info.get('role')
        # assume the configured role when one is present,
        # otherwise fall back to the default credentials chain
        session = assumed_session(role, SESSION_NAME) if role else boto3.Session()
        setattr(CONN_CACHE, cache_key, session)
    return session
|
Get a boto3 session, potentially a cross-account sts assumed session.
Assumed sessions are automatically refreshed.
|
def fitSphere(coords):
    """
    Fits a sphere to a set of points by linear least squares.
    Solves A·C = f where C = (cx, cy, cz, k) and the radius is
    sqrt(cx^2 + cy^2 + cz^2 + k). Returns None when the system is rank
    deficient (e.g. coplanar points); otherwise returns a wireframe Sphere.
    Extra info is stored in ``actor.info['radius']``, ``actor.info['center']``, ``actor.info['residue']``.
    .. hint:: Example: |fitspheres1.py|_
        |fitspheres2| |fitspheres2.py|_
    """
    coords = np.array(coords)
    n = len(coords)
    # design matrix: each row is (2x, 2y, 2z, 1)
    A = np.zeros((n, 4))
    A[:, :-1] = coords * 2
    A[:, 3] = 1
    f = np.zeros((n, 1))
    x = coords[:, 0]
    y = coords[:, 1]
    z = coords[:, 2]
    f[:, 0] = x * x + y * y + z * z
    # rcond=None uses the machine-precision cutoff and silences the
    # FutureWarning raised by the legacy numpy default
    C, residue, rank, sv = np.linalg.lstsq(A, f, rcond=None)  # solve AC=f
    if rank < 4:
        return None
    t = (C[0] * C[0]) + (C[1] * C[1]) + (C[2] * C[2]) + C[3]
    radius = np.sqrt(t)[0]
    center = np.array([C[0][0], C[1][0], C[2][0]])
    # residue is empty when the system is exactly determined
    if len(residue):
        residue = np.sqrt(residue[0]) / n
    else:
        residue = 0
    s = vs.Sphere(center, radius, c="r", alpha=1).wire(1)
    s.info["radius"] = radius
    s.info["center"] = center
    s.info["residue"] = residue
    return s
|
Fits a sphere to a set of points.
Extra info is stored in ``actor.info['radius']``, ``actor.info['center']``, ``actor.info['residue']``.
.. hint:: Example: |fitspheres1.py|_
|fitspheres2| |fitspheres2.py|_
|
def setup():
    """
    Install handlers for Mitogen loggers to redirect them into the Ansible
    display framework. Ansible installs its own logging framework handlers when
    C.DEFAULT_LOG_PATH is set, therefore disable propagation for our handlers.
    """
    log_mitogen = logging.getLogger('mitogen')
    log_mitogen_io = logging.getLogger('mitogen.io')
    log_ansible = logging.getLogger('ansible_mitogen')
    for log in (log_mitogen, log_mitogen_io, log_ansible):
        log.handlers = [Handler(display.vvv)]
        log.propagate = False
    if display.verbosity > 2:
        level = logging.DEBUG
    else:
        # Mitogen copies the active log level into new children, allowing them
        # to filter tiny messages before they hit the network, and therefore
        # before they wake the IO loop.
        # NOTE(review): the original comment mentioned INFO, but ERROR is
        # what has always been set here — confirm intent upstream.
        level = logging.ERROR
    log_mitogen.setLevel(level)
    log_ansible.setLevel(level)
    if display.verbosity > 3:
        log_mitogen_io.setLevel(logging.DEBUG)
|
Install handlers for Mitogen loggers to redirect them into the Ansible
display framework. Ansible installs its own logging framework handlers when
C.DEFAULT_LOG_PATH is set, therefore disable propagation for our handlers.
|
def export_saved_model(sess, export_dir, tag_set, signatures):
    """Convenience function to export a saved_model using provided arguments
    The caller specifies the saved_model signatures in a simplified python dictionary form, as follows::
        signatures = {
          'signature_def_key': {
            'inputs': { 'input_tensor_alias': input_tensor_name },
            'outputs': { 'output_tensor_alias': output_tensor_name },
            'method_name': 'method'
          }
        }
    And this function will generate the `signature_def_map` and export the saved_model.
    Args:
      :sess: a tf.Session instance
      :export_dir: path to save exported saved_model
      :tag_set: comma-separated string of tags to identify the exported graph
      :signatures: simplified dictionary representation of a TensorFlow signature_def_map
    Returns:
      A saved_model exported to disk at ``export_dir``.
    """
    import tensorflow as tf
    g = sess.graph
    # the session graph may already be finalized; temporarily unfinalize
    # it so the SavedModelBuilder can add its collections
    g._unsafe_unfinalize()  # https://github.com/tensorflow/serving/issues/363
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    logging.info("===== signatures: {}".format(signatures))
    signature_def_map = {}
    # build one SignatureDef per entry, wrapping each tensor in TensorInfo;
    # method_name falls back to the signature key when not given
    for key, sig in signatures.items():
        signature_def_map[key] = tf.saved_model.signature_def_utils.build_signature_def(
            inputs={name: tf.saved_model.utils.build_tensor_info(tensor) for name, tensor in sig['inputs'].items()},
            outputs={name: tf.saved_model.utils.build_tensor_info(tensor) for name, tensor in sig['outputs'].items()},
            method_name=sig['method_name'] if 'method_name' in sig else key)
    logging.info("===== signature_def_map: {}".format(signature_def_map))
    builder.add_meta_graph_and_variables(
        sess,
        tag_set.split(','),
        signature_def_map=signature_def_map,
        clear_devices=True)
    # re-finalize the graph before writing the model to disk
    g.finalize()
    builder.save()
|
Convenience function to export a saved_model using provided arguments
The caller specifies the saved_model signatures in a simplified python dictionary form, as follows::
signatures = {
'signature_def_key': {
'inputs': { 'input_tensor_alias': input_tensor_name },
'outputs': { 'output_tensor_alias': output_tensor_name },
'method_name': 'method'
}
}
And this function will generate the `signature_def_map` and export the saved_model.
Args:
:sess: a tf.Session instance
:export_dir: path to save exported saved_model
:tag_set: string tag_set to identify the exported graph
:signatures: simplified dictionary representation of a TensorFlow signature_def_map
Returns:
A saved_model exported to disk at ``export_dir``.
|
def worker_wrapper(worker_instance, pid_path):
    """
    A wrapper to start RQ worker as a new process.
    :param worker_instance: RQ's worker instance
    :param pid_path: A file to check if the worker
                     is running or not
    """
    def exit_handler(*args):
        """
        Remove pid file on exit.
        Safe to call multiple times: the handler may fire via a signal,
        via atexit AND via the explicit call below, so a missing pid
        file is not an error.
        """
        if len(args) > 0:
            print("Exit py signal {signal}".format(signal=args[0]))
        try:
            remove(pid_path)
        except OSError:
            # pid file already removed by an earlier invocation
            pass
    atexit.register(exit_handler)
    signal.signal(signal.SIGINT, exit_handler)
    signal.signal(signal.SIGTERM, exit_handler)
    worker_instance.work()
    # Remove pid file if the process can not catch signals
    exit_handler(2)
|
A wrapper to start RQ worker as a new process.
:param worker_instance: RQ's worker instance
:param pid_path: A file to check if the worker
is running or not
|
def require_instance(obj, types=None, name=None, type_name=None, truncate_at=80):
    """
    Raise a TypeError if obj is not an instance of one of the specified types.
    As with isinstance, 'types' may be either a single type or a tuple of
    types.
    If name or type_name is provided, it is used in the exception message.
    The object's string representation is also included in the message,
    truncated to 'truncate_at' number of characters.
    """
    if isinstance(obj, types):
        return
    # truncate long reprs so the message stays readable
    shown = str(obj)
    if len(shown) > truncate_at:
        shown = shown[:truncate_at - 3] + "..."
    if type_name is None:
        try:
            # tuple of types: list them all
            type_name = "one of " + ", ".join(str(t) for t in types)
        except TypeError:
            # single (non-iterable) type
            type_name = str(types)
    prefix = ("%s: " % name) if name else ""
    raise TypeError("%sexpected %s. Got: '%s' of type '%s'" % (
        prefix, type_name, shown, type(obj)))
|
Raise an exception if obj is not an instance of one of the specified types.
Similarly to isinstance, 'types' may be either a single type or a tuple of
types.
If name or type_name is provided, it is used in the exception message.
The object's string representation is also included in the message,
truncated to 'truncate_at' number of characters.
|
def hsv_to_rgb(hsv):
    """Converts a tuple of hue, saturation, value to a tuple of red, green, blue.
    Hue should be an angle from 0.0 to 359.0. Saturation and value should be a
    value from 0.0 to 1.0, where saturation controls the intensity of the hue and
    value controls the brightness.
    """
    # Algorithm adapted from http://www.cs.rit.edu/~ncs/color/t_convert.html
    hue, sat, val = hsv
    if sat == 0:
        # achromatic: all channels equal the brightness
        return (val, val, val)
    hue /= 60.0
    sector = math.floor(hue)
    frac = hue - sector
    p = val * (1.0 - sat)
    q = val * (1.0 - sat * frac)
    t = val * (1.0 - sat * (1.0 - frac))
    # channel ordering per 60-degree sector; anything outside 0-4
    # falls in the last (magenta-to-red) sector
    by_sector = {
        0: (val, t, p),
        1: (q, val, p),
        2: (p, val, t),
        3: (p, q, val),
        4: (t, p, val),
    }
    return by_sector.get(sector, (val, p, q))
|
Converts a tuple of hue, saturation, value to a tuple of red, green blue.
Hue should be an angle from 0.0 to 359.0. Saturation and value should be a
value from 0.0 to 1.0, where saturation controls the intensity of the hue and
value controls the brightness.
|
def eigen(X, P, NSIG=None, method='music', threshold=None, NFFT=default_NFFT,
          criteria='aic', verbose=False):
    r"""Pseudo spectrum using eigenvector method (EV or Music)
    This function computes either the Music or EigenValue (EV) noise
    subspace frequency estimator.
    First, an autocorrelation matrix of order `P` is computed from
    the data. Second, this matrix is separated into vector subspaces,
    one a signal subspace and the other a noise
    subspace using a SVD method to obtain the eigen values and vectors.
    From the eigen values :math:`\lambda_i`, and eigen vectors :math:`v_k`,
    the **pseudo spectrum** (see note below) is computed as follows:
    .. math:: P_{ev}(f) = \frac{1}{e^H(f)\left(\sum\limits_{k=M+1}^{p} \frac{1}{\lambda_k}v_kv_k^H\right)e(f)}
    The separation of the noise and signal subspaces requires expertise
    of the signal. However, AIC and MDL criteria may be used to automatically
    perform this task.
    You still need to provide the parameter `P` to indicate the maximum number
    of eigen values to be computed. The criteria will just select a subset
    to estimate the pseudo spectrum (see :func:`~spectrum.criteria.aic_eigen`
    and :func:`~spectrum.criteria.mdl_eigen` for details.
    .. note:: **pseudo spectrum**. func:`eigen` does not compute a PSD estimate.
        Indeed, the method does not preserve the measured process power.
    :param X: Array data samples
    :param int P: maximum number of eigen values to compute. NSIG (if
        specified) must therefore be less than P.
    :param str method: 'music' or 'ev'.
    :param int NSIG: If specified, the signal sub space uses NSIG eigen values.
    :param float threshold: If specified, the signal sub space is made of the
        eigen values larger than :math:`\rm{threshold} \times \lambda_{min}`,
        where :math:`\lambda_{min}` is the minimum eigen values.
    :param int NFFT: total length of the final data sets (padded with zero
        if needed; default is 4096)
    :return:
        * PSD: Array of real frequency estimator values (two sided for
          complex data and one sided for real data)
        * S, the eigen values
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import eigen, marple_data
        from pylab import plot, log10, linspace, legend, axis
        psd, ev = eigen(marple_data, 15, NSIG=11)
        f = linspace(-0.5, 0.5, len(psd))
        plot(f, 10 * log10(psd/max(psd)), label='User defined')
        psd, ev = eigen(marple_data, 15, threshold=2)
        plot(f, 10 * log10(psd/max(psd)), label='threshold method (100)')
        psd, ev = eigen(marple_data, 15)
        plot(f, 10 * log10(psd/max(psd)), label='AIC method (8)')
        legend()
        axis([-0.5, 0.5, -120, 0])
    .. seealso::
        :func:`pev`,
        :func:`pmusic`,
        :func:`~spectrum.criteria.aic_eigen`
    :References: [Marple]_, Chap 13
    .. todo:: for developers:
        * what should be the second argument of the criteria N, N-P, P...?
        * what should be the max value of NP
    """
    # validate the mutually exclusive subspace-selection arguments
    if method not in ['music', 'ev']:
        raise ValueError("method must be 'music' or 'ev'")
    if NSIG != None and threshold != None:
        raise ValueError("NSIG and threshold cannot be provided together")
    if NSIG is not None:
        if NSIG < 0:
            raise ValueError('NSIG must be positive')
        if NSIG >= P:
            raise ValueError("NSIG must be stricly less than IP")
    #
    N = len(X)
    NP = N - P
    assert 2 * NP > P-1, 'decrease the second argument'
    # cap the number of data-matrix rows (see the corrmtx note below)
    if NP > 100:
        NP = 100
    # FB stacks NP forward rows and NP backward (conjugated) rows,
    # i.e. the modified-covariance data matrix (see commented corrmtx call)
    FB = np.zeros((2*NP, P), dtype=complex)
    #FB = numpy.zeros((MAXU, IP), dtype=complex)
    Z = np.zeros(NFFT, dtype=complex)
    PSD = np.zeros(NFFT)
    # These loops can surely be replaced by a function that create such matrix
    for I in range(0, NP):
        for K in range(0, P):
            FB[I, K] = X[I-K+P-1]
            FB[I+NP, K] = X[I+K+1].conjugate()
    # This commented line produces the correct FB, as the 2 for loops above
    # It is more elegant but slower...corrmtx needs to be optimised (20/4/11)
    #FB2 = spectrum.linalg.corrmtx(X, P-1, method='modified')
    #Compute the eigen values / vectors
    _U, S, V = svd (FB)
    # U and V are not the same as in Marple. Real or Imaginary absolute values
    # are correct but signs are not. This is wierd because the svd function
    # gives the same result as cvsd in Marple. Is FB correct ? it seems so.
    # The following operation has to be done. Otherwise, the resulting PSD is
    # not corect
    V = -V.transpose()
    # choose how many eigen values form the signal subspace (user value,
    # threshold rule, or AIC/MDL criterion)
    NSIG = _get_signal_space(S, 2*NP,
            verbose=verbose, threshold=threshold,
            NSIG=NSIG, criteria=criteria)
    #C AI or Expert Knowledge to choose "signal" singular values, or input
    #C NSIG at this point
    # accumulate the noise-subspace energy; EV weights each term by 1/lambda
    for I in range(NSIG, P):
        Z[0:P] = V[0:P, I]
        Z[P:NFFT] = 0
        Z = fft(Z, NFFT)
        if method == 'music':
            PSD = PSD + abs(Z)**2.
        elif method == 'ev' :
            PSD = PSD + abs(Z)**2. / S[I]
    # pseudo spectrum is the reciprocal of the accumulated energy
    PSD = 1./PSD
    # for some reasons, we need to rearrange the output. this is related to
    # the way U and V are order in the routine svd
    nby2 = int(NFFT/2)
    #return PSD, S
    newpsd = np.append(PSD[nby2:0:-1], PSD[nby2*2-1:nby2-1:-1])
    return newpsd, S
|
r"""Pseudo spectrum using eigenvector method (EV or Music)
This function computes either the Music or EigenValue (EV) noise
subspace frequency estimator.
First, an autocorrelation matrix of order `P` is computed from
the data. Second, this matrix is separated into vector subspaces,
one a signal subspace and the other a noise
subspace using a SVD method to obtain the eigen values and vectors.
From the eigen values :math:`\lambda_i`, and eigen vectors :math:`v_k`,
the **pseudo spectrum** (see note below) is computed as follows:
.. math:: P_{ev}(f) = \frac{1}{e^H(f)\left(\sum\limits_{k=M+1}^{p} \frac{1}{\lambda_k}v_kv_k^H\right)e(f)}
The separation of the noise and signal subspaces requires expertise
of the signal. However, AIC and MDL criteria may be used to automatically
perform this task.
You still need to provide the parameter `P` to indicate the maximum number
of eigen values to be computed. The criteria will just select a subset
to estimate the pseudo spectrum (see :func:`~spectrum.criteria.aic_eigen`
and :func:`~spectrum.criteria.mdl_eigen` for details.
.. note:: **pseudo spectrum**. func:`eigen` does not compute a PSD estimate.
Indeed, the method does not preserve the measured process power.
:param X: Array data samples
:param int P: maximum number of eigen values to compute. NSIG (if
specified) must therefore be less than P.
:param str method: 'music' or 'ev'.
:param int NSIG: If specified, the signal sub space uses NSIG eigen values.
:param float threshold: If specified, the signal sub space is made of the
eigen values larger than :math:`\rm{threshold} \times \lambda_{min}`,
where :math:`\lambda_{min}` is the minimum eigen values.
:param int NFFT: total length of the final data sets (padded with zero
if needed; default is 4096)
:return:
* PSD: Array of real frequency estimator values (two sided for
complex data and one sided for real data)
* S, the eigen values
.. plot::
:width: 80%
:include-source:
from spectrum import eigen, marple_data
from pylab import plot, log10, linspace, legend, axis
psd, ev = eigen(marple_data, 15, NSIG=11)
f = linspace(-0.5, 0.5, len(psd))
plot(f, 10 * log10(psd/max(psd)), label='User defined')
psd, ev = eigen(marple_data, 15, threshold=2)
plot(f, 10 * log10(psd/max(psd)), label='threshold method (100)')
psd, ev = eigen(marple_data, 15)
plot(f, 10 * log10(psd/max(psd)), label='AIC method (8)')
legend()
axis([-0.5, 0.5, -120, 0])
.. seealso::
:func:`pev`,
:func:`pmusic`,
:func:`~spectrum.criteria.aic_eigen`
:References: [Marple]_, Chap 13
.. todo:: for developers:
* what should be the second argument of the criteria N, N-P, P...?
* what should be the max value of NP
|
def needs_refresh(self, source):
    """Has the (persisted) source expired in the store?
    Will return True if the source is not in the store at all, if its
    TTL is set to None, or if more seconds have passed since it was
    stored than the TTL allows.
    """
    now = time.time()
    if source._tok not in self:
        # never persisted -> must refresh
        return True
    s0 = self[source._tok]
    ttl = s0.metadata.get('ttl', None)
    if ttl is None:
        # no TTL recorded: always considered stale, per the contract above
        # (the previous code returned False here and also compared the TTL
        # against a non-positive "then - now", so entries never expired)
        return True
    then = s0.metadata['timestamp']
    # stale when the entry's age exceeds its TTL
    return (now - then) > ttl
|
Has the (persisted) source expired in the store?
Will return True if the source is not in the store at all, if its
TTL is set to None, or if more seconds have passed than the TTL.
|
def build(self, corpus, state_size):
    """
    Construct the Markov model as a dict of dicts.  Each key of the outer
    dict is a possible state (a tuple of ``state_size`` items); its value
    is a dict mapping every observed "next" item to the number of times it
    followed that state.
    """
    # A plain dict keeps memory usage noticeably lower than a DefaultDict,
    # which is why the lookups below are done by hand.
    model = {}
    for run in corpus:
        padded = ([BEGIN] * state_size) + run + [END]
        for start in range(len(run) + 1):
            state = tuple(padded[start:start + state_size])
            follow = padded[start + state_size]
            counts = model.setdefault(state, {})
            counts[follow] = counts.get(follow, 0) + 1
    return model
|
Build a Python representation of the Markov model. Returns a dict
of dicts where the keys of the outer dict represent all possible states,
and point to the inner dicts. The inner dicts represent all possibilities
for the "next" item in the chain, along with the count of times it
appears.
|
def identify_triggers(cfg, sources, sinks, lattice, nosec_lines):
    """Identify sources, sinks and sanitisers in a CFG.

    Args:
        cfg(CFG): CFG to find sources, sinks and sanitisers in.
        sources(tuple): list of sources; a source is a (source, sanitiser) tuple.
        sinks(tuple): list of sinks; a sink is a (sink, sanitiser) tuple.
        lattice: lattice used when propagating secondary sources.
        nosec_lines(set): lines with # nosec whitelisting.

    Returns:
        Triggers tuple with source and sink trigger nodes and a sanitiser
        node dict.
    """
    assignments = filter_cfg_nodes(cfg, AssignmentNode)
    tainted = filter_cfg_nodes(cfg, TaintedNode)
    # Every tainted node is treated as a framework-provided source.
    tainted_triggers = []
    for node in tainted:
        tainted_triggers.append(
            TriggerNode(
                Source('Framework function URL parameter'),
                cfg_node=node
            )
        )
    source_triggers = find_triggers(assignments, sources, nosec_lines)
    source_triggers.extend(tainted_triggers)
    find_secondary_sources(assignments, source_triggers, lattice)
    sink_triggers = find_triggers(cfg.nodes, sinks, nosec_lines)
    sanitiser_dict = build_sanitiser_node_dict(cfg, sink_triggers)
    return Triggers(source_triggers, sink_triggers, sanitiser_dict)
|
Identify sources, sinks and sanitisers in a CFG.
Args:
cfg(CFG): CFG to find sources, sinks and sanitisers in.
sources(tuple): list of sources, a source is a (source, sanitiser) tuple.
sinks(tuple): list of sources, a sink is a (sink, sanitiser) tuple.
nosec_lines(set): lines with # nosec whitelisting
Returns:
Triggers tuple with sink and source nodes and a sanitiser node dict.
|
def est_propensity_s(self, lin_B=None, C_lin=1, C_qua=2.71):
    """
    Estimate the propensity score with data-driven covariate selection.

    The propensity score is the conditional probability of receiving the
    treatment given the observed covariates, estimated via a logistic
    regression.  Covariates are selected through the sequence of
    likelihood ratio tests suggested by Imbens & Rubin (2015).

    Parameters
    ----------
    lin_B: list, optional
        Column numbers (zero-based) of variables of the original
        covariate matrix X to always include linearly.  Defaults to an
        empty list, meaning every column of X is subjected to the
        selection algorithm.
    C_lin: scalar, optional
        Critical value used in likelihood ratio tests to decide whether
        candidate linear terms should be included.  Defaults to 1 as in
        Imbens & Rubin (2015).
    C_qua: scalar, optional
        Critical value used in likelihood ratio tests to decide whether
        candidate quadratic terms should be included.  Defaults to 2.71
        as in Imbens & Rubin (2015).

    References
    ----------
    .. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
        Statistics, Social, and Biomedical Sciences: An Introduction.
    """
    basic_terms = parse_lin_terms(self.raw_data['K'], lin_B)
    self.propensity = PropensitySelect(self.raw_data, basic_terms,
                                       C_lin, C_qua)
    self.raw_data._dict['pscore'] = self.propensity['fitted']
    self._post_pscore_init()
|
Estimates the propensity score with covariates selected using
the algorithm suggested by [1]_.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
The covariate selection algorithm is based on a sequence
of likelihood ratio tests.
Parameters
----------
lin_B: list, optional
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to empty list, meaning
every column of X is subjected to the
selection algorithm.
C_lin: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate linear terms should
be included. Defaults to 1 as in [1]_.
C_qua: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate quadratic terms
should be included. Defaults to 2.71 as in
[1]_.
References
----------
.. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
|
def combined_download(accounts, days=60):
    """Download OFX data for every account and merge it into one document.

    Expects an 'accounts' list of ofxclient.Account objects and an optional
    'days' lookback window (default 60).  Returns a rewound StringIO holding
    the combined OFX file.
    """
    client = Client(institution=None)
    merged = StringIO()
    merged.write(client.header())
    merged.write('<OFX>')
    for account in accounts:
        raw = account.download(days=days).read()
        # Keep only the payload between the <OFX> tags of each download.
        body = raw.partition('<OFX>')[2].partition('</OFX>')[0]
        merged.write(body)
    merged.write("</OFX>")
    merged.seek(0)
    return merged
|
Download OFX files and combine them into one
It expects an 'accounts' list of ofxclient.Account objects
as well as an optional 'days' specifier which defaults to 60
|
def set_ioloop(self, ioloop=None):
    """Set the tornado IOLoop to use.

    Sets the tornado.ioloop.IOLoop instance to use, defaulting to
    IOLoop.current(). If set_ioloop() is never called the IOLoop is
    started in a new thread, and will be stopped if self.stop() is called.

    Notes
    -----
    Must be called before start() is called.
    """
    # Delegate the choice of loop to the wrapped server, then mirror the
    # loop it settled on so this object and the server agree on the IOLoop.
    self._server.set_ioloop(ioloop)
    self.ioloop = self._server.ioloop
|
Set the tornado IOLoop to use.
Sets the tornado.ioloop.IOLoop instance to use, defaulting to
IOLoop.current(). If set_ioloop() is never called the IOLoop is
started in a new thread, and will be stopped if self.stop() is called.
Notes
-----
Must be called before start() is called.
|
def add_samples(self, samples: Iterable[Sample]) -> None:
    """Add each sample from an iterable to this :class:`SampleSheet`."""
    for item in samples:
        self.add_sample(item)
|
Add samples in an iterable to this :class:`SampleSheet`.
|
def save_state(self):
    """Store the options into the user's stored session info.

    Persists, in order: the generic boolean and text setting maps, the
    individually-tracked path/text widgets, the combo-box selections
    (earthquake function and currency), the InaSAFE default values and the
    population parameters.  All writes go through ``set_setting`` against
    ``self.settings``.
    """
    # Save boolean settings
    for key, check_box in list(self.boolean_settings.items()):
        self.save_boolean_setting(key, check_box)
    # Save text settings
    for key, line_edit in list(self.text_settings.items()):
        self.save_text_setting(key, line_edit)
    # Widgets without an entry in the generic maps are persisted one by one.
    set_setting(
        'north_arrow_path', self.leNorthArrowPath.text(), self.settings)
    set_setting(
        'organisation_logo_path',
        self.organisation_logo_path_line_edit.text(),
        self.settings)
    set_setting(
        'reportTemplatePath',
        self.leReportTemplatePath.text(),
        self.settings)
    set_setting(
        'reportDisclaimer',
        self.txtDisclaimer.toPlainText(),
        self.settings)
    set_setting(
        'defaultUserDirectory',
        self.leUserDirectoryPath.text(),
        self.settings)
    # Combo boxes persist the item *data*, not the visible text, so the
    # value is looked up through the current index.
    index = self.earthquake_function.currentIndex()
    value = self.earthquake_function.itemData(index)
    set_setting('earthquake_function', value, qsettings=self.settings)
    currency_index = self.currency_combo_box.currentIndex()
    currency_key = self.currency_combo_box.itemData(currency_index)
    set_setting('currency', currency_key, qsettings=self.settings)
    # Save InaSAFE default values
    self.save_default_values()
    # Save population parameters
    self.save_population_parameters()
|
Store the options into the user's stored session info.
|
def send_exception(self, code, exc_info=None, headers=None):
    """Send an error response, including a formatted traceback, to the client."""
    # Fall back to the exception currently being handled.
    exc_info = exc_info or sys.exc_info()
    trace_lines = traceback.format_exception(*exc_info)
    self.send_error_msg(code, trace_lines,
                        headers if headers is not None else {})
|
send an error response including a backtrace to the client
|
def intersection(self, other):
    """
    Return a new DiscreteSet holding the elements present in both self
    and other.

    :param DiscreteSet other: Set to intersect with
    :rtype: DiscreteSet
    """
    # "everything" sets act as the identity element of intersection.
    if self.everything and other.everything:
        return DiscreteSet()
    if self.everything:
        return DiscreteSet(other.elements)
    if other.everything:
        return DiscreteSet(self.elements)
    return DiscreteSet(self.elements.intersection(other.elements))
|
Return a new DiscreteSet with the intersection of the two sets, i.e.
all elements that are in both self and other.
:param DiscreteSet other: Set to intersect with
:rtype: DiscreteSet
|
def generate(self, dir_pattern, file_pattern, action_ch='g', recursively=False, force=False):
    """
    Main method to generate (source code) files from templates.
    See documentation about the directory and file patterns and their possible combinations.
    Args:
        dir_pattern: ``glob`` pattern taken from the root directory. **Only** used for directories.
        file_pattern: ``fnmatch`` pattern taken from all matching directories. **Only** used for files.
        action_ch (char): Denote action to be taken. Can be:
            - g: Generate all files that match both directory and file patterns. This is the default behavior.
            - d: Same as `g` but without doing anything, i.e. dry run.
            - c: Same as `g` but erasing the generated files instead, i.e. clean.
        recursively: Do we do the actions in the sub-directories? Note that in this case **only** the file pattern applies as **all**
            the subdirectories are visited.
        force (boolean): Do we force the generation or not?
    """
    # directories to visit
    # using heavy machinery to extract absolute cleaned paths... to avoid any problem...
    directories = [os.path.abspath(directory) for directory in glob.glob(os.path.join(self.__root_directory, dir_pattern)) if os.path.isdir(directory)]
    # template file extensions this generator knows how to process
    extensions = self.__extensions.keys()
    for directory in directories:
        for b, f in find_files(directory, file_pattern, recursively=recursively):
            # test if some template files can be processed
            file_basename, file_ext = os.path.splitext(f)
            if file_ext in extensions:
                # try to find corresponding action
                rel_path = os.path.relpath(os.path.join(b,f), self.__root_directory)
                rel_basename, rel_filename = os.path.split(rel_path)
                rel_filename_without_ext, rel_ext = os.path.splitext(rel_filename)
                # template absolute filename
                in_file_name = os.path.join(b, f)
                # an action registered for this directory wins over the default
                generator_action_container = self.__actions.retrieve_element_or_default(rel_basename, None)
                action = None
                if generator_action_container is not None:
                    action = generator_action_container.get_compatible_generator_action(f)
                # is there a default action if needed?
                if action is None:
                    action = self.__default_action
                if action:
                    if action_ch == 'd':
                        print("Process file '%s' with function '%s':" % (rel_path, action.action_function_name()))
                    # each action run yields a (filename suffix, template context) pair
                    for filename_end, context in action.run():
                        # generated absolute file name
                        out_file_name = os.path.join(b, rel_filename_without_ext + filename_end + self.__extensions[file_ext])
                        if action_ch == 'g':
                            self.__generate_file(template_filename=in_file_name, context=context, generated_filename=out_file_name, force=force)
                        elif action_ch == 'c':
                            try:
                                os.remove(out_file_name)
                                self.log_info("Removed file '%s'" % out_file_name)
                            except OSError:
                                # best-effort clean: the generated file may not exist
                                pass
                        elif action_ch == 'd':
                            # we only print relative path
                            print(" -> %s" % os.path.join(rel_basename, rel_filename_without_ext + filename_end + self.__extensions[file_ext]))
|
Main method to generate (source code) files from templates.
See documentation about the directory and file patterns and their possible combinations.
Args:
dir_pattern: ``glob`` pattern taken from the root directory. **Only** used for directories.
file_pattern: ``fnmatch`` pattern taken from all matching directories. **Only** used for files.
action (char): Denote action to be taken. Can be:
- g: Generate all files that match both directory and file patterns. This is the default behavior.
- d: Same as `g` but without doing anything, i.e. dry run.
- c: Same as `g` but erasing the generated files instead, i.e. clean.
recursively: Do we do the actions in the sub-directories? Note that in this case **only** the file pattern applies as **all**
the subdirectories are visited.
force (boolean): Do we force the generation or not?
|
def _parse_authors(html_chunk):
    """
    Parse authors of the book.
    Args:
        html_chunk (obj): HTMLElement containing slice of the page with details.
    Returns:
        list: List of :class:`structures.Author` objects. Blank if no author \
              found.
    """
    # Authors live in <div class="comment"> -> <h3> -> <a> elements.
    authors = html_chunk.match(
        ["div", {"class": "comment"}],
        "h3",
        "a",
    )
    if not authors:
        return []
    authors = map(
        lambda x: Author(  # build Author from link text and absolutized href
            x.getContent().strip(),
            normalize_url(BASE_URL, x.params.get("href", None))
        ),
        authors
    )
    # Drop entries whose name is blank after stripping.
    # NOTE(review): on Python 3, map/filter return lazy iterators, so callers
    # would receive a filter object rather than the list the docstring
    # promises — confirm the target Python version before relying on this.
    return filter(lambda x: x.name.strip(), authors)
|
Parse authors of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
list: List of :class:`structures.Author` objects. Blank if no author \
found.
|
def build_from_queue(cls, input_queue, replay_size, batch_size):
    """Build a `ReplayableQueue` that draws batches from `input_queue`.

    Args:
        input_queue: The queue to draw from.
        replay_size: The size of the replay buffer.
        batch_size: The size of each batch.
    Returns:
        A ReplayableQueue.
    """
    def _draw():
        # Pull one batch from the underlying queue on demand.
        return input_queue.dequeue_many(batch_size)
    return cls(_draw, replay_size, batch_size=batch_size)
|
Builds a `ReplayableQueue` that draws from a regular `input_queue`.
Args:
input_queue: The queue to draw from.
replay_size: The size of the replay buffer.
batch_size: The size of each batch.
Returns:
A ReplayableQueue.
|
def search_index_advanced(self, index, query):
    '''
    Advanced search query against an entire index.

    > query = ElasticQuery().query_string(query='imchi')
    > search = ElasticSearch()

    POSTs the query (merged with any ``self.params``) to
    ``http://<host>:<port>/<index>/_search`` and returns the raw response.
    '''
    request = self.session
    url = 'http://%s:%s/%s/_search' % (self.host, self.port, index)
    if self.params:
        content = dict(query=query, **self.params)
    else:
        content = dict(query=query)
    if self.verbose:
        # Bug fix: was the Python 2 print statement ("print content"),
        # which is a SyntaxError on Python 3.
        print(content)
    response = request.post(url, content)
    return response
|
Advanced search query against an entire index
> query = ElasticQuery().query_string(query='imchi')
> search = ElasticSearch()
|
def readPopulations(inputFileName, requiredPopulation):
    """Reads a population file.

    :param inputFileName: the name of the population file.
    :param requiredPopulation: the required population.
    :type inputFileName: str
    :type requiredPopulation: list
    :returns: a :py:class:`dict` mapping (famID, indID) to population,
              restricted to the required populations.
    """
    wanted = set(requiredPopulation)
    populations = {}
    with open(inputFileName, "r") as inputFile:
        for line in inputFile:
            fields = line.rstrip("\r\n").split("\t")
            # Columns are: family ID, individual ID, population.
            famID = fields[0]
            indID = fields[1]
            pop = fields[2]
            previous = populations.get((famID, indID))
            if previous is not None and pop != previous:
                # Same sample listed twice with conflicting populations.
                msg = ("{} {}: sample has multiple population ({} and "
                       "{})".format(famID, indID, pop, previous))
                raise ProgramError(msg)
            if pop in wanted:
                populations[(famID, indID)] = pop
    missing = wanted - set(populations.values())
    if missing:
        msg = "Population that were asked for doesn't exists in " \
              "population file: %s" % str(missing)
        raise ProgramError(msg)
    return populations
|
Reads a population file.
:param inputFileName: the name of the population file.
:param requiredPopulation: the required population.
:type inputFileName: str
:type requiredPopulation: list
:returns: a :py:class:`dict` containing the population of each samples.
|
def reprcall(name, args=(), kwargs=(), keywords='', sep=', ',
             argfilter=repr):
    """Format a function call for display."""
    if keywords:
        # '**keywords' needs a leading separator only when preceded by args.
        prefix = ', ' if (args or kwargs) else ''
        keywords = prefix + '**' + keywords
    filt = argfilter or repr
    middle_sep = sep if (args and kwargs) else ""
    return "{name}({args}{sep}{kwargs}{keywords})".format(
        name=name,
        args=reprargs(args, filter=filt),
        sep=middle_sep,
        kwargs=reprkwargs(kwargs, sep),
        keywords=keywords or '')
|
Format a function call for display.
|
def get_account_invitation(self, account_id, invitation_id, **kwargs):  # noqa: E501
    """Details of a user invitation. # noqa: E501

    An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str invitation_id: The ID of the invitation to be retrieved. (required)
    :return: UserInvitationResp
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the same
    # *_with_http_info call; it returns either the data or the thread.
    return self.get_account_invitation_with_http_info(
        account_id, invitation_id, **kwargs)  # noqa: E501
|
Details of a user invitation. # noqa: E501
An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str invitation_id: The ID of the invitation to be retrieved. (required)
:return: UserInvitationResp
If the method is called asynchronously,
returns the request thread.
|
def _to_array(value):
    """As a convenience, turn Python lists and tuples into NumPy arrays,
    and plain numbers into ``np.float64`` scalars; anything else passes
    through unchanged."""
    if isinstance(value, (tuple, list)):
        return array(value)
    if isinstance(value, (float, int)):
        return np.float64(value)
    return value
|
As a convenience, turn Python lists and tuples into NumPy arrays.
|
def _set_igmp_snooping_state(self, v, load=False):
    """
    Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_igmp_snooping_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_igmp_snooping_state() directly.
    YANG Description: IGMP Snooping Root MO
    """
    # Unwrap values that carry their own union type converter first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value with the full YANG metadata; raises on type mismatch.
        t = YANGDynClass(v,base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """igmp_snooping_state must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True)""",
        })
    self.__igmp_snooping_state = t
    # Notify the containing object, if any, that a child value changed.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmp_snooping_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmp_snooping_state() directly.
YANG Description: IGMP Snooping Root MO
|
def kitchen_delete(backend, kitchen):
    """
    Delete the named kitchen; the 'master' kitchen is protected.
    """
    click.secho('%s - Deleting kitchen %s' % (get_datetime(), kitchen), fg='green')
    if kitchen.lower() == 'master':
        raise click.ClickException('Cannot delete the kitchen called master')
    check_and_print(DKCloudCommandRunner.delete_kitchen(backend.dki, kitchen))
|
Provide the name of the kitchen to delete
|
def ToPhotlam(self, wave, flux, **kwargs):
    """Convert to ``photlam``.

    No real conversion is needed: the input flux is returned as-is for
    scalars, and as a copy for arrays so subsequent **pysynphot**
    processing cannot modify the caller's data.

    Parameters
    ----------
    wave, flux : number or array_like
        Wavelength (not used) and flux values to be used for conversion.
    kwargs : dict
        Extra keywords (not used).

    Returns
    -------
    result : number or array_like
        Converted values.
    """
    if not hasattr(flux, 'copy'):
        return flux
    # Copy so downstream processing cannot mutate the caller's array.
    return flux.copy()
|
Convert to ``photlam``.
Since there is no real conversion necessary, this returns
a copy of input flux (if array) or just the input (if scalar).
An input array is copied to avoid modifying the input
in subsequent **pysynphot** processing.
Parameters
----------
wave, flux : number or array_like
Wavelength (not used) and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
|
def from_rest(model, props):
    """ Map the REST data onto the model
    Additionally, perform the following tasks:
    * set all blank strings to None where needed
    * purge all fields not allowed as incoming data
    * purge all unknown fields from the incoming data
    * lowercase certain fields that need it
    * merge new data with existing & validate
    * mutate the existing model
    * abort on validation errors
    * coerce all the values
    """
    req = goldman.sess.req
    # Normalization passes. Their return values are ignored, so they must
    # act on ``props``/``model`` in place; run order follows the docstring.
    _from_rest_blank(model, props)
    _from_rest_hide(model, props)
    _from_rest_ignore(model, props)
    _from_rest_lower(model, props)
    # Create vs. update hooks depend on the HTTP verb of the request.
    if req.is_posting:
        _from_rest_on_create(model, props)
    elif req.is_patching:
        _from_rest_on_update(model, props)
    # Merge the incoming props into the model; validation aborts on errors.
    model.merge(props, validate=True)
    # NOTE(review): update rejection runs after the merge — presumably it
    # inspects the merged/dirty state; confirm against the helper.
    if req.is_patching:
        _from_rest_reject_update(model)
|
Map the REST data onto the model
Additionally, perform the following tasks:
* set all blank strings to None where needed
* purge all fields not allowed as incoming data
* purge all unknown fields from the incoming data
* lowercase certain fields that need it
* merge new data with existing & validate
* mutate the existing model
* abort on validation errors
* coerce all the values
|
def function(self, x, y, amp, alpha, beta, center_x, center_y):
    """
    Evaluate the Moffat profile amp * (1 + r^2/alpha^2)^(-beta) at (x, y),
    where r is the distance to (center_x, center_y).
    """
    dx = x - center_x
    dy = y - center_y
    r_squared = dx ** 2 + dy ** 2
    return amp * (1. + r_squared / alpha ** 2) ** (-beta)
|
returns Moffat profile
|
def add_input_stream(self, datastream, data_type, options, data):
    """
    Upload a data stream to a Datastream.

    :param datastream: string
    :param data_type: string ('json' selects a .json upload name, anything
        else a .csv one)
    :param options: dict
    :param data: Stream
    """
    url = self.get_add_input_data_url(datastream, options)
    extension = '.json' if data_type == 'json' else '.csv'
    # A random upload name avoids collisions between successive uploads.
    upload_name = Utils.random_string(10) + extension
    form_data = {
        'files': {
            'data': (
                upload_name,
                data,
                'text/plain;charset=UTF-8',
                {'Expires': '0'}
            )
        }
    }
    return self.http.upstream(url, form_data)
|
To add data stream to a Datastream
:param datastream: string
:param data_type: string
:param options: dict
:param data: Stream
|
def __check_mapping(self, landmarks):
    """
    Checks whether the image, from which the supplied landmarks were extracted, can
    be transformed to the learned standard intensity space without loss of
    information: every gap between consecutive landmarks must be strictly
    smaller than the corresponding gap in the standard space.
    """
    lm = numpy.asarray(landmarks)
    standard_gaps = numpy.asarray(self.__sc_umaxs)[1:] - numpy.asarray(self.__sc_umins)[:-1]
    landmark_gaps = lm[1:] - lm[:-1]
    return numpy.all(standard_gaps > landmark_gaps)
|
Checks whether the image, from which the supplied landmarks were extracted, can
be transformed to the learned standard intensity space without loss of
information.
|
def libvlc_media_set_user_data(p_md, p_new_user_data):
    '''Sets media descriptor's user_data. user_data is specialized data
    accessed by the host application, VLC.framework uses it as a pointer to
    an native object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    @param p_new_user_data: pointer to user data.
    '''
    # Resolve the native entry point once; _Cfunctions caches the ctypes
    # prototype so it is only built on first use.
    f = _Cfunctions.get('libvlc_media_set_user_data', None) or \
        _Cfunction('libvlc_media_set_user_data', ((1,), (1,),), None,
                    None, Media, ctypes.c_void_p)
    return f(p_md, p_new_user_data)
|
Sets media descriptor's user_data. user_data is specialized data
accessed by the host application, VLC.framework uses it as a pointer to
an native object that references a L{Media} pointer.
@param p_md: media descriptor object.
@param p_new_user_data: pointer to user data.
|
def remove_dataset_from_collection(dataset_id, collection_id, **kwargs):
    """
    Remove a single dataset from a dataset collection.

    Raises HydraError when the dataset is not part of the collection.
    Returns 'OK' on success.
    """
    # Existence check only — raises if the collection itself is missing.
    _get_collection(collection_id)
    collection_item = _get_collection_item(collection_id, dataset_id)
    if collection_item is None:
        # Bug fix: the message is now formatted before raising; previously
        # the template and the ids were passed as separate positional
        # arguments, so the rendered message never contained the ids.
        raise HydraError("Dataset %s is not in collection %s." %
                         (dataset_id, collection_id))
    db.DBSession.delete(collection_item)
    db.DBSession.flush()
    db.DBSession.expunge_all()
    return 'OK'
|
Remove a single dataset from a dataset collection.
|
def address(cls, address, bits = None):
    """
    Format a memory address as fixed-width uppercase hexadecimal.

    @type  address: int
    @param address: Memory address.
    @type  bits: int
    @param bits:
        (Optional) Number of bits of the target architecture.
        The default is platform dependent. See: L{HexDump.address_size}
    @rtype:  str
    @return: Text output.
    """
    if bits is None:
        address_size = cls.address_size
        bits = win32.bits
    else:
        # Bug fix: use integer division — "/" yields a float on Python 3,
        # which would leak into the "%.<n>X" width specification.
        address_size = bits // 4
    if address < 0:
        # Convert to the unsigned two's-complement representation.
        address = ((2 ** bits) - 1) ^ ~address
    return ('%%.%dX' % address_size) % address
|
@type address: int
@param address: Memory address.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text output.
|
def open_readable(self, name):
    """Open cur_dir/name for reading.

    The remote file is fetched completely into a spooled buffer first,
    so the returned object supports read() (and seek()).

    Args:
        name (str): file name, located in self.curdir
    Returns:
        file-like (must support read() method)
    """
    assert compat.is_native(name)
    buf = SpooledTemporaryFile(max_size=self.MAX_SPOOL_MEM, mode="w+b")
    self.ftp.retrbinary(
        "RETR {}".format(name), buf.write, FtpTarget.DEFAULT_BLOCKSIZE
    )
    # Rewind so callers can read from the start.
    buf.seek(0)
    return buf
|
Open cur_dir/name for reading.
Note: we read everything into a buffer that supports .read().
Args:
name (str): file name, located in self.curdir
Returns:
file-like (must support read() method)
|
def post_event_access_code(self, id, access_code_id, **data):
    """
    POST /events/:id/access_codes/:access_code_id/
    Updates an access code; returns the result as a :format:`access_code` as the
    key ``access_code``.
    """
    # Bug fix: the second placeholder previously reused {0} (the event id),
    # so the access-code id never appeared in the URL.
    return self.post(
        "/events/{0}/access_codes/{1}/".format(id, access_code_id), data=data)
|
POST /events/:id/access_codes/:access_code_id/
Updates an access code; returns the result as a :format:`access_code` as the
key ``access_code``.
|
def _udf_cell(args, js):
    """Implements the bigquery_udf cell magic for ipython notebooks.
    The supported syntax is:
    %%bigquery udf --module <var>
    <js function>
    Args:
      args: the optional arguments following '%%bigquery udf'.
      js: the UDF declaration (inputs and outputs) and implementation in javascript.
    Returns:
      The results of executing the UDF converted to a dataframe if no variable
      was specified. None otherwise.
    """
    variable_name = args['module']
    if not variable_name:
        raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')
    # Parse out the input and output specification
    spec_pattern = r'\{\{([^}]+)\}\}'
    spec_part_pattern = r'[a-z_][a-z0-9_]*'
    # specs[0] is the input row spec, specs[1] the output emitter spec.
    specs = re.findall(spec_pattern, js)
    if len(specs) < 2:
        raise Exception('The JavaScript must declare the input row and output emitter parameters '
                        'using valid jsdoc format comments.\n'
                        'The input row param declaration must be typed as {{field:type, field2:type}} '
                        'and the output emitter param declaration must be typed as '
                        'function({{field:type, field2:type}}.')
    inputs = []
    # The spec alternates name, type, name, type, ... hence the pairing below.
    input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
    if len(input_spec_parts) % 2 != 0:
        raise Exception('Invalid input row param declaration. The jsdoc type expression must '
                        'define an object with field and type pairs.')
    for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
        inputs.append((n, t))
    outputs = []
    output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
    if len(output_spec_parts) % 2 != 0:
        raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
                        'define a function accepting an an object with field and type pairs.')
    for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
        outputs.append((n, t))
    # Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.
    # Object names can contain any characters except \r and \n.
    import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
    imports = re.findall(import_pattern, js)
    # Split the cell if necessary. We look for a 'function(' with no name and a header comment
    # block with @param and assume this is the primary function, up to a closing '}' at the start
    # of the line. The remaining cell content is used as support code.
    split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
    parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
    support_code = ''
    if parts:
        support_code = (parts.group(1) + parts.group(3)).strip()
        if len(support_code):
            js = parts.group(2)
    # Finally build the UDF object
    udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
    datalab.utils.commands.notebook_environment()[variable_name] = udf
|
Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise.
|
def load_json_file(json_file):
    """Load a JSON file, validate its content format, and return the data."""
    with io.open(json_file, encoding='utf-8') as fp:
        try:
            content = json.load(fp)
        except exceptions.JSONDecodeError:
            err_msg = u"JSONDecodeError: JSON file format error: {}".format(json_file)
            logger.log_error(err_msg)
            raise exceptions.FileFormatError(err_msg)
    _check_format(json_file, content)
    return content
|
load json file and check file content format
|
def select_all(self, table, limit=MAX_ROWS_PER_QUERY, execute=True):
    """Query all rows and columns from a table.

    Falls back to batched selects when the table holds more rows than
    ``limit`` allows in a single query.
    """
    total_rows = self.count_rows(table)
    if total_rows <= limit:
        return self.select(table, '*', execute=execute)
    return self._select_batched(table, '*', total_rows, limit, execute=execute)
|
Query all rows and columns from a table.
|
def intervalSum(self, a, b):
    """Return t[a] + ... + t[b].

    :param int a b: with 1 <= a <= b
    """
    # Difference of two prefix sums yields the closed interval [a, b].
    return self.prefixSum(b) - self.prefixSum(a - 1)
|
:param int a b: with 1 <= a <= b
:returns: t[a] + ... + t[b]
|
def _build(self, leaves):
    """Create the next aggregation level of the tree, wiring parent,
    sibling and side references for every paired node.
    """
    next_level = []
    # An odd trailing leaf cannot be paired; promote it unchanged.
    carry = leaves.pop(-1) if len(leaves) % 2 == 1 else None
    for i in range(0, len(leaves), 2):
        left, right = leaves[i], leaves[i + 1]
        parent = Node(left.val + right.val)
        parent.l, parent.r = left, right
        left.side, right.side = 'L', 'R'
        left.p, right.p = parent, parent
        left.sib, right.sib = right, left
        next_level.append(parent)
    if carry:
        next_level.append(carry)
    return next_level
|
Private helper function to create the next aggregation level and put all references in place.
|
def pypackable(name, pytype, format):
    """
    Create a "mix-in" class combining a python type with Packable,
    carrying the given struct format.
    """
    size, items = _formatinfo(format)
    namespace = {
        '_format_': format,
        '_size_': size,
        '_items_': items,
    }
    # Use Packable's metaclass so the new type is built the same way.
    metacls = type(Packable)
    return metacls(name, (pytype, Packable), namespace)
|
Create a "mix-in" class with a python type and a
Packable with the given struct format
|
def get_python_json(scala_json):
    """Return a plain Python object (dict / list / scalar) converted from
    an org.json4s.JsonAST value, dispatching on the JVM class name."""
    def convert_node(node):
        cls_name = node.__class__.__name__
        if cls_name in ('org.json4s.JsonAST$JValue',
                        'org.json4s.JsonAST$JObject'):
            # Make a dictionary and then convert each value.
            raw = get_python_dict(node.values())
            return {key: convert_node(val) for key, val in raw.items()}
        if (cls_name.startswith('scala.collection.immutable.Map')
                or cls_name == 'scala.collection.immutable.HashMap$HashTrieMap'):
            raw = get_python_dict(node)
            return {key: convert_node(val) for key, val in raw.items()}
        if cls_name == 'org.json4s.JsonAST$JArray':
            return [convert_node(entry)
                    for entry in get_python_list(node.values())]
        if cls_name == 'scala.collection.immutable.$colon$colon':
            return [convert_node(entry) for entry in get_python_list(node)]
        if cls_name == 'scala.math.BigInt':
            return node.intValue()
        if cls_name == 'scala.None$':
            return None
        if cls_name == 'scala.collection.immutable.Nil$':
            return []
        if isinstance(node, (str, int, float)):
            return node
        logger.error('Cannot convert %s into Python' % cls_name)
        return cls_name
    return convert_node(scala_json)
|
Return a JSON dict from a org.json4s.JsonAST
|
def acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
    """Takes in an explicit mapping of full paths to .wav files to have
    acoustic similarity computed.

    Parameters
    ----------
    path_mapping : iterable of iterables
        Explicit mapping of full paths of .wav files, in the form of a
        list of tuples to be compared.

    Returns
    -------
    dict
        Returns a list of tuples corresponding to the `path_mapping` input,
        with a new final element in the tuple being the similarity/distance
        score for that mapping.
    """
    # Use three quarters of the available cores.
    num_cores = int((3 * cpu_count()) / 4)
    # Collect every distinct path referenced by the mapping.
    all_paths = set()
    for pair in path_mapping:
        all_paths.update(pair)
    if multiprocessing:
        gen_cache, calc_distances = generate_cache_mp, calculate_distances_mp
    else:
        gen_cache, calc_distances = generate_cache_th, calculate_distances_th
    cache = gen_cache(all_paths, analysis_function, num_cores, call_back, stop_check)
    return calc_distances(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
|
Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping.
|
def export_table(datatable_code, **kwargs):
    """Downloads an entire table as a zip file.

    :param str datatable_code: The datatable code to download, such as MER/F1
    :param str filename: The filename for the download. \
    If not specified, will download to the current working directory
    :param str api_key: Most databases require api_key for bulk download
    """
    # authtoken is not supported for bulk downloads; fail fast.
    if 'authtoken' in kwargs:
        raise InvalidRequestError(Message.ERROR_AUTHTOKEN_NOT_SUPPORTED)
    ApiKeyUtil.init_api_key_from_args(kwargs)
    target = kwargs.pop('filename', '.')
    return Datatable(datatable_code).download_file(target, **kwargs)
|
Downloads an entire table as a zip file.
:param str datatable_code: The datatable code to download, such as MER/F1
:param str filename: The filename for the download. \
If not specified, will download to the current working directory
:param str api_key: Most databases require api_key for bulk download
|
def data_to_df(self, sysbase=False):
    """
    Build, cache and return a pandas.DataFrame of device parameters,
    indexed by the ``idx`` column.

    :param sysbase: save per unit values in system base
    """
    param_dict = self.data_to_dict(sysbase=sysbase)
    self._check_pd()
    frame = pd.DataFrame(data=param_dict).set_index('idx')
    self.param_df = frame
    return frame
|
Return a pandas.DataFrame of device parameters.
:param sysbase: save per unit values in system base
|
def to_char(token):
    """Transforms an ASCII control-picture symbol (U+2400..U+240D) into the
    real control character it depicts; any other token is returned as-is.

    Keyword arguments:
    token -- the token to transform
    """
    code_point = ord(token)
    if 9216 <= code_point <= 9229:
        return _unichr(code_point - 9216)
    return token
|
Transforms the ASCII control character symbols to their real char.
Note: If the token is not an ASCII control character symbol, just
return the token.
Keyword arguments:
token -- the token to transform
|
def p_throttling(p):
    """
    throttling : THROTTLING COLON NONE
               | THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
    """
    # NOTE: the docstring above is the ply grammar rule and must not change.
    # Default to no throttling; the longer production selects tail-drop.
    settings = NoThrottlingSettings()
    if len(p) == 7:
        settings = TailDropSettings(int(p[5]))
    p[0] = {"throttling": settings}
|
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
|
async def _get_response(self, msg):
    """Perform the CoAP request and return ``(pending_request, response)``.

    Transport failures are normalized into this client's exception types:
    construction/render problems raise ClientError, timeouts raise
    RequestTimeout, socket-level errors raise ServerError.  The protocol
    object is reset before re-raising for timeout/socket/cancel errors so
    the next call starts with a fresh connection.
    """
    try:
        protocol = await self._get_protocol()
        pr = protocol.request(msg)
        r = await pr.response
        return pr, r
    except ConstructionRenderableError as e:
        raise ClientError("There was an error with the request.", e)
    except RequestTimedOut as e:
        # Drop the (possibly wedged) protocol so the next request reconnects.
        await self._reset_protocol(e)
        raise RequestTimeout('Request timed out.', e)
    except (OSError, socket.gaierror, Error) as e:
        # aiocoap sometimes raises an OSError/socket.gaierror too.
        # aiocoap issue #124
        await self._reset_protocol(e)
        raise ServerError("There was an error with the request.", e)
    except asyncio.CancelledError as e:
        # Propagate cancellation, but reset the protocol first.
        await self._reset_protocol(e)
        raise e
|
Perform the request, get the response.
|
def draw_step(self):
    """Generator that recomputes all vertex coordinates and edge routing
    after each single ordering step (one layer after the other, from top
    to bottom to top).

    Purely inefficient! Use it only for "animation" or debugging purpose.
    """
    for step in self.ordering_step():
        self.setxy()
        self.draw_edges()
        yield step
|
iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
|
def json(self):
    """ Return a JSON-serializable representation of this result.

    The output of this function can be converted to a serialized string
    with :any:`json.dumps`.
    """
    data = super(CalTRACKHourlyModel, self).json()
    # Extend the base representation with the hourly-model tables.
    data["occupancy_lookup"] = self.occupancy_lookup.to_json(orient="split")
    data["temperature_bins"] = self.temperature_bins.to_json(orient="split")
    return data
|
Return a JSON-serializable representation of this result.
The output of this function can be converted to a serialized string
with :any:`json.dumps`.
|
def p(self, path):
    """
    provide absolute path within the container

    :param path: path with container
    :return: str
    """
    relative = path[1:] if path.startswith("/") else path
    absolute = os.path.join(self.mount_point, relative)
    logger.debug("path = %s", absolute)
    return absolute
|
provide absolute path within the container
:param path: path with container
:return: str
|
def set_pixel(self, x, y, state):
    """Set pixel at "x,y" to "state" where state can be one of "ON", "OFF"
    or "TOGGLE".

    Coordinates are zero-based here; the device command is one-based.
    """
    command = "P{0},{1},{2}".format(x + 1, y + 1, state)
    self.send_cmd(command)
|
Set pixel at "x,y" to "state" where state can be one of "ON", "OFF"
or "TOGGLE"
|
def binary_size(self):
    '''Return the number of bytes needed to store this parameter.'''
    name_bytes = len(self.name.encode('utf-8'))
    desc_bytes = len(self.desc.encode('utf-8'))
    total = 1                            # group_id
    total += 2                           # next offset marker
    total += 1 + name_bytes              # size of name and name bytes
    total += 1                           # data size
    total += 1 + len(self.dimensions)    # size of dimensions and dimension bytes
    total += self.total_bytes            # data
    total += 1 + desc_bytes              # size of desc and desc bytes
    return total
|
Return the number of bytes needed to store this parameter.
|
def acknowledged_by(self):
    """Username of the acknowledger, or None when not acknowledged."""
    if not self.is_acknowledged:
        return None
    info = self._proto.acknowledgeInfo
    if not info.HasField('acknowledgedBy'):
        return None
    return info.acknowledgedBy
|
Username of the acknowledger.
|
def update_iteration(self):
    """Update the last experiment group's iteration with experiment performance."""
    iteration_config = self.get_iteration_config()
    if not iteration_config:
        return
    experiment_ids = iteration_config.experiment_ids
    metrics = self.experiment_group.get_experiments_metrics(
        experiment_ids=experiment_ids,
        metric=self.get_metric_name()
    )
    configs = self.experiment_group.get_experiments_declarations(
        experiment_ids=experiment_ids
    )
    iteration_config.experiments_configs = list(configs)
    # Keep only experiments that have actually reported the metric.
    iteration_config.experiments_metrics = [
        metric for metric in metrics if metric[1] is not None
    ]
    self._update_config(iteration_config)
|
Update the last experiment group's iteration with experiment performance.
|
def apply(self, dir_or_plan=None, input=False, skip_plan=False, no_color=IsFlagged,
          **kwargs):
    """
    refer to https://terraform.io/docs/commands/apply.html

    no-color is flagged by default
    :param no_color: disable color of stdout
    :param input: disable prompt for a missing variable
    :param dir_or_plan: folder relative to working folder
    :param skip_plan: force apply without plan (default: false)
    :param kwargs: same as kwags in method 'cmd'
    :returns return_code, stdout, stderr
    """
    options = kwargs
    options['input'] = input
    options['no_color'] = no_color
    options['auto-approve'] = (skip_plan == True)
    option_dict = self._generate_default_options(options)
    args = self._generate_default_args(dir_or_plan)
    return self.cmd('apply', *args, **option_dict)
|
refer to https://terraform.io/docs/commands/apply.html
no-color is flagged by default
:param no_color: disable color of stdout
:param input: disable prompt for a missing variable
:param dir_or_plan: folder relative to working folder
:param skip_plan: force apply without plan (default: false)
:param kwargs: same as kwags in method 'cmd'
:returns return_code, stdout, stderr
|
def calculate_output(self, variable_name, period):
    """
    Calculate the value of a variable using the ``calculate_output`` attribute
    of the variable, falling back to a plain ``calculate`` when undefined.
    """
    variable = self.tax_benefit_system.get_variable(variable_name, check_existence = True)
    if variable.calculate_output is not None:
        return variable.calculate_output(self, variable_name, period)
    return self.calculate(variable_name, period)
|
Calculate the value of a variable using the ``calculate_output`` attribute of the variable.
|
def get_event_q(self, event_name):
    """Obtain the queue storing events of the specified name.

    If no event of this name has been polled yet, a fresh queue is
    created and registered so a later poll can fill it.

    Returns:
        A queue storing all the events of the specified name.

    Raises:
        queue.Empty: Raised if the queue does not exist and timeout has
            passed.
    """
    # Hold the lock while touching event_dict; a plain acquire()/release()
    # pair would leak the lock if queue creation raised, so use try/finally.
    self.lock.acquire()
    try:
        if self.event_dict.get(event_name) is None:
            self.event_dict[event_name] = queue.Queue()
        event_queue = self.event_dict[event_name]
    finally:
        self.lock.release()
    return event_queue
|
Obtain the queue storing events of the specified name.
If no event of this name has been polled, wait for one to.
Returns:
A queue storing all the events of the specified name.
None if timed out.
Raises:
queue.Empty: Raised if the queue does not exist and timeout has
passed.
|
def build_columns(self, X, verbose=False):
    """construct the model matrix columns for the term

    Parameters
    ----------
    X : array-like
        Input dataset with n rows

    verbose : bool
        whether to show warnings

    Returns
    -------
    scipy sparse array with n rows
    """
    columns = self._terms[0].build_columns(X, verbose=verbose)
    # Successively take tensor products with the remaining marginal bases.
    for marginal in self._terms[1:]:
        marginal_columns = marginal.build_columns(X, verbose=verbose)
        columns = tensor_product(columns, marginal_columns)
    if self.by is not None:
        columns *= X[:, self.by][:, np.newaxis]
    return sp.sparse.csc_matrix(columns)
|
construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
|
def retcode_pillar(pillar_name):
    '''
    Run one or more nagios plugins from pillar data and get the result of cmd.retcode

    The pillar have to be in this format::

        ------
        webserver:
            Ping_google:
              - check_icmp: 8.8.8.8
              - check_icmp: google.com
            Load:
              - check_load: -w 0.8 -c 1
            APT:
              - check_apt
        -------

    webserver is the role to check, the next keys are the group and the items
    the check with the arguments if needed

    You must group different checks (one or more); each group always reports
    the highest status value among all of its checks.

    CLI Example:

    .. code-block:: bash

        salt '*' nagios.retcode webserver
    '''
    groups = __salt__['pillar.get'](pillar_name)

    check = {}
    data = {}

    for group in groups:
        commands = groups[group]
        for command in commands:
            # Check if is a dict to get the arguments
            # in command if not set the arguments to empty string
            if isinstance(command, dict):
                plugin = next(six.iterkeys(command))
                args = command[plugin]
            else:
                plugin = command
                args = ''
            # Run the plugin; retcode() returns {group: {'status': ...}}.
            check.update(retcode(plugin, args, group))
            current_value = 0
            new_value = int(check[group]['status'])
            if group in data:
                current_value = int(data[group]['status'])
            # Keep the worst (highest) status seen so far for this group.
            if (new_value > current_value) or (group not in data):
                if group not in data:
                    data[group] = {}
                data[group]['status'] = new_value
    return data
|
Run one or more nagios plugins from pillar data and get the result of cmd.retcode
The pillar have to be in this format::
------
webserver:
Ping_google:
- check_icmp: 8.8.8.8
- check_icmp: google.com
Load:
- check_load: -w 0.8 -c 1
APT:
- check_apt
-------
webserver is the role to check, the next keys are the group and the items
the check with the arguments if needed
You must group different checks (one or more); each group always returns
the highest value of all its checks.
CLI Example:
.. code-block:: bash
salt '*' nagios.retcode webserver
|
def send_media_file(self, filename):
    """
    Serve ``filename`` from the configured MEDIA_FOLDER to the browser,
    using the send-file max-age as the cache timeout.
    """
    timeout = self.get_send_file_max_age(filename)
    return send_from_directory(
        self.config['MEDIA_FOLDER'], filename, cache_timeout=timeout)
|
Function used to send media files from the media folder to the browser.
|
def accept(self, addr):
    """
    Add an address to the set of addresses this proxy is permitted
    to introduce.

    :param addr: The address to add.
    """
    # Validate first; invalid addresses are logged and dropped.
    if _parse_ip(addr) is None:
        LOG.warn("Cannot add address %r to proxy %s: "
                 "invalid address" % (addr, self.address))
    else:
        self.accepted.add(addr)
|
Add an address to the set of addresses this proxy is permitted
to introduce.
:param addr: The address to add.
|
def line_line(origins,
              directions,
              plane_normal=None):
    """
    Find the intersection between two lines.
    Uses terminology from:
    http://geomalgorithms.com/a05-_intersect-1.html

    line 1: P(s) = p_0 + sU
    line 2: Q(t) = q_0 + tV

    Parameters
    ---------
    origins:      (2, d) float, points on lines (d in [2,3])
    directions:   (2, d) float, direction vectors
    plane_normal: (3, ) float, if not passed computed from cross

    Returns
    ---------
    intersects:   boolean, whether the lines intersect.
                  In 2D, false if the lines are parallel
                  In 3D, false if lines are not coplanar
    intersection: if intersects: (d) length point of intersection
                  else: None
    """
    # check so we can accept 2D or 3D points
    origins, is_2D = stack_3D(origins, return_2D=True)
    directions, is_2D = stack_3D(directions, return_2D=True)
    # unitize direction vectors
    directions /= np.linalg.norm(directions,
                                 axis=1).reshape((-1, 1))

    # exit if values are parallel
    # (tol.zero is presumably a small float epsilon — project constant)
    if np.sum(np.abs(np.diff(directions,
                             axis=0))) < tol.zero:
        return False, None

    # using notation from docstring
    q_0, p_0 = origins
    v, u = directions
    w = p_0 - q_0

    # recompute plane normal if not passed
    if plane_normal is None:
        # the normal of the plane given by the two direction vectors
        plane_normal = np.cross(u, v)
        plane_normal /= np.linalg.norm(plane_normal)

    # vectors perpendicular to the two lines
    v_perp = np.cross(v, plane_normal)
    v_perp /= np.linalg.norm(v_perp)

    # if the vector from origin to origin is on the plane given by
    # the direction vector, the dot product with the plane normal
    # should be within floating point error of zero
    coplanar = abs(np.dot(plane_normal, w)) < tol.zero
    if not coplanar:
        return False, None

    # value of parameter s where intersection occurs
    s_I = (np.dot(-v_perp, w) /
           np.dot(v_perp, u))

    # plug back into the equation of the line to find the point
    # (for 2D input, drop the padded third coordinate again)
    intersection = p_0 + s_I * u

    return True, intersection[:(3 - is_2D)]
|
Find the intersection between two lines.
Uses terminology from:
http://geomalgorithms.com/a05-_intersect-1.html
line 1: P(s) = p_0 + sU
line 2: Q(t) = q_0 + tV
Parameters
---------
origins: (2, d) float, points on lines (d in [2,3])
directions: (2, d) float, direction vectors
plane_normal: (3, ) float, if not passed computed from cross
Returns
---------
intersects: boolean, whether the lines intersect.
In 2D, false if the lines are parallel
In 3D, false if lines are not coplanar
intersection: if intersects: (d) length point of intersection
else: None
|
def removeCMSPadding(str, blocksize=AES_blocksize):
    '''CMS (PKCS#7) padding removal: the last byte holds the number of
    padding bytes to strip.

    :param str: padded plaintext (bytes or text)
    :param blocksize: cipher block size; the pad length may not exceed it
    :returns: the input with its padding bytes removed
    '''
    try:
        pad_len = ord(str[-1])  # text input: last char encodes pad length
    except TypeError:
        pad_len = str[-1]       # bytes input: indexing already yields an int
    # Valid CMS padding is 1..blocksize bytes.  A pad length of 0 previously
    # passed both checks and str[:-0] silently returned an empty sequence.
    assert 0 < pad_len <= blocksize, 'padding error'
    assert pad_len <= len(str), 'padding error'
    return str[:-pad_len]
|
CMS padding: Remove padding with bytes containing the number of padding bytes
|
def drawRect(self, x1, y1, x2, y2, angle=0):
    """
    Draws a rectangle on the current :py:class:`Layer` with the current :py:class:`Brush`.
    Coordinates are relative to the original layer size WITHOUT downsampling applied.

    :param x1: The X of the top-left corner of the rectangle.
    :param y1: The Y of the top-left corner of the rectangle.
    :param x2: The X of the bottom-right corner of the rectangle.
    :param y2: The Y of the bottom-right corner of the rectangle.
    :param angle: An angle (in degrees) of rotation around the center of the rectangle.
    :rtype: Nothing.
    """
    corners = [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]
    center_x = (x1 + x2) * 0.5
    center_y = (y1 + y2) * 0.5
    rotated = rotateMatrix(corners, center_x, center_y, angle)
    self.drawClosedPath(rotated)
|
Draws a rectangle on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x1: The X of the top-left corner of the rectangle.
:param y1: The Y of the top-left corner of the rectangle.
:param x2: The X of the bottom-right corner of the rectangle.
:param y2: The Y of the bottom-right corner of the rectangle.
:param angle: An angle (in degrees) of rotation around the center of the rectangle.
:rtype: Nothing.
|
def drop_network_by_id(self, network_id: int) -> None:
    """Look up a network by its database identifier and drop it."""
    self.drop_network(self.session.query(Network).get(network_id))
|
Drop a network by its database identifier.
|
def append_copy(self, elem):
    """Append a copy of the specified element as a child and return it."""
    copied = lib.lsl_append_copy(self.e, elem.e)
    return XMLElement(copied)
|
Append a copy of the specified element as a child.
|
def _put(self, item: SQLBaseObject):
    """Merge an item into the database session, refreshing its lastUpdate
    column, unless its DTO type is configured to never be cached."""
    # An expiration of 0 for this DTO type means: do not cache at all.
    if self._expirations.get(item._dto_type) == 0:
        return
    item.updated()
    self._session().merge(item)
|
Puts a item into the database. Updates lastUpdate column
|
def save_to_file(self, filename, format=None, **kwargs):
    """ Save the object to file given by filename.

    :param filename: target path, opened in binary write mode
    :param format: serialization protocol; when None it is derived
        from the file extension
    :param kwargs: forwarded to ``save_to_file_like``
    """
    if format is None:
        # try to derive protocol from file extension
        format = format_from_extension(filename)
    # The original used the Python 2-only builtin ``file``; ``open``
    # behaves identically there and also works on Python 3.
    with open(filename, 'wb') as fp:
        self.save_to_file_like(fp, format, **kwargs)
|
Save the object to file given by filename.
|
def incr(self, name, amount=1):
    """Increment the integer value stored at ``name`` by ``amount``.

    When the key does not exist it is initialised first and then
    incremented (presumably Redis INCRBY semantics — depends on the
    wrapped client; verify against ``self.client``).

    :param name: key
    :param amount: increment step (default 1)
    :return: the value after incrementing
    """
    return self.client.incr(name, amount=amount)
|
自增key的对应的值,当key不存在时则为默认值,否则在基础上自增整数amount
:param name: key
:param amount: 默认值
:return: 返回自增后的值
|
def divide_prefixes(prefixes: List[str], seed: int = 0) -> Tuple[List[str], List[str], List[str]]:
    """Shuffle the prefixes deterministically (by ``seed``) and split them
    into training (~90%), validation (~5%) and test (~5%) subsets, each
    guaranteed to be non-empty."""
    count = len(prefixes)
    if count < 3:
        raise PersephoneException(
            "{} cannot be split into 3 groups as it only has {} items".format(prefixes, count)
        )
    Ratios = namedtuple("Ratios", ["train", "valid", "test"])
    ratios = Ratios(.90, .05, .05)
    train_end = int(ratios.train * count)
    valid_end = int(train_end + ratios.valid * count)
    # Guarantee at least one element lands in the test subset.
    if valid_end == count:
        valid_end -= 1
    # If the train and valid cut points coincide no validation prefixes
    # would remain, so move the train boundary back by one.
    if train_end == valid_end:
        train_end -= 1
    random.seed(seed)
    random.shuffle(prefixes)
    train_prefixes = prefixes[:train_end]
    valid_prefixes = prefixes[train_end:valid_end]
    test_prefixes = prefixes[valid_end:]
    assert train_prefixes, "Got empty set for training data"
    assert valid_prefixes, "Got empty set for validation data"
    assert test_prefixes, "Got empty set for testing data"
    return train_prefixes, valid_prefixes, test_prefixes
|
Divide data into training, validation and test subsets
|
def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_mac(self, **kwargs):
    """Auto Generated Code

    Build the show-vcs/output/vcs-nodes/vcs-node-info/node-switch-mac
    XML payload and dispatch it through the callback.
    NOTE: the original also created an unused ``ET.Element("config")``
    that was immediately overwritten; it has been removed.
    """
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_switch_mac = ET.SubElement(vcs_node_info, "node-switch-mac")
    node_switch_mac.text = kwargs.pop('node_switch_mac')

    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
|
Auto Generated Code
|
def get_octets(self, octets=None, timeout=1.0):
    """Get NDEF message octets from a SNEP Server.

    .. versionadded:: 0.13

    If the client has not yet a data link connection with a SNEP
    Server, it temporarily connects to the default SNEP Server,
    sends the message octets, disconnects after the server
    response, and returns the received message octets.  Returns
    None when the connection is refused or the request could not
    be sent.
    """
    if octets is None:
        # Send NDEF Message with one empty Record.
        octets = b'\xd0\x00\x00'
    if not self.socket:
        # No existing data link: connect temporarily and remember to
        # release the connection when done.
        try:
            self.connect('urn:nfc:sn:snep')
        except nfc.llcp.ConnectRefused:
            return None
        else:
            self.release_connection = True
    else:
        self.release_connection = False
    try:
        # SNEP GET request: version 0x10, opcode 0x01, payload length,
        # then the acceptable response length followed by the octets.
        request = struct.pack('>BBLL', 0x10, 0x01, 4 + len(octets),
                              self.acceptable_length) + octets
        if not send_request(self.socket, request, self.send_miu):
            return None
        response = recv_response(
            self.socket, self.acceptable_length, timeout)
        if response is not None:
            # 0x81 is the SNEP Success response code; anything else is
            # raised as a SnepError.  The 6-byte header is stripped.
            if response[1] != 0x81:
                raise SnepError(response[1])
            return response[6:]
    finally:
        if self.release_connection:
            self.close()
|
Get NDEF message octets from a SNEP Server.
.. versionadded:: 0.13
If the client has not yet a data link connection with a SNEP
Server, it temporarily connects to the default SNEP Server,
sends the message octets, disconnects after the server
response, and returns the received message octets.
|
def make_pdb(self, alt_states=False, inc_ligands=True):
    """Generates a PDB string for the `Polymer`.

    Parameters
    ----------
    alt_states : bool, optional
        Include alternate conformations for `Monomers` in PDB.
    inc_ligands : bool, optional
        Includes `Ligands` in PDB.

    Returns
    -------
    pdb_str : str
        String of the pdb for the `Polymer`. Generated using information
        from the component `Monomers`.
    """
    # Ensure every monomer carries an id before writing.
    if not all(x.id for x in self._monomers):
        self.relabel_monomers()
    monomers = self._monomers
    if self.ligands and inc_ligands:
        monomers = self._monomers + self.ligands._monomers
    return write_pdb(monomers, self.id, alt_states=alt_states)
|
Generates a PDB string for the `Polymer`.
Parameters
----------
alt_states : bool, optional
Include alternate conformations for `Monomers` in PDB.
inc_ligands : bool, optional
Includes `Ligands` in PDB.
Returns
-------
pdb_str : str
String of the pdb for the `Polymer`. Generated using information
from the component `Monomers`.
|
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in vars(self.parsed_data).iteritems():
exec "self.config.%s = v"%k in locals(), globals()
|
self.parsed_data->self.config
|
def on_to_position(self, speed, position, brake=True, block=True):
    """
    Rotate the motor at ``speed`` to the absolute ``position``.

    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units.
    """
    native_speed = self._speed_native_units(speed)
    self.speed_sp = int(round(native_speed))
    self.position_sp = position
    self._set_brake(brake)
    self.run_to_abs_pos()
    if not block:
        return
    self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
    self.wait_until_not_moving()
|
Rotate the motor at ``speed`` to ``position``
``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
object, enabling use of other units.
|
def list_role_binding_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind RoleBinding

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str _continue: Continuation token from a previous paged list call.
    :param str field_selector: Restrict results by object fields. Defaults to everything.
    :param str label_selector: Restrict results by object labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: Show changes after this resource version (watch) or set list read semantics.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes and stream add/update/remove notifications.
    :return: V1RoleBindingList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
    (data) = self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
    return data
|
list or watch objects of kind RoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1RoleBindingList
If the method is called asynchronously,
returns the request thread.
|
def clean_value(self):
    """
    Build a JSON-serialization-ready representation of this node list.

    This is the method used to serialize and store the object data in to DB.

    Returns:
        List of dicts, one per contained model.
    """
    # Delegate to the parent class's clean_value() for each contained model.
    return [super(ListNode, entry).clean_value() for entry in self]
|
Populates json serialization ready data.
This is the method used to serialize and store the object data in to DB
Returns:
List of dicts.
|
def download_document(self, document: Document, overwrite=True, path=None):
    """
    Download a document, writing it under the given path.

    If no path is provided, the target directory is built from the configured
    base path plus the document's Stud.IP path. With ``overwrite`` set, an
    existing local copy is replaced whenever the file changed on Stud.IP
    since the last check.
    """
    if not path:
        path = os.path.join(os.path.expanduser(c["base_path"]), document.path)
    target = join(path, document.title)
    # NOTE: self.modified(...) is evaluated unconditionally (before the
    # 'and overwrite') on purpose, matching the original call order.
    if (self.modified(document) and overwrite) or not os.path.exists(target):
        log.info("Downloading %s" % target)
        response = self._get('/api/documents/%s/download' % document.id, stream=True)
        os.makedirs(path, exist_ok=True)
        with open(target, 'wb') as out:
            shutil.copyfileobj(response.raw, out)
|
    Download a document to the given path. If no path is provided, the path is constructed from the base_url + Stud.IP path + filename.
    If overwrite is set, the local version will be overwritten if the file was changed on Stud.IP since the last check.
|
def before_export(self):
    """
    Set the `nbytes` attribute for every key in the datastore.
    """
    # Fetch (or lazily create) the composite source model info record.
    try:
        csm_info = self.datastore['csm_info']
    except KeyError:
        csm_info = self.datastore['csm_info'] = self.csm.info
    # Sanity check that eff_ruptures have been set, i.e. are not -1.
    for source_model in csm_info.source_models:
        for group in source_model.src_groups:
            assert group.eff_ruptures != -1, group
    for key in self.datastore:
        self.datastore.set_nbytes(key)
    self.datastore.flush()
|
        Set the `nbytes` attribute for every key in the datastore.
|
def compressBWTPoolProcess(tup):
    '''
    During compression, each available process will calculate a subportion of the BWT independently using this
    function. This process takes the chunk and rewrites it into a given filename using the technique described
    in the compressBWT(...) function header

    NOTE(review): this is Python 2 code (print statement, xrange); porting requires
    care around the numpy integer division below.

    Encoding scheme: each output byte packs a 3-bit symbol in its low bits and a
    5-bit chunk of the run length in its high bits; run lengths are written
    least-significant chunk first across consecutive bytes.
    '''
    #pull the tuple info
    inputFN = tup[0]       # filename of the memmapped BWT to read
    startIndex = tup[1]    # inclusive start of this process's slice
    endIndex = tup[2]      # exclusive end of this process's slice
    tempFN = tup[3]        # filename for this process's compressed output
    #this shouldn't happen
    if startIndex == endIndex:
        print 'ERROR: EQUAL INDICES'
        return None
    #load the file
    bwt = np.load(inputFN, 'r')
    #create bit spacings: 3 bits per symbol, remaining 5 bits per byte for the run length
    letterBits = 3
    numberBits = 8-letterBits
    numPower = 2**numberBits
    mask = 255 >> letterBits
    #search for the places they're different
    # (indices where a new run starts, shifted into absolute coordinates)
    whereSol = np.add(startIndex+1, np.where(bwt[startIndex:endIndex-1] != bwt[startIndex+1:endIndex])[0])
    #this is the difference between two adjacent ones, i.e. each run's length
    deltas = np.zeros(dtype='<u4', shape=(whereSol.shape[0]+1,))
    if whereSol.shape[0] == 0:
        # the whole slice is a single run
        deltas[0] = endIndex-startIndex
    else:
        deltas[0] = whereSol[0]-startIndex
        deltas[1:-1] = np.subtract(whereSol[1:], whereSol[0:-1])
        deltas[-1] = endIndex - whereSol[-1]
    #calculate the number of bytes we need to store this information:
    # each run needs one byte per 5-bit chunk of its length
    size = 0
    byteCount = 0
    lastCount = 1
    while lastCount > 0:
        lastCount = np.where(deltas >= 2**(numberBits*byteCount))[0].shape[0]
        size += lastCount
        byteCount += 1
    #create the file
    ret = np.lib.format.open_memmap(tempFN, 'w+', '<u1', (size,))
    retIndex = 0
    c = bwt[startIndex]
    startChar = c
    delta = deltas[0]
    # emit the first run; under Python 2, /= on the numpy uint is floor division
    while delta > 0:
        ret[retIndex] = ((delta & mask) << letterBits)+c
        delta /= numPower
        retIndex += 1
    #fill in the values based on the bit functions
    for i in xrange(0, whereSol.shape[0]):
        c = bwt[whereSol[i]]
        delta = deltas[i+1]
        while delta > 0:
            ret[retIndex] = ((delta & mask) << letterBits)+c
            delta /= numPower
            retIndex += 1
    endChar = c
    #return a lot of information so we can easily combine the results
    # (byte size, first symbol, first run length, last symbol, last run length, output file)
    return (size, startChar, deltas[0], endChar, deltas[-1], tempFN)
|
During compression, each available process will calculate a subportion of the BWT independently using this
function. This process takes the chunk and rewrites it into a given filename using the technique described
in the compressBWT(...) function header
|
def get_foreign_key(self, name):
    """
    Returns the foreign key constraint with the given name.

    :param name: The constraint name
    :type name: str

    :rtype: ForeignKeyConstraint

    :raises ForeignKeyDoesNotExist: if no constraint with that name exists
    """
    normalized = self._normalize_identifier(name)
    if self.has_foreign_key(normalized):
        return self._fk_constraints[normalized]

    raise ForeignKeyDoesNotExist(normalized, self._name)
|
Returns the foreign key constraint with the given name.
:param name: The constraint name
:type name: str
:rtype: ForeignKeyConstraint
|
def style_from_dict(style_dict, include_defaults=True):
    """
    Create a ``Style`` instance from a dictionary or other mapping.

    The dictionary is equivalent to the ``Style.styles`` dictionary from
    pygments, with a few additions: it supports 'reverse' and 'blink'.

    Usage::

        style_from_dict({
            Token: '#ff0000 bold underline',
            Token.Title: 'blink',
            Token.SomethingElse: 'reverse',
        })

    :param include_defaults: Include the defaults (built-in) styling for
        selected text, etc...)
    """
    assert isinstance(style_dict, Mapping)

    if include_defaults:
        # Layer the user-supplied styles on top of the built-in defaults.
        merged = dict(DEFAULT_STYLE_EXTENSIONS)
        merged.update(style_dict)
        style_dict = merged

    # Exact-match keywords that toggle a single boolean Attrs field.
    # 'blink' / 'reverse' (and their 'no' forms) are prompt_toolkit
    # extensions, not in Pygments.
    flag_words = {
        'bold': ('bold', True),
        'nobold': ('bold', False),
        'italic': ('italic', True),
        'noitalic': ('italic', False),
        'underline': ('underline', True),
        'nounderline': ('underline', False),
        'blink': ('blink', True),
        'noblink': ('blink', False),
        'reverse': ('reverse', True),
        'noreverse': ('reverse', False),
    }

    token_to_attrs = {}

    # Sorting guarantees a parent token is processed before its children,
    # so the inheritance lookup below always finds an already-computed parent.
    for token, description in sorted(style_dict.items()):
        # Start from the nearest styled ancestor, or the default Attrs.
        attrs = DEFAULT_ATTRS

        if 'noinherit' not in description:
            for depth in range(1, len(token) + 1):
                parent = token[:-depth]
                if parent in token_to_attrs:
                    attrs = token_to_attrs[parent]
                    break

        # Now update with the given attributes.
        for word in description.split():
            if word in flag_words:
                field, value = flag_words[word]
                attrs = attrs._replace(**{field: value})
            elif word == 'noinherit':
                pass
            elif word in ('roman', 'sans', 'mono'):
                # Pygments font properties that we ignore.
                pass
            elif word.startswith('border:'):
                # Pygments property that we ignore.
                pass
            elif word.startswith('bg:'):
                attrs = attrs._replace(bgcolor=_colorformat(word[3:]))
            else:
                attrs = attrs._replace(color=_colorformat(word))

        token_to_attrs[token] = attrs

    return _StyleFromDict(token_to_attrs)
|
Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
selected text, etc...)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.