text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
from __future__ import absolute_import
import numpy as np
from scipy.stats import pearsonr
from scipy import signal
from scipy import interpolate
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import warnings
def generate_index_distribution(numTrain, numTest, numValidation, params):
    """ Generates a vector of indices to partition the data for training.

        Acts as a dispatcher: it inspects which complete keyword triplet is
        present in params and delegates to the matching specialized
        generator. NO CHECKING IS DONE: it is assumed that the data could
        be partitioned in the specified blocks and that the block indices
        describe a coherent partition.

        Parameters
        ----------
        numTrain : int
            Number of training data points
        numTest : int
            Number of testing data points
        numValidation : int
            Number of validation data points (may be zero)
        params : dictionary with parameters
            Contains the keywords that control the behavior of the function
            (uq_train_fr, uq_valid_fr, uq_test_fr for fraction specification,
            uq_train_vec, uq_valid_vec, uq_test_vec for block list specification, and
            uq_train_bks, uq_valid_bks, uq_test_bks for block number specification)

        Return
        ----------
        indexTrain : int numpy array
            Indices for data in training
        indexValidation : int numpy array
            Indices for data in validation (if any)
        indexTest : int numpy array
            Indices for data in testing (if merging)
    """
    def _triplet_present(suffix):
        # A specification is only usable when all three keywords of the
        # same family (train / valid / test) are present.
        return all(('uq_%s_%s' % (role, suffix)) in params
                   for role in ('train', 'valid', 'test'))

    if _triplet_present('fr'):
        # specification by fraction
        print("Computing UQ cross-validation - Distributing by FRACTION")
        return generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params)
    if _triplet_present('vec'):
        # specification by block list
        print("Computing UQ cross-validation - Distributing by BLOCK LIST")
        return generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params)
    if _triplet_present('bks'):
        # specification by block number
        print("Computing UQ cross-validation - Distributing by BLOCK NUMBER")
        return generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params)
    print("ERROR !! No consistent UQ parameter specification found !! ... exiting ")
    raise KeyError("No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)")
def generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params):
    """ Generates a vector of indices to partition the data for training.

        Validates that each fraction lies in [0, 1] and that the three
        fractions add up to 1 (within a small tolerance) before building
        a random partition.

        Parameters
        ----------
        numTrain : int
            Number of training data points
        numTest : int
            Number of testing data points
        numValidation : int
            Number of validation data points (may be zero)
        params : dictionary with parameters
            Contains the keywords that control the behavior of the function
            (uq_train_fr, uq_valid_fr, uq_test_fr)

        Return
        ----------
        indexTrain : int numpy array
            Indices for data in training
        indexValidation : int numpy array
            Indices for data in validation (if any)
        indexTest : int numpy array
            Indices for data in testing (if merging)
    """
    tol = 1e-7
    # Extract and validate the three fractions in a fixed order.
    keywords = ('uq_train_fr', 'uq_valid_fr', 'uq_test_fr')
    fractions = tuple(params[key] for key in keywords)
    for key, fraction in zip(keywords, fractions):
        if fraction < 0. or fraction > 1.:
            raise ValueError(key + ' is not in (0, 1) range. ' + key + ': ', fraction)
    fractionTrain, fractionValidation, fractionTest = fractions
    fractionSum = sum(fractions)
    if abs(fractionSum - 1.) > tol:
        raise ValueError('Specified UQ fractions (uq_train_fr, uq_valid_fr, uq_test_fr) do not add up to 1. No cross-validation partition is computed ! sum:', fractionSum)
    # When a test fraction is requested, all the data is pooled and
    # re-partitioned; otherwise the existing test partition is preserved.
    numData = numTrain + numValidation
    if fractionTest > 0:
        numData += numTest
    sizeTraining = int(np.round(numData * fractionTrain))
    sizeValidation = int(np.round(numData * fractionValidation))
    # Shuffle all indices once, then carve out contiguous slices.
    permuted = np.arange(numData)
    np.random.shuffle(permuted)
    trainEnd = sizeTraining
    validEnd = sizeTraining + sizeValidation
    indexTrain = permuted[:trainEnd]
    indexValidation = permuted[trainEnd:validEnd] if fractionValidation > 0 else None
    indexTest = permuted[validEnd:] if fractionTest > 0 else None
    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params):
    """ Generates a vector of indices to partition the data for training.

        NO CHECKING IS DONE: it is assumed that the data could be
        partitioned in the specified block quantities and that the block
        quantities describe a coherent partition.

        Parameters
        ----------
        numTrain : int
            Number of training data points
        numTest : int
            Number of testing data points
        numValidation : int
            Number of validation data points (may be zero)
        params : dictionary with parameters
            Contains the keywords that control the behavior of the function
            (uq_train_bks, uq_valid_bks, uq_test_bks)

        Return
        ----------
        indexTrain : int numpy array
            Indices for data in training
        indexValidation : int numpy array
            Indices for data in validation (if any)
        indexTest : int numpy array
            Indices for data in testing (if merging)
    """
    # Extract required parameters
    numBlocksTrain = params['uq_train_bks']
    numBlocksValidation = params['uq_valid_bks']
    numBlocksTest = params['uq_test_bks']
    numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest
    # When test blocks are requested, all the data is pooled and
    # re-partitioned; otherwise the existing test partition is preserved.
    numData = numTrain + numValidation
    if numBlocksTest > 0:
        numData += numTest
    # Rounded integer division: block size closest to numData / numBlocksTotal.
    blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal
    if numData != blockSize * numBlocksTotal:
        print("Warning ! Requested partition does not distribute data evenly between blocks. "
              "Testing (if specified) or Validation (if specified) will use different block size.")
    trainEnd = numBlocksTrain * blockSize
    validEnd = trainEnd + numBlocksValidation * blockSize
    # Shuffle all indices once, then carve out contiguous slices.
    shuffled = np.arange(numData)
    np.random.shuffle(shuffled)
    indexTrain = shuffled[:trainEnd]
    indexValidation = shuffled[trainEnd:validEnd] if numBlocksValidation > 0 else None
    indexTest = shuffled[validEnd:] if numBlocksTest > 0 else None
    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params):
    """ Generates a vector of indices to partition the data for training.
        NO CHECKING IS DONE: it is assumed that the data could be partitioned
        in the specified list of blocks and that the block indices describe a
        coherent partition.

        Parameters
        ----------
        numTrain : int
            Number of training data points
        numTest : int
            Number of testing data points
        numValidation : int
            Number of validation data points (may be zero)
        params : dictionary with parameters
            Contains the keywords that control the behavior of the function
            (uq_train_vec, uq_valid_vec, uq_test_vec)

        Return
        ----------
        indexTrain : int numpy array
            Indices for data in training
        indexValidation : int numpy array
            Indices for data in validation (if any)
        indexTest : int numpy array
            Indices for data in testing (if merging)
    """
    # Extract required parameters
    blocksTrain = params['uq_train_vec']
    blocksValidation = params['uq_valid_vec']
    blocksTest = params['uq_test_vec']
    # Determine data size and block size
    numBlocksTrain = len(blocksTrain)
    numBlocksValidation = len(blocksValidation)
    numBlocksTest = len(blocksTest)
    numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest
    if numBlocksTest > 0:
        # Use all data and re-distribute the partitions
        numData = numTrain + numValidation + numTest
    else:
        # Preserve test partition
        numData = numTrain + numValidation
    blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal  # integer division with rounding
    # remainder > 0: the last block absorbs extra data; remainder < 0: the
    # rounded block size overshoots the available data.
    remainder = numData - blockSize * numBlocksTotal
    if remainder != 0:
        print("Warning ! Requested partition does not distribute data evenly between blocks. "
              "Last block will have different size.")
    # Clamp a negative remainder to zero: partitions then allocate no extra
    # slack, and fill_array trims each result to the indices actually filled.
    if remainder < 0:
        remainder = 0
    # Fill partition indices
    # Fill train partition
    # maxSize* over-allocates by 'remainder' because the last (oversized)
    # block may fall in any of the three partitions.
    maxSizeTrain = blockSize * numBlocksTrain + remainder
    indexTrain = fill_array(blocksTrain, maxSizeTrain, numData, numBlocksTotal, blockSize)
    # Fill validation partition
    indexValidation = None
    if numBlocksValidation > 0:
        maxSizeValidation = blockSize * numBlocksValidation + remainder
        indexValidation = fill_array(blocksValidation, maxSizeValidation, numData, numBlocksTotal, blockSize)
    # Fill test partition
    indexTest = None
    if numBlocksTest > 0:
        maxSizeTest = blockSize * numBlocksTest + remainder
        indexTest = fill_array(blocksTest, maxSizeTest, numData, numBlocksTotal, blockSize)
    return indexTrain, indexValidation, indexTest
def compute_limits(numdata, numblocks, blocksize, blockn):
    """ Return the [start, end) index range owned by block *blockn*.

        Blocks 0 .. numblocks-2 each span exactly *blocksize* indices; the
        final block absorbs whatever remains, so the ranges tile
        0 .. numdata-1 even when numdata is not exactly divisible.

        Parameters
        ----------
        numdata : int
            Total number of data points to distribute
        numblocks : int
            Total number of blocks to distribute into
        blocksize : int
            Size of data per block
        blockn : int
            Index of block, from 0 to numblocks-1

        Return
        ----------
        start : int
            Position to start assigning indices
        end : int
            One beyond position to stop assigning indices
    """
    start = blocksize * blockn
    if blockn == numblocks - 1:
        # last block gets the extra
        return start, numdata
    return start, start + blocksize
def fill_array(blocklist, maxsize, numdata, numblocks, blocksize):
    """ Fills a new array of integers with the indices corresponding
        to the specified block structure.

        Parameters
        ----------
        blocklist : list
            List of integers describing the block indices that
            go into the array
        maxsize : int
            Maximum possible length for the partition (the size of the
            common block size plus the remainder, if any).
        numdata : int
            Total number of data points to distribute
        numblocks : int
            Total number of blocks to distribute into
        blocksize : int
            Size of data per block

        Return
        ----------
        indexArray : int numpy array
            Indices for specific data partition. Resizes the array
            to the correct length.
    """
    # Use the builtin int: the np.int alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24, where it raises AttributeError.
    indexArray = np.zeros(maxsize, int)
    offset = 0
    for i in blocklist:
        # [start, end) range of data indices owned by block i; the last
        # block may differ in size from blocksize (see compute_limits).
        start, end = compute_limits(numdata, numblocks, blocksize, i)
        length = end - start
        indexArray[offset:offset + length] = np.arange(start, end)
        offset += length
    # Trim the over-allocated buffer down to the indices actually filled.
    return indexArray[:offset]
# UTILS for COMPUTATION OF EMPIRICAL CALIBRATION
def compute_statistics_homoscedastic_summary(df_data,
                                             col_true=0,
                                             col_pred=6,
                                             col_std_pred=7,
                                             ):
    """ Extract ground truth, mean prediction, error and standard
        deviation of prediction from a summary inference data frame
        (statistics already aggregated over all inference realizations),
        printing standard regression metrics along the way.

        Parameters
        ----------
        df_data : pandas data frame
            Data frame generated by current CANDLE inference experiments
            (usually named <model>_pred.tsv); column indices are hard
            coded to agree with the current CANDLE format.
        col_true : integer
            Column holding the true value (Default: 0).
        col_pred : integer
            Column holding the predicted value (Default: 6).
        col_std_pred : integer
            Column holding the standard deviation of the predicted
            values (Default: 7).

        Return
        ----------
        Ytrue : numpy array
            True (observed) values.
        Ypred_mean : numpy array
            Predicted values (mean from summary).
        yerror : numpy array
            Errors computed (observed - predicted).
        sigma : numpy array
            Standard deviations learned by the model; for homoscedastic
            inference this is the same array as Ypred_std.
        Ypred_std : numpy array
            Standard deviations from regular (homoscedastic) inference.
        pred_name : string
            Name of the predicted quantity (column label at col_true).
    """
    pred_name = df_data.columns[col_true]
    Ytrue = df_data.iloc[:, col_true].values
    Ypred_mean = df_data.iloc[:, col_pred].values
    Ypred_std = df_data.iloc[:, col_std_pred].values
    yerror = Ytrue - Ypred_mean
    # For the homoscedastic summary the learned sigma is simply the
    # reported prediction-std column.
    sigma = Ypred_std
    print('MSE: ', mean_squared_error(Ytrue, Ypred_mean))
    print('MSE_STD: ', np.std(yerror ** 2))
    print('MAE: ', mean_absolute_error(Ytrue, Ypred_mean))
    print('R2: ', r2_score(Ytrue, Ypred_mean))
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    pearson_cc, pval = pearsonr(Ytrue, Ypred_mean)
    print('Pearson CC: %f, p-value: %e' % (pearson_cc, pval))
    return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_homoscedastic(df_data,
                                     col_true=4,
                                     col_pred_start=6
                                     ):
    """ Extract ground truth, mean prediction, error and standard
        deviation of prediction from an inference data frame that
        contains every individual inference realization, printing
        standard regression metrics along the way.

        Parameters
        ----------
        df_data : pandas data frame
            Data frame generated by current CANDLE inference experiments
            (usually named <model>.predicted_INFER.tsv); column indices
            are hard coded to agree with the current HOM format.
        col_true : integer
            Column holding the true value (Default: 4).
        col_pred_start : integer
            First column holding predicted values; every column from here
            onwards is one inference realization (Default: 6).

        Return
        ----------
        Ytrue : numpy array
            True (observed) values.
        Ypred_mean : numpy array
            Predicted values (mean over realizations).
        yerror : numpy array
            Errors computed (observed - predicted).
        sigma : numpy array
            Standard deviations learned by the model; for homoscedastic
            inference this is the same array as Ypred_std.
        Ypred_std : numpy array
            Standard deviations computed over the realizations.
        pred_name : string
            Name of the predicted quantity (column label at col_true).
    """
    pred_name = df_data.columns[col_true]
    Ytrue = df_data.iloc[:, col_true].values
    # Every column from col_pred_start onwards is one inference realization.
    realizations = df_data.iloc[:, col_pred_start:]
    Ypred_mean = np.mean(realizations, axis=1).values
    Ypred_std = np.std(realizations, axis=1).values
    yerror = Ytrue - Ypred_mean
    sigma = Ypred_std  # std
    print('MSE: ', mean_squared_error(Ytrue, Ypred_mean))
    print('MSE_STD: ', np.std(yerror ** 2))
    print('MAE: ', mean_absolute_error(Ytrue, Ypred_mean))
    print('R2: ', r2_score(Ytrue, Ypred_mean))
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    pearson_cc, pval = pearsonr(Ytrue, Ypred_mean)
    print('Pearson CC: %f, p-value: %e' % (pearson_cc, pval))
    return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_heteroscedastic(df_data,
                                       col_true=4,
                                       col_pred_start=6,
                                       col_std_pred_start=7,
                                       ):
    """ Extracts ground truth, mean prediction, error, standard
        deviation of prediction and predicted (learned) standard
        deviation from inference data frame. The latter includes
        all the individual inference realizations.

        Parameters
        ----------
        df_data : pandas data frame
            Data frame generated by current heteroscedastic inference
            experiments. Indices are hard coded to agree with
            current version. (The inference file usually
            has the name: <model>.predicted_INFER_HET.tsv).
        col_true : integer
            Index of the column in the data frame where the true
            value is stored (Default: 4, index in current HET format).
        col_pred_start : integer
            Index of the column in the data frame where the first predicted
            value is stored. All the predicted values during inference
            are stored and are interspaced with standard deviation
            predictions (Default: 6 index, step 2, in current HET format).
        col_std_pred_start : integer
            Index of the column in the data frame where the first predicted
            standard deviation value is stored. All the predicted values
            during inference are stored and are interspaced with predictions
            (Default: 7 index, step 2, in current HET format).

        Return
        ----------
        Ytrue : numpy array
            Array with true (observed) values
        Ypred_mean : numpy array
            Array with predicted values (mean of predictions).
        yerror : numpy array
            Array with errors computed (observed - predicted).
        sigma : numpy array
            Array with standard deviations learned with deep learning
            model. For heteroscedastic inference this corresponds to the
            sqrt(exp(s^2)) with s^2 predicted value.
        Ypred_std : numpy array
            Array with standard deviations computed from regular
            (homoscedastic) inference.
        pred_name : string
            Name of data column or quantity predicted (as extracted
            from the data frame using the col_true index).
    """
    Ytrue = df_data.iloc[:, col_true].values
    pred_name = df_data.columns[col_true]
    # Prediction realizations occupy every other column starting at
    # col_pred_start (interleaved with the learned spread columns),
    # hence the step-2 slice.
    Ypred_mean_ = np.mean(df_data.iloc[:, col_pred_start::2], axis=1)
    Ypred_mean = Ypred_mean_.values
    Ypred_std_ = np.std(df_data.iloc[:, col_pred_start::2], axis=1)
    Ypred_std = Ypred_std_.values
    yerror = Ytrue - Ypred_mean
    # Learned spread columns start at col_std_pred_start, also step 2.
    s_ = df_data.iloc[:, col_std_pred_start::2]
    s_mean = np.mean(s_, axis=1)
    # The mean learned value is exponentiated to a variance and rooted to a
    # std (i.e. the network output is treated as a log-variance here).
    var = np.exp(s_mean.values)  # variance
    sigma = np.sqrt(var)  # std
    MSE = mean_squared_error(Ytrue, Ypred_mean)
    print('MSE: ', MSE)
    MSE_STD = np.std((Ytrue - Ypred_mean)**2)
    print('MSE_STD: ', MSE_STD)
    MAE = mean_absolute_error(Ytrue, Ypred_mean)
    print('MAE: ', MAE)
    r2 = r2_score(Ytrue, Ypred_mean)
    print('R2: ', r2)
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    pearson_cc, pval = pearsonr(Ytrue, Ypred_mean)
    print('Pearson CC: %f, p-value: %e' % (pearson_cc, pval))
    return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_quantile(df_data,
                                sigma_divisor=2.56,
                                col_true=4,
                                col_pred_start=6
                                ):
    """ Extracts ground truth, 50th percentile mean prediction,
        low percentile and high percentile mean prediction
        (usually 1st decile and 9th decile respectively),
        error (using 5th decile), standard deviation of
        prediction (using 5th decile) and predicted (learned)
        standard deviation from interdecile range in inference data frame.
        The latter includes all the individual inference realizations.

        Parameters
        ----------
        df_data : pandas data frame
            Data frame generated by current quantile inference
            experiments. Indices are hard coded to agree with
            current version. (The inference file usually
            has the name: <model>.predicted_INFER_QTL.tsv).
        sigma_divisor : float
            Divisor to convert from the interdecile range to the corresponding
            standard deviation for a Gaussian distribution.
            (Default: 2.56, consistent with an interdecile range computed from
            the difference between the 9th and 1st deciles).
        col_true : integer
            Index of the column in the data frame where the true
            value is stored (Default: 4, index in current QTL format).
        col_pred_start : integer
            Index of the column in the data frame where the first predicted
            value is stored. All the predicted values during inference
            are stored and are interspaced with other percentile
            predictions (Default: 6 index, step 3, in current QTL format).

        Return
        ----------
        Ytrue : numpy array
            Array with true (observed) values
        Ypred : numpy array
            Array with predicted values (based on the 50th percentile).
        yerror : numpy array
            Array with errors computed (observed - predicted).
        sigma : numpy array
            Array with standard deviations learned with deep learning
            model. This corresponds to the interdecile range divided
            by the sigma divisor.
        Ypred_std : numpy array
            Array with standard deviations computed from regular
            (homoscedastic) inference.
        pred_name : string
            Name of data column or quantity predicted (as extracted
            from the data frame using the col_true index).
        Ypred_Lp_mean : numpy array
            Array with predicted values of the lower percentile
            (usually the 1st decile).
        Ypred_Hp_mean : numpy array
            Array with predicted values of the higher percentile
            (usually the 9th decile).
    """
    Ytrue = df_data.iloc[:, col_true].values
    pred_name = df_data.columns[col_true]
    # Each inference realization contributes three columns, laid out as
    # (median, low percentile, high percentile), hence the step-3 slices.
    Ypred_5d_mean = np.mean(df_data.iloc[:, col_pred_start::3], axis=1)
    Ypred_mean = Ypred_5d_mean.values
    Ypred_Lp_mean_ = np.mean(df_data.iloc[:, col_pred_start + 1::3], axis=1)
    Ypred_Hp_mean_ = np.mean(df_data.iloc[:, col_pred_start + 2::3], axis=1)
    Ypred_Lp_mean = Ypred_Lp_mean_.values
    Ypred_Hp_mean = Ypred_Hp_mean_.values
    interdecile_range = Ypred_Hp_mean - Ypred_Lp_mean
    # Convert the interdecile range to a Gaussian-equivalent std.
    sigma = interdecile_range / sigma_divisor
    yerror = Ytrue - Ypred_mean
    # Spread of the median predictions across realizations.
    Ypred_std_ = np.std(df_data.iloc[:, col_pred_start::3], axis=1)
    Ypred_std = Ypred_std_.values
    MSE = mean_squared_error(Ytrue, Ypred_mean)
    print('MSE: ', MSE)
    MSE_STD = np.std((Ytrue - Ypred_mean)**2)
    print('MSE_STD: ', MSE_STD)
    MAE = mean_absolute_error(Ytrue, Ypred_mean)
    print('MAE: ', MAE)
    r2 = r2_score(Ytrue, Ypred_mean)
    print('R2: ', r2)
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    pearson_cc, pval = pearsonr(Ytrue, Ypred_mean)
    print('Pearson CC: %f, p-value: %e' % (pearson_cc, pval))
    return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name, Ypred_Lp_mean, Ypred_Hp_mean
def split_data_for_empirical_calibration(Ytrue, Ypred, sigma, cal_split=0.8):
    """ Extracts a portion of the arrays provided for the computation
        of the calibration and reserves the remainder portion
        for testing.

        Parameters
        ----------
        Ytrue : numpy array
            Array with true (observed) values
        Ypred : numpy array
            Array with predicted values.
        sigma : numpy array
            Array with standard deviations learned with deep learning
            model (or std value computed from prediction if homoscedastic
            inference).
        cal_split : float
            Split of data to use for estimating the calibration relationship.
            It is assumed that it will be a value in (0, 1).
            (Default: use 80% of predictions to generate empirical
            calibration).

        Return
        ----------
        index_perm_total : numpy array
            Random permutation of the array indices. The first 'num_cal'
            of the indices correspond to the samples that are used for
            calibration, while the remainder are the samples reserved
            for calibration testing.
        pSigma_cal : numpy array
            Part of the input sigma array to use for calibration.
        pSigma_test : numpy array
            Part of the input sigma array to reserve for testing.
        pPred_cal : numpy array
            Part of the input Ypred array to use for calibration.
        pPred_test : numpy array
            Part of the input Ypred array to reserve for testing.
        true_cal : numpy array
            Part of the input Ytrue array to use for calibration.
        true_test : numpy array
            Part of the input Ytrue array to reserve for testing.
    """
    # shuffle data for calibration
    num_pred_total = sigma.shape[0]
    # Builtin int replaces np.int, which was deprecated in NumPy 1.20 and
    # removed in 1.24 (using it there raises AttributeError). Truncation
    # behavior is identical.
    num_cal = int(num_pred_total * cal_split)
    index_perm_total = np.random.permutation(range(num_pred_total))
    # Permute data (same permutation is applied to all three arrays so
    # that rows stay aligned).
    pSigma_perm_all = sigma[index_perm_total]
    pPred_perm_all = Ypred[index_perm_total]
    true_perm_all = Ytrue[index_perm_total]
    # Split in calibration and testing
    pSigma_cal = pSigma_perm_all[:num_cal]
    pSigma_test = pSigma_perm_all[num_cal:]
    pPred_cal = pPred_perm_all[:num_cal]
    pPred_test = pPred_perm_all[num_cal:]
    true_cal = true_perm_all[:num_cal]
    true_test = true_perm_all[num_cal:]
    print('Size of calibration set: ', true_cal.shape)
    print('Size of test set: ', true_test.shape)
    return index_perm_total, pSigma_cal, pSigma_test, pPred_cal, pPred_test, true_cal, true_test
def compute_empirical_calibration_interpolation(pSigma_cal, pPred_cal, true_cal, cv=10):
    """ Use the arrays provided to estimate an empirical mapping
        between standard deviation and absolute value of error,
        both of which have been observed during inference. Since
        most of the times the prediction statistics are very noisy,
        two smoothing steps (based on scipy's savgol filter) are performed.
        Cubic Hermite splines (PchipInterpolator) are constructed for
        interpolation. This type of splines preserves the monotonicity
        in the interpolation data and does not overshoot if the data is
        not smooth. The overall process of constructing a spline
        to express the mapping from standard deviation to error is
        composed of smoothing-interpolation-smoothing-interpolation.

        Parameters
        ----------
        pSigma_cal : numpy array
            Part of the standard deviations array to use for calibration.
        pPred_cal : numpy array
            Part of the predictions array to use for calibration.
        true_cal : numpy array
            Part of the true (observed) values array to use for calibration.
        cv : int
            Number of cross validations folds to run to determine a 'good'
            fit.

        Return
        ----------
        splineobj_best : scipy.interpolate python object
            A python object from scipy.interpolate that computes a
            cubic Hermite splines (PchipInterpolator) constructed
            to express the mapping from standard deviation to error after a
            'drastic' smoothing of the predictions. A 'good' fit is
            determined by taking the spline for the fold that produces
            the smaller mean absolute error in testing data (not used
            for the smoothing / interpolation).
        splineobj2 : scipy.interpolate python object
            A python object from scipy.interpolate that computes a
            cubic Hermite splines (PchipInterpolator) constructed
            to express the mapping from standard deviation to error. This
            spline is generated for interpolating the samples generated
            after the smoothing of the first interpolation spline (i.e.
            splineobj_best).
    """
    xs3 = pSigma_cal  # std
    z3 = np.abs(true_cal - pPred_cal)  # abs error
    test_split = 1.0 / cv
    xmin = np.min(pSigma_cal)
    xmax = np.max(pSigma_cal)
    # NOTE(review): this silences warnings process-wide, not just inside
    # this function -- confirm the global filter is intended.
    warnings.filterwarnings("ignore")
    print('--------------------------------------------')
    print('Using CV for selecting calibration smoothing')
    print('--------------------------------------------')
    # min_error starts at +inf, so the first fold always initializes
    # splineobj_best; it can never be referenced unbound below.
    min_error = np.inf
    for cv_ in range(cv):
        # Split data for the different folds
        X_train, X_test, y_train, y_test = train_test_split(xs3, z3, test_size=test_split, shuffle=True)
        # Order x to apply smoothing and interpolation
        xindsort = np.argsort(X_train)
        # Smooth abs error
        # z3smooth = signal.savgol_filter(y_train[xindsort], 31, 1, mode='nearest')
        z3smooth = signal.savgol_filter(y_train[xindsort], 21, 1, mode='nearest')
        # Compute Piecewise Cubic Hermite Interpolating Polynomial
        splineobj = interpolate.PchipInterpolator(X_train[xindsort], z3smooth, extrapolate=True)
        # Compute prediction from interpolator
        ytest = splineobj(X_test)
        # compute MAE of true ABS error vs predicted ABS error
        mae = mean_absolute_error(y_test, ytest)
        print('MAE: ', mae)
        if mae < min_error:  # store spline interpolator for best fold
            min_error = mae
            splineobj_best = splineobj
    # Smooth again: evaluate the best fold's spline on a regular grid over
    # the observed sigma range, smooth that curve, and fit a second spline.
    xp23 = np.linspace(xmin, xmax, 200)
    # Predict using best interpolator from folds
    yp23 = splineobj_best(xp23)
    # Smooth the ABS error predicted
    yp23smooth = signal.savgol_filter(yp23, 15, 1, mode='nearest')
    # Compute spline over second level of smoothing
    splineobj2 = interpolate.PchipInterpolator(xp23, yp23smooth, extrapolate=True)
    return splineobj_best, splineobj2
| ECP-CANDLE/Benchmarks | common/uq_utils.py | Python | mit | 33,264 | [
"Gaussian"
] | eabbdecff5d5c09c51235459ecd43fc8d4ca505d8e837c8239fecbf98d482dec |
#!/usr/bin/python
"""Simple shallow test of the CASTEP interface"""
# NOTE: exception handlers use the 'except Exception as e' form, which is
# valid on Python 2.6+ and required on Python 3 (the old
# 'except Exception, e' form is a syntax error there). No other behavior
# is changed.
import os
import shutil
import tempfile
import traceback

from ase.test.castep import installed
assert installed()

# check if we can import everything
ase_castep_dir = "ase"

try:
    castep_calc = __import__(ase_castep_dir + ".calculators.castep", globals(), locals(), ["Castep", "CastepParam", "create_castep_keywords"])
    Castep = castep_calc.Castep
    CastepParam = castep_calc.CastepParam
    create_castep_keywords = castep_calc.create_castep_keywords
except Exception as e:
    traceback.print_exc()
    print(e)
    assert False, 'Castep calculator module could not be loaded'

try:
    __import__(ase_castep_dir + ".io.castep")
except Exception as e:
    assert False, 'Castep io module could not be loaded'

# Work in a scratch directory so generated files do not pollute the cwd.
tmp_dir = tempfile.mkdtemp()
cwd = os.getcwd()

from ase.calculators.castep import Castep

try:
    c = Castep(directory=tmp_dir, label='test_label')
except Exception as e:
    traceback.print_exc()
    print(e)
    assert False, 'Could not instantiate castep calculator'

try:
    c.xc_functional = 'PBE'
except Exception as e:
    traceback.print_exc()
    print(e)
    assert False, 'Setting xc_functional failed'

import ase.lattice.cubic
lattice = ase.lattice.cubic.BodyCenteredCubic('Li' )

print('For the sake of evaluating this test, warnings')
print('about auto-generating pseudo-potentials are')
print('normal behavior and can be safely ignored')

try:
    lattice.set_calculator(c)
except Exception as e:
    traceback.print_exc()
    print(e)
    assert False, 'Setting the calculator %s failed' % c

try:
    create_castep_keywords(
        castep_command=os.environ['CASTEP_COMMAND'],
        path=tmp_dir,
        fetch_only=20)
except Exception as e:
    traceback.print_exc()
    print(e)
    assert False, "Cannot create castep_keywords, this usually means a bug"\
        + " in the interface or the castep binary cannot be called"

# Write a small .param file (with duplicated keys and comments) to check
# that merge_param copes with them.
param_fn = os.path.join(tmp_dir, 'myParam.param')
param = open(param_fn,'w')
param.write('XC_FUNCTIONAL : PBE #comment\n')
param.write('XC_FUNCTIONAL : PBE #comment\n')
param.write('#comment\n')
param.write('CUT_OFF_ENERGY : 450.\n')
param.close()

try:
    c.merge_param(param_fn)
except Exception as e:
    traceback.print_exc()
    print(e)
    assert False,"Error in merge_param_filename, go figure"

# check if the CastepOpt, CastepCell comparison mechanism works
p1 = CastepParam()
p2 = CastepParam()
assert p1._options == p2._options, "Print two newly created CastepParams are not the same"

p1._options['xc_functional'].value = 'PBE'
p1.xc_functional = 'PBE'
assert not p1._options == p2._options, "Changed one CastepParam, but the still look the same"

assert c.calculation_required(lattice), 'Calculator does not fetch that a calculation is required'

if not c.dryrun_ok():
    print(c._error)
    assert False, "Dryrun_ok does not work, where it should"
else:
    print("Dryrun is ok")

c.prepare_input_files(lattice)

# Clean up the scratch directory.
os.chdir(cwd)
shutil.rmtree(tmp_dir)
print("Test finished without errors")
| conwayje/ase-python | ase/test/castep/castep_interface.py | Python | gpl-2.0 | 3,044 | [
"ASE",
"CASTEP"
] | 7b4af872c94b5c55b37bc9897a33f4474576542ad964ef7d50bf26391719c255 |
# $HeadURL$
"""
DISETSubRequest Class encapsulates a request definition to accomplish a DISET
RPC call
:deprecated:
"""
__RCSID__ = "$Id$"
import commands
from DIRAC.Core.Utilities import DEncode
from DIRAC import Time
from DIRAC.Core.Utilities.File import makeGuid
class DISETSubRequest:
    """Encapsulates a single DISET RPC call as a sub-request record.

    The sub-request is held as a plain attribute dictionary; the RPC stub
    is serialized with DEncode into the 'Arguments' attribute and the RPC
    method name (rpcStub[1]) becomes the 'Operation' attribute.
    """
    #############################################################################
    def __init__( self, rpcStub = None, executionOrder = 0 ):
        """Instantiates the Workflow object and some default parameters.
        """
        # Full attribute schema of a sub-request; every attribute starts
        # out as the placeholder string "Unknown".
        self.subAttributeNames = ['Status', 'SubRequestID', 'Operation', 'ExecutionOrder', 'CreationTime', 'LastUpdate', 'Arguments']
        self.subAttributes = {}
        for attr in self.subAttributeNames:
            self.subAttributes[attr] = "Unknown"
        # Some initial values
        self.subAttributes['Status'] = "Waiting"
        self.subAttributes['SubRequestID'] = makeGuid()
        self.subAttributes['CreationTime'] = Time.toString()
        self.subAttributes['ExecutionOrder'] = executionOrder
        if rpcStub:
            # Store the whole stub DEncode-serialized; rpcStub[1] is the
            # RPC method name.
            self.subAttributes['Arguments'] = DEncode.encode( rpcStub )
            self.subAttributes['Operation'] = rpcStub[1]

    def setRPCStub( self, rpcStub ):
        """ Define the RPC call details
        """
        # Same convention as __init__: element 1 of the stub is the
        # operation name, the serialized stub is the arguments payload.
        self.subAttributes['Operation'] = rpcStub[1]
        self.subAttributes['Arguments'] = DEncode.encode( rpcStub )

    def getDictionary( self ):
        """ Get the request representation as a dictionary
        """
        resultDict = {}
        resultDict['Attributes'] = self.subAttributes
        return resultDict
| calancha/DIRAC | RequestManagementSystem/Client/DISETSubRequest.py | Python | gpl-3.0 | 1,516 | [
"DIRAC"
] | bcd5a99ef4954d164029065ddef979f92e57bf1a4d408d712fa7a637ba35b111 |
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Python code format's checker.
By default try to follow Guido's style guide :
http://www.python.org/doc/essays/styleguide.html
Some parts of the process_token method is based from The Tab Nanny std module.
"""
import keyword
import sys
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
from astroid import nodes
from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.utils import WarningScope, OPTION_RGX
_CONTINUATION_BLOCK_OPENERS = ['elif', 'except', 'for', 'if', 'while', 'def', 'class']
_KEYWORD_TOKENS = ['assert', 'del', 'elif', 'except', 'for', 'if', 'in', 'not',
'raise', 'return', 'while', 'yield']
if sys.version_info < (3, 0):
_KEYWORD_TOKENS.append('print')
_SPACED_OPERATORS = ['==', '<', '>', '!=', '<>', '<=', '>=',
'+=', '-=', '*=', '**=', '/=', '//=', '&=', '|=', '^=',
'%=', '>>=', '<<=']
_OPENING_BRACKETS = ['(', '[', '{']
_CLOSING_BRACKETS = [')', ']', '}']
_TAB_LENGTH = 8
_EOL = frozenset([tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT])
_JUNK_TOKENS = (tokenize.COMMENT, tokenize.NL)
# Whitespace checking policy constants
_MUST = 0
_MUST_NOT = 1
_IGNORE = 2
# Whitespace checking config constants
_DICT_SEPARATOR = 'dict-separator'
_TRAILING_COMMA = 'trailing-comma'
_NO_SPACE_CHECK_CHOICES = [_TRAILING_COMMA, _DICT_SEPARATOR]
MSGS = {
'C0301': ('Line too long (%s/%s)',
'line-too-long',
'Used when a line is longer than a given number of characters.'),
'C0302': ('Too many lines in module (%s)', # was W0302
'too-many-lines',
'Used when a module has too much lines, reducing its readability.'
),
'C0303': ('Trailing whitespace',
'trailing-whitespace',
'Used when there is whitespace between the end of a line and the '
'newline.'),
'C0304': ('Final newline missing',
'missing-final-newline',
'Used when the last line in a file is missing a newline.'),
'W0311': ('Bad indentation. Found %s %s, expected %s',
'bad-indentation',
'Used when an unexpected number of indentation\'s tabulations or '
'spaces has been found.'),
'C0330': ('Wrong %s indentation%s.\n%s%s',
'bad-continuation',
'TODO'),
'W0312': ('Found indentation with %ss instead of %ss',
'mixed-indentation',
'Used when there are some mixed tabs and spaces in a module.'),
'W0301': ('Unnecessary semicolon', # was W0106
'unnecessary-semicolon',
'Used when a statement is ended by a semi-colon (";"), which \
isn\'t necessary (that\'s python, not C ;).'),
'C0321': ('More than one statement on a single line',
'multiple-statements',
'Used when more than on statement are found on the same line.',
{'scope': WarningScope.NODE}),
'C0325' : ('Unnecessary parens after %r keyword',
'superfluous-parens',
'Used when a single item in parentheses follows an if, for, or '
'other keyword.'),
'C0326': ('%s space %s %s %s\n%s',
'bad-whitespace',
('Used when a wrong number of spaces is used around an operator, '
'bracket or block opener.'),
{'old_names': [('C0323', 'no-space-after-operator'),
('C0324', 'no-space-after-comma'),
('C0322', 'no-space-before-operator')]})
}
if sys.version_info < (3, 0):
MSGS.update({
'W0331': ('Use of the <> operator',
'old-ne-operator',
'Used when the deprecated "<>" operator is used instead \
of "!=".'),
'W0332': ('Use of "l" as long integer identifier',
'lowercase-l-suffix',
'Used when a lower case "l" is used to mark a long integer. You '
'should use a upper case "L" since the letter "l" looks too much '
'like the digit "1"'),
'W0333': ('Use of the `` operator',
'backtick',
'Used when the deprecated "``" (backtick) operator is used '
'instead of the str() function.',
{'scope': WarningScope.NODE}),
})
def _underline_token(token):
length = token[3][1] - token[2][1]
offset = token[2][1]
return token[4] + (' ' * offset) + ('^' * length)
def _column_distance(token1, token2):
if token1 == token2:
return 0
if token2[3] < token1[3]:
token1, token2 = token2, token1
if token1[3][0] != token2[2][0]:
return None
return token2[2][1] - token1[3][1]
def _last_token_on_line_is(tokens, line_end, token):
return (
line_end > 0 and tokens.token(line_end-1) == token or
line_end > 1 and tokens.token(line_end-2) == token
and tokens.type(line_end-1) == tokenize.COMMENT)
def _token_followed_by_eol(tokens, position):
return (tokens.type(position+1) == tokenize.NL or
tokens.type(position+1) == tokenize.COMMENT and
tokens.type(position+2) == tokenize.NL)
def _get_indent_length(line):
    """Return the width of the line's leading whitespace.

    Spaces count 1 column and tabs count _TAB_LENGTH columns; the scan
    stops at the first character that is neither.
    """
    width = 0
    for char in line:
        if char not in ' \t':
            break
        width += 1 if char == ' ' else _TAB_LENGTH
    return width
def _get_indent_hint_line(bar_positions, bad_position):
"""Return a line with |s for each of the positions in the given lists."""
if not bar_positions:
return ''
markers = [(pos, '|') for pos in bar_positions]
markers.append((bad_position, '^'))
markers.sort()
line = [' '] * (markers[-1][0] + 1)
for position, marker in markers:
line[position] = marker
return ''.join(line)
class _ContinuedIndent(object):
__slots__ = ('valid_outdent_offsets',
'valid_continuation_offsets',
'context_type',
'token',
'position')
def __init__(self,
context_type,
token,
position,
valid_outdent_offsets,
valid_continuation_offsets):
self.valid_outdent_offsets = valid_outdent_offsets
self.valid_continuation_offsets = valid_continuation_offsets
self.context_type = context_type
self.position = position
self.token = token
# The contexts for hanging indents.
# A hanging indented dictionary value after :
HANGING_DICT_VALUE = 'dict-value'
# Hanging indentation in an expression.
HANGING = 'hanging'
# Hanging indentation in a block header.
HANGING_BLOCK = 'hanging-block'
# Continued indentation inside an expression.
CONTINUED = 'continued'
# Continued indentation in a block header.
CONTINUED_BLOCK = 'continued-block'
SINGLE_LINE = 'single'
WITH_BODY = 'multi'
_CONTINUATION_MSG_PARTS = {
HANGING_DICT_VALUE: ('hanging', ' in dict value'),
HANGING: ('hanging', ''),
HANGING_BLOCK: ('hanging', ' before block'),
CONTINUED: ('continued', ''),
CONTINUED_BLOCK: ('continued', ' before block'),
}
def _Offsets(*args):
"""Valid indentation offsets for a continued line."""
return dict((a, None) for a in args)
def _BeforeBlockOffsets(single, with_body):
    """Valid alternative indent offsets for continued lines before blocks.

    :param single: Valid offset for statements on a single logical line.
    :param with_body: Valid offset for statements on several lines.
    """
    offsets = {}
    offsets[single] = SINGLE_LINE
    offsets[with_body] = WITH_BODY
    return offsets
class TokenWrapper(object):
    """Readable accessors over a raw ``tokenize`` token sequence.

    Each underlying token is the 5-tuple produced by ``tokenize``:
    (type, string, start, end, line).
    """

    def __init__(self, tokens):
        self._token_list = tokens

    def token(self, idx):
        """Return the token string at *idx*."""
        return self._token_list[idx][1]

    def type(self, idx):
        """Return the token type at *idx*."""
        return self._token_list[idx][0]

    def start_line(self, idx):
        """Return the line number on which the token at *idx* starts."""
        return self._token_list[idx][2][0]

    def start_col(self, idx):
        """Return the column at which the token at *idx* starts."""
        return self._token_list[idx][2][1]

    def line(self, idx):
        """Return the full source line holding the token at *idx*."""
        return self._token_list[idx][4]
class ContinuedLineState(object):
    """Tracker for continued indentation inside a logical line.

    Keeps a stack of _ContinuedIndent contexts, one per currently-open
    bracket / lambda / dict value, and answers which columns are valid
    for the next continuation line.
    """
    def __init__(self, tokens, config):
        # Index of the first significant token on the current physical line,
        # or -1 while none has been seen yet.
        self._line_start = -1
        # Stack of _ContinuedIndent contexts for open continuations.
        self._cont_stack = []
        # True when the current line starts with a block opener (if/for/...).
        self._is_block_opener = False
        # Warnings deferred until we know whether the block body shares
        # the line with its opener (see FormatChecker usage).
        self.retained_warnings = []
        self._config = config
        self._tokens = TokenWrapper(tokens)
    @property
    def has_content(self):
        """True when at least one continuation context is open."""
        return bool(self._cont_stack)
    @property
    def _block_indent_size(self):
        # Width of one block indent, with tabs expanded to _TAB_LENGTH.
        return len(self._config.indent_string.replace('\t', ' ' * _TAB_LENGTH))
    @property
    def _continuation_size(self):
        # Configured indent width for hanging/continued lines.
        return self._config.indent_after_paren
    def handle_line_start(self, pos):
        """Record the first non-junk token at the start of a line."""
        if self._line_start > -1:
            return
        self._is_block_opener = self._tokens.token(pos) in _CONTINUATION_BLOCK_OPENERS
        self._line_start = pos
    def next_physical_line(self):
        """Prepares the tracker for a new physical line (NL)."""
        self._line_start = -1
        self._is_block_opener = False
    def next_logical_line(self):
        """Prepares the tracker for a new logical line (NEWLINE).

        A new logical line only starts with block indentation.
        """
        self.next_physical_line()
        self.retained_warnings = []
        self._cont_stack = []
    def add_block_warning(self, token_position, state, valid_offsets):
        """Defer a continuation warning until the block opener is processed."""
        self.retained_warnings.append((token_position, state, valid_offsets))
    def get_valid_offsets(self, idx):
        """Returns the valid offsets for the token at the given position."""
        # The closing brace on a dict or the 'for' in a dict comprehension may
        # reset two indent levels because the dict value is ended implicitly
        stack_top = -1
        if self._tokens.token(idx) in ('}', 'for') and self._cont_stack[-1].token == ':':
            stack_top = -2
        indent = self._cont_stack[stack_top]
        if self._tokens.token(idx) in _CLOSING_BRACKETS:
            valid_offsets = indent.valid_outdent_offsets
        else:
            valid_offsets = indent.valid_continuation_offsets
        # Copy so the caller can amend the offsets without mutating the stack.
        return indent, valid_offsets.copy()
    def _hanging_indent_after_bracket(self, bracket, position):
        """Extracts indentation information for a hanging indent."""
        indentation = _get_indent_length(self._tokens.line(position))
        if self._is_block_opener and self._continuation_size == self._block_indent_size:
            return _ContinuedIndent(
                HANGING_BLOCK,
                bracket,
                position,
                _Offsets(indentation + self._continuation_size, indentation),
                _BeforeBlockOffsets(indentation + self._continuation_size,
                                    indentation + self._continuation_size * 2))
        elif bracket == ':':
            # The colon of a dict value opens its own indent context.
            if self._cont_stack[-1].context_type == CONTINUED:
                # If the dict key was on the same line as the open brace, the new
                # correct indent should be relative to the key instead of the
                # current indent level
                paren_align = self._cont_stack[-1].valid_outdent_offsets
                next_align = self._cont_stack[-1].valid_continuation_offsets.copy()
                # NOTE(review): indexing dict.keys() is Python 2-only; a
                # Python 3 port would need list(next_align)[0] — confirm the
                # supported interpreter versions before touching this.
                next_align[next_align.keys()[0] + self._continuation_size] = True
            else:
                next_align = _Offsets(indentation + self._continuation_size, indentation)
                paren_align = _Offsets(indentation + self._continuation_size, indentation)
            return _ContinuedIndent(HANGING_DICT_VALUE, bracket, position, paren_align, next_align)
        else:
            return _ContinuedIndent(
                HANGING,
                bracket,
                position,
                _Offsets(indentation, indentation + self._continuation_size),
                _Offsets(indentation + self._continuation_size))
    def _continuation_inside_bracket(self, bracket, pos):
        """Extracts indentation information for a continued indent."""
        indentation = _get_indent_length(self._tokens.line(pos))
        if self._is_block_opener and self._tokens.start_col(pos+1) - indentation == self._block_indent_size:
            return _ContinuedIndent(
                CONTINUED_BLOCK,
                bracket,
                pos,
                _Offsets(self._tokens.start_col(pos)),
                _BeforeBlockOffsets(self._tokens.start_col(pos+1),
                                    self._tokens.start_col(pos+1) + self._continuation_size))
        else:
            return _ContinuedIndent(
                CONTINUED,
                bracket,
                pos,
                _Offsets(self._tokens.start_col(pos)),
                _Offsets(self._tokens.start_col(pos+1)))
    def pop_token(self):
        """Close the innermost continuation context."""
        self._cont_stack.pop()
    def push_token(self, token, position):
        """Pushes a new token for continued indentation on the stack.

        Tokens that can modify continued indentation offsets are:
        * opening brackets
        * 'lambda'
        * : inside dictionaries

        push_token relies on the caller to filter out those
        interesting tokens.

        :param token: The concrete token
        :param position: The position of the token in the stream.
        """
        if _token_followed_by_eol(self._tokens, position):
            self._cont_stack.append(
                self._hanging_indent_after_bracket(token, position))
        else:
            self._cont_stack.append(
                self._continuation_inside_bracket(token, position))
class FormatChecker(BaseTokenChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
* use of <> instead of !=
"""
__implements__ = (ITokenChecker, IAstroidChecker, IRawChecker)
# configuration section name
name = 'format'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('max-line-length',
{'default' : 80, 'type' : "int", 'metavar' : '<int>',
'help' : 'Maximum number of characters on a single line.'}),
('ignore-long-lines',
{'type': 'regexp', 'metavar': '<regexp>',
'default': r'^\s*(# )?<?https?://\S+>?$',
'help': ('Regexp for a line that is allowed to be longer than '
'the limit.')}),
('single-line-if-stmt',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : ('Allow the body of an if to be on the same '
'line as the test if there is no else.')}),
('no-space-check',
{'default': ','.join(_NO_SPACE_CHECK_CHOICES),
'type': 'multiple_choice',
'choices': _NO_SPACE_CHECK_CHOICES,
'help': ('List of optional constructs for which whitespace '
'checking is disabled')}),
('max-module-lines',
{'default' : 1000, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of lines in a module'}
),
('indent-string',
{'default' : ' ', 'type' : "string", 'metavar' : '<string>',
'help' : 'String used as indentation unit. This is usually \
" " (4 spaces) or "\\t" (1 tab).'}),
('indent-after-paren',
{'type': 'int', 'metavar': '<int>', 'default': 4,
'help': 'Number of spaces of indent required inside a hanging '
' or continued line.'}),
)
def __init__(self, linter=None):
BaseTokenChecker.__init__(self, linter)
self._lines = None
self._visited_lines = None
self._bracket_stack = [None]
def _pop_token(self):
self._bracket_stack.pop()
self._current_line.pop_token()
def _push_token(self, token, idx):
self._bracket_stack.append(token)
self._current_line.push_token(token, idx)
def new_line(self, tokens, line_end, line_start):
"""a new line has been encountered, process it if necessary"""
if _last_token_on_line_is(tokens, line_end, ';'):
self.add_message('unnecessary-semicolon', line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split('\n')[0]
self.check_lines(line, line_num)
def process_module(self, module):
self._keywords_with_parens = set()
if 'print_function' in module.future_imports:
self._keywords_with_parens.add('print')
def _check_keyword_parentheses(self, tokens, start):
"""Check that there are not unnecessary parens after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line, and it is followed by a colon, and contains no commas (i.e. is not a
tuple).
Args:
tokens: list of Tokens; the entire list of Tokens.
start: int; the position of the keyword in the token list.
"""
# If the next token is not a paren, we're fine.
if self._inside_brackets(':') and tokens[start][1] == 'for':
self._pop_token()
if tokens[start+1][1] != '(':
return
found_and_or = False
depth = 0
keyword_token = tokens[start][1]
line_num = tokens[start][2][0]
for i in xrange(start, len(tokens) - 1):
token = tokens[i]
# If we hit a newline, then assume any parens were for continuation.
if token[0] == tokenize.NL:
return
if token[1] == '(':
depth += 1
elif token[1] == ')':
depth -= 1
if not depth:
# ')' can't happen after if (foo), since it would be a syntax error.
if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
tokens[i+1][0] in (tokenize.NEWLINE, tokenize.ENDMARKER,
tokenize.COMMENT)):
# The empty tuple () is always accepted.
if i == start + 2:
return
if keyword_token == 'not':
if not found_and_or:
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token in ('return', 'yield'):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token not in self._keywords_with_parens:
if not (tokens[i+1][1] == 'in' and found_and_or):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
return
elif depth == 1:
# This is a tuple, which is always acceptable.
if token[1] == ',':
return
# 'and' and 'or' are the only boolean operators with lower precedence
# than 'not', so parens are only required when they are found.
elif token[1] in ('and', 'or'):
found_and_or = True
# A yield inside an expression must always be in parentheses,
# quit early without error.
elif token[1] == 'yield':
return
# A generator expression always has a 'for' token in it, and
# the 'for' token is only legal inside parens when it is in a
# generator expression. The parens are necessary here, so bail
# without an error.
elif token[1] == 'for':
return
def _opening_bracket(self, tokens, i):
self._push_token(tokens[i][1], i)
# Special case: ignore slices
if tokens[i][1] == '[' and tokens[i+1][1] == ':':
return
if (i > 0 and (tokens[i-1][0] == tokenize.NAME and
not (keyword.iskeyword(tokens[i-1][1]))
or tokens[i-1][1] in _CLOSING_BRACKETS)):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_IGNORE, _MUST_NOT))
def _closing_bracket(self, tokens, i):
if self._inside_brackets(':'):
self._pop_token()
self._pop_token()
# Special case: ignore slices
if tokens[i-1][1] == ':' and tokens[i][1] == ']':
return
policy_before = _MUST_NOT
if tokens[i][1] in _CLOSING_BRACKETS and tokens[i-1][1] == ',':
if _TRAILING_COMMA in self.config.no_space_check:
policy_before = _IGNORE
self._check_space(tokens, i, (policy_before, _IGNORE))
def _check_equals_spacing(self, tokens, i):
"""Check the spacing of a single equals sign."""
if self._inside_brackets('(') or self._inside_brackets('lambda'):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_MUST, _MUST))
def _open_lambda(self, tokens, i): # pylint:disable=unused-argument
self._push_token('lambda', i)
def _handle_colon(self, tokens, i):
# Special case: ignore slices
if self._inside_brackets('['):
return
if (self._inside_brackets('{') and
_DICT_SEPARATOR in self.config.no_space_check):
policy = (_IGNORE, _IGNORE)
else:
policy = (_MUST_NOT, _MUST)
self._check_space(tokens, i, policy)
if self._inside_brackets('lambda'):
self._pop_token()
elif self._inside_brackets('{'):
self._push_token(':', i)
def _handle_comma(self, tokens, i):
# Only require a following whitespace if this is
# not a hanging comma before a closing bracket.
if tokens[i+1][1] in _CLOSING_BRACKETS:
self._check_space(tokens, i, (_MUST_NOT, _IGNORE))
else:
self._check_space(tokens, i, (_MUST_NOT, _MUST))
if self._inside_brackets(':'):
self._pop_token()
def _check_surrounded_by_space(self, tokens, i):
"""Check that a binary operator is surrounded by exactly one space."""
self._check_space(tokens, i, (_MUST, _MUST))
def _check_space(self, tokens, i, policies):
def _policy_string(policy):
if policy == _MUST:
return 'Exactly one', 'required'
else:
return 'No', 'allowed'
def _name_construct(token):
if tokens[i][1] == ',':
return 'comma'
elif tokens[i][1] == ':':
return ':'
elif tokens[i][1] in '()[]{}':
return 'bracket'
elif tokens[i][1] in ('<', '>', '<=', '>=', '!=', '=='):
return 'comparison'
else:
if self._inside_brackets('('):
return 'keyword argument assignment'
else:
return 'assignment'
good_space = [True, True]
pairs = [(tokens[i-1], tokens[i]), (tokens[i], tokens[i+1])]
for other_idx, (policy, token_pair) in enumerate(zip(policies, pairs)):
if token_pair[other_idx][0] in _EOL or policy == _IGNORE:
continue
distance = _column_distance(*token_pair)
if distance is None:
continue
good_space[other_idx] = (
(policy == _MUST and distance == 1) or
(policy == _MUST_NOT and distance == 0))
warnings = []
if not any(good_space) and policies[0] == policies[1]:
warnings.append((policies[0], 'around'))
else:
for ok, policy, position in zip(good_space, policies, ('before', 'after')):
if not ok:
warnings.append((policy, position))
for policy, position in warnings:
construct = _name_construct(tokens[i])
count, state = _policy_string(policy)
self.add_message('bad-whitespace', line=tokens[i][2][0],
args=(count, state, position, construct,
_underline_token(tokens[i])))
def _inside_brackets(self, left):
return self._bracket_stack[-1] == left
def _handle_old_ne_operator(self, tokens, i):
if tokens[i][1] == '<>':
self.add_message('old-ne-operator', line=tokens[i][2][0])
def _prepare_token_dispatcher(self):
raw = [
(_KEYWORD_TOKENS,
self._check_keyword_parentheses),
(_OPENING_BRACKETS, self._opening_bracket),
(_CLOSING_BRACKETS, self._closing_bracket),
(['='], self._check_equals_spacing),
(_SPACED_OPERATORS, self._check_surrounded_by_space),
([','], self._handle_comma),
([':'], self._handle_colon),
(['lambda'], self._open_lambda),
(['<>'], self._handle_old_ne_operator),
]
dispatch = {}
for tokens, handler in raw:
for token in tokens:
dispatch[token] = handler
return dispatch
def process_tokens(self, tokens):
"""process tokens and search for :
_ non strict indentation (i.e. not always using the <indent> parameter as
indent unit)
_ too long lines (i.e. longer than <max_chars>)
_ optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
self._bracket_stack = [None]
indents = [0]
check_equal = False
line_num = 0
self._lines = {}
self._visited_lines = {}
token_handlers = self._prepare_token_dispatcher()
self._current_line = ContinuedLineState(tokens, self.config)
for idx, (tok_type, token, start, _, line) in enumerate(tokens):
if start[0] != line_num:
line_num = start[0]
# A tokenizer oddity: if an indented line contains a multi-line
# docstring, the line member of the INDENT token does not contain
# the full line; therefore we check the next token on the line.
if tok_type == tokenize.INDENT:
self.new_line(TokenWrapper(tokens), idx-1, idx+1)
else:
self.new_line(TokenWrapper(tokens), idx-1, idx)
if tok_type == tokenize.NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = True
self._process_retained_warnings(TokenWrapper(tokens), idx)
self._current_line.next_logical_line()
elif tok_type == tokenize.INDENT:
check_equal = False
self.check_indent_level(token, indents[-1]+1, line_num)
indents.append(indents[-1]+1)
elif tok_type == tokenize.DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = True
if len(indents) > 1:
del indents[-1]
elif tok_type == tokenize.NL:
self._check_continued_indentation(TokenWrapper(tokens), idx+1)
self._current_line.next_physical_line()
elif tok_type != tokenize.COMMENT:
self._current_line.handle_line_start(idx)
# This is the first concrete token following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
if check_equal:
check_equal = False
self.check_indent_level(line, indents[-1], line_num)
if tok_type == tokenize.NUMBER and token.endswith('l'):
self.add_message('lowercase-l-suffix', line=line_num)
try:
handler = token_handlers[token]
except KeyError:
pass
else:
handler(tokens, idx)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.config.max_module_lines:
self.add_message('too-many-lines', args=line_num, line=1)
def _process_retained_warnings(self, tokens, current_pos):
single_line_block_stmt = not _last_token_on_line_is(tokens, current_pos, ':')
for indent_pos, state, offsets in self._current_line.retained_warnings:
block_type = offsets[tokens.start_col(indent_pos)]
hints = dict((k, v) for k, v in offsets.iteritems()
if v != block_type)
if single_line_block_stmt and block_type == WITH_BODY:
self._add_continuation_message(state, hints, tokens, indent_pos)
elif not single_line_block_stmt and block_type == SINGLE_LINE:
self._add_continuation_message(state, hints, tokens, indent_pos)
def _check_continued_indentation(self, tokens, next_idx):
# Do not issue any warnings if the next line is empty.
if not self._current_line.has_content or tokens.type(next_idx) == tokenize.NL:
return
state, valid_offsets = self._current_line.get_valid_offsets(next_idx)
# Special handling for hanging comments. If the last line ended with a
# comment and the new line contains only a comment, the line may also be
# indented to the start of the previous comment.
if (tokens.type(next_idx) == tokenize.COMMENT and
tokens.type(next_idx-2) == tokenize.COMMENT):
valid_offsets[tokens.start_col(next_idx-2)] = True
# We can only decide if the indentation of a continued line before opening
# a new block is valid once we know of the body of the block is on the
# same line as the block opener. Since the token processing is single-pass,
# emitting those warnings is delayed until the block opener is processed.
if (state.context_type in (HANGING_BLOCK, CONTINUED_BLOCK)
and tokens.start_col(next_idx) in valid_offsets):
self._current_line.add_block_warning(next_idx, state, valid_offsets)
elif tokens.start_col(next_idx) not in valid_offsets:
self._add_continuation_message(state, valid_offsets, tokens, next_idx)
def _add_continuation_message(self, state, offsets, tokens, position):
readable_type, readable_position = _CONTINUATION_MSG_PARTS[state.context_type]
hint_line = _get_indent_hint_line(offsets, tokens.start_col(position))
self.add_message(
'bad-continuation',
line=tokens.start_line(position),
args=(readable_type, readable_position, tokens.line(position), hint_line))
@check_messages('multiple-statements')
def visit_default(self, node):
"""check the node line number and check it if not yet done"""
if not node.is_statement:
return
if not node.root().pure_python:
return # XXX block visit of child nodes
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
else:
# The line on which a finally: occurs in a try/finally
# is not directly represented in the AST. We infer it
# by taking the last line of the body and adding 1, which
# should be the line of finally:
if (isinstance(node.parent, nodes.TryFinally)
and node in node.parent.finalbody):
prev_line = node.parent.body[0].tolineno + 1
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
self._check_multi_statement_line(node, line)
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in xrange(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append('')
def _check_multi_statement_line(self, node, line):
"""Check for lines containing multiple statements."""
# Do not warn about multiple nested context managers
# in with statements.
if isinstance(node, nodes.With):
return
# For try... except... finally..., the two nodes
# appear to be on the same line due to how the AST is built.
if (isinstance(node, nodes.TryExcept) and
isinstance(node.parent, nodes.TryFinally)):
return
if (isinstance(node.parent, nodes.If) and not node.parent.orelse
and self.config.single_line_if_stmt):
return
self.add_message('multiple-statements', node=node)
self._visited_lines[line] = 2
@check_messages('backtick')
def visit_backquote(self, node):
self.add_message('backtick', node=node)
    def check_lines(self, lines, i):
        """Check physical lines for length and whitespace problems.

        *lines* is a chunk of source text, *i* the line number of its first
        line.  Emits missing-final-newline, trailing-whitespace and
        line-too-long messages.
        """
        max_chars = self.config.max_line_length
        ignore_long_line = self.config.ignore_long_lines
        for line in lines.splitlines(True):
            if not line.endswith('\n'):
                self.add_message('missing-final-newline', line=i)
            else:
                stripped_line = line.rstrip()
                # Anything after the rstripped text other than a bare line
                # ending is trailing whitespace.
                if line[len(stripped_line):] not in ('\n', '\r\n'):
                    self.add_message('trailing-whitespace', line=i)
                # Don't count excess whitespace in the line length.
                line = stripped_line
            # A pylint "disable" pragma exempts the code part of the line
            # from the length check.
            mobj = OPTION_RGX.search(line)
            if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable':
                line = line.split('#')[0].rstrip()
            if len(line) > max_chars and not ignore_long_line.search(line):
                self.add_message('line-too-long', line=i, args=(len(line), max_chars))
            i += 1
    def check_indent_level(self, string, expected, line_num):
        """Return the indent level of *string* and report indentation issues.

        *expected* is the number of indent units the line should have.
        Emits mixed-indentation when tabs and spaces are interleaved and
        bad-indentation when the width differs from the expectation.
        """
        indent = self.config.indent_string
        if indent == '\\t': # \t is not interpreted in the configuration file
            indent = '\t'
        level = 0
        unit_size = len(indent)
        # Consume whole indent units from the front of the string.
        while string[:unit_size] == indent:
            string = string[unit_size:]
            level += 1
        suppl = ''
        # Collect leftover whitespace that does not form a full unit.
        while string and string[0] in ' \t':
            if string[0] != indent[0]:
                if string[0] == '\t':
                    args = ('tab', 'space')
                else:
                    args = ('space', 'tab')
                self.add_message('mixed-indentation', args=args, line=line_num)
                return level
            suppl += string[0]
            string = string[1:]
        if level != expected or suppl:
            i_type = 'spaces'
            if indent[0] == '\t':
                i_type = 'tabs'
            self.add_message('bad-indentation', line=line_num,
                             args=(level * unit_size + len(suppl), i_type,
                                   expected * unit_size))
def register(linter):
    """Required method to auto-register this checker with *linter*."""
    checker = FormatChecker(linter)
    linter.register_checker(checker)
| hkupty/python-mode | pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/format.py | Python | lgpl-3.0 | 38,105 | [
"VisIt"
] | 416cbb27ee5d136b32fee6fc52b6890b1e5d15c0d7c474fd503153b048c78e9f |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
class PRIORITY:
LOWEST = -100
LOWER = -50
LOW = -10
NORMAL = 0
HIGH = 10
HIGHER = 50
HIGHEST = 100
class SORT_ORDER:
FIRST = 0
SECOND = 1
THIRD = 2
FOURTH = 3
FIFTH = 4
LAST = 100
class DBMS:
ACCESS = "Microsoft Access"
DB2 = "IBM DB2"
FIREBIRD = "Firebird"
MAXDB = "SAP MaxDB"
MSSQL = "Microsoft SQL Server"
MYSQL = "MySQL"
ORACLE = "Oracle"
PGSQL = "PostgreSQL"
SQLITE = "SQLite"
SYBASE = "Sybase"
HSQLDB = "HSQLDB"
class DBMS_DIRECTORY_NAME:
ACCESS = "access"
DB2 = "db2"
FIREBIRD = "firebird"
MAXDB = "maxdb"
MSSQL = "mssqlserver"
MYSQL = "mysql"
ORACLE = "oracle"
PGSQL = "postgresql"
SQLITE = "sqlite"
SYBASE = "sybase"
HSQLDB = "hsqldb"
class CUSTOM_LOGGING:
PAYLOAD = 9
TRAFFIC_OUT = 8
TRAFFIC_IN = 7
class OS:
LINUX = "Linux"
WINDOWS = "Windows"
class PLACE:
GET = "GET"
POST = "POST"
URI = "URI"
COOKIE = "Cookie"
USER_AGENT = "User-Agent"
REFERER = "Referer"
HOST = "Host"
CUSTOM_POST = "(custom) POST"
CUSTOM_HEADER = "(custom) HEADER"
class POST_HINT:
SOAP = "SOAP"
JSON = "JSON"
JSON_LIKE = "JSON-like"
MULTIPART = "MULTIPART"
XML = "XML (generic)"
ARRAY_LIKE = "Array-like"
class HTTPMETHOD:
GET = "GET"
POST = "POST"
HEAD = "HEAD"
PUT = "PUT"
DELETE = "DELETE"
TRACE = "TRACE"
OPTIONS = "OPTIONS"
CONNECT = "CONNECT"
PATCH = "PATCH"
class NULLCONNECTION:
HEAD = "HEAD"
RANGE = "Range"
SKIP_READ = "skip-read"
class REFLECTIVE_COUNTER:
MISS = "MISS"
HIT = "HIT"
class CHARSET_TYPE:
BINARY = 1
DIGITS = 2
HEXADECIMAL = 3
ALPHA = 4
ALPHANUM = 5
class HEURISTIC_TEST:
CASTED = 1
NEGATIVE = 2
POSITIVE = 3
# Regular expressions recognizing password-hash formats. Every pattern is
# case-insensitive ((?i)) and anchored to the entire string (\A ... \Z).
class HASH:
    MYSQL = r'(?i)\A\*[0-9a-f]{40}\Z'
    MYSQL_OLD = r'(?i)\A(?![0-9]+\Z)[0-9a-f]{16}\Z'
    POSTGRES = r'(?i)\Amd5[0-9a-f]{32}\Z'
    MSSQL = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{40}\Z'
    MSSQL_OLD = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{80}\Z'
    MSSQL_NEW = r'(?i)\A0x0200[0-9a-f]{8}[0-9a-f]{128}\Z'
    ORACLE = r'(?i)\As:[0-9a-f]{60}\Z'
    ORACLE_OLD = r'(?i)\A[01-9a-f]{16}\Z'  # NOTE(review): [01-9a-f] is equivalent to [0-9a-f]
    MD5_GENERIC = r'(?i)\A[0-9a-f]{32}\Z'
    SHA1_GENERIC = r'(?i)\A[0-9a-f]{40}\Z'
    # Fixed lengths below: SHA-224/384/512 hex digests are 56/96/128 characters
    # (28/48/64 *bytes*); the previous patterns used the byte counts and could
    # therefore never match a real digest.
    SHA224_GENERIC = r'(?i)\A[0-9a-f]{56}\Z'
    SHA384_GENERIC = r'(?i)\A[0-9a-f]{96}\Z'
    SHA512_GENERIC = r'(?i)\A[0-9a-f]{128}\Z'
    CRYPT_GENERIC = r'(?i)\A(?!\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z)(?![0-9]+\Z)[./0-9A-Za-z]{13}\Z'
    WORDPRESS = r'(?i)\A\$P\$[./0-9A-Za-z]{31}\Z'
# Reference: http://www.zytrax.com/tech/web/mobile_ids.html
# (display name, User-Agent string) pairs for imitating mobile devices.
class MOBILES:
    BLACKBERRY = ("BlackBerry 9900", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+")
    GALAXY = ("Samsung Galaxy S", "Mozilla/5.0 (Linux; U; Android 2.2; en-US; SGH-T959D Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1")
    HP = ("HP iPAQ 6365", "Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)")
    HTC = ("HTC Sensation", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30")
    IPHONE = ("Apple iPhone 4s", "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3")
    NEXUS = ("Google Nexus 7", "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19")
    NOKIA = ("Nokia N97", "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344")
# Supported proxy protocols.
class PROXY_TYPE:
    HTTP = "HTTP"
    HTTPS = "HTTPS"
    SOCKS4 = "SOCKS4"
    SOCKS5 = "SOCKS5"
# Windows registry operation verbs.
class REGISTRY_OPERATION:
    READ = "read"
    ADD = "add"
    DELETE = "delete"
# Output formats for dumped table data.
class DUMP_FORMAT:
    CSV = "CSV"
    HTML = "HTML"
    SQLITE = "SQLITE"
# Canonical spellings of HTTP header names.
class HTTP_HEADER:
    ACCEPT = "Accept"
    ACCEPT_CHARSET = "Accept-Charset"
    ACCEPT_ENCODING = "Accept-Encoding"
    ACCEPT_LANGUAGE = "Accept-Language"
    AUTHORIZATION = "Authorization"
    CACHE_CONTROL = "Cache-Control"
    CONNECTION = "Connection"
    CONTENT_ENCODING = "Content-Encoding"
    CONTENT_LENGTH = "Content-Length"
    CONTENT_RANGE = "Content-Range"
    CONTENT_TYPE = "Content-Type"
    COOKIE = "Cookie"
    EXPIRES = "Expires"
    HOST = "Host"
    IF_MODIFIED_SINCE = "If-Modified-Since"
    LAST_MODIFIED = "Last-Modified"
    LOCATION = "Location"
    PRAGMA = "Pragma"
    PROXY_AUTHORIZATION = "Proxy-Authorization"
    PROXY_CONNECTION = "Proxy-Connection"
    RANGE = "Range"
    REFERER = "Referer"
    SERVER = "Server"
    SET_COOKIE = "Set-Cookie"
    TRANSFER_ENCODING = "Transfer-Encoding"
    URI = "URI"
    USER_AGENT = "User-Agent"
    VIA = "Via"
    X_POWERED_BY = "X-Powered-By"
# Expected return-value kinds for queries.
class EXPECTED:
    BOOL = "bool"
    INT = "int"
# Value types for configuration options.
class OPTION_TYPE:
    BOOLEAN = "boolean"
    INTEGER = "integer"
    FLOAT = "float"
    STRING = "string"
# Keys used to persist session state in the hash database.
class HASHDB_KEYS:
    DBMS = "DBMS"
    DBMS_FORK = "DBMS_FORK"
    CHECK_WAF_RESULT = "CHECK_WAF_RESULT"
    CONF_TMP_PATH = "CONF_TMP_PATH"
    KB_ABS_FILE_PATHS = "KB_ABS_FILE_PATHS"
    KB_BRUTE_COLUMNS = "KB_BRUTE_COLUMNS"
    KB_BRUTE_TABLES = "KB_BRUTE_TABLES"
    KB_CHARS = "KB_CHARS"
    KB_DYNAMIC_MARKINGS = "KB_DYNAMIC_MARKINGS"
    KB_INJECTIONS = "KB_INJECTIONS"
    KB_ERROR_CHUNK_LENGTH = "KB_ERROR_CHUNK_LENGTH"
    KB_XP_CMDSHELL_AVAILABLE = "KB_XP_CMDSHELL_AVAILABLE"
    OS = "OS"
# Yes/no answer flags for redirection handling.
class REDIRECTION:
    YES = "Y"
    NO = "N"
# Payload taxonomy: technique/parameter/risk/clause lookup tables plus
# nested enums for detection methods and payload placement strategies.
class PAYLOAD:
    # Numeric technique ids -> descriptions (ids mirror TECHNIQUE below).
    SQLINJECTION = {
        1: "boolean-based blind",
        2: "error-based",
        3: "inline query",
        4: "stacked queries",
        5: "AND/OR time-based blind",
        6: "UNION query",
    }
    # How the injectable parameter is embedded in the query.
    PARAMETER = {
        1: "Unescaped numeric",
        2: "Single quoted string",
        3: "LIKE single quoted string",
        4: "Double quoted string",
        5: "LIKE double quoted string",
    }
    # Risk rating scale.
    RISK = {
        0: "No risk",
        1: "Low risk",
        2: "Medium risk",
        3: "High risk",
    }
    # SQL clause in which a payload may be placed (0 == anywhere).
    CLAUSE = {
        0: "Always",
        1: "WHERE",
        2: "GROUP BY",
        3: "ORDER BY",
        4: "LIMIT",
        5: "OFFSET",
        6: "TOP",
        7: "Table name",
        8: "Column name",
    }
    # Detection method identifiers.
    class METHOD:
        COMPARISON = "comparison"
        GREP = "grep"
        TIME = "time"
        UNION = "union"
    # Numeric ids of the injection techniques (keys of SQLINJECTION).
    class TECHNIQUE:
        BOOLEAN = 1
        ERROR = 2
        QUERY = 3
        STACKED = 4
        TIME = 5
        UNION = 6
    # Where the payload sits relative to the original parameter value.
    class WHERE:
        ORIGINAL = 1
        NEGATIVE = 2
        REPLACE = 3
# Preset bundles of query actions for the interactive wizard, by depth.
class WIZARD:
    BASIC = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba")
    INTERMEDIATE = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getUsers", "getDbs", "getTables", "getSchema", "excludeSysDbs")
    ALL = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getHostname", "getUsers", "getPasswordHashes", "getPrivileges", "getRoles", "dumpAll")
# Tri-state flag for adjusting the time-based-blind delay.
class ADJUST_TIME_DELAY:
    DISABLE = -1
    NO = 0
    YES = 1
# Server-side web technologies (file extensions) for web-shell staging.
class WEB_API:
    PHP = "php"
    ASP = "asp"
    ASPX = "aspx"
    JSP = "jsp"
# Numeric identifiers for the kinds of content produced in results output.
class CONTENT_TYPE:
    TECHNIQUES = 0
    DBMS_FINGERPRINT = 1
    BANNER = 2
    CURRENT_USER = 3
    CURRENT_DB = 4
    HOSTNAME = 5
    IS_DBA = 6
    USERS = 7
    PASSWORDS = 8
    PRIVILEGES = 9
    ROLES = 10
    DBS = 11
    TABLES = 12
    COLUMNS = 13
    SCHEMA = 14
    COUNT = 15
    DUMP_TABLE = 16
    SEARCH = 17
    SQL_QUERY = 18
    COMMON_TABLES = 19
    COMMON_COLUMNS = 20
    FILE_READ = 21
    FILE_WRITE = 22
    OS_CMD = 23
    REG_READ = 24
# Maps internal action names to the CONTENT_TYPE of the data they produce.
PART_RUN_CONTENT_TYPES = {
    "checkDbms": CONTENT_TYPE.TECHNIQUES,
    "getFingerprint": CONTENT_TYPE.DBMS_FINGERPRINT,
    "getBanner": CONTENT_TYPE.BANNER,
    "getCurrentUser": CONTENT_TYPE.CURRENT_USER,
    "getCurrentDb": CONTENT_TYPE.CURRENT_DB,
    "getHostname": CONTENT_TYPE.HOSTNAME,
    "isDba": CONTENT_TYPE.IS_DBA,
    "getUsers": CONTENT_TYPE.USERS,
    "getPasswordHashes": CONTENT_TYPE.PASSWORDS,
    "getPrivileges": CONTENT_TYPE.PRIVILEGES,
    "getRoles": CONTENT_TYPE.ROLES,
    "getDbs": CONTENT_TYPE.DBS,
    "getTables": CONTENT_TYPE.TABLES,
    "getColumns": CONTENT_TYPE.COLUMNS,
    "getSchema": CONTENT_TYPE.SCHEMA,
    "getCount": CONTENT_TYPE.COUNT,
    "dumpTable": CONTENT_TYPE.DUMP_TABLE,
    "search": CONTENT_TYPE.SEARCH,
    "sqlQuery": CONTENT_TYPE.SQL_QUERY,
    "tableExists": CONTENT_TYPE.COMMON_TABLES,
    "columnExists": CONTENT_TYPE.COMMON_COLUMNS,
    "readFile": CONTENT_TYPE.FILE_READ,
    "writeFile": CONTENT_TYPE.FILE_WRITE,
    "osCmd": CONTENT_TYPE.OS_CMD,
    "regRead": CONTENT_TYPE.REG_READ
}
# Progress states for produced content.
class CONTENT_STATUS:
    IN_PROGRESS = 0
    COMPLETE = 1
# Supported HTTP authentication mechanisms.
class AUTH_TYPE:
    BASIC = "basic"
    DIGEST = "digest"
    NTLM = "ntlm"
    PKI = "pki"
# Contexts for shell tab-completion.
class AUTOCOMPLETE_TYPE:
    SQL = 0
    OS = 1
    SQLMAP = 2
# Free-text notes attached to findings.
class NOTE:
    FALSE_POSITIVE_OR_UNEXPLOITABLE = "false positive or unexploitable"
# Filename prefixes for temporary files created via mkstemp.
class MKSTEMP_PREFIX:
    HASHES = "sqlmaphashes-"
    CRAWLER = "sqlmapcrawler-"
    IPC = "sqlmapipc-"
    TESTING = "sqlmaptesting-"
    RESULTS = "sqlmapresults-"
    COOKIE_JAR = "sqlmapcookiejar-"
    BIG_ARRAY = "sqlmapbigarray-"
| undefinedv/Jingubang | sqlmap/lib/core/enums.py | Python | gpl-3.0 | 9,868 | [
"Galaxy"
] | e0466b33ff2ed434ef67f94afcc19191f1f85b91bd96e6d602663f38e84e84f4 |
from .cdk import CDApproximator
| omimo/xRBM | xrbm/train/__init__.py | Python | mit | 32 | [
"CDK"
] | 132a44e5f921bcd03847631b68b6b33e212a314fc629df4c829f9eb8f49607f7 |
import AlGDock.BindingPMF_plots
from AlGDock.BindingPMF import *
self = AlGDock.BindingPMF_plots.BPMF_plots()
#import numpy as np
#
#for k in range(self._cool_cycle):
# confs = self.confs['cool']['samples'][-1][k]
# for c in range(len(confs)):
# self.universe.setConfiguration(Configuration(self.universe,confs[c]))
# self.universe.normalizeConfiguration()
# self.confs['cool']['samples'][-1][k][c] = np.copy(self.universe.configuration().array)
import itertools
confs = [self.confs['cool']['samples'][-1][k] for k in range(self._cool_cycle)]
confs = np.array([conf[self.molecule.heavy_atoms,:] for conf in itertools.chain.from_iterable(confs)])
from pyRMSD.matrixHandler import MatrixHandler
rmsd_matrix = MatrixHandler().createMatrix(confs,'QCP_SERIAL_CALCULATOR')
# NOSUP_SERIAL_CALCULATOR
#GBSA_energy = [(self.cool_Es[-1][k]['LNAMD_GBSA'][:,-1]-self.cool_Es[-1][k]['LNAMD_Gas'][:,-1]) for k in range(self._cool_cycle)]
#GBSA_energy = np.array(list(itertools.chain.from_iterable(GBSA_energy)))
cum_Nk = np.cumsum([len(self.confs['cool']['samples'][-1][k]) for k in range(self._cool_cycle)])
# # Compute distance matrix with centering
# self._write_traj('cool.dcd',confs,moiety='L')
# import mdtraj as md
# traj = md.load('cool.dcd',top=self._FNs['prmtop']['L'])
# dist_matrix = [mdtraj.rmsd(traj,traj,frame=k,atom_indices=traj.topology.select('type!=H')) for k in range(N)]
# dist_matrix = np.array(dist_matrix)
#class RodriguezLaio:
# def __init__(self, molecule, confs):
#
# def _rmsd_pair(conf1, conf2):
# return np.array([])
# Compute distance matrix without centering
import numpy as np
N = len(confs)
# This does not work in the scipy.cluster.Z() function for some reason
#dist_matrix = [0. if j>=k else np.sqrt(np.sum((confs[j][self.molecule.heavy_atoms,:]-confs[k][self.molecule.heavy_atoms,:])**2)/self.molecule.nhatoms) for j in range(N) for k in range(N)]
#dist_matrix = np.reshape(np.array(dist_matrix), (N,N))
#dist_matrix += np.transpose(dist_matrix)
#flat_dist_matrix = np.array([np.sqrt(np.sum((confs[j]-confs[k])**2)/self.molecule.nhatoms) for j in range(N) for k in range(j+1,N)])
# Hierarchical Clustering
import scipy.cluster
Z = scipy.cluster.hierarchy.complete(rmsd_matrix.get_data())
#
#nclusters = []
#rmv = []
#for dcutoff in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7]:
# assignments = np.array(scipy.cluster.hierarchy.fcluster(Z, dcutoff, criterion='distance'))
# nc = len(set(assignments))
# GBSA_cluster_variances = []
# for n in range(nc):
# GBSA_n = GBSA_energy[assignments==n]
# if len(GBSA_n)>1:
# GBSA_cluster_variances.append(np.var(GBSA_n))
# if len(GBSA_cluster_variances)>0:
# rmv.append(np.sqrt(np.mean(GBSA_cluster_variances)))
# else:
# rmv.append(0.)
# nclusters.append(nc)
assignments = np.array(scipy.cluster.hierarchy.fcluster(Z, 0.2, criterion='distance'))
cum_Nclusters = [len(set(assignments[:cum_Nk[k]])) for k in range(self._cool_cycle)]
# Density-based clustering
# rho = sum(dist_matrix<0.1,0) # Parzen window estimate of density
# k nearest neighbors density estimate
# sdist_matrix = np.sort(dist_matrix)
# k = max(10,int(N/10.))
# rho = 1/sdist_matrix[:,k]
# # Find the minimum distance to point with higher density
# delta = []
# for j in range(N):
# distances = [dist_matrix[j,k] for k in range(N) if rho[k]>rho[j]]
# if len(distances)>0:
# delta.append(min(distances))
# else:
# delta.append(np.max(dist_matrix))
#
# import matplotlib.pyplot as plt
# plt.ion()
# plt.plot(rho,delta,'.')
| luizcieslak/AlGDock | notebook/2015-03-30-Cluster.py | Python | mit | 3,566 | [
"MDTraj"
] | 54927c150fcdad4bfda313ad17227e671790fc51f28829cf8f5373fe417334fc |
# Licensed under an MIT open source license - see LICENSE
'''
Data Reduction Routines for PPV data cubes
'''
import numpy as np
from scipy import ndimage as nd
from operator import itemgetter
from itertools import groupby
from astropy.io import fits
import copy
from scipy.optimize import curve_fit
from astropy.convolution import convolve
class property_arrays(object):
'''
Create property arrays from a data cube
Creates centroid (moment 1), integrated intensity, velocity dispersion (moment 2), total intensity (moment 0)
'''
def __init__(self, cube, clip_level = 3,rms_noise = None, kernel_size=None, save_name=None):
super(property_arrays, self).__init__()
self.cube = cube[0]#cube.data
self.header = cube[1]#cube.header
self.array_shape = (self.cube.shape[1],self.cube.shape[2])
self.save_name = save_name
self.clean_cube = np.ones(self.cube.shape)
self.noise_array = None
self.nan_mask = np.invert(np.isnan(self.cube), dtype=bool)
self.weight_cube = np.ones(self.cube.shape)
for i in range(self.cube.shape[1]):
for j in range(self.cube.shape[2]):
self.weight_cube[:,i,j] = np.arange(1,self.cube.shape[0]+1,1)
self.sigma = None
self.property_dict = {}
if rms_noise != None:
if isinstance(rms_noise, float):
self.noise_type_flag = 1
self.sigma = rms_noise
self.noise_array = np.ones(self.array_shape) * self.sigma
self.noise_mask = np.ones(self.array_shape)
self.clean_cube[self.cube < (clip_level * self.sigma)] = 0.0
self.clean_cube *= np.ma.masked_invalid(self.cube)
else:
self.noise_type_flag = 2
self.clean_cube, self.noise_array, self.sigma = given_noise_cube(self.cube, rms_noise, clip_level)
self.noise_mask = self.noise_array < (clip_level * self.sigma)
else:
if not kernel_size:
raise ValueError("Kernel Size must be given for moment masking.")
self.noise_type_flag = 0
self.clean_cube, self.mask_cube, self.sigma = moment_masking(self.cube, clip_level, kernel_size)
# self.noise_mask = self.noise_array < (clip_level * self.sigma)
self.nan_mask += self.mask_cube
def moment0(self):
moment0_array = np.sum(self.clean_cube * self.nan_mask, axis=0)
# moment0_array *= self.noise_mask
error_array = self.sigma * np.sqrt(np.sum(self.nan_mask * (self.clean_cube>0), axis=0))
# error_array *= self.noise_mask
self.property_dict["moment0"] = moment0_array, error_array
return self
def centroid(self):
centroid_array = np.sum(self.clean_cube * self.nan_mask * self.weight_cube, axis=0) / self.property_dict["moment0"][0]
# centroid_array *= self.noise_mask
first_err_term = self.sigma**2. * np.sqrt(np.sum(self.weight_cube[np.nonzero(self.clean_cube * self.nan_mask)], axis=0)) / self.property_dict["moment0"][0]**2.
second_err_term = self.property_dict["moment0"][1]**2. / self.property_dict["moment0"][0]**2.
error_array = np.sqrt(first_err_term + second_err_term)
# error_array *= self.noise_mask
self.property_dict["centroid"] = centroid_array, error_array
return self
def integrated_intensity(self):
masked_clean = self.clean_cube * self.nan_mask
int_intensity_array = np.ones(self.array_shape)
error_array = np.ones(self.array_shape)
for i in range(self.array_shape[0]):
for j in range(self.array_shape[1]):
z = np.where(masked_clean[:,i,j]>0)
continuous_sections = []
for _, g in groupby(enumerate(z[0]), lambda (i,x): i-x):
continuous_sections.append(map(itemgetter(1), g))
try:
integrating_section = max(continuous_sections, key=len)
int_intensity_array[i,j] = np.sum([masked_clean[k,i,j] for k in integrating_section])
error_array[i,j] = (np.sqrt(len(integrating_section)))**-1. * self.sigma
except ValueError:
int_intensity_array[i,j] = np.NaN
error_array[i,j] = np.NaN
self.property_dict["int_int"] = int_intensity_array, error_array
return self
def linewidth(self):
masked_clean = self.clean_cube * self.nan_mask
weight_clean = self.weight_cube * self.nan_mask
linewidth_array = np.empty(self.array_shape)
error_array = np.empty(self.array_shape)
for i in range(self.array_shape[0]):
for j in range(self.array_shape[1]):
linewidth_array[i,j] = np.sqrt(np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j])**2. * masked_clean[:,i,j]) / \
self.property_dict["moment0"][0][i,j])
first_err_term = (2 * np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j]) * masked_clean[:,i,j]) * self.property_dict["centroid"][1][i,j]**2. +\
self.sigma**2. * np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j])**2.)) / \
np.sum((weight_clean[:,i,j] - self.property_dict["centroid"][0][i,j])**2. * masked_clean[:,i,j])**2.
second_err_term = self.sigma**2. * np.sum(self.nan_mask[:,i,j])**2. / self.property_dict["moment0"][0][i,j]**2.
error_array[i,j] = np.sqrt(first_err_term + second_err_term)
self.property_dict["linewidth"] = linewidth_array, error_array
def pixel_to_physical_units(self):
if np.abs(self.header["CDELT3"])> 1: ## Lazy check to make sure we have units of km/s
vel_pix_division = np.abs(self.header["CDELT3"])/1000.
reference_velocity = self.header["CRVAL3"]/1000.
else:
vel_pix_division = np.abs(self.header["CDELT3"])
reference_velocity = self.header["CRVAL3"]
## Centroid error needs to be recalculated when changing to physical units
physical_weights = (np.sum(self.weight_cube, axis=0) * vel_pix_division) + \
reference_velocity - (vel_pix_division * self.header["CRPIX3"])
first_err_term = self.sigma**2. * np.sqrt(np.sum(physical_weights * (self.clean_cube>0) * self.nan_mask, axis=0)) / self.property_dict["moment0"][0]**2.
second_err_term = self.property_dict["moment0"][1]**2. / self.property_dict["moment0"][0]**2.
cent_error_array = np.sqrt(first_err_term + second_err_term)
# cent_error_array *= self.noise_mask
self.property_dict["centroid"] = (self.property_dict["centroid"][0] * vel_pix_division) + \
reference_velocity - (vel_pix_division * self.header["CRPIX3"]), \
cent_error_array
self.property_dict["int_int"] = (self.property_dict["int_int"][0] * vel_pix_division, \
self.property_dict["int_int"][1] * vel_pix_division)
self.property_dict["linewidth"] = (self.property_dict["linewidth"][0] * vel_pix_division, \
self.property_dict["linewidth"][1] * vel_pix_division)
return self
def save_fits(self, save_path=None):
new_hdr = copy.deepcopy(self.header)
del new_hdr["NAXIS3"],new_hdr["CRVAL3"],new_hdr["CRPIX3"],new_hdr['CDELT3'], new_hdr['CTYPE3']
new_hdr.update("NAXIS",2)
new_err_hdr = copy.deepcopy(new_hdr)
if self.save_name is None:
self.save_name = self.header["OBJECT"]
moment0_specs = {'comment': "= Image of the Zeroth Moment", 'BUNIT': 'K', 'name': 'moment0'}
centroid_specs = {'comment': "= Image of the First Moment", 'BUNIT': 'km/s', 'name': 'centroid'}
linewidth_specs = {'comment': "= Image of the Second Moment", 'BUNIT': 'km/s', 'name': 'linewidth'}
int_int_specs = {'comment': "= Image of the Integrated Intensity", 'BUNIT': 'K km/s', 'name': 'integrated_intensity'}
moment0_error_specs = {'comment': "= Image of the Zeroth Moment Error", 'BUNIT': 'K', 'name': 'moment0'}
centroid_error_specs = {'comment': "= Image of the First Moment Error", 'BUNIT': 'km/s', 'name': 'centroid'}
linewidth_error_specs = {'comment': "= Image of the Second Moment Error", 'BUNIT': 'km/s', 'name': 'linewidth'}
int_int_error_specs = {'comment': "= Image of the Integrated Intensity Error", 'BUNIT': 'K km/s', 'name': 'integrated_intensity'}
for prop_array in self.property_dict.keys():
if prop_array=='moment0':
specs = moment0_specs
specs_error = moment0_error_specs
elif prop_array=='centroid':
specs = centroid_specs
specs_error = centroid_error_specs
elif prop_array=='int_int':
specs = int_int_specs
specs_error = int_int_error_specs
elif prop_array=='linewidth':
specs = linewidth_specs
specs_error = linewidth_error_specs
if save_path!=None:
filename = "".join([save_path, self.save_name, ".", specs["name"], ".fits"])
filename_err = "".join([save_path, self.save_name, ".", specs["name"], "_error.fits"])
else:
filename = "".join([self.save_name, ".", specs["name"], ".fits"])
filename_err = "".join([self.save_name, ".", specs["name"], "_error.fits"])
## Update header for array and the error array
new_hdr.update("BUNIT",value=specs['BUNIT'],comment='')
new_hdr.add_comment(specs["comment"])
new_err_hdr.update("BUNIT",value=specs['BUNIT'],comment='')
new_err_hdr.add_comment(specs["comment"])
fits.writeto(filename,self.property_dict[prop_array][0],new_hdr)
fits.writeto(filename_err,self.property_dict[prop_array][1],new_hdr)
## Reset the comments
del new_hdr["COMMENT"]
del new_err_hdr["COMMENT"]
return self
def return_all(self, save=True, physical_units=True, continuous_boundary=True, save_path=None):
self.moment0()
self.centroid()
self.linewidth()
self.integrated_intensity()
if physical_units:
self.pixel_to_physical_units()
if continuous_boundary:
for prop_array in self.property_dict.keys():
pass
if save:
self.save_fits(save_path = None)
return self
def given_noise_cube(data_cube, noise_cube, clip_level):
    """Clip a PPV data cube using a matching cube of per-voxel noise estimates.

    Parameters
    ----------
    data_cube : numpy.ndarray
        The data cube.
    noise_cube : numpy.ndarray
        Per-voxel noise estimates, same shape as ``data_cube``. Not modified.
    clip_level : number
        Significance threshold: voxels with data/noise >= clip_level are kept.

    Returns
    -------
    (clipped_cube, noise_array, sigma) : clipped data, per-spectrum maximum of
    the rejected (sub-threshold) values, and its mean as a global noise level.
    """
    if data_cube.shape != noise_cube.shape:
        raise ValueError("Error array has different dimensions.")
    # Work on a float copy so the caller's noise cube is not modified in place
    # (the original version overwrote zeros with NaN in the caller's array).
    noise_cube = noise_cube.astype(float, copy=True)
    noise_cube[noise_cube == 0] = np.nan
    # Boolean mask of voxels detected above clip_level * noise. NaN noise
    # voxels compare False and are therefore rejected.
    clipped_cube = (data_cube / noise_cube) >= clip_level
    # Fixes two defects of the original: it referenced an undefined name
    # ``clip_cube`` (NameError) and asserted ``clip_level is int``, which is
    # always false for actual integer values.
    inv_cube = ~clipped_cube
    noise_array = np.max(inv_cube * data_cube, axis=0)
    sigma = np.mean(noise_array)
    return clipped_cube * data_cube, noise_array, sigma
def __sigma__(data_cube, clip_level):
    """Estimate the noise level of a cube by fitting a Gaussian to the
    histogram of its finite values and returning the fitted width.

    ``clip_level`` is unused but kept for interface compatibility with the
    existing call sites in this module.
    """
    flat_cube = np.ravel(data_cube[~np.isnan(data_cube)])
    # ~100 samples per histogram bin.
    hist, bins = np.histogram(flat_cube, bins=int(len(flat_cube) / 100.))
    centres = (bins[:-1] + bins[1:]) / 2
    def gaussian(x, *p):
        # Peak Height is p[0], Sigma is p[1], Mu is p[2]
        return p[0] * np.exp(-1 * np.power(x - p[2], 2) / (2 * np.power(p[1], 2)))
    # Initial guess: histogram peak height, unit width, mode position.
    p0 = (np.max(hist), 1.0, centres[np.argmax(hist)])
    opts, cov = curve_fit(gaussian, centres, hist, p0, maxfev=(100 * len(hist)) + 1)
    if opts[1] == p0[1]:
        # Fit returned the initial guess unchanged: treat as a failure.
        # (print() form replaces the original Python-2 print statement.)
        print("Fitting Failed. Sigma is %s" % (opts[1]))
    # The Gaussian model is symmetric in the sign of sigma, so the fit may
    # converge to a negative width; callers use sigma as a positive threshold.
    return np.abs(opts[1])
def moment_masking(data_cube, clip_level, kernel_size):
    # Build a signal mask by smoothing the cube, thresholding the smoothed
    # version at clip_level * sigma_smooth, and dilating the result; the
    # noise level of the *raw* cube is returned alongside.
    sigma_orig = __sigma__(data_cube, clip_level)
    if np.isnan(data_cube).any():
        # astropy's convolve interpolates across NaNs; scipy's filter would
        # propagate them through the cube.
        # NOTE(review): Python 2 print statement below (file is py2-era).
        print "Using astropy to convolve over nans"
        kernel = gauss_kern(kernel_size, ysize=kernel_size, zsize=kernel_size)
        smooth_cube = convolve(data_cube, kernel, normalize_kernel=True)
    else:
        smooth_cube = nd.gaussian_filter(data_cube, kernel_size, mode="mirror")
    sigma_smooth = __sigma__(smooth_cube, clip_level)
    # Threshold on the smoothed cube, then grow the mask by one voxel in all
    # directions (26-connected structuring element).
    mask_cube = smooth_cube > (clip_level * sigma_smooth)
    dilate_struct = nd.generate_binary_structure(3,3)
    mask_cube = nd.binary_dilation(mask_cube, structure=dilate_struct)
    # NOTE(review): noise_cube is computed but unused (its only consumer is
    # the commented-out line below).
    noise_cube = np.invert(mask_cube, dtype=bool) * data_cube
    # noise_array = np.max(noise_cube, axis=0)
    return (mask_cube * data_cube), mask_cube, sigma_orig
def pad_wrapper(array, boundary_size=5):
    # NOTE(review): unfinished stub — computes two intermediate arrays and
    # returns None; `continuous_array` and `reduced_array` are never used.
    xshape, yshape = array.shape
    continuous_array = np.zeros((xshape - 6*boundary_size, yshape - 6*boundary_size))
    reduced_array = array[boundary_size : xshape - boundary_size, boundary_size : yshape - boundary_size]
    pass
def gauss_kern(size, ysize=None, zsize=None):
    """Return a normalized 3D Gaussian kernel array for convolutions.

    The kernel spans ``2*size + 1`` cells along the first axis (likewise for
    ``ysize``/``zsize``, which default to ``size`` when omitted or falsy) and
    sums to exactly 1.
    """
    size = int(size)
    # Preserve the original truthiness semantics: 0 or None falls back to size.
    ysize = int(ysize) if ysize else size
    zsize = int(zsize) if zsize else size
    x, y, z = np.mgrid[-size:size + 1, -ysize:ysize + 1, -zsize:zsize + 1]
    kernel = np.exp(-(x ** 2 / float(size) + y ** 2 / float(ysize) + z ** 2 / float(zsize)))
    return kernel / kernel.sum()
if __name__=='__main__':
    # Entry-point guard is intentionally a no-op; the commented lines below
    # sketch example usage of property_arrays on a FITS cube.
    pass
# import sys
# fib(int(sys.argv[1]))
# from astropy.io.fits import getdata
# cube, header = getdata("filename",header=True)
# shape = cube.shape
# cube[:,shape[0],:] = cube[:,0,:]
# cube[:,:,shape[1]] = cube[:,:,0]
# data = property_arrays((cube,header), rms_noise=0.001, save_name="filename")
# data.return_all() | keflavich/TurbuStat | turbustat/data_reduction/data_reduc.py | Python | mit | 14,244 | [
"Gaussian"
] | 0b9cc0404555de80e5eb186abba824e06740fd2dd4e23544885026a79d30a685 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-05 05:20
from __future__ import unicode_literals
import core.models.custom_fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pmm', '0002_auto_20180522_0516'),
]
operations = [
migrations.AlterModelOptions(
name='civilstatus',
options={'permissions': (('read_civilstatus', 'Can read Civil Status'),), 'verbose_name': 'Civil Status', 'verbose_name_plural': 'Civil Status'},
),
migrations.AlterModelOptions(
name='discipleship',
options={'permissions': (('read_discipleship', 'Can read Discipleship'),), 'verbose_name': 'Discipleship', 'verbose_name_plural': 'Discipleships'},
),
migrations.AlterModelOptions(
name='family',
options={'permissions': (('read_family', 'Can read Family'),), 'verbose_name': 'Family', 'verbose_name_plural': 'Relatives'},
),
migrations.AlterModelOptions(
name='familyrelationship',
options={'permissions': (('read_familyrelationship', 'Can read Family Relationship'),), 'verbose_name': 'Family Relationship', 'verbose_name_plural': 'Family Relationships'},
),
migrations.AlterModelOptions(
name='integrationlevel',
options={'permissions': (('read_integrationlevel', 'Can read Integration Level'),), 'verbose_name': 'Integration Level', 'verbose_name_plural': 'Integration Levels'},
),
migrations.AlterModelOptions(
name='lesson',
options={'ordering': ['lesson_number'], 'permissions': (('read_lesson', 'Can read Discipleship Lesson'),), 'verbose_name': 'Discipleship Lesson', 'verbose_name_plural': 'Discipleship Lessons'},
),
migrations.AlterModelOptions(
name='member',
options={'permissions': (('read_member', 'Can read Member'),), 'verbose_name': 'Member', 'verbose_name_plural': 'Members'},
),
migrations.AlterModelOptions(
name='occupation',
options={'permissions': (('read_occupation', 'Can read Occupation'),), 'verbose_name': 'Occupation', 'verbose_name_plural': 'Occupations'},
),
migrations.AlterModelOptions(
name='person',
options={'permissions': (('read_person', 'Can read Person'),), 'verbose_name': 'Person', 'verbose_name_plural': 'People'},
),
migrations.AlterModelOptions(
name='persondiscipleship',
options={'permissions': (('read_persondiscipleship', 'Can read Person in Discipleship'),), 'verbose_name': 'Person in Discipleship', 'verbose_name_plural': 'People in Discipleship'},
),
migrations.AlterField(
model_name='family',
name='family',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relatives', to='pmm.Person', verbose_name='Relative'),
),
migrations.AlterField(
model_name='family',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relative', to='pmm.Person', verbose_name='Person'),
),
migrations.AlterField(
model_name='family',
name='relationship',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pmm.FamilyRelationship', verbose_name='Relationship'),
),
migrations.AlterField(
model_name='lesson',
name='discipleship',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lessons', to='pmm.Discipleship', verbose_name='Discipleship'),
),
migrations.AlterField(
model_name='lesson',
name='lesson_number',
field=models.SmallIntegerField(verbose_name='Lesson Number'),
),
migrations.AlterField(
model_name='member',
name='person_id',
field=models.OneToOneField(db_column='person_id', on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pmm.Person', verbose_name='Person reference'),
),
migrations.AlterField(
model_name='person',
name='baptized',
field=models.BooleanField(default=False, verbose_name='Baptized'),
),
migrations.AlterField(
model_name='person',
name='birthday',
field=models.DateField(blank=True, null=True, verbose_name='Birthday'),
),
migrations.AlterField(
model_name='person',
name='civil_status',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pmm.CivilStatus', verbose_name='Civil Status'),
),
migrations.AlterField(
model_name='person',
name='discipleships',
field=models.ManyToManyField(through='pmm.PersonDiscipleship', to='pmm.Discipleship', verbose_name='Discipleships'),
),
migrations.AlterField(
model_name='person',
name='family',
field=models.ManyToManyField(through='pmm.Family', to='pmm.Person', verbose_name='Relatives'),
),
migrations.AlterField(
model_name='person',
name='first_name',
field=models.CharField(max_length=50, verbose_name='First Name'),
),
migrations.AlterField(
model_name='person',
name='gender',
field=core.models.custom_fields.FixedCharField(choices=[('M', 'Man'), ('W', 'Woman')], default='M', max_length=1, verbose_name='Gender'),
),
migrations.AlterField(
model_name='person',
name='integration_level',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pmm.IntegrationLevel', verbose_name='Integration Level'),
),
migrations.AlterField(
model_name='person',
name='invited_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invites', to='pmm.Person', verbose_name='Invited By'),
),
migrations.AlterField(
model_name='person',
name='last_name',
field=models.CharField(max_length=50, verbose_name='Last Name'),
),
migrations.AlterField(
model_name='person',
name='last_visit',
field=models.DateField(blank=True, null=True, verbose_name='Last Visit'),
),
migrations.AlterField(
model_name='person',
name='new_birthday',
field=models.DateField(blank=True, null=True, verbose_name='New Birthday'),
),
migrations.AlterField(
model_name='person',
name='occupation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pmm.Occupation', verbose_name='Occupation'),
),
migrations.AlterField(
model_name='persondiscipleship',
name='disciple',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_disciples', to='pmm.Person', verbose_name='Disciple'),
),
migrations.AlterField(
model_name='persondiscipleship',
name='discipleship',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_discipleships', to='pmm.Discipleship', verbose_name='Discipleship'),
),
migrations.AlterField(
model_name='persondiscipleship',
name='end_date',
field=models.DateField(null=True, verbose_name='Discipleship End Date'),
),
migrations.AlterField(
model_name='persondiscipleship',
name='last_lesson',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pmm.Lesson', verbose_name='Last Lesson'),
),
migrations.AlterField(
model_name='persondiscipleship',
name='start_date',
field=models.DateField(null=True, verbose_name='Discipleship Start Date'),
),
migrations.AlterField(
model_name='persondiscipleship',
name='teacher',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_teachers', to='pmm.Person', verbose_name='Teacher'),
),
]
| sauli6692/ibc-server | pmm/migrations/0003_auto_20180605_0520.py | Python | mit | 8,709 | [
"VisIt"
] | 8cdb40455292d0fe28ffa3a04382c1d8638d798de9431f5a64166bc7d4caad84 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__all__ = ['GetSubnetV6']
def GetSubnetV6() -> str:
    """Return the IPv6 subnet string for this platform.

    Generic fallback: real subnet detection is not implemented here, so a
    fixed placeholder prefix is returned.
    """
    return u'ff0X::1'
"ORCA"
] | fce486c20d9416cb0aa9d953464fcc7b7ef33fca3844e0536173db6dcfac3da7 |
import numpy
#from numpy import exp,pi
#from scipy.special import gamma
#import copy
#import matplotlib,pylab as plt
#from scipy import interpolate
#=========================================================================
"""
NAME
scalingrelations
PURPOSE
Scaling relations between various galaxy properties.
COMMENTS
FUNCTIONS
General:
logerr(l,m,s):
Concentration-mass:
MCrelation(M200,scatter=False,h=0.75):
M*-Mh relation:
binMS(cat=None):
Mstar_to_M200(M_Star,redshift,Behroozi=True):
BUGS
AUTHORS
This file is part of the Pangloss project, distributed under the
GPL v2, by Tom Collett (IoA) and Phil Marshall (Oxford).
Please cite: Collett et al 2013, http://arxiv.org/abs/1303.6564
HISTORY
2013-03-25 Collett & Marshall (Oxford)
"""
#=========================================================================
# Lognormal random deviate:
def logerr(l, m, s):
    """Return a lognormal random deviate: 10**x with x ~ N(m, s).

    ``l`` is accepted for interface compatibility with existing callers but
    is not used by the computation.
    """
    return 10 ** numpy.random.normal(m, s)
#--------------------------------------------------------------
# Concentration - halo mass:
def MCrelation(M200, scatter=False, h=0.75):
    """Concentration-mass relation for dark matter halos (Maccio et al. form).

    Parameters
    ----------
    M200 : numpy array
        Halo masses. When ``scatter`` is True this array is rescaled by
        1/h *in place* (the caller's array is mutated) before use.
    scatter : bool
        If True, add Gaussian scatter to the normalisation and slope.
    h : float
        Hubble parameter used to rescale the masses when scatter is on.

    Returns
    -------
    numpy array of concentrations c200.
    """
    if scatter == True:
        # NOTE: in-place division — the caller's M200 array is modified.
        M200 /= h
        # Draw order (normalisation noise first, then slope noise) matches
        # the established random stream for this function.
        norm_noise = numpy.random.normal(0, 0.015, len(M200))
        slope_noise = numpy.random.normal(0, 0.005, len(M200))
        log_c = 1.020 + norm_noise - (0.109 + slope_noise) * (numpy.log10(M200) - 12)
    else:
        log_c = 1.020 - (0.109) * (numpy.log10(M200) - 12)
    return 10 ** log_c
# Neto stuff
#
# if MCerror==False:
# c_200 = 4.67*(M200/(10**14))**-0.11 #Neto et al. equation 5
# if MCerror==True:
# c_200=4.67*(M200/(10**14))**-0.11
# logc_200=numpy.log10(c_200)
# lM200 = numpy.log10(M200)
# c_new = c_200*10**(numpy.random.normal(1,0.2,len(M200)))/10**(1+(0.2**2)/2)
# #print numpy.max(c_new),numpy.min(c_new)
#
# return c_new
#
#
# for i in range(len(M200)): #best fit scatter parameters of neto et al (double log normal)
# if lM200[i]<11.875:
# f=0.205
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.683,0.147) * (lM200[i]/11.875)**-0.11
# else:
# c_200[i]=logerr(logc_200[i],0.920,0.106) * (lM200[i]/11.875)**-0.11
# if lM200[i]<12.125:
# f=0.205
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.683,0.147)
# else:
# c_200[i]=logerr(logc_200[i],0.920,0.106)
# elif lM200[i]<12.375:
# f=0.171
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.658,0.150)
# else:
# c_200[i]=logerr(logc_200[i],0.903,0.108)
# elif lM200[i]<12.625:
# f=0.199
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.646,0.139)
# else:
# c_200[i]=logerr(logc_200[i],0.881,0.099)
# elif lM200[i]<12.875:
# f=0.229
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.605,0.158)
# else:
# c_200[i]=logerr(logc_200[i],0.838,0.101)
# elif lM200[i]<13.125:
# f=0.263
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.603,0.136)
# else:
# c_200[i]=logerr(logc_200[i],0.810,0.100)
# elif lM200[i]<13.375:
# f=0.253
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.586,0.140)
# else:
# c_200[i]=logerr(logc_200[i],0.793,0.099)
# elif lM200[i]<13.625:
# f=0.275
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.566,0.142)
# else:
# c_200[i]=logerr(logc_200[i],0.763,0.095)
# elif lM200[i]<13.875:
# f=0.318
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.543,0.140)
# else:
# c_200[i]=logerr(logc_200[i],0.744,0.094)
# elif lM200[i]<14.125:
# f=0.361
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.531,0.131)
# else:
# c_200[i]=logerr(logc_200[i],0.716,0.088)
# elif lM200[i]<14.375:
# f=0.383
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.510,0.121)
# else:
# c_200[i]=logerr(logc_200[i],0.689,0.095)
# elif lM200[i]<14.625:
# f=0.370
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.490,0.133)
# else:
# c_200[i]= logerr(logc_200[i],0.670,0.094)
# elif lM200[i]<14.875:
# f=0.484
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.519,0.121)
# else:
# c_200[i]=logerr(logc_200[i],0.635,0.091)
# elif lM200[i]<15.125:
# f=0.578
# if numpy.random.rand()<f:
# c_200[i]=logerr(logc_200[i],0.493,0.094)
# else:
# c_200[i]=logerr(logc_200[i],0.661,0.061)
# return c_200
#----------------------------------------------------------
def binMS(cat=None):
    """Bin the Millennium Simulation catalogue in halo mass and plot the
    star-formation efficiency, log10(M*/Mh) vs log10(Mh).

    Parameters
    ----------
    cat : list of atpy.Table, optional
        Catalogue tables; if None, a default MS lightcone file is read.

    Returns
    -------
    None (a plot is written to ``starformationefficiency.png``).

    Notes
    -----
    NOTE(review): ``atpy``, ``copy`` and ``plt`` are not imported at module
    level (their imports are commented out at the top of this file), so this
    legacy helper raises NameError unless those imports are restored.
    """
    # binning up the MS catalogue and using that relation:
    if cat is None:  # was 'cat == None'; identity test is the correct idiom
        d1 = "../../data/GGL_los_8_0_0_1_1_N_4096_ang_4_STARS_SA_galaxies_ANALYTIC_SA_galaxies_on_plane_27_to_63.images.txt"
        cat = [atpy.Table(d1, type='ascii')]
    # Concatenate the requested columns from every table into one catalogue.
    for i in range(len(cat)):
        cat_j = copy.copy(cat[i])
        cat_j.keep_columns(['M_Subhalo[M_sol/h]', 'M_Stellar[M_sol/h]', 'z_spec'])
        if i == 0:
            c = copy.copy(cat_j)
        else:
            c.append(cat_j)
    Mhalo = c['M_Subhalo[M_sol/h]']
    Mstars = c['M_Stellar[M_sol/h]']
    LH = numpy.log10(Mhalo)
    LS = numpy.log10(Mstars)
    z = c.z_spec
    massbin, delta = numpy.linspace(7, 12, 20, retstep=True)
    zbin = [0, 99]  # single wide redshift bin
    MB = numpy.digitize(LH, massbin)
    zB = numpy.digitize(z, zbin)
    plt.scatter(LH, LS - LH, s=0.2, c='k', edgecolor='')
    plt.xlabel('log(M_Halo/M$_\odot$)')
    plt.ylabel('log(M_Stellar/M_Halo)')
    plt.xlim([10, 14])
    plt.ylim([-3, 0.5])
    plt.savefig("starformationefficiency.png")
    plt.show()
    mean = numpy.zeros((len(massbin), len(zbin)))
    for i in range(len(massbin)):
        for j in range(len(zbin)):
            # FIX: the original expression 'LH>lo & LH<hi' bound the bitwise
            # '&' tighter than the comparisons (Python precedence), which is
            # a bug; each comparison must be parenthesised.
            M = c.where((LH > massbin[i] - delta / 2.) &
                        (LH < massbin[i] + delta / 2.))
            mean[i, j] = numpy.mean(numpy.log10(M['M_Subhalo[M_sol/h]']))
    return None
#--------------------------------------------------------------
def Mstar_to_M200(M_Star, redshift, Behroozi=True):
    """Convert stellar masses to halo masses M200.

    Uses the Behroozi et al. 2010 abundance-matching relation, with one
    parameter set for z < 0.9 and another for higher redshift.  No scatter
    is applied.

    Parameters
    ----------
    M_Star : array-like of stellar masses
    redshift : array-like of redshifts, one per stellar mass
    Behroozi : bool
        Only the Behroozi relation is implemented; NOTE(review) if False the
        result variable is never assigned — confirm callers always use True.

    Returns
    -------
    numpy array of M200 halo masses.
    """
    if Behroozi == True:
        # Following Behroozi et al. 2010.
        M_200 = numpy.zeros(len(M_Star))
        for i in range(len(M_Star)):
            z = redshift[i]
            # Parameter sets: (Mstar00, Mstar0a, Mstar0aa, M_10, M_1a,
            #                  beta0, betaa, delta0, deltaa, gamma0, gammaa)
            if z < 0.9:
                params = (10.72, 0.55, 0.0, 12.35, 0.28,
                          0.44, 0.18, 0.57, 0.17, 1.56, 2.51)
            else:
                params = (11.09, 0.56, 6.99, 12.27, -0.84,
                          0.65, 0.31, 0.56, -0.12, 1.12, -0.53)
            (Mstar00, Mstar0a, Mstar0aa, M_10, M_1a,
             beta0, betaa, delta0, deltaa, gamma0, gammaa) = params

            # Scale-factor dependent parameters:
            a = 1. / (1. + z)
            M_1 = 10 ** (M_10 + M_1a * (a - 1))
            beta = beta0 + betaa * (a - 1)
            Mstar0 = 10 ** (Mstar00 + Mstar0a * (a - 1) + Mstar0aa * (a - 0.5) ** 2)
            delta = delta0 + deltaa * (a - 1)
            gamma = gamma0 + gammaa * (a - 1)

            # The relation itself  ****NO SCATTER****
            ratio = M_Star[i] / Mstar0
            M_200[i] = 10.0 ** (numpy.log10(M_1)
                                + beta * numpy.log10(ratio)
                                + (ratio ** delta) / (1. + ratio ** -gamma)
                                - 0.5)
    return M_200
#=========================================================================
| drphilmarshall/Pangloss | pangloss/scalingrelations.py | Python | gpl-2.0 | 8,506 | [
"Galaxy"
] | a5b1e01113c695260c7fb3797dde05f8ecd0fe6117c3220e77f779b2f7c9a0c6 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import uuid
import logging
import mooseutils
LOG = logging.getLogger(__name__)
class Page(object):
    """
    Base node describing a piece of input content; defines the methods the
    translator calls.

    Properties are used so that, apart from the free-form ``attributes``
    dict, instances are effectively read-only after construction.
    """
    def __init__(self, fullname, **kwargs):
        # These are populated by Translator.init()/addPage() or the content
        # gathering helpers; whatever remains in kwargs becomes free-form
        # page attributes.
        self.base = kwargs.pop('base', None)
        self.source = kwargs.pop('source')
        self.external = kwargs.pop('external', False)
        self.translator = kwargs.pop('translator', None)
        self.attributes = kwargs
        self._fullname = fullname               # local path of the node
        self._name = fullname.split('/')[-1]    # folder/file name
        self.__unique_id = uuid.uuid4()         # a unique identifier
    @property
    def uid(self):
        """A UUID uniquely identifying this page."""
        return self.__unique_id
    @property
    def name(self):
        """The directory or file name (last component of the local path)."""
        return self._name
    @property
    def local(self):
        """The local directory/filename."""
        return self._fullname
    @property
    def destination(self):
        """The translator output location: ``base`` joined with ``local``."""
        return os.path.join(self.base, self.local)
    @property
    def depth(self):
        """Folder depth of the local path (number of separators inside it)."""
        return self.local.strip(os.sep).count(os.sep)
    def get(self, *args):
        """Look up a page attribute, mirroring ``dict.get`` semantics."""
        return self.attributes.get(*args)
    def __setitem__(self, key, value):
        """Store an attribute via bracket syntax."""
        self.attributes[key] = value
    def __getitem__(self, key):
        """Fetch an attribute via bracket syntax (KeyError if missing)."""
        return self.attributes[key]
    def update(self, *args, **kwargs):
        """Merge values into the attribute dict (``dict.update`` semantics)."""
        self.attributes.update(*args, **kwargs)
    def relativeSource(self, other):
        """Local path of this page relative to *other*'s folder."""
        return os.path.relpath(self.local, os.path.dirname(other.local))
    def relativeDestination(self, other):
        """
        Destination of this page relative to *other*'s destination folder.

        Inputs:
            other[Page]: The page this page is relative to.
        """
        return os.path.relpath(self.destination, os.path.dirname(other.destination))
    def __str__(self):
        """Colored one-line summary for console output."""
        return '{}: {}, {}'.format(mooseutils.colorText(self.__class__.__name__, self.COLOR),
                                   self.local, self.source)
class Text(Page):
    """A Page carrying raw text content; intended for unit testing."""
    COLOR = 'GREEN'

    def __init__(self, **kwargs):
        # Stash the raw text, then register under the reserved '_text_' name.
        self.content = kwargs.pop('content', '')
        super(Text, self).__init__('_text_', source='_text_', **kwargs)
class Directory(Page):
    """
    Directory nodes.

    Warning: Try not to do anything special here and avoid external
    modification to these objects as this could create problems if there are
    multiple translators.
    """
    COLOR = 'CYAN'  # console color used by Page.__str__
class File(Page):
    """
    File nodes.

    General files that need to be copied to the output directory.
    """
    COLOR = 'MAGENTA'  # console color used by Page.__str__
class Source(File):
    """
    Node for content that is being converted (e.g., Markdown files).
    """
    COLOR = 'YELLOW'  # console color used by Page.__str__

    def __init__(self, *args, **kwargs):
        # Extension of the translated output (e.g. '.html'); supplied by the
        # translator via keyword argument.
        self.output_extension = kwargs.pop('output_extension', None)
        super(Source, self).__init__(*args, **kwargs)

    @property
    def destination(self):
        """The content destination (override) with the extension swapped
        for ``output_extension``.

        FIX: the previous implementation used ``str.replace(ext, ...)`` on
        the whole destination path, which would also rewrite an identical
        substring occurring earlier in the path (e.g. a directory named
        'foo.md'); splitting the extension off the end is unambiguous.
        """
        root, _ = os.path.splitext(super(Source, self).destination)
        return root + self.output_extension
| harterj/moose | python/MooseDocs/tree/pages.py | Python | lgpl-2.1 | 4,188 | [
"MOOSE"
] | bbf9067ec46f2efd72b25f6df1bbcff06d963a53ebe0151afdc99540dbeaa847 |
import cPickle
import gzip
import os, sys, errno
import time
import math
import subprocess
import socket # only for socket.getfqdn()
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
#import gnumpy as gnp
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
from frontend.label_modifier import HTSLabelModification
from frontend.merge_features import MergeFeat
#from frontend.mlpg_fast import MLParameterGenerationFast
#from frontend.mlpg_fast_layer import MLParameterGenerationFastLayer
import configuration
from models.deep_rnn import DeepRecurrentNetwork
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
from io_funcs.binary_io import BinaryIOCollection
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import StringIO
def extract_file_id_list(file_list):
    """Return the bare file id (no directory, no extension) of each path."""
    return [os.path.basename(os.path.splitext(name)[0]) for name in file_list]
def read_file_list(file_name):
    """Read a file-id list, one entry per line; blank lines are skipped.

    Returns the stripped, non-empty lines as a list of strings.
    """
    logger = logging.getLogger("read_file_list")
    file_lists = []
    fid = open(file_name)
    for raw_line in fid.readlines():
        entry = raw_line.strip()
        if entry:
            file_lists.append(entry)
    fid.close()
    logger.debug('Read file list from %s' % file_name)
    return file_lists
def make_output_file_list(out_dir, in_file_lists):
    """Map each input path to a path with the same file name under out_dir."""
    return [out_dir + '/' + os.path.basename(in_name) for in_name in in_file_lists]
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
    """Build '<file_dir>/<id><extension>' for every id.

    When ``new_dir_switch`` is True and ``file_dir`` does not exist it is
    created (including intermediate directories).
    """
    if new_dir_switch and not os.path.exists(file_dir):
        os.makedirs(file_dir)
    return [file_dir + '/' + file_id + file_extension for file_id in file_id_list]
def visualize_dnn(dnn):
    """Plot the weight matrices of a trained network, one figure per layer.

    Only parameters that are true matrices (more than one column after
    transposition) are plotted; bias vectors are skipped.
    Python 2 only (uses a print statement and xrange).
    """
    plotlogger = logging.getLogger("plotting")

    # reference activation weights in layers
    W = list(); layer_name = list()
    for i in xrange(len(dnn.params)):
        aa = dnn.params[i].get_value(borrow=True).T
        print aa.shape, aa.size
        # keep only 2-D weight matrices (size > number of rows rules out vectors)
        if aa.size > aa.shape[0]:
            W.append(aa)
            layer_name.append(dnn.params[i].name)

    ## plot activation weights including input and output
    layer_num = len(W)
    for i_layer in xrange(layer_num):
        fig_name = 'Activation weights W' + str(i_layer) + '_' + layer_name[i_layer]
        fig_title = 'Activation weights of W' + str(i_layer)
        xlabel = 'Neuron index of hidden layer ' + str(i_layer)
        ylabel = 'Neuron index of hidden layer ' + str(i_layer+1)
        # first/last layers border the input/output features, so relabel axes
        if i_layer == 0:
            xlabel = 'Input feature index'
        if i_layer == layer_num-1:
            ylabel = 'Output feature index'
        # NOTE(review): 'logger' is not defined in this function (only
        # 'plotlogger' is) — this line looks like a latent NameError; confirm.
        logger.create_plot(fig_name, SingleWeightMatrixPlot)
        plotlogger.add_plot_point(fig_name, fig_name, W[i_layer])
        plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def load_covariance(var_file_dict, out_dimension_dict):
    """Load per-stream variance vectors from binary files.

    Parameters
    ----------
    var_file_dict : dict mapping feature name -> binary variance file path
    out_dimension_dict : dict mapping feature name -> output dimensionality

    Returns
    -------
    dict mapping feature name -> column vector (dim x 1 numpy array).
    """
    io_funcs = BinaryIOCollection()
    var = {}
    for feature_name in var_file_dict.keys():
        values, frame_number = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1)
        var[feature_name] = numpy.reshape(values, (out_dimension_dict[feature_name], 1))
    return var
def train_DNN(train_xy_file_list, valid_xy_file_list, \
              nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False, var_dict=None,
              cmp_mean_vector = None, cmp_std_vector = None, init_dnn_model_file = None):
    """Fine-tune a deep (recurrent) network on acoustic data.

    Streams training/validation data partition by partition, runs minibatch
    SGD with momentum and a learning-rate schedule (constant during the
    warm-up epochs, then halved every epoch), tracks validation loss, and
    pickles the best model to ``nnets_file_name``.  Python 2 only (print
    statement, xrange, integer division).

    Parameters
    ----------
    train_xy_file_list / valid_xy_file_list : (x_file_list, y_file_list) pairs
    nnets_file_name : path where the best model is pickled
    n_ins, n_outs : input / output feature dimensionality
    ms_outs : multi-stream output dims; their sum must equal n_outs
    hyper_params : dict of training hyper-parameters (see keys read below)
    buffer_size : data-provider buffer size (rounded down to a batch multiple)
    plot : if True, log convergence points to the plotting logger
    var_dict, cmp_mean_vector, cmp_std_vector, init_dnn_model_file :
        accepted for interface compatibility; not used in this body.

    Returns
    -------
    float: best validation loss seen during training.
    """
    # get loggers for this function
    # this one writes to both console and file
    logger = logging.getLogger("main.train_DNN")
    logger.debug('Starting train_DNN')

    if plot:
        # this one takes care of plotting duties
        plotlogger = logging.getLogger("plotting")
        # create an (empty) plot of training convergence, ready to receive data points
        # NOTE(review): called on 'logger', not 'plotlogger' — confirm the
        # main logger also carries plotting methods.
        logger.create_plot('training convergence',MultipleSeriesPlot)
    try:
        # sanity check: multi-stream dims must add up to the network output dim
        assert numpy.sum(ms_outs) == n_outs
    except AssertionError:
        logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
        raise

    ####parameters#####
    finetune_lr = float(hyper_params['learning_rate'])
    training_epochs = int(hyper_params['training_epochs'])
    batch_size = int(hyper_params['batch_size'])
    l1_reg = float(hyper_params['l1_reg'])
    l2_reg = float(hyper_params['l2_reg'])
    warmup_epoch = int(hyper_params['warmup_epoch'])
    momentum = float(hyper_params['momentum'])
    warmup_momentum = float(hyper_params['warmup_momentum'])
    hidden_layer_size = hyper_params['hidden_layer_size']
    buffer_utt_size = buffer_size
    early_stop_epoch = int(hyper_params['early_stop_epochs'])
    hidden_activation = hyper_params['hidden_activation']
    output_activation = hyper_params['output_activation']
    model_type = hyper_params['model_type']
    hidden_layer_type = hyper_params['hidden_layer_type']

    ## use a switch to turn on pretraining
    ## pretraining may not help too much, if this case, we turn it off to save time
    do_pretraining = hyper_params['do_pretraining']
    pretraining_epochs = int(hyper_params['pretraining_epochs'])
    pretraining_lr = float(hyper_params['pretraining_lr'])
    sequential_training = hyper_params['sequential_training']
    dropout_rate = hyper_params['dropout_rate']
    # sequential_training = True

    # round the buffer down to a whole number of minibatches
    buffer_size = int(buffer_size / batch_size) * batch_size

    ###################
    (train_x_file_list, train_y_file_list) = train_xy_file_list
    (valid_x_file_list, valid_y_file_list) = valid_xy_file_list

    logger.debug('Creating training data provider')
    train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list,
                            n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = True)
    logger.debug('Creating validation data provider')
    valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list,
                            n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = False)

    # prime both readers once to obtain the shared theano variables, then rewind
    shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
    train_set_x, train_set_y = shared_train_set_xy
    shared_valid_set_xy, valid_set_x, valid_set_y = valid_data_reader.load_one_partition() #validation data is still read block by block
    valid_set_x, valid_set_y = shared_valid_set_xy
    train_data_reader.reset()
    valid_data_reader.reset()

    ##temporally we use the training set as pretrain_set_x.
    ##we need to support any data for pretraining
    # pretrain_set_x = train_set_x

    # numpy random generator
    numpy_rng = numpy.random.RandomState(123)
    logger.info('building the model')

    dnn_model = None
    pretrain_fn = None ## not all the model support pretraining right now
    train_fn = None
    valid_fn = None
    valid_model = None ## valid_fn and valid_model are the same. reserve to computer multi-stream distortion
    if model_type == 'DNN':
        dnn_model = DeepRecurrentNetwork(n_in= n_ins, hidden_layer_size = hidden_layer_size, n_out = n_outs,
                                         L1_reg = l1_reg, L2_reg = l2_reg, hidden_layer_type = hidden_layer_type, dropout_rate = dropout_rate)
        train_fn, valid_fn = dnn_model.build_finetune_functions(
                    (train_set_x, train_set_y), (valid_set_x, valid_set_y)) #, batch_size=batch_size
    else:
        logger.critical('%s type NN model is not supported!' %(model_type))
        # NOTE(review): bare 'raise' outside an except block is itself an
        # error at runtime; confirm a concrete exception was intended here.
        raise

    logger.info('fine-tuning the %s model' %(model_type))
    start_time = time.time()

    best_dnn_model = dnn_model
    best_validation_loss = sys.float_info.max
    previous_loss = sys.float_info.max
    early_stop = 0
    epoch = 0
    # finetune_lr = 0.000125
    previous_finetune_lr = finetune_lr
    print finetune_lr
    while (epoch < training_epochs):
        epoch = epoch + 1

        # learning-rate schedule: constant during warm-up, then halve each epoch
        current_momentum = momentum
        current_finetune_lr = finetune_lr
        if epoch <= warmup_epoch:
            current_finetune_lr = finetune_lr
            current_momentum = warmup_momentum
        else:
            current_finetune_lr = previous_finetune_lr * 0.5
        previous_finetune_lr = current_finetune_lr

        train_error = []
        sub_start_time = time.time()

        while (not train_data_reader.is_finish()):
            shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
            # train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
            # train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)

            # if sequential training, the batch size will be the number of frames in an utterance
            if sequential_training == True:
                batch_size = temp_train_set_x.shape[0]

            # NOTE(review): Python 2 integer division — a trailing partial
            # batch in each partition is silently dropped.
            n_train_batches = temp_train_set_x.shape[0] / batch_size
            for index in xrange(n_train_batches):
                ## send a batch to the shared variable, rather than pass the batch size and batch index to the finetune function
                train_set_x.set_value(numpy.asarray(temp_train_set_x[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
                train_set_y.set_value(numpy.asarray(temp_train_set_y[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
                this_train_error = train_fn(current_finetune_lr, current_momentum)
                train_error.append(this_train_error)
        train_data_reader.reset()

        logger.debug('calculating validation loss')
        validation_losses = []
        while (not valid_data_reader.is_finish()):
            shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition()
            valid_set_x.set_value(numpy.asarray(temp_valid_set_x, dtype=theano.config.floatX), borrow=True)
            valid_set_y.set_value(numpy.asarray(temp_valid_set_y, dtype=theano.config.floatX), borrow=True)
            this_valid_loss = valid_fn()
            validation_losses.append(this_valid_loss)
        valid_data_reader.reset()

        this_validation_loss = numpy.mean(validation_losses)
        this_train_valid_loss = numpy.mean(numpy.asarray(train_error))
        sub_end_time = time.time()

        loss_difference = this_validation_loss - previous_loss

        logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
        if plot:
            plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
            plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
            plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')

        if this_validation_loss < best_validation_loss:
            # NOTE(review): this pickles the *previous* best model before
            # promoting the current one, so the saved file lags one
            # improvement behind (and the open() handle is never closed);
            # confirm this is intentional.
            if epoch > 5:
                cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
            best_dnn_model = dnn_model
            best_validation_loss = this_validation_loss
            # logger.debug('validation loss decreased, so saving model')

        if this_validation_loss >= previous_loss:
            logger.debug('validation loss increased')
            # dbn = best_dnn_model
            early_stop += 1

        if epoch > 15 and early_stop > early_stop_epoch:
            logger.debug('stopping early')
            break

        if math.isnan(this_validation_loss):
            break

        previous_loss = this_validation_loss

    end_time = time.time()

    # cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))

    logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
    if plot:
        plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')

    return best_validation_loss
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
    """Run forward synthesis: load the pickled model and predict acoustic
    parameters for every input feature file, writing one binary .cmp-style
    output per input.  Python 2 only (xrange, integer division).

    Parameters
    ----------
    valid_file_list : list of binary float32 input-feature file paths
    nnets_file_name : pickled model file
    n_ins, n_outs : input/output feature dimensionality
    out_file_list : matching list of output file paths
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')

    plotlogger = logging.getLogger("plotting")

    dnn_model = cPickle.load(open(nnets_file_name, 'rb'))

    file_number = len(valid_file_list)

    for i in xrange(file_number): #file_number
        logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
        fid_lab = open(valid_file_list[i], 'rb')
        features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        fid_lab.close()
        # truncate to a whole number of frames (py2 integer division)
        features = features[:(n_ins * (features.size / n_ins))]
        test_set_x = features.reshape((-1, n_ins))

        predicted_parameter = dnn_model.parameter_prediction(test_set_x)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        temp_parameter = predicted_parameter
        fid = open(out_file_list[i], 'wb')
        predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
        fid.close()
def dnn_generation_lstm(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
    """LSTM variant of dnn_generation: loads the pickled model, visualizes
    its weights, and writes per-file predictions using the model's
    ``parameter_prediction_lstm`` path.  Python 2 only (xrange, integer
    division).
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')

    plotlogger = logging.getLogger("plotting")

    dnn_model = cPickle.load(open(nnets_file_name, 'rb'))

    # plot the trained weight matrices before generating
    visualize_dnn(dnn_model)

    file_number = len(valid_file_list)

    for i in xrange(file_number): #file_number
        logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
        fid_lab = open(valid_file_list[i], 'rb')
        features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        fid_lab.close()
        # truncate to a whole number of frames (py2 integer division)
        features = features[:(n_ins * (features.size / n_ins))]
        test_set_x = features.reshape((-1, n_ins))

        predicted_parameter = dnn_model.parameter_prediction_lstm(test_set_x)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        temp_parameter = predicted_parameter
        fid = open(out_file_list[i], 'wb')
        predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
        fid.close()
##generate bottleneck layer as festures
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
    """Generate bottleneck features: for every input file, write the
    activations of the model's top hidden layer instead of the final output.
    Python 2 only (xrange, integer division).
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')

    plotlogger = logging.getLogger("plotting")

    dnn_model = cPickle.load(open(nnets_file_name, 'rb'))

    file_number = len(valid_file_list)

    for i in xrange(file_number):
        logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
        fid_lab = open(valid_file_list[i], 'rb')
        features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        fid_lab.close()
        # truncate to a whole number of frames (py2 integer division)
        features = features[:(n_ins * (features.size / n_ins))]
        features = features.reshape((-1, n_ins))
        temp_set_x = features.tolist()
        # wrap as a theano shared variable before feeding the network
        test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))

        predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        temp_parameter = predicted_parameter
        fid = open(out_file_list[i], 'wb')
        predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
        fid.close()
def main_function(cfg):
    """Top-level Merlin pipeline driver.

    Executes the pipeline stages selected by boolean switches on ``cfg``
    (NORMLAB, MAKEDUR, MAKECMP, NORMCMP, TRAINDNN, GENBNFEA, DNNGEN, GENWAV,
    CALMCD): input label normalisation, output acoustic feature composition
    and normalisation, DNN training, parameter generation, waveform
    synthesis and objective evaluation.  All directories, dimensions and
    file lists are derived from ``cfg``.

    Fix relative to the previous revision: the GENBNFEA branch referenced an
    undefined name ``hidden_layers_sizes`` (NameError) and used call syntax
    ``hidden_layers_sizes(i)`` where list indexing was intended; it now uses
    the local ``hidden_layer_size`` list.
    """
    # get a logger for this main function
    logger = logging.getLogger("main")

    # get another logger to handle plotting duties
    plotlogger = logging.getLogger("plotting")

    # later, we might do this via a handler that is created, attached and configured
    # using the standard config mechanism of the logging module
    # but for now we need to do it manually
    plotlogger.set_plot_path(cfg.plot_dir)

    #### parameter setting########
    hidden_layer_size = cfg.hyper_params['hidden_layer_size']

    ####prepare environment
    try:
        file_id_list = read_file_list(cfg.file_id_scp)
        logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
    except IOError:
        # this means that open(...) threw an error
        logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
        raise

    ###total file number including training, development, and testing
    total_file_number = len(file_id_list)

    data_dir = cfg.data_dir

    nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
    nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))

    model_dir = os.path.join(cfg.work_dir, 'nnets_model')
    gen_dir = os.path.join(cfg.work_dir, 'gen')

    in_file_list_dict = {}

    for feature_name in cfg.in_dir_dict.keys():
        in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)

    nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
    nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)

    ###normalisation information
    norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')

    ### normalise input full context label
    # currently supporting two different forms of lingustic features
    # later, we should generalise this
    if cfg.label_style == 'HTS':
        label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
        add_feat_dim = sum(cfg.additional_features.values())
        lab_dim = label_normaliser.dimension + add_feat_dim + cfg.appended_input_dim
        logger.info('Input label dimension is %d' % lab_dim)
        suffix=str(lab_dim)
    # no longer supported - use new "composed" style labels instead
    elif cfg.label_style == 'composed':
        # label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
        suffix='composed'

    if cfg.process_labels_in_work_dir:
        label_data_dir = cfg.work_dir
    else:
        label_data_dir = data_dir

    # the number can be removed
    # NOTE(review): label_normaliser only exists on the HTS path; this line
    # would raise NameError for 'composed' style -- confirm intended usage.
    binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+str(label_normaliser.dimension))
    nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
    nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)

    in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
    binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
    nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
    nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
    dur_file_list = prepare_file_path_list(file_id_list, cfg.in_dur_dir, cfg.dur_ext)
    lf0_file_list = prepare_file_path_list(file_id_list, cfg.in_lf0_dir, cfg.lf0_ext)

    # to do - sanity check the label dimension here?

    min_max_normaliser = None
    label_norm_file = 'label_norm_%s_%d.dat' %(cfg.label_style, lab_dim)
    label_norm_file = os.path.join(label_data_dir, label_norm_file)

    if cfg.GenTestList:
        try:
            test_id_list = read_file_list(cfg.test_id_scp)
            logger.debug('Loaded file id list from %s' % cfg.test_id_scp)
        except IOError:
            # this means that open(...) threw an error
            logger.critical('Could not load file id list from %s' % cfg.test_id_scp)
            raise

        # synthesis mode: point all label lists at the unseen test sentences
        in_label_align_file_list = prepare_file_path_list(test_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
        binary_label_file_list = prepare_file_path_list(test_id_list, binary_label_dir, cfg.lab_ext)
        nn_label_file_list = prepare_file_path_list(test_id_list, nn_label_dir, cfg.lab_ext)
        nn_label_norm_file_list = prepare_file_path_list(test_id_list, nn_label_norm_dir, cfg.lab_ext)

    if cfg.NORMLAB and (cfg.label_style == 'HTS'):
        # simple HTS labels
        logger.info('preparing label data (input) using standard HTS style labels')
        label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list, label_type=cfg.label_type)

        if cfg.additional_features:
            out_feat_dir = os.path.join(data_dir, 'binary_label_'+suffix)
            out_feat_file_list = prepare_file_path_list(file_id_list, out_feat_dir, cfg.lab_ext)
            in_dim = label_normaliser.dimension
            for new_feature, new_feature_dim in cfg.additional_features.iteritems():
                new_feat_dir = os.path.join(data_dir, new_feature)
                new_feat_file_list = prepare_file_path_list(file_id_list, new_feat_dir, '.'+new_feature)
                merger = MergeFeat(lab_dim = in_dim, feat_dim = new_feature_dim)
                merger.merge_data(binary_label_file_list, new_feat_file_list, out_feat_file_list)
                in_dim += new_feature_dim
                binary_label_file_list = out_feat_file_list

        remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
        remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)

        min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)

        ###use only training data to find min-max information, then apply on the whole dataset
        if cfg.GenTestList:
            min_max_normaliser.load_min_max_values(label_norm_file)
        else:
            min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])

        ### enforce silence such that the normalization runs without removing silence: only for final synthesis
        if cfg.GenTestList and cfg.enforce_silence:
            min_max_normaliser.normalise_data(binary_label_file_list, nn_label_norm_file_list)
        else:
            min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)

    if cfg.NORMLAB and (cfg.label_style == 'composed'):
        # new flexible label preprocessor
        logger.info('preparing label data (input) using "composed" style labels')
        label_composer = LabelComposer()
        label_composer.load_label_configuration(cfg.label_config_file)
        logger.info('Loaded label configuration')
        # logger.info('%s' % label_composer.configuration.labels )

        lab_dim=label_composer.compute_label_dimension()
        logger.info('label dimension will be %d' % lab_dim)

        if cfg.precompile_xpaths:
            label_composer.precompile_xpaths()

        # there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees)
        # create all the lists of these, ready to pass to the label composer
        in_label_align_file_list = {}
        for label_style, label_style_required in label_composer.label_styles.iteritems():
            if label_style_required:
                logger.info('labels of style %s are required - constructing file paths for them' % label_style)
                if label_style == 'xpath':
                    in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
                elif label_style == 'hts':
                    in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
                else:
                    logger.critical('unsupported label style %s specified in label configuration' % label_style)
                    raise Exception

        # now iterate through the files, one at a time, constructing the labels for them
        num_files=len(file_id_list)
        logger.info('the label styles required are %s' % label_composer.label_styles)

        for i in xrange(num_files):
            logger.info('making input label features for %4d of %4d' % (i+1,num_files))

            # iterate through the required label styles and open each corresponding label file
            # a dictionary of file descriptors, pointing at the required files
            required_labels={}

            for label_style, label_style_required in label_composer.label_styles.iteritems():
                # the files will be a parallel set of files for a single utterance
                # e.g., the XML tree and an HTS label file
                if label_style_required:
                    required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
                    logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])

            logger.debug('label styles with open files: %s' % required_labels)
            label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)

            # now close all opened files
            for fd in required_labels.itervalues():
                fd.close()

        # silence removal
        if cfg.remove_silence_using_binary_labels:
            silence_feature = 0 ## use first feature in label -- hardcoded for now
            logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
            logger.info('Silence will be removed from CMP files in same way')
            ## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
            trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
                         binary_label_file_list, lab_dim, silence_feature)
        else:
            logger.info('No silence removal done')
            # start from the labels we have just produced, not trimmed versions
            nn_label_file_list = binary_label_file_list

        min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)

        ###use only training data to find min-max information, then apply on the whole dataset
        min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
        min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)

    if min_max_normaliser != None and not cfg.GenTestList:
        ### save label normalisation information for unseen testing labels
        label_min_vector = min_max_normaliser.min_vector
        label_max_vector = min_max_normaliser.max_vector
        label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)

        label_norm_info = numpy.array(label_norm_info, 'float32')
        fid = open(label_norm_file, 'wb')
        label_norm_info.tofile(fid)
        fid.close()
        logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))

    ### make output duration data
    if cfg.MAKEDUR:
        logger.info('creating duration (output) features')
        label_type = cfg.label_type
        feature_type = cfg.dur_feature_type
        label_normaliser.prepare_dur_data(in_label_align_file_list, dur_file_list, label_type, feature_type)

    ### make output acoustic data
    if cfg.MAKECMP:
        logger.info('creating acoustic (output) features')
        delta_win = cfg.delta_win #[-0.5, 0.0, 0.5]
        acc_win = cfg.acc_win #[1.0, -2.0, 1.0]

        acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
        if 'dur' in cfg.in_dir_dict.keys() and cfg.AcousticModel:
            acoustic_worker.make_equal_frames(dur_file_list, lf0_file_list, cfg.in_dimension_dict)
        acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)

        if cfg.remove_silence_using_binary_labels:
            ## do this to get lab_dim:
            label_composer = LabelComposer()
            label_composer.load_label_configuration(cfg.label_config_file)
            lab_dim=label_composer.compute_label_dimension()

            silence_feature = 0 ## use first feature in label -- hardcoded for now
            logger.info('Silence removal from CMP using binary label file')

            ## overwrite the untrimmed audio with the trimmed version:
            trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim,
                         binary_label_file_list, lab_dim, silence_feature)

        else: ## back off to previous method using HTS labels:
            remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
            remover.remove_silence(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
                                   in_label_align_file_list[0:cfg.train_file_number+cfg.valid_file_number],
                                   nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number]) # save to itself

    ### save acoustic normalisation information for normalising the features back
    var_dir = os.path.join(data_dir, 'var')
    if not os.path.exists(var_dir):
        os.makedirs(var_dir)

    var_file_dict = {}
    for feature_name in cfg.out_dimension_dict.keys():
        var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))

    ### normalise output acoustic data
    if cfg.NORMCMP:
        logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
        cmp_norm_info = None
        if cfg.output_feature_normalisation == 'MVN':
            normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
            ###calculate mean and std vectors on the training data, and apply on the whole dataset
            global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
            global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)

            normaliser.feature_normalisation(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
                                             nn_cmp_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number])
            cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)

        elif cfg.output_feature_normalisation == 'MINMAX':
            min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
            global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
            global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)

            min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
            min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
            min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)

            cmp_min_vector = min_max_normaliser.min_vector
            cmp_max_vector = min_max_normaliser.max_vector
            cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)

        else:
            logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
            # NOTE(review): bare 'raise' with no active exception aborts with a
            # RuntimeError/TypeError rather than a descriptive error -- confirm
            # whether a specific exception type was intended here.
            raise

        cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
        fid = open(norm_info_file, 'wb')
        cmp_norm_info.tofile(fid)
        fid.close()
        logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))

        # save the per-stream variance vectors used later for MLPG
        feature_index = 0
        for feature_name in cfg.out_dimension_dict.keys():
            feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')

            fid = open(var_file_dict[feature_name], 'w')
            feature_var_vector = feature_std_vector**2
            feature_var_vector.tofile(fid)
            fid.close()

            logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))

            feature_index += cfg.out_dimension_dict[feature_name]

    train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
    train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
    valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
    valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
    test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
    test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

    # we need to know the label dimension before training the DNN
    # computing that requires us to look at the labels
    #
    # currently, there are two ways to do this
    if cfg.label_style == 'HTS':
        label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
        add_feat_dim = sum(cfg.additional_features.values())
        lab_dim = label_normaliser.dimension + add_feat_dim + cfg.appended_input_dim

    elif cfg.label_style == 'composed':
        label_composer = LabelComposer()
        label_composer.load_label_configuration(cfg.label_config_file)
        lab_dim=label_composer.compute_label_dimension()

    logger.info('label dimension is %d' % lab_dim)

    combined_model_arch = str(len(hidden_layer_size))
    for hid_size in hidden_layer_size:
        combined_model_arch += '_' + str(hid_size)

    nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.%f.rnn.model' \
                      %(model_dir, cfg.combined_model_name, cfg.combined_feature_name, int(cfg.multistream_switch),
                        combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number, cfg.hyper_params['learning_rate'])

    ### DNN model training
    if cfg.TRAINDNN:

        var_dict = load_covariance(var_file_dict, cfg.out_dimension_dict)

        logger.info('training DNN')

        fid = open(norm_info_file, 'rb')
        cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
        fid.close()
        cmp_min_max = cmp_min_max.reshape((2, -1))
        # for MVN normalisation row 0 holds means and row 1 holds stds
        cmp_mean_vector = cmp_min_max[0, ]
        cmp_std_vector = cmp_min_max[1, ]

        try:
            os.makedirs(model_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # not an error - just means directory already exists
                pass
            else:
                logger.critical('Failed to create model directory %s' % model_dir)
                logger.critical(' OS error was: %s' % e.strerror)
                raise

        try:
            train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
                      valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
                      nnets_file_name = nnets_file_name, \
                      n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
                      hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot, var_dict = var_dict,
                      cmp_mean_vector = cmp_mean_vector, cmp_std_vector = cmp_std_vector)
        except KeyboardInterrupt:
            logger.critical('train_DNN interrupted via keyboard')
            # Could 'raise' the exception further, but that causes a deep traceback to be printed
            # which we don't care about for a keyboard interrupt. So, just bail out immediately
            sys.exit(1)
        except:
            logger.critical('train_DNN threw an exception')
            raise

    if cfg.GENBNFEA:
        '''
        Please only tune on this step when you want to generate bottleneck features from DNN
        '''
        # Bug fix: this branch previously referenced the undefined name
        # 'hidden_layers_sizes' (NameError) and used call syntax
        # 'hidden_layers_sizes(i)' where indexing was intended; it now uses
        # the local 'hidden_layer_size' list throughout.
        temp_dir_name = '%s_%s_%d_%d_%d_%d_%s_hidden' \
                        %(cfg.model_type, cfg.combined_feature_name, \
                          cfg.train_file_number, lab_dim, cfg.cmp_dim, \
                          len(hidden_layer_size), combined_model_arch)

        gen_dir = os.path.join(gen_dir, temp_dir_name)

        # the bottleneck is taken to be the narrowest hidden layer
        bottleneck_size = min(hidden_layer_size)
        bottleneck_index = 0
        for i in xrange(len(hidden_layer_size)):
            if hidden_layer_size[i] == bottleneck_size:
                bottleneck_index = i

        logger.info('generating bottleneck features from DNN')

        try:
            os.makedirs(gen_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # not an error - just means directory already exists
                pass
            else:
                logger.critical('Failed to create generation directory %s' % gen_dir)
                logger.critical(' OS error was: %s' % e.strerror)
                raise

        gen_file_id_list = file_id_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
        test_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

        gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)

        dnn_hidden_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, bottleneck_index)

    ### generate parameters from DNN
    temp_dir_name = '%s_%s_%d_%d_%d_%d_%d_%d_%d' \
                    %(cfg.combined_model_name, cfg.combined_feature_name, int(cfg.do_post_filtering), \
                      cfg.train_file_number, lab_dim, cfg.cmp_dim, \
                      len(hidden_layer_size), hidden_layer_size[0], hidden_layer_size[-1])
    gen_dir = os.path.join(gen_dir, temp_dir_name)

    gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
    test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

    if cfg.GenTestList:
        gen_file_id_list = test_id_list
        test_x_file_list = nn_label_norm_file_list
        ### comment the below line if you don't want the files in a separate folder
        gen_dir = cfg.test_synth_dir

    if cfg.DNNGEN:
        logger.info('generating from DNN')

        try:
            os.makedirs(gen_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # not an error - just means directory already exists
                pass
            else:
                logger.critical('Failed to create generation directory %s' % gen_dir)
                logger.critical(' OS error was: %s' % e.strerror)
                raise

        gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)

        dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)

        logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)

        fid = open(norm_info_file, 'rb')
        cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
        fid.close()
        cmp_min_max = cmp_min_max.reshape((2, -1))
        # NOTE(review): under MVN these two rows are actually mean/std despite
        # the min/max naming; the MVN branch below relies on that.
        cmp_min_vector = cmp_min_max[0, ]
        cmp_max_vector = cmp_min_max[1, ]

        if cfg.output_feature_normalisation == 'MVN':
            denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
            denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)

        elif cfg.output_feature_normalisation == 'MINMAX':
            denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
            denormaliser.denormalise_data(gen_file_list, gen_file_list)
        else:
            logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
            raise

        if cfg.AcousticModel:
            ##perform MLPG to smooth parameter trajectory
            ## lf0 is included, the output features much have vuv.
            generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features, enforce_silence = cfg.enforce_silence)
            generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG, cfg=cfg)

        if cfg.DurationModel:
            ### Perform duration normalization(min. state dur set to 1) ###
            gen_dur_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.dur_ext)
            gen_label_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.lab_ext)
            in_gen_label_align_file_list = prepare_file_path_list(gen_file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)

            generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
            generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict)

            label_modifier = HTSLabelModification(silence_pattern = cfg.silence_pattern, label_type = cfg.label_type)
            label_modifier.modify_duration_labels(in_gen_label_align_file_list, gen_dur_list, gen_label_list)

    ### generate wav
    if cfg.GENWAV:
        logger.info('reconstructing waveform(s)')
        generate_wav(gen_dir, gen_file_id_list, cfg)     # generated speech
    #    generate_wav(nn_cmp_dir, gen_file_id_list, cfg)  # reference copy synthesis speech

    ### setting back to original conditions before calculating objective scores ###
    if cfg.GenTestList:
        in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
        binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
        gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

    ### evaluation: RMSE and CORR for duration
    if cfg.CALMCD and cfg.DurationModel:
        logger.info('calculating MCD')

        ref_data_dir = os.path.join(data_dir, 'ref_data')

        ref_dur_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.dur_ext)

        in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
        calculator = IndividualDistortionComp()

        valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
        test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

        if cfg.remove_silence_using_binary_labels:
            # NOTE(review): 'untrimmed_test_labels' and 'silence_feature' are
            # only defined by earlier stages on this path -- confirm this
            # branch is never reached without them.
            untrimmed_reference_data = in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
            trim_silence(untrimmed_reference_data, ref_dur_list, cfg.dur_dim, \
                         untrimmed_test_labels, lab_dim, silence_feature)
        else:
            remover = SilenceRemover(n_cmp = cfg.dur_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features)
            remover.remove_silence(in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_dur_list)

        valid_dur_rmse, valid_dur_corr = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
        test_dur_rmse, test_dur_corr = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)

        logger.info('Develop: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
                    %(valid_dur_rmse, valid_dur_corr))
        logger.info('Test: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
                    %(test_dur_rmse, test_dur_corr))

    ### evaluation: calculate distortion
    if cfg.CALMCD and cfg.AcousticModel:
        logger.info('calculating MCD')

        ref_data_dir = os.path.join(data_dir, 'ref_data')

        ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
        ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
        ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)

        in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
        calculator = IndividualDistortionComp()

        spectral_distortion = 0.0
        bap_mse = 0.0
        f0_mse = 0.0
        vuv_error = 0.0

        valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
        test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

        if cfg.remove_silence_using_binary_labels:
            ## get lab_dim:
            label_composer = LabelComposer()
            label_composer.load_label_configuration(cfg.label_config_file)
            lab_dim=label_composer.compute_label_dimension()

            ## use first feature in label -- hardcoded for now
            silence_feature = 0

            ## Use these to trim silence:
            untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

        if cfg.in_dimension_dict.has_key('mgc'):
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
            valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
            test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
            valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)    ##MCD
            test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)    ##MCD

        if cfg.in_dimension_dict.has_key('bap'):
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
            valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
            test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
            valid_bap_mse = valid_bap_mse / 10.0    ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
            test_bap_mse = test_bap_mse / 10.0    ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC

        if cfg.in_dimension_dict.has_key('lf0'):
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
            valid_f0_mse, valid_f0_corr, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
            test_f0_mse , test_f0_corr, test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)

        logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
                    %(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_f0_corr, valid_vuv_error*100.))
        logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
                    %(test_spectral_distortion , test_bap_mse , test_f0_mse , test_f0_corr, test_vuv_error*100.))
if __name__ == '__main__':

    # these things should be done even before trying to parse the command line

    # create a configuration instance
    # and get a short name for this instance
    cfg=configuration.cfg

    # set up logging to use our custom class
    logging.setLoggerClass(LoggerPlotter)

    # get a logger for this main function
    logger = logging.getLogger("main")

    # exactly one argument is required: the path of the config file
    if len(sys.argv) != 2:
        logger.critical('usage: run_merlin.sh [config file name]')
        sys.exit(1)

    config_file = sys.argv[1]

    config_file = os.path.abspath(config_file)
    cfg.configure(config_file)

    # log the installation environment for reproducibility/debugging
    logger.info('Installation information:')
    logger.info(' Merlin directory: '+os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)))
    logger.info(' PATH:')
    env_PATHs = os.getenv('PATH')
    if env_PATHs:
        env_PATHs = env_PATHs.split(':')
        for p in env_PATHs:
            if len(p)>0: logger.info(' '+p)
    logger.info(' LD_LIBRARY_PATH:')
    env_LD_LIBRARY_PATHs = os.getenv('LD_LIBRARY_PATH')
    if env_LD_LIBRARY_PATHs:
        env_LD_LIBRARY_PATHs = env_LD_LIBRARY_PATHs.split(':')
        for p in env_LD_LIBRARY_PATHs:
            if len(p)>0: logger.info(' '+p)
    logger.info(' Python version: '+sys.version.replace('\n',''))
    logger.info(' PYTHONPATH:')
    env_PYTHONPATHs = os.getenv('PYTHONPATH')
    if env_PYTHONPATHs:
        env_PYTHONPATHs = env_PYTHONPATHs.split(':')
        for p in env_PYTHONPATHs:
            if len(p)>0:
                logger.info(' '+p)
    logger.info(' Numpy version: '+numpy.version.version)
    logger.info(' Theano version: '+theano.version.version)
    # NOTE(review): os.getenv returns None when THEANO_FLAGS is unset, which
    # would make this string concatenation raise TypeError -- confirm the
    # variable is always set in supported environments.
    logger.info(' THEANO_FLAGS: '+os.getenv('THEANO_FLAGS'))
    logger.info(' device: '+theano.config.device)

    # Check for the presence of git
    ret = os.system('git status > /dev/null')
    if ret==0:
        # record the exact source revision and any local modifications
        logger.info(' Git is available in the working directory:')
        git_describe = subprocess.Popen(['git', 'describe', '--tags', '--always'], stdout=subprocess.PIPE).communicate()[0][:-1]
        logger.info(' Merlin version: '+git_describe)
        git_branch = subprocess.Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=subprocess.PIPE).communicate()[0][:-1]
        logger.info(' branch: '+git_branch)
        git_diff = subprocess.Popen(['git', 'diff', '--name-status'], stdout=subprocess.PIPE).communicate()[0]
        git_diff = git_diff.replace('\t',' ').split('\n')
        logger.info(' diff to Merlin version:')
        for filediff in git_diff:
            if len(filediff)>0: logger.info(' '+filediff)
        # the full diff is written next to the log file for later inspection
        logger.info(' (all diffs logged in '+os.path.basename(cfg.log_file)+'.gitdiff'+')')
        os.system('git diff > '+cfg.log_file+'.gitdiff')

    logger.info('Execution information:')
    logger.info(' HOSTNAME: '+socket.getfqdn())
    logger.info(' USER: '+os.getenv('USER'))
    logger.info(' PID: '+str(os.getpid()))
    PBS_JOBID = os.getenv('PBS_JOBID')
    if PBS_JOBID:
        logger.info(' PBS_JOBID: '+PBS_JOBID)

    if cfg.profile:
        # run the pipeline under cProfile and log the top-10 hot functions
        logger.info('profiling is activated')
        import cProfile, pstats
        cProfile.run('main_function(cfg)', 'mainstats')

        # create a stream for the profiler to write to
        profiling_output = StringIO.StringIO()
        p = pstats.Stats('mainstats', stream=profiling_output)

        # print stats to that stream
        # here we just report the top 10 functions, sorted by total amount of time spent in each
        p.strip_dirs().sort_stats('tottime').print_stats(10)

        # print the result to the log
        logger.info('---Profiling result follows---\n%s' %  profiling_output.getvalue() )
        profiling_output.close()
        logger.info('---End of profiling result---')
    else:
        main_function(cfg)

    # if gnp._boardId is not None:
    #     import gpu_lock
    #     gpu_lock.free_lock(gnp._boardId)

    sys.exit(0)
| ligz07/merlin | src/run_merlin.py | Python | apache-2.0 | 56,913 | [
"NEURON"
] | 170f63e2fe0424862d7578ba005315f5d51c81a11c7cb1ec0129ac688a22d0b0 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Andrew Starr-Bochicchio <a.starr.b@gmail.com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from locale import gettext as _
from gi.repository import Gtk
from typecatcher_lib.helpers import get_media_file
from random import choice
def html_font_view(font=None, text=None):
    """Build the HTML document rendered in the font-preview pane.

    font: unused; kept for interface compatibility with callers.
    text: preview-text selector forwarded to select_text_preview().
    Returns the full HTML page as a string, with icon URIs, translated
    labels and the preview snippet substituted into the template.
    """
    def themed_icon_uri(icon_name, fallback_svg):
        # Prefer an icon from the current GTK theme; when the lookup yields
        # no IconInfo (AttributeError on None), fall back to a bundled SVG.
        try:
            theme = Gtk.IconTheme.get_default()
            info = theme.lookup_icon(icon_name, 64, 0)
            return "file://" + info.get_filename()
        except AttributeError:
            return get_media_file(fallback_svg)

    start_page_icon = get_media_file("typecatcher.svg")
    con_icon_uri = themed_icon_uri("nm-no-connection", "no-connection.svg")
    installed_icon_uri = themed_icon_uri("gtk-apply", "installed.svg")
    loader = get_media_file("ajax-loader.gif")
    text_preview = select_text_preview(text)
    template = """
    <html>
    <head>
    <script src="https://ajax.googleapis.com/ajax/libs/webfont/1.6.26/webfont.js"></script>
    <style>
    body { font-size: 36px; }
    #installed { float: right; font-size: 12px; width:50px; text-align:center; display: None; }
    textarea { font: inherit; font-size: inherit; border: None; overflow: hidden; outline: none; width: 90%%; height: 100%%; }
    #text_preview { display: None; }
    #no_connect { text-align: center; display: None; font-size: 18px; }
    #start_page { text-align: center; bottom: 0px;}
    .wf-loading { height: 100%%; overflow: hidden; background: url(%s) center center no-repeat fixed;}
    .wf-loading * { opacity: 0; }
    .wf-active body, .wf-inactive body {
        -webkit-animation: fade .25s ease;
        animation: fade .25s ease;
        -webkit-animation-fill-mode: both;
        animation-fill-mode: both;
    }
    @-webkit-keyframes fade {
        0%% { display: none; opacity: 0; }
        1%% { display: block; }
        100%%{ opacity: 1; }
    }
    @keyframes fade {
        0%% { display: none; opacity: 0; }
        1%% { display: block; }
        100%%{ opacity: 1; }
    }
    </style>
    </head>
    <body>
    <div id="installed">
    <img src="%s" width=64 height=64>
    <p>%s</p>
    </div>
    <div id='no_connect'>
    <img src="%s" width=64 height=64 > <h1>%s</h1>
    <p>%s</p>
    </div>
    <div id='text_preview'>
    %s
    </div>
    <div id='start_page'>
    <img src="%s" width=128 height=128>
    <p>TypeCatcher</p>
    </div>
    </body>
    </html>
    """
    return template % (loader, installed_icon_uri, _("Installed"),
                       con_icon_uri, _("Font not available."),
                       _("Please check your network connection."), text_preview,
                       start_page_icon)
def select_text_preview(text):
    """Return an HTML snippet holding the preview text for a font.

    text selects the sample: None or "random" picks a random passage,
    a known key ("ipsum", "kafka", "hgg", "ggm", "ralph", "jj") picks
    that passage, "custom" yields an editable textarea, and any other
    value falls through and returns None (unchanged caller contract).
    """
    ipsum = """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."""
    kafka = _("One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections.")
    hgg = _("Far out in the uncharted backwaters of the unfashionable end of the Western Spiral arm of the Galaxy lies a small unregarded yellow sun. Orbiting this at a distance of roughly ninety-eight million miles is an utterly insignificant little blue-green planet...")
    ggm = _("Many years later, as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.")
    ralph = _("I am an invisible man. No, I am not a spook like those who haunted Edgar Allan Poe; nor am I one of your Hollywood-movie ectoplasms. I am a man of substance, of flesh and bone, fiber and liquids — and I might even be said to possess a mind. I am invisible, understand, simply because people refuse to see me.")
    jj = _("Stately, plump Buck Mulligan came from the stairhead, bearing a bowl of lather on which a mirror and a razor lay crossed. A yellow dressinggown, ungirdled, was sustained gently behind him on the mild morning air.")
    text_pool = [ipsum, kafka, ggm, hgg, ralph, jj]
    if text is None or text == "random":
        return "<p> %s </p>" % choice(text_pool)
    named = {"ipsum": ipsum, "kafka": kafka, "hgg": hgg,
             "ggm": ggm, "ralph": ralph, "jj": jj}
    if text in named:
        return "<p> %s </p>" % named[text]
    if text == "custom":
        return "<textarea> %s </textarea>" % (_('Enter text...'))
"Galaxy"
] | 664b05a65a0957e5642e7c6499bd535f99edf656e9d6cab6b519d3fb4fc84d2e |
# BLACS submatrix-redistribution smoke test (Python 2; needs an MPI world of
# at least two ranks).  A 12x8 block-cyclic matrix is filled with each rank's
# number, an M x N window of it is copied into a differently blocked 7x7
# descriptor, and the gathered copies are compared element-wise on rank 0.
from gpaw.mpi import world
from gpaw.blacs import BlacsGrid, Redistributor
if world.size < 2:
    raise ValueError('Runs on two or more processors')
# 2 x (size//2) process grid; 12x8 global matrix distributed in 2x3 blocks.
grid = BlacsGrid(world, 2, world.size // 2)
desc = grid.new_descriptor(12, 8, 2, 3)
a = desc.zeros()
a[:] = world.rank
# Target layout: 7x7 matrix in 2x2 blocks on the same process grid.
subdesc = grid.new_descriptor(7, 7, 2, 2)
b = subdesc.zeros()
# uplo='G': redistribute a general (full) submatrix — presumably not just a
# triangle; confirm against the gpaw.blacs.Redistributor docs.
r = Redistributor(grid.comm, desc, subdesc, uplo='G')
# Copy the M x N window of `a` anchored at (ia, ja) to (ib, jb) in `b`.
ia = 3
ja = 2
ib = 1
jb = 1
M = 4
N = 5
r.redistribute(a, b, M, N, ia, ja, ib, jb)
# Gather both distributed matrices on the master rank and compare windows.
a0 = desc.collect_on_master(a)
b0 = subdesc.collect_on_master(b)
if world.rank == 0:
    print a0
    print b0
    xa = a0[ia:ia + M, ja:ja + N]
    xb = b0[ib:ib + M, jb:jb + N]
    assert (xa == xb).all()
| qsnake/gpaw | gpaw/test/parallel/submatrix_redist.py | Python | gpl-3.0 | 688 | [
"GPAW"
] | 7efa3e71529e13845e1c5f4609d5ebb9446e9dd622d80195e5638ed4ea328fd0 |
# Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
import sys
import getopt as opt
from util import *
from math import sqrt, ceil, floor
import os
from gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from options import *
try:
import pylab as pl
except:
print "This script requires the matplotlib python library (Ubuntu/Fedora package name python-matplotlib). Please install it."
sys.exit(1)
class ShowNetError(Exception):
    """Raised when shownet is asked to display something the net cannot provide."""
class ShowConvNet(ConvNet):
    """Inspection/visualization front-end for a trained ConvNet checkpoint.

    Depending on the command-line options it plots the training/test cost
    curve, learned filters, per-image predictions, feature histograms, or
    dumps features / multi-view predictions to disk.  A GPU is only
    initialized when an option actually requires running the model
    (see get_gpus).  Python 2 code throughout (print statements, xrange).
    """
    def __init__(self, op, load_dic):
        ConvNet.__init__(self, op, load_dic)
        # NOTE(review): hard-coded machine-specific data path overrides
        # whatever was stored in the checkpoint — likely a local hack.
        self.op.set_value('data_path', '/home/komet/software/cuda-convnet/data/cifar-10-py-colmajor')

    def get_gpus(self):
        """Acquire GPUs only if one of the model-running options is set."""
        self.need_gpu = ( self.op.get_value('show_preds') or
                          self.op.get_value('write_features') or
                          self.op.get_value('hist_features') or
                          self.op.get_value('nn_analysis') or
                          self.op.get_value('write_mv_result') or
                          self.op.get_value('write_mv_mc_result')
                          )
        if self.need_gpu:
            ConvNet.get_gpus(self)
    def init_data_providers(self):
        """Use real data providers only when the model will run; otherwise
        install a no-op stand-in so plotting-only modes need no data."""
        class Dummy:
            def advance_batch(self):
                pass
        if self.need_gpu:
            ConvNet.init_data_providers(self)
        else:
            self.train_data_provider = self.test_data_provider = Dummy()

    def import_model(self):
        """Load the compiled GPU model library only when needed."""
        if self.need_gpu:
            ConvNet.import_model(self)

    def init_model_state(self):
        """Resolve option-named layers to layer indices used by the actions."""
        ConvNet.init_model_state(self)
        if self.op.get_value('show_preds'):
            self.sotmax_idx = self.get_layer_idx(self.op.get_value('show_preds'), check_type='softmax')
        if self.op.get_value('write_features'):
            self.ftr_layer_idx = self.get_layer_idx(self.op.get_value('write_features'))
        if self.op.get_value('hist_features'):
            self.ftr_layer_idx = self.get_layer_idx(self.op.get_value('hist_features'))
        if self.op.get_value('nn_analysis'):
            self.ftr_layer_idx = self.get_layer_idx(self.op.get_value('nn_analysis'))
        if self.op.get_value('write_mv_result'):
            self.sotmax_idx = self.get_layer_idx('logprob', check_type='cost.logreg')
        if self.op.get_value('write_mv_mc_result'):
            self.sotmax_idx = self.get_layer_idx('logprob', check_type='cost.logreg')

    def init_model_lib(self):
        if self.need_gpu:
            ConvNet.init_model_lib(self)

    def plot_cost(self):
        """Plot the training/test cost curves stored in the checkpoint.

        Test costs are recorded once every `testing_freq` batches, so they
        are tiled to line up with the per-batch training costs.
        """
        if self.show_cost not in self.train_outputs[0][0]:
            raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
        train_errors = [o[0][self.show_cost][self.cost_idx] for o in self.train_outputs]
        test_errors = [o[0][self.show_cost][self.cost_idx] for o in self.test_outputs]
        numbatches = len(self.train_batch_range)
        test_errors = numpy.row_stack(test_errors)
        test_errors = numpy.tile(test_errors, (1, self.testing_freq))
        test_errors = list(test_errors.flatten())
        test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
        test_errors = test_errors[:len(train_errors)]
        numepochs = len(train_errors) / float(numbatches)
        pl.figure(1)
        x = range(0, len(train_errors))
        pl.plot(x, train_errors, 'k-', label='Training set')
        pl.plot(x, test_errors, 'r-', label='Test set')
        pl.legend()
        ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
        epoch_label_gran = int(ceil(numepochs / 20.)) # aim for about 20 labels
        epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) # but round to nearest 10
        ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
        pl.xticks(ticklocs, ticklabels)
        pl.xlabel('Epoch')
        # pl.ylabel(self.show_cost)
        pl.title(self.show_cost)

    def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans):
        """Tile up to 16x16 filters into one big image and show it.

        filters is (channels, pixels, num_filters); when combine_chans is
        true the three channels are shown as one RGB image per filter,
        otherwise each channel is drawn side by side in grayscale.
        """
        FILTERS_PER_ROW = 16
        MAX_ROWS = 16
        MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
        num_colors = filters.shape[0]
        f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
        filter_end = min(filter_start+MAX_FILTERS, num_filters)
        filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
        filter_size = int(sqrt(filters.shape[1]))
        fig = pl.figure(fignum)
        fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
        num_filters = filter_end - filter_start
        if not combine_chans:
            bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
        else:
            bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
        for m in xrange(filter_start,filter_end ):
            filter = filters[:,:,m]
            y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
            if not combine_chans:
                for c in xrange(num_colors):
                    filter_pic = filter[c,:].reshape((filter_size,filter_size))
                    bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
                           1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
            else:
                filter_pic = filter.reshape((3, filter_size,filter_size))
                bigpic[:,
                       1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
                       1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
        pl.xticks([])
        pl.yticks([])
        if not combine_chans:
            pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
        else:
            bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
            pl.imshow(bigpic, interpolation='nearest')

    def plot_filters(self):
        """Show the learned weights of the layer named by --show-filters."""
        filter_start = 0 # First filter to show
        layer_names = [l['name'] for l in self.layers]
        if self.show_filters not in layer_names:
            raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
        layer = self.layers[layer_names.index(self.show_filters)]
        filters = layer['weights'][self.input_idx]
        if layer['type'] == 'fc' or layer['type'] == 'fcdropo' or layer['type'] == 'fcdropcf': # Fully-connected layer
            num_filters = layer['outputs']
            channels = self.channels
        elif layer['type'] in ('conv', 'local'): # Conv layer
            num_filters = layer['filters']
            channels = layer['filterChannels'][self.input_idx]
            if layer['type'] == 'local':
                filters = filters.reshape((layer['modules'], layer['filterPixels'][self.input_idx] * channels, num_filters))
                filter_start = r.randint(0, layer['modules']-1)*num_filters # pick out some random modules
                filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
                num_filters *= layer['modules']
        filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
        # Convert YUV filters to RGB
        if self.yuv_to_rgb and channels == 3:
            R = filters[0,:,:] + 1.28033 * filters[2,:,:]
            G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
            B = filters[0,:,:] + 2.12798 * filters[1,:,:]
            filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
        combine_chans = not self.no_rgb and channels == 3
        # Make sure you don't modify the backing array itself here -- so no -= or /=
        filters = filters - filters.min()
        filters = filters / filters.max()
        self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans)

    def plot_predictions(self):
        """Show a grid of test images with the softmax's top-class bars.

        With --only-errors the whole test batch is classified and only
        misclassified examples are drawn; otherwise NUM_IMGS random
        examples are picked from the batch.
        """
        data = self.get_next_batch(train=False)[2] # get a test batch
        num_classes = self.test_data_provider.get_num_classes()
        NUM_ROWS = 2
        NUM_COLS = 4
        NUM_IMGS = NUM_ROWS * NUM_COLS
        NUM_TOP_CLASSES = min(num_classes, 4) # show this many top labels
        label_names = self.test_data_provider.batch_meta['label_names']
        if self.only_errors:
            preds = n.zeros((data[0].shape[1], num_classes), dtype=n.single)
        else:
            preds = n.zeros((NUM_IMGS, num_classes), dtype=n.single)
            rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
            data[0] = n.require(data[0][:,rand_idx], requirements='C')
            data[1] = n.require(data[1][:,rand_idx], requirements='C')
        data += [preds]
        # Run the model
        self.libmodel.startFeatureWriter(data, self.sotmax_idx,1,1)
        self.finish_batch()
        fig = pl.figure(3)
        fig.text(.4, .95, '%s test case predictions' % ('Mistaken' if self.only_errors else 'Random'))
        if self.only_errors:
            err_idx = nr.permutation(n.where(preds.argmax(axis=1) != data[1][0,:])[0])[:NUM_IMGS] # what the net got wrong
            data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
        data[0] = self.test_data_provider.get_plottable_data(data[0])
        for r in xrange(NUM_ROWS):
            for c in xrange(NUM_COLS):
                img_idx = r * NUM_COLS + c
                if data[0].shape[0] <= img_idx:
                    break
                pl.subplot(NUM_ROWS*2, NUM_COLS, r * 2 * NUM_COLS + c + 1)
                pl.xticks([])
                pl.yticks([])
                img = data[0][img_idx,:,:,:]
                pl.imshow(img, interpolation='nearest')
                true_label = int(data[1][0,img_idx])
                img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
                pl.subplot(NUM_ROWS*2, NUM_COLS, (r * 2 + 1) * NUM_COLS + c + 1, aspect='equal')
                ylocs = n.array(range(NUM_TOP_CLASSES)) + 0.5
                height = 0.5
                width = max(ylocs)
                pl.barh(ylocs, [l[0]*width for l in img_labels], height=height, \
                        color=['r' if l[1] == label_names[true_label] else 'b' for l in img_labels])
                pl.title(label_names[true_label])
                pl.yticks(ylocs + height/2, [l[1] for l in img_labels])
                pl.xticks([width/2.0, width], ['50%', ''])
                pl.ylim(0, ylocs[-1] + height*2)

    def do_write_features(self):
        """Run every test batch through the net and pickle the activations
        of --write-features' layer, one file per batch, under feature_path."""
        if not os.path.exists(self.feature_path):
            os.makedirs(self.feature_path)
        next_data = self.get_next_batch(train=False)
        b1 = next_data[1]
        num_ftrs = self.layers[self.ftr_layer_idx]['outputs']
        while True:
            batch = next_data[1]
            data = next_data[2]
            ftrs = n.zeros((data[0].shape[1], num_ftrs), dtype=n.single)
            self.libmodel.startFeatureWriter(data + [ftrs], self.ftr_layer_idx,1)
            # load the next batch while the current one is computing
            next_data = self.get_next_batch(train=False)
            self.finish_batch()
            path_out = os.path.join(self.feature_path, 'data_batch_%d' % batch)
            pickle(path_out, {'data': ftrs, 'labels': data[1]})
            print "Wrote feature file %s" % path_out
            # Stop once the provider wraps around to the first batch again.
            if next_data[1] == b1:
                break
        pickle(os.path.join(self.feature_path, 'batches.meta'), {'source_model':self.load_file,
                                                                 'num_vis':num_ftrs})

    def do_hist_features(self):
        """Sum the chosen layer's activations over one pass through the data
        and plot per-unit min/max/mean curves."""
        train_flag = not self.hist_test
        next_data = self.get_next_batch(train=train_flag)
        b1 = next_data[1]
        num_ftrs = self.layers[self.ftr_layer_idx]['outputs']
        # allocate sum data
        out_data = n.zeros((next_data[2][0].shape[1], num_ftrs), dtype=n.single)
        while True:
            batch = next_data[1]
            data = next_data[2]
            ftrs = n.zeros((data[0].shape[1], num_ftrs), dtype=n.single)
            self.libmodel.startFeatureWriter(data + [ftrs], self.ftr_layer_idx,1)
            # load the next batch while the current one is computing
            next_data = self.get_next_batch(train=train_flag)
            self.finish_batch()
            out_data += ftrs
            if next_data[1] == b1:
                break
        # plot histogram
        #import pdb; pdb.set_trace()
        #print out_data
        fig = pl.figure()
        # min/max/mean subplot
        min_value = n.min( out_data, 0 )
        max_value = n.max( out_data, 0 )
        mean_value = n.mean( out_data, 0 )
        x_value = range(num_ftrs)
        pl.plot( x_value, min_value,
                 x_value, max_value,
                 x_value, mean_value )
        pl.xlim( 0, num_ftrs)
        ply_max = int(n.max(max_value)*1.1)
        ply_min = int(n.min(min_value) )
        ply_min = int(ply_min - 0.1*abs(ply_min) ) - 0.5
        pl.ylim( ply_min, ply_max )
        pl.title( "Layer: " + str( self.hist_features) + " activations" )

    def do_nn_analysis(self):
        """Repeatedly run the same images through the net (training-mode
        inference, try_times passes) and plot activation statistics and
        per-neuron histograms — presumably to visualize dropout noise;
        confirm against the training-mode flag passed to the model lib."""
        train_flag = not self.hist_test
        next_data = self.get_next_batch(train=train_flag)
        b1 = next_data[1]
        num_ftrs = self.layers[self.ftr_layer_idx]['outputs']
        # resize data to only first 128 images
        data = next_data[2]
        num_data = 64
        data[0] = data[0][:,0:num_data]
        data[1] = data[1][:,0:num_data]
        #print data[0], data[1]
        # allocate sum data
        out_data = []
        try_times = 5000
        for i in range(num_data):
            out_data.append( n.zeros( (try_times, num_ftrs), dtype=n.single) )
        #import pdb; pdb.set_trace()
        # fille data
        for i in range(try_times):
            if i % 100 == 0:
                print "\r %d/%d" % (i,try_times),
                sys.stdout.flush()
            #batch = next_data[1]
            #data = next_data[2]
            ftrs = n.zeros((data[0].shape[1], num_ftrs), dtype=n.single)
            self.libmodel.startFeatureWriter(data + [ftrs], self.ftr_layer_idx,0)
            # load the next batch while the current one is computing
            self.finish_batch()
            for j in range(num_data):
                out_data[j][i,:] = ftrs[j,:]
        # analysis out_data code goes here...
        pl.figure(1)
        # plot activation min/max/mean
        pl.subplot(311)
        data_idx = 0
        min_value = n.min( out_data[data_idx], 0 )
        max_value = n.max( out_data[data_idx], 0 )
        mean_value = n.mean( out_data[data_idx], 0 )
        x_value = range(num_ftrs)
        pl.plot( x_value, min_value,
                 x_value, max_value,
                 x_value, mean_value )
        pl.xlim( 0, num_ftrs)
        ply_max = int(n.max(max_value)*1.5)
        ply_min = int(n.min(min_value) )
        ply_min = int(ply_min - 0.1*abs(ply_min) ) - 0.5
        pl.ylim( ply_min, ply_max )
        pl.title( "Layer: " + str( self.nn_analysis) + " activations (data: " + str(data_idx) + ")" )
        # plot activation histogram
        pl.subplot(312)
        neuron_idx = 0
        #import pdb; pdb.set_trace()
        neuron_response = out_data[data_idx][:,neuron_idx]
        pl.hist( neuron_response, 50 )
        pl.title( "Neuron " + str(neuron_idx) + " activation hist (data: " + str(data_idx) + ")" )
        pl.subplot(313)
        neuron_idx = 1
        #import pdb; pdb.set_trace()
        neuron_response = out_data[data_idx][:,neuron_idx]
        pl.hist( neuron_response, 50 )
        pl.title( "Neuron " + str(neuron_idx) + " activation hist (data: " + str(data_idx) + ")" )

    def do_write_mv_result(self, mcInference = False):
        """Classify the test set with multi-view averaging, print the error
        rate, and pickle {labels, preds}; with mcInference=True the model
        lib's MC-inference mode (1000 samples) is enabled first."""
        num_views = self.test_data_provider.get_num_views()
        # enable mcInference if necessary
        if( mcInference ):
            numSamples = 1000;
            self.libmodel.enableMCInference( numSamples );
        # make sure it is multi-view provider
        #import pdb; pdb.set_trace()
        next_data = self.get_next_batch(train=False)
        b1 = next_data[1]
        #num_ftrs = self.layers[self.sotmax_idx]['outputs']
        # cost layer produce no output, thus need check prev-layer
        num_ftrs = self.layers[self.sotmax_idx]['inputLayers'][1]['outputs']
        # create result
        result = {}
        result['labels'] = []
        result['preds'] = []
        # loop over testing batch
        while True:
            batch = next_data[1]
            data = next_data[2]
            num_cases = data[0].shape[1]/num_views
            ftrs = n.zeros((num_cases, num_ftrs), dtype=n.single)
            self.libmodel.startMultiviewFeatureWriter(data + [ftrs], num_views, self.sotmax_idx )
            # load the next batch while the current one is computing
            next_data = self.get_next_batch(train=False)
            self.finish_batch()
            # add to result
            #import pdb; pdb.set_trace()
            result['labels'].append( data[1][0,0:num_cases] )
            result['preds'].append( ftrs )
            if next_data[1] == b1:
                break
        # compute/print accuracy
        assert( len(result['labels']) == len(result['preds'] ) )
        num_batches = len(result['labels'])
        num_cases = 0
        num_wrong = 0
        for ii in range( num_batches ):
            act_index = result['labels'][ii]
            num_cases_ii = act_index.shape[0]
            #import pdb; pdb.set_trace()
            assert( num_cases_ii == result['preds'][ii].shape[0] )
            num_cases += num_cases_ii
            pred_index = n.argmax( result['preds'][ii], 1 )
            for jj in range( num_cases_ii ):
                if pred_index[jj] != act_index[jj]:
                    num_wrong += 1
        print "Testing Error: %2.4f" % ( 1.0 *num_wrong / num_cases )
        # write predicition feature to file: self.write_mv_result
        if self.write_mv_result:
            print "Write result to: ", self.write_mv_result
            pickle( self.write_mv_result, result )
        else :
            print "Write result to: ", self.write_mv_mc_result
            pickle( self.write_mv_mc_result, result )

    def start(self):
        """Dispatch every requested action, show plots, then exit(0)."""
        self.op.print_values()
        if self.show_cost:
            self.plot_cost()
        if self.show_filters:
            self.plot_filters()
        if self.show_preds:
            self.plot_predictions()
        if self.write_features:
            self.do_write_features()
        if self.hist_features:
            self.do_hist_features()
        if self.nn_analysis:
            self.do_nn_analysis()
        if self.write_mv_result:
            self.do_write_mv_result( )
        if self.write_mv_mc_result:
            self.do_write_mv_result( True )
        pl.show()
        sys.exit(0)

    @classmethod
    def get_options_parser(cls):
        """Build this tool's option parser: keep only a few inherited
        ConvNet options and add the shownet-specific ones."""
        op = ConvNet.get_options_parser()
        for option in list(op.options):
            if option not in ('gpu', 'load_file', 'train_batch_range', 'test_batch_range'):
                op.delete_option(option)
        op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
        op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
        op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
        op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
        op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
        op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
        op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
        op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
        op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
        op.add_option("write-features", "write_features", StringOptionParser, "Write test data features from given layer", default="", requires=['feature-path'])
        op.add_option("feature-path", "feature_path", StringOptionParser, "Write test data features to this path (to be used with --write-features)", default="")
        #----my options----------
        op.add_option("hist-features", "hist_features", StringOptionParser,
                      "plot histogram of feature activation", default="")
        op.add_option("hist-test", "hist_test", BooleanOptionParser,
                      "True: plot hist of test data, False: plot hist of training data", default=True )
        op.add_option("nn-analysis", "nn_analysis", StringOptionParser,
                      "run inference on training mode for many times and analysis output", default="")
        #op.add_option("data-provider", "dp_type", StringOptionParser, "Data provider", default="default")
        #op.add_option("write-mv-result", "write_mv_result", StringOptionParser, "Write test data multiview features to file", default="", requires=['feature-path'])
        op.add_option("write-mv-result", "write_mv_result", StringOptionParser, "Write test data multiview features to file", default="" )
        op.add_option("write-mv-mc-result", "write_mv_mc_result", StringOptionParser, "Write test data multiview features on MC inferenceto file", default="" )
        op.options['load_file'].default = None
        return op
if __name__ == "__main__":
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| zygmuntz/kaggle-cifar | dropconnect/shownet.py | Python | bsd-2-clause | 24,244 | [
"NEURON"
] | e423384050167d55c77de407a5f456875d2d899b4229021cbae6a6f8b44b03f1 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation block (parsed as YAML by ansible-doc).
# Fixes: duplicated "that that" and "get never"/"is activate" grammar slips.
DOCUMENTATION = '''
---
author:
    - "Jeroen Hoekx (@jhoekx)"
    - "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
  - This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
  vg:
    description:
    - The volume group this logical volume is part of.
    required: true
  lv:
    description:
    - The name of the logical volume.
    required: true
  size:
    description:
    - The size of the logical volume, according to lvcreate(8) --size, by
      default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
      according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
      Float values must begin with a digit.
      Resizing using percentage values was not supported prior to 2.1.
  state:
    choices: [ "present", "absent" ]
    default: present
    description:
    - Control if the logical volume exists. If C(present) and the
      volume does not already exist then the C(size) option is required.
    required: false
  active:
    version_added: "2.2"
    choices: [ "yes", "no" ]
    default: "yes"
    description:
    - Whether the volume is active and visible to the host.
    required: false
  force:
    version_added: "1.5"
    choices: [ "yes", "no" ]
    default: "no"
    description:
    - Shrink or remove operations of volumes requires this switch. Ensures that
      filesystems never get corrupted/destroyed by mistake.
    required: false
  opts:
    version_added: "2.0"
    description:
    - Free-form options to be passed to the lvcreate command
  snapshot:
    version_added: "2.1"
    description:
    - The name of the snapshot volume
    required: false
  pvs:
    version_added: "2.2"
    description:
    - Comma separated list of physical volumes e.g. /dev/sda,/dev/sdb
    required: false
  shrink:
    version_added: "2.2"
    description:
    - shrink if current size is higher than size requested
    required: false
    default: yes
notes:
  - Filesystems on top of the volume are not resized.
'''
# Usage examples shown by ansible-doc.
# Fix: the "% of VG" example read "vg-firefly" instead of "vg=firefly",
# which would make the task fail with a missing required 'vg' argument.
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol: vg=firefly lv=test size=512
# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
- lvol: vg=firefly lv=test size=512 pvs=/dev/sda,/dev/sdb
# Create cache pool logical volume
- lvol: vg=firefly lv=lvcache size=512m opts='--type cache-pool'
# Create a logical volume of 512g.
- lvol: vg=firefly lv=test size=512g
# Create a logical volume the size of all remaining space in the volume group
- lvol: vg=firefly lv=test size=100%FREE
# Create a logical volume with special options
- lvol: vg=firefly lv=test size=512g opts="-r 16"
# Extend the logical volume to 1024m.
- lvol: vg=firefly lv=test size=1024
# Extend the logical volume to consume all remaining space in the volume group
- lvol: vg=firefly lv=test size=+100%FREE
# Extend the logical volume to take all remaining space of the PVs
- lvol: vg=firefly lv=test size=100%PVS
# Resize the logical volume to % of VG
- lvol: vg=firefly lv=test size=80%VG force=yes
# Reduce the logical volume to 512m
- lvol: vg=firefly lv=test size=512 force=yes
# Set the logical volume to 512m and do not try to shrink if size is lower than current one
- lvol: vg=firefly lv=test size=512 shrink=no
# Remove the logical volume.
- lvol: vg=firefly lv=test state=absent force=yes
# Create a snapshot volume of the test logical volume.
- lvol: vg=firefly lv=test snapshot=snap1 size=100m
# Deactivate a logical volume
- lvol: vg=firefly lv=test active=false
# Create a deactivated logical volume
- lvol: vg=firefly lv=test size=512g active=false
'''
import re
# Leading integer portion of an LVM size string such as "512.00m";
# parse_lvs/parse_vgs use it to truncate sizes to whole units.
decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
    """Pack an LVM version triple into one integer so versions compare
    numerically (e.g. 2.2.99 -> 2002099)."""
    packed = int(patch)
    packed += 1000 * int(minor)
    packed += 1000000 * int(major)
    return packed
def parse_lvs(data):
    """Turn semicolon-separated `lvs` output into a list of dicts with
    'name', 'size' (integer part of the size field, via decimal_point)
    and 'active' (True when the 5th character of the attr field is 'a')."""
    def _record(line):
        fields = line.strip().split(';')
        return {
            'name': fields[0].replace('[', '').replace(']', ''),
            'size': int(decimal_point.match(fields[1]).group(1)),
            'active': fields[2][4] == 'a',
        }
    return [_record(raw) for raw in data.splitlines()]
def parse_vgs(data):
    """Turn semicolon-separated `vgs` output into a list of dicts with
    'name', 'size', 'free' and 'ext_size' (integer parts of each field,
    extracted via the module-level decimal_point regex)."""
    parsed = []
    for raw in data.splitlines():
        fields = raw.strip().split(';')
        entry = {}
        entry['name'] = fields[0]
        entry['size'] = int(decimal_point.match(fields[1]).group(1))
        entry['free'] = int(decimal_point.match(fields[2]).group(1))
        entry['ext_size'] = int(decimal_point.match(fields[3]).group(1))
        parsed.append(entry)
    return parsed
def get_lvm_version(module):
    """Run `lvm version` and return the installed version packed via
    mkversion(), or None when the command fails or its output does not
    match the expected "LVM version: X.Y.Z ... YYYY-MM-DD" line."""
    lvm_bin = module.get_bin_path("lvm", required=True)
    rc, stdout, _err = module.run_command("%s version" % (lvm_bin,))
    if rc == 0:
        match = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", stdout)
        if match:
            major, minor, patch = match.group(1), match.group(2), match.group(3)
            return mkversion(major, minor, patch)
    return None
def main():
    """Create, resize, (de)activate or remove an LVM logical volume.

    Entry point of the Ansible lvol module. Always terminates through
    module.exit_json()/fail_json(); never returns a value.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(required=True),
            lv=dict(required=True),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
            shrink=dict(type='bool', default='yes'),
            active=dict(type='bool', default='yes'),
            snapshot=dict(type='str', default=None),
            pvs=dict(type='str')
        ),
        supports_check_mode=True,
    )
    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found == None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""
    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    # -L (absolute size) by default; switched to -l (extents) for % sizes below.
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']
    if pvs is None:
        pvs = ""
    else:
        # lvcreate/lvextend expect space-separated physical volumes
        pvs = pvs.replace(",", " ")
    if opts is None:
        opts = ""
    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''
    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''
        if not '%' in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]
            try:
                float(size)
                # reject inputs like "inf"/"nan" that float() would accept
                if not size[0].isdigit(): raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)
    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit
    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
    if rc != 0:
        # Missing VG is a no-op for state=absent, an error otherwise.
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]
    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
    changed = False
    lvs = parse_lvs(current_lvs)
    # When creating a snapshot, its name (not the origin LV) is the volume
    # whose existence we check for.
    if snapshot is None:
        check_lv = lv
    else:
        check_lv = snapshot
    # for/else: this_lv stays None when the volume was not found
    for test_lv in lvs:
        if test_lv['name'] == check_lv:
            this_lv = test_lv
            break
    else:
        this_lv = None
    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")
    msg = ''
    if this_lv is None:
        if state == 'present':
            ### create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            ### remove LV
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
        elif not size:
            # LV exists and no size requested: nothing to resize.
            pass
        elif size_opt == 'l':
            ### Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else: # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100
            if '+' in size:
                # '+' means "grow by", not "grow to"
                size_requested += this_lv['size']
            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')
            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
        else:
            ### resize LV based on absolute values
            tool = None
            if int(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and int(size) < this_lv['size']:
                if int(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')
            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
    # Finally (de)activate the volume; these branches exit_json themselves.
    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
    module.exit_json(changed=changed, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
# Run the module only when executed directly (Ansible runs it as a script).
if __name__ == '__main__':
    main()
| CenturylinkTechnology/ansible-modules-extras | system/lvol.py | Python | gpl-3.0 | 16,600 | [
"Firefly"
] | 8f9c346c65c3637f94da7107de8a20c567d120b75606546db05ca30a6242e94b |
from ..helpers import UniqueCourseTest
from ...fixtures.course import CourseFixture
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.create_mode import ModeCreationPage
from lms.envs.bok_choy import EMAIL_FILE_PATH
from ..ga_helpers import GaccoTestMixin
from ...pages.lms.ga_pay_and_verify import CourseAboutPage
class PaidCourseTest(UniqueCourseTest, GaccoTestMixin):
    """End-to-end acceptance test of purchasing a paid course.

    Walks: course about page -> payment flow -> fake payment -> receipt,
    then verifies the confirmation email and the dashboard state.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(PaidCourseTest, self).setUp()
        CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        ).install()
        # Add a no-id-professional mode to the course
        ModeCreationPage(
            self.browser,
            self.course_id,
            mode_slug=u'no-id-professional',
            mode_display_name=u'Paid Course',
            min_price=100
        ).visit()
        # Set up email client
        self.setup_email_client(EMAIL_FILE_PATH)
        # Set window size
        self.setup_window_size_for_pc()

    def _auto_auth(self, course_id=None):
        # Log in as a freshly auto-created user; returns that user's info dict.
        return AutoAuthPage(self.browser, course_id=course_id).visit().user_info

    def _assert_receipt_content(self, receipt_content, display_name, payment_method, price):
        # Check the receipt page shows course name, payment method and price.
        # NOTE(review): the display_name parameter is unused; the assertion
        # reads self.course_info['display_name'] directly -- confirm intent.
        self.assertEqual(self.course_info['display_name'], receipt_content['description'])
        self.assertEqual(payment_method, receipt_content['payment_method'])
        self.assertIn(payment_method, receipt_content['payment_method']) if False else None
        self.assertEqual(price, receipt_content['price'])

    def _assert_purchase_email(self, display_name, price, payment_method):
        # Verify the most recent email mentions the course, the
        # comma-formatted price and the payment method.
        email_message = self.email_client.get_latest_message()
        self.assertEqual(email_message['subject'], 'Your course has been completed from edX')
        self.assertIn(display_name, email_message['body'])
        self.assertIn('{:,d}'.format(price), email_message['body'])
        self.assertIn(payment_method, email_message['body'])

    def test_enroll_paid_course_flow(self):
        """Purchase the paid course and verify receipt, email and dashboard."""
        self._auto_auth()
        about_page = CourseAboutPage(self.browser, self.course_id).visit()
        payment_flow = about_page.register()
        payment_data = payment_flow.get_payment_content()
        self.assertEqual(self.course_info['display_name'], payment_data['description'])
        # price 100 and tax 8
        self.assertEqual('108', payment_data['price'])
        fake_payment_page = payment_flow.proceed_to_payment()
        fake_payment_page.submit_payment()
        self._assert_receipt_content(
            payment_flow.get_receipt_content(),
            self.course_info['display_name'], 'Credit Card', '108',
        )
        self._assert_purchase_email(self.course_info['display_name'], 108, 'Credit Card')
        dashboard_page = payment_flow.go_to_dashboard()
        self.assertTrue(dashboard_page.has_paid_course_purchased_message(self.course_info['display_name']))
        # The receipt must also be reachable again from the dashboard.
        dashboard_page.show_receipt_page(self.course_id, self.course_info['display_name'])
        self._assert_receipt_content(
            payment_flow.get_receipt_content(),
            self.course_info['display_name'], 'Credit Card', '108',
        )
| nttks/edx-platform | common/test/acceptance/tests/lms/test_ga_paid_course.py | Python | agpl-3.0 | 3,226 | [
"VisIt"
] | 910ccdd4b0578f2232ae914339321c354ba3967d9c2415067d573944cfb073fc |
#!/usr/bin/env python

# This example demonstrates the use of vtkAssembly. In an assembly,
# the motion of one actor affects the position of other actors.

import vtk

# Create four parts: a top level assembly (in this case, a
# vtkCylinder) and three primitives (using vtkSphereSource,
# vtkCubeSource, and vtkConeSource). Set up mappers and actors for
# each part of the assembly to carry information about material
# properties and associated geometry.
sphere = vtk.vtkSphereSource()
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
sphereActor.SetOrigin(2, 1, 3)
sphereActor.RotateY(6)
sphereActor.SetPosition(2.25, 0, 0)
sphereActor.GetProperty().SetColor(1, 0, 1)

cube = vtk.vtkCubeSource()
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputConnection(cube.GetOutputPort())
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
cubeActor.SetPosition(0.0, .25, 0)
cubeActor.GetProperty().SetColor(0, 0, 1)

cone = vtk.vtkConeSource()
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.SetPosition(0, 0, .25)
coneActor.GetProperty().SetColor(0, 1, 0)

# top part of the assembly
cylinder = vtk.vtkCylinderSource()
cylinderMapper = vtk.vtkPolyDataMapper()
cylinderMapper.SetInputConnection(cylinder.GetOutputPort())
# Polygon offset avoids z-fighting where coincident geometry overlaps.
cylinderMapper.SetResolveCoincidentTopologyToPolygonOffset()
cylinderActor = vtk.vtkActor()
cylinderActor.SetMapper(cylinderMapper)
cylinderActor.GetProperty().SetColor(1, 0, 0)

# Create the assembly and add the 4 parts to it. Also set the origin,
# position and orientation in space.
assembly = vtk.vtkAssembly()
assembly.AddPart(cylinderActor)
assembly.AddPart(sphereActor)
assembly.AddPart(cubeActor)
assembly.AddPart(coneActor)
assembly.SetOrigin(5, 10, 15)
assembly.AddPosition(5, 0, 0)
assembly.RotateX(15)

# Create the Renderer, RenderWindow, and RenderWindowInteractor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size.
# Note: coneActor is added both as a part of the assembly and on its own,
# so the cone is rendered twice -- once with the assembly transform applied
# and once without.
ren.AddActor(assembly)
ren.AddActor(coneActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(200, 200)

# Set up the camera to get a particular view of the scene
camera = vtk.vtkCamera()
camera.SetClippingRange(21.9464, 30.0179)
camera.SetFocalPoint(3.49221, 2.28844, -0.970866)
camera.SetPosition(3.49221, 2.28844, 24.5216)
camera.SetViewAngle(30)
camera.SetViewUp(0, 1, 0)
ren.SetActiveCamera(camera)

# Start the interactive event loop.
iren.Initialize()
renWin.Render()
iren.Start()
| hlzz/dotfiles | graphics/VTK-7.0.0/Examples/Rendering/Python/assembly.py | Python | bsd-3-clause | 2,777 | [
"VTK"
] | cf34aeee57fa61f5a63c0ae34030bc7fe6a92051b0fe1cad69467207691e5075 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Blender add-on reload idiom: when this module is re-executed ("bpy" is
# already bound in the namespace), reload mb_utils so edits take effect;
# otherwise do the normal first-time import.
if "bpy" in locals():
    import importlib
    importlib.reload(mb_utils)
else:
    from molblend import mb_utils
import os
import string
import random
import logging
import numpy as np
import bpy
from bpy.types import (PropertyGroup,
UIList)
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
IntVectorProperty,
FloatProperty,
FloatVectorProperty,
BoolVectorProperty,
PointerProperty,
CollectionProperty,
EnumProperty)
logger = logging.getLogger(__name__)
def get_object_list(colprop):
    """Return the live Blender objects referenced by the pointer collection
    *colprop*, removing entries whose object no longer exists in the scene."""
    live = []
    stale_indices = []
    scene_objects = bpy.context.scene.objects
    for idx, entry in enumerate(colprop):
        ob = entry.object
        if ob and ob.name in scene_objects:
            live.append(ob)
        else:
            stale_indices.append(idx)
    # Prune back to front so earlier indices stay valid; some collection
    # types may not expose remove(), hence the AttributeError guard.
    for idx in reversed(stale_indices):
        try:
            colprop.remove(idx)
        except AttributeError:
            pass
    return live
class mb_object_pointer(PropertyGroup):
    """Collection entry wrapping a pointer to one Blender object."""
    #name = StringProperty(name="Object name")
    object = PointerProperty(name="Object", type=bpy.types.Object)

    @property
    def name(self):
        # Expose the referenced object's name as this entry's name.
        return self.object.name
class mb_element_mesh(PropertyGroup):
    """Maps an element symbol (name) to the mesh datablock its atoms share."""
    name = StringProperty(name="Element")
    data = PointerProperty(name="Mesh", type=bpy.types.Mesh)
    #def get_data(self):
        #return bpy.data.meshes.get(self.name)
class atom_scale(PropertyGroup):
    """Per-draw-style atom scale factor; name is the draw-style identifier."""
    name = StringProperty()
    # Changing the scale refreshes every molecule mesh in the scene.
    val = FloatProperty(name="Atom scale", default=0.4, min=0.0, max=5.0,
                        precision=2, update=mb_utils.update_all_meshes)
class mb_mesh(PropertyGroup):
    """Per-mesh MolBlend data: tags a mesh datablock with its MolBlend type."""
    type = EnumProperty(
        name="type", description="Identifies the mesh type",
        items=mb_utils.enums.mesh_types, default='NONE')
class mb_atom_mode(PropertyGroup):
    """Displacement data of one atom in one vibrational mode."""
    # NOTE(review): "name" is int-typed and mirrors the index -- presumably
    # used as the collection key; confirm against callers.
    name = IntProperty(name="index")
    index = IntProperty(name="index")
    freq = FloatProperty(name="frequency")
    vec = FloatVectorProperty(name="vector", subtype="XYZ")
class mb_dipole(PropertyGroup):
    """Objects visualizing a molecular dipole (origin, target, arrow parts)."""
    origin = PointerProperty(name="origin", type=bpy.types.Object)
    target = PointerProperty(name="Dipole", type=bpy.types.Object)
    pvt_objects = CollectionProperty(name="Dipole arrow objects",
                                     type=mb_object_pointer)

    @property
    def objects(self):
        # Live arrow objects; stale collection entries are pruned.
        return get_object_list(self.pvt_objects)
class mb_unit_cell(PropertyGroup):
    """Objects visualizing the unit cell: origin plus the three axis objects."""
    origin = PointerProperty(name="origin", type=bpy.types.Object)
    a = PointerProperty(name="a", type=bpy.types.Object)
    b = PointerProperty(name="b", type=bpy.types.Object)
    c = PointerProperty(name="c", type=bpy.types.Object)
    pvt_objects = CollectionProperty(name="Unit cell objects",
                                     type=mb_object_pointer)

    @property
    def objects(self):
        # Live helper objects (frame, arrowheads, ...); stale entries pruned.
        return get_object_list(self.pvt_objects)
class mb_mode_arrows(PropertyGroup):
    """Arrow objects drawn for vibrational-mode eigenvectors."""
    pvt_objects = CollectionProperty(name="Mode arrows",
                                     type=mb_object_pointer)

    @property
    def objects(self):
        # Live arrow objects; stale collection entries are pruned.
        return get_object_list(self.pvt_objects)
class mb_molecule_objects(PropertyGroup):
    """Collections of all Blender objects that belong to one molecule."""
    pvt_atoms = CollectionProperty(name="Atoms", type=mb_object_pointer)
    pvt_bonds = CollectionProperty(name="Bonds", type=mb_object_pointer)
    parent = PointerProperty(name="Parent", type=bpy.types.Object)
    dipole = PointerProperty(name="Dipole", type=mb_dipole)
    unit_cell = PointerProperty(name="Unit cell objects", type=mb_unit_cell)
    # NOTE(review): UI name "Bonds" is duplicated here; probably meant "Other".
    pvt_other = CollectionProperty(name="Bonds", type=mb_object_pointer)
    mode_arrows = PointerProperty(name="Mode arrows", type=mb_mode_arrows)

    @property
    def atoms(self):
        # Live atom objects; stale entries pruned by get_object_list.
        return get_object_list(self.pvt_atoms)

    @property
    def bonds(self):
        # Live bond objects; stale entries pruned by get_object_list.
        return get_object_list(self.pvt_bonds)

    @property
    def other(self):
        # Remaining molecule objects not covered by a dedicated collection.
        return get_object_list(self.pvt_other)

    def get_all_objects(self, with_parent=True):
        """Collect every object of the molecule (optionally incl. the parent)."""
        all_obs = []
        if self.parent and with_parent:
            all_obs.append(self.parent)
        all_obs.extend(self.atoms)
        all_obs.extend(self.bonds)
        all_obs.extend(self.other)
        # NOTE(review): self.dipole.objects is a *list*; appending it via
        # all_obs.append(ob) below nests that list inside all_obs instead of
        # extending -- confirm whether extend() was intended.
        for ob in (self.dipole.origin,
                   self.dipole.target,
                   self.dipole.objects,
                   self.unit_cell.origin,
                   self.unit_cell.a,
                   self.unit_cell.b,
                   self.unit_cell.c):
            if ob:
                all_obs.append(ob)
        all_obs.extend(self.unit_cell.objects)
        return all_obs
class mb_qvec(PropertyGroup):
    """One q-point: index, q vector, and the text datablock holding its modes
    (parsed as JSON elsewhere via mode_txt.as_string())."""
    iqpt = IntProperty(name="nqpt", default=1)
    qvec = FloatVectorProperty(name="q vector", default=(0,0,0))
    mode_txt = PointerProperty(type=bpy.types.Text)
class MB_UL_modes(UIList):
    """UI list drawing one row per q-point: its index and q vector."""
    def draw_item(self, context, layout, data, item, icon, active_data,
                  active_propname, index):
        # 18% of the row for the q-point index, the rest for the vector.
        split = layout.split(0.18)
        col = split.column()
        col.label(str(item.iqpt))
        col = split.column()
        col.label("q=({:5.3f}, {:5.3f}, {:5.3f})".format(*item.qvec))
def get_material_props(ob=None, node_name="mb.principled", material=None):
    """Return a (data, prop) pair for displaying the color of *ob*'s first
    material (or of *material* if given) in a UI layout.

    Under Cycles this targets input 0 of the node *node_name* in the
    material's node tree; otherwise the material's diffuse_color. On any
    failure returns (error_message_string, None) -- callers must check the
    second element before using the pair with layout.prop().
    """
    try:
        mat = material or ob.material_slots[0].material
        if bpy.context.scene.render.engine == 'CYCLES':
            data = mat.node_tree.nodes[node_name].inputs[0]
            prop = "default_value"
        else:
            data = mat
            prop = "diffuse_color"
        return data, prop
    except IndexError:
        # ob has no material slots
        return "{} has no material".format(ob.name), None
    except AttributeError as e:
        if "no attribute 'node_tree'" in e.args[0]:
            # NOTE(review): if called with ob=None and material=None this
            # formats ob.name and raises a fresh AttributeError -- confirm
            # all callers pass at least one of ob/material.
            msg = "{} has no material".format(ob.name)
        else:
            msg = "from get_material_props: {}".format(e)
        return msg, None
    except KeyError as e:
        msg = "Material {} has no node with name {}".format(mat.name, node_name)
        return msg, None
class mb_molecule(PropertyGroup):
    """Per-molecule MolBlend data, stored on the molecule's parent object.

    Holds the object collections, shared element meshes, draw-style
    settings and vibrational-mode state of one molecule, plus the UI
    draw helpers used by the panels.
    """
    index = IntProperty(name="Molecule index")
    name = StringProperty(name="Molecule identifier")
    # index that increases with each added atom in the molecule,
    # or is set to max index of imported file. It doesn't
    # decrease when atom is deleted. => Not an indicator of size of molecule!
    # Only guarantees uniqueness for atom names
    atom_index = IntProperty(name="Atom counter")
    objects = PointerProperty(name="Molecule objects",
                              type=mb_molecule_objects)
    meshes = CollectionProperty(name="Meshes", type=mb_element_mesh)
    # display properties
    bond_material = EnumProperty(
        name="Bond material", description="Choose bond material",
        items=mb_utils.enums.bond_material, default='ATOMS',
        update=mb_utils.update_bond_material
    )
    bond_generic_material = PointerProperty(type=bpy.types.Material)
    draw_style = EnumProperty(
        name="Display style", description="Style to draw atoms and bonds",
        items=mb_utils.enums.molecule_styles, default='BAS',
        update=mb_utils.update_draw_style
    )
    radius_type = EnumProperty(
        name="Radius type", description="Type of radius to use as reference",
        items=mb_utils.enums.radius_types, default='covalent',
        update=mb_utils.update_radius_type
    )
    bond_radius = FloatProperty(
        name="Bond radius", description="Radius for bond objects",
        default=0.1, min=0.0, max=3.0,
        update=mb_utils.update_all_meshes
    )
    atom_scales = CollectionProperty(type=atom_scale)
    refine_atoms = IntProperty(
        name="Refine atoms", description="Refine value for atom meshes",
        default=8, min=2, max=64,
        update=mb_utils.update_refine_atoms
    )
    refine_bonds = IntProperty(
        name="Refine bonds", description="Refine value for atom meshes",
        default=8, min=2, max=64,
        update=mb_utils.update_refine_bonds
    )
    # vibrational modes
    qvecs = CollectionProperty(name="Modes", type=mb_qvec)
    active_nqpt = IntProperty(
        name="Active q-point",
        description="Active q-point",
        default=0, min=0,
        update=mb_utils.update_active_mode
    )
    max_mode = IntProperty(
        name="Number of modes",
        description="Number of vibrational modes of molecule",
        default=0, min=0,
    )
    active_mode = IntProperty(
        name="Active Mode",
        description="Active Mode to display. 0 = equilibrium position",
        default=0, min=0,
        update=mb_utils.update_active_mode
    )
    mode_scale = FloatProperty(
        name="Mode Scale", description="Scale of normal mode displacement",
        default=1.0,
        update=mb_utils.update_active_mode
    )
    mode_arrows_scale = FloatProperty(
        name="Arrow Scale", description="Scale of mode arrows",
        default=25.0, min=-1000.0, max=1000.0,
        #update=mb_utils.update_active_mode
    )
    show_mode_arrows = BoolProperty(
        name="Show arrows", default=False,
        description="Show arrows for mode eigenvectors",
        update=mb_utils.update_show_mode_arrows
    )
    mode_arrows_phase = FloatProperty(
        name="Arrow phase", default=0.,
        description="Mode phase in units of pi",
        update=mb_utils.update_active_mode
    )
    autoplay_mode_animation = BoolProperty(
        name="Autoplay", default=True,
        description="Automatically start animation on mode change",
        update=mb_utils.update_active_mode
    )
    show_unit_cell_frame = BoolProperty(
        name="Show frame", default=True,
        description="Show unit cell frame",
        update=mb_utils.update_show_unit_cell_frame
    )
    show_unit_cell_arrows = BoolProperty(
        name="Show arrows", default=True,
        description="Show unit cell arrows",
        update=mb_utils.update_show_unit_cell_arrows
    )

    def draw_vibrations(self, layout):
        """Draw the vibrations panel: q-point list, mode selection, playback
        buttons and mode-arrow controls."""
        layout.operator("mb.import_modes")
        if self.qvecs:
            layout.template_list("MB_UL_modes", "", self, "qvecs", self,
                                 "active_nqpt", rows=1)
            layout.prop(self, "active_mode")
            if 'mode' not in self:
                layout.label("mode wasn't loaded correctly.")
                return None
            layout.label("Frequency: {}".format(self['mode']["freq"]))
            #layout.prop(self['qpts'][self.active_nqpt]['modes'][self.active_mode],
                        #"symmetry", text="Symmetry")
            layout.prop(self, "mode_scale", slider=False)
            # The play/pause etc buttons are copy/pasted from space_time.py
            row = layout.row(align=True)
            row.operator("screen.frame_jump", text="", icon='REW').end = False
            row.operator("mb.frame_skip", text="", icon='PREV_KEYFRAME').next = False
            screen = bpy.context.screen
            scene = bpy.context.scene
            if not screen.is_animation_playing:
                # if using JACK and A/V sync:
                # hide the play-reversed button
                # since JACK transport doesn't support reversed playback
                # (bug fix: was bare `context`, which is undefined here)
                if scene.sync_mode == 'AUDIO_SYNC' and bpy.context.user_preferences.system.audio_device == 'JACK':
                    sub = row.row(align=True)
                    sub.scale_x = 2.0
                    sub.operator("screen.animation_play", text="", icon='PLAY')
                else:
                    row.operator("screen.animation_play", text="", icon='PLAY_REVERSE').reverse = True
                    row.operator("screen.animation_play", text="", icon='PLAY')
            else:
                sub = row.row(align=True)
                sub.scale_x = 2.0
                sub.operator("screen.animation_play", text="", icon='PAUSE')
            row.operator("mb.frame_skip", text="", icon='NEXT_KEYFRAME').next = True
            row.operator("screen.frame_jump", text="", icon='FF').end = True
            row.prop(self, "autoplay_mode_animation", text="Autoplay")
            layout.prop(self, "show_mode_arrows", text="Arrows")
            row = layout.row()
            row.active = self.show_mode_arrows
            row.prop(self, 'mode_arrows_scale')
            row = layout.row()
            row.active = self.show_mode_arrows
            row.prop(self, 'mode_arrows_phase')

    def draw_properties(self, layout):
        """Draw the molecule properties panel (name, frames, parent location)."""
        layout.prop(self.objects.parent, "name")
        layout.label("(id: '{}')".format(self.name))
        if 'unit_cells' in self and len(self['unit_cells']) > 1:
            layout.label("Number of frames: {}".format(len(self['unit_cells'])))
        if self.objects.parent:
            col = layout.column()
            col.prop(self.objects.parent, "location",
                     text="Parent location")
            layout.operator("mb.center_mol_parent")

    def draw_dipole_props(self, layout):
        """Draw the dipole panel: target location, arrow color, add/remove."""
        if (self.objects.dipole.target and
                self.objects.dipole.target.name in bpy.context.scene.objects):
            col = layout.column()
            col.prop(self.objects.dipole.target, "location",
                     text="")
            # Bug fix: previously the loop variable leaked out of the for
            # loop and raised NameError when the dipole had no arrow objects.
            arrow_ob = None
            for ob in self.objects.dipole.objects:
                if ob.material_slots:
                    arrow_ob = ob
                    break
            if arrow_ob is not None:
                data, prop = get_material_props(arrow_ob)
                # prop is None when no usable material/node was found
                if prop:
                    layout.prop(data, prop, text="Color")
            layout.operator("mb.remove_dipole")
        else:
            layout.operator("mb.draw_dipole")

    def draw_unit_cell_props(self, layout):
        """Draw the unit-cell panel: axis vectors, volume, frame and arrows."""
        if (self.objects.unit_cell.a
                and self.objects.unit_cell.b
                and self.objects.unit_cell.c):
            row = layout.row()
            col = row.column()
            avec = self.objects.unit_cell.a
            bvec = self.objects.unit_cell.b
            cvec = self.objects.unit_cell.c
            col.prop(avec, "location", text="a")
            col = row.column()
            col.prop(bvec, "location", text="b")
            col = row.column()
            col.prop(cvec, "location", text="c")
            # unit cell volume |a . (b x c)|
            vol = np.absolute(np.dot(avec.location,
                                     np.cross(bvec.location,
                                              cvec.location)))
            layout.label("Volume: {:5.3f} {}".format(vol, u"\u212B\u00B3"))
            ob_frame = None
            # Bug fix: ob_arrow was never initialized (a dead ob_not_frame
            # was), so a missing 'arrowhead' object raised NameError below.
            ob_arrow = None
            for ob in self.objects.unit_cell.objects:
                if 'frame' in ob.name:
                    ob_frame = ob
                elif 'arrowhead' in ob.name:
                    ob_arrow = ob
            if ob_arrow and ob_arrow.material_slots:
                data, prop = get_material_props(ob_arrow)
                # prop is None when no usable material/node was found
                if prop:
                    layout.prop(data, prop, text="Color")
            if ob_frame:
                layout.prop(ob_frame.modifiers['mb.wireframe'], 'thickness',
                            text="Frame thickness")
            layout.prop(self, "show_unit_cell_arrows")
            layout.prop(self, "show_unit_cell_frame")
            layout.operator("mb.remove_unit_cell")
        else:
            layout.operator("mb.draw_unit_cell")

    def draw_styles(self, layout):
        """Draw the style panel: scales, radii, materials and refine levels."""
        props = {
            "Atom scale": [self.atom_scales[self.draw_style], "val", 10],
            "Bond radius": [self, "bond_radius", 20],
            "Radius type": [self, "radius_type", 30],
            "Display style": [self, "draw_style", 40],
            "Bond material": [self, "bond_material", 50],
            "Refine atoms": [self, "refine_atoms", 70],
            "Refine bonds": [self, "refine_bonds", 80],
        }
        data, prop = get_material_props(material=self.bond_generic_material)
        props.update({'Bond color': [data, prop, 60]})
        # Sort by the trailing integer for a stable display order.
        for label, (data, prop, i) in sorted(props.items(),
                                             key=lambda t: t[-1][-1]):
            try:
                layout.prop(data, prop, text=label)
            except TypeError:
                # get_material_props returns a string if no material was found
                layout.label("{}: {}".format(label, data))

    def add_object(self, ob, parent_to_mol=True, type=None):
        '''
        Add an object to the molecule's atoms collection and return the
        collection item. If object is already in collection, just return the
        collection item.
        '''
        if type is not None:
            ob.mb.type = type
        ob.mb.parent = self.objects.parent
        if parent_to_mol and not ob.parent == self.id_data:
            ob.parent = self.id_data
            # keep the object's world transform when (re)parenting
            ob.matrix_parent_inverse = self.id_data.matrix_world.inverted()
        collection = {
            'ATOM': self.objects.pvt_atoms,
            'BOND': self.objects.pvt_bonds,
            'UC': self.objects.unit_cell.pvt_objects,
            'DIPOLE': self.objects.dipole.pvt_objects,
            'MODE_ARROW': self.objects.mode_arrows.pvt_objects,
            'NONE': self.objects.pvt_other
        }
        objects = collection[ob.mb.type]
        for existing in objects:
            if existing.object == ob:
                item = existing
                break
        else:
            item = objects.add()
            item.object = ob
        # Bug fix: compare the MolBlend type (ob.mb.type), not the Blender
        # object type (ob.type, e.g. 'MESH') -- the old check could never be
        # true, so atom_index was never bumped here.
        if ob.mb.type == 'ATOM':
            self.atom_index = max(self.atom_index, ob.mb.index)
        return item

    def remove_object(self, ob):
        """Unparent *ob* (keeping its world transform) and drop it from the
        collection matching its MolBlend type."""
        if ob.parent == self.objects.parent:
            mat = ob.matrix_world.copy()
            ob.parent = None
            ob.matrix_world = mat
        collection = {
            'ATOM': self.objects.pvt_atoms,
            'BOND': self.objects.pvt_bonds,
            'UC': self.objects.unit_cell.pvt_objects,
            'DIPOLE': self.objects.dipole.pvt_objects,
            'MODE_ARROW': self.objects.mode_arrows.pvt_objects,
            'NONE': self.objects.pvt_other
        }
        objects = collection[ob.mb.type]
        for i, item in enumerate(objects):
            if item.object == ob:
                objects.remove(i)
                return

    def get_mode(self, n_mode=None, n_qpt=None):
        """Return the mode dict *n_mode* (1-based) at q-point *n_qpt*.

        Defaults to the active mode/q-point. Modes are stored as JSON in a
        text datablock per q-point; returns None (and logs) on any failure.
        """
        # Bug fix: json was never imported at module level, so the original
        # call always raised NameError, silently masked by a bare except.
        import json
        if n_mode is None:
            n_mode = self.active_mode
        if n_qpt is None:
            n_qpt = self.active_nqpt
        try:
            qpt = json.loads(self.qvecs[n_qpt].mode_txt.as_string())
        except Exception:  # was a bare except; keep broad but explicit
            logger.error("Problem loading mode from text object. Check console")
            logger.exception("")
            return None
        return qpt['modes'][n_mode-1]
class mb_object(PropertyGroup):
index = IntProperty(name="Index")
name = StringProperty(name="Object name")
type = EnumProperty(
name="type", description="Select the object type",
items=mb_utils.enums.object_types, default='NONE')
parent = PointerProperty(name="Molecule parent",
type=bpy.types.Object)
# used by type == 'ATOM'
pvt_bonds = CollectionProperty(type=mb_object_pointer)
@property
def bonds(self):
return get_object_list(self.pvt_bonds)
atom_name = StringProperty(name="Atom name")
element = StringProperty(
name="Element", description="Element Symbol",
update=mb_utils.update_atom_element)
element_long = StringProperty(
name="Element name", description="Full element name")
mode_arrow = PointerProperty(type=bpy.types.Object)
# used by type == 'BOND'
pvt_bonded_atoms = CollectionProperty(type=mb_object_pointer)
@property
def bonded_atoms(self):
return get_object_list(self.pvt_bonded_atoms)
supercell = IntVectorProperty(name="supercell", default=(0,0,0),
size=3)
# used by type == 'PARENT'
molecule = PointerProperty(type=mb_molecule)
@property
def object(self):
return self.id_data
@property
def world_location(self):
return self.id_data.matrix_world.to_translation()
def get_molecule(self):
try:
return self.parent.mb.molecule
except AttributeError:
return None
def add_bond(self, ob):
"""Add object to bond collection and return new collection item."""
if not self.type == 'ATOM':
logger.warning("Something is trying to add bond to "
"non-ATOM type object")
return None
bond = None
for existing in self.bonds:
if existing == ob:
bond = existing
break
else:
bond = self.pvt_bonds.add()
bond.object = ob
return bond
def remove_bond(self, ob):
if not self.type == 'ATOM':
logger.warning("Something is trying to remove bond from "
"non-ATOM type object")
return None
for i, b in enumerate(self.bonds):
if b == ob:
self.pvt_bonds.remove(i)
return
def add_bonded_atom(self, ob):
if not self.type == 'BOND':
logger.warning("Something is trying to add bonded_atom to "
"non-BOND type object")
return
atom = None
for existing in self.bonded_atoms:
if existing == ob:
atom = existing
break
else:
atom = self.pvt_bonded_atoms.add()
atom.object = ob
return atom
def remove_bonded_atom(self, ob):
    """Drop *ob* from this bond's atom collection (first match only).

    Warns and does nothing when called on a non-BOND object.
    """
    if self.type != 'BOND':
        logger.warning("Something is trying to remove bonded_atom "
                       "{} ({}) from non-BOND type object {} ({})".format(
                       ob.name, ob.mb.type, self.name, self.type))
        return
    index = next((i for i, atom in enumerate(self.bonded_atoms)
                  if atom == ob), None)
    if index is not None:
        self.pvt_bonded_atoms.remove(index)
def draw_properties(self, context, layout, ob):
    """Draw this atom's editable properties into a UI layout.

    context: Blender context (used to look up the scene element table).
    layout: UI layout to draw the property rows into.
    ob: the Blender object whose material provides the color property.
    """
    element = context.scene.mb.elements[self.element]
    # Maps display label -> [data block, property name, sort order].
    props = {
        "Element": [self, "element", 10],
        "Name": [self, "atom_name", 20],
        "Covalent radius": [element, "covalent", 30],
        "vdW radius": [element, "vdw", 40],
        "constant radius": [element, "constant", 50],
        #"Atom color": atom_color,
    }
    data, prop = get_material_props(ob)
    props.update({'Atom color': [data, prop, 60]})
    # Sort rows by the numeric order stored as the last list element.
    for label, (data, prop, i) in sorted(props.items(),
                                         key=lambda t: t[-1][-1]):
        try:
            layout.prop(data, prop, text=label)
        except TypeError:
            # Not a drawable property (e.g. no material): show as text.
            layout.label("{}: {}".format(label, data))
class mb_text(PropertyGroup):
    """MolBlend state attached to Blender text data blocks."""
    # Kind of text element; items come from mb_utils.enums.text_types.
    type = EnumProperty(
        name="type", description="Select the text type",
        items=mb_utils.enums.text_types, default='NONE')
    # Object this text annotation belongs to.
    parent = PointerProperty(type=bpy.types.Object)
class mb_element(PropertyGroup):
    """Scene-wide display settings for one chemical element."""
    name = StringProperty(name="Element")
    element = StringProperty(name="Element")
    element_name = StringProperty(name="Element name")
    atomic_number = IntProperty(name="Atomic number")
    color = FloatVectorProperty(name="Color", subtype='COLOR', size=3)
    # The three radii below refresh every mesh in the scene on change.
    # NOTE(review): units presumably Angstrom -- confirm.
    covalent = FloatProperty(
        name="Covalent radius",
        description="Scene wide covalent radius",
        min=0.0, max=5.0,
        update=mb_utils.update_all_meshes)
    vdw = FloatProperty(
        name="vdW radius",
        description="Scene wide van der Waals radius",
        min=0.0, max=5.0,
        update=mb_utils.update_all_meshes)
    constant = FloatProperty(
        name="Constant radius",
        min=0.0, max=5.0,
        update=mb_utils.update_all_meshes)
class mb_scn_import(PropertyGroup):
    """Options for the molecule import operator."""
    filepath = StringProperty(
        name="Import file",
        description="Filepath to molecule file to import (.xyz, .pdb)",
        default=os.getcwd() + "/", subtype="FILE_PATH")
    # Whether to also read normal modes and animate them as keyframes.
    modes = BoolProperty(
        name="Modes",
        description="Import normal modes of molecule as keyframes.",
        default=False)
    n_q = IntProperty(name="q point", default=1,
                      min=1, description="Import modes of 'n_q'th q point in file")
    modes_path = StringProperty(
        name="Modes file",
        description="Filepath to modes file to import "
                    "(In Quantum Espresso: dynmat.out)",
        default="", subtype="FILE_PATH")
class mb_scn_export(PropertyGroup):
    """Options for the molecule export operator."""
    filepath = StringProperty(name="Export file", default="",
                              subtype="FILE_PATH",
                              description="Filepath to exported molecule file (.xyz, .pdb)")
    selection_only = BoolProperty(name="Selected Objects", default=True,
                                  description="Only export selected objects")
    # Changing the file type triggers mb_utils.update_export_file_type.
    file_type = EnumProperty(
        name="File type", default="XYZ", items=mb_utils.enums.file_types,
        description="File format to export to",
        update=mb_utils.update_export_file_type)
    length_unit = EnumProperty(
        name="Unit", default='1.0', items=mb_utils.enums.angstrom_per_unit,
        description="Unit in output file (to convert to from Angstrom)")
    # Used when length_unit does not cover the desired conversion.
    length_unit_other = FloatProperty(
        name="Custom Unit", default=1.0, min=0.000001,
        description="Enter unit of export file as Angstrom/unit")
class mb_scn_globals(PropertyGroup):
    """Scene-wide defaults applied to newly created molecules."""
    draw_style = EnumProperty(
        name="Draw style", description="Style to draw atoms and bonds",
        items=mb_utils.enums.molecule_styles, default='BAS',
        )
    radius_type = EnumProperty(
        name="Radius type",
        items=mb_utils.enums.radius_types, default='covalent')
    bond_radius = FloatProperty(
        name="Bond radius",
        description="Radius of bonds for Sticks, and Ball and Sticks",
        default=0.1, min=0.0, max=3.0)
    atom_scales = CollectionProperty(type=atom_scale)
    # Sub-groups holding the import/export operator options.
    import_props = PointerProperty(type=mb_scn_import)
    export_props = PointerProperty(type=mb_scn_export)
    show_bond_lengths = BoolProperty(
        name="Show bond lengths", default=False,
        description="Display bond length of selected bonds",
        update=mb_utils.update_show_bond_lengths
        )
    bond_length_font_size = IntProperty(
        name="Bond length font size", default=12,
        description="Font size of bond lengths")
    # Bond-angle display is not implemented yet; kept for reference.
    #show_bond_angles = BoolProperty(
        #name="Show bond angles", default=False,
        #description="Display bond angle of selected bonds",
        #update=mb_utils.update_show_bond_angles)
    element_to_add = StringProperty(
        name="Element", description="Element to add to scene",
        default="C")
    geometry_to_add = EnumProperty(
        name="Geometry",
        description="Geometry the new bond should be in relative to "
                    "existing bonds.",
        items=mb_utils.enums.geometries, default='NONE')
class mb_git_commit(PropertyGroup):
    """Record of a single git commit (stored in the scene info)."""
    commit_id = StringProperty(name="git commit id")
    date = StringProperty(name="git commit date")
    time_stamp = StringProperty(name="git commit time stamp")
class mb_scn_info(PropertyGroup):
    """Version information kept with the scene."""
    git_commits = CollectionProperty(type=mb_git_commit)
class mb_scene(PropertyGroup):
    """Scene-level MolBlend state: element table, globals and helpers."""
    is_initialized = BoolProperty(default=False)
    elements = CollectionProperty(type=mb_element)
    # index that increases with each added molecule, but doesn't decrease when
    # molecule is deleted.
    molecule_count = IntProperty(name="Molecule counter")
    globals = PointerProperty(type=mb_scn_globals)
    info = PointerProperty(type=mb_scn_info)
    # store last active object for modal operator
    modal_last_active = PointerProperty(name="Last active",
                                        type=bpy.types.Object)

    #@class
    def id_generator(self, size=6,
                     chars=string.ascii_uppercase + string.digits):
        # Short random identifier used to name new molecules;
        # collisions are unlikely but not checked here.
        return ''.join(random.choice(chars) for _ in range(size))

    def new_molecule(self,
                     name_mol="Molecule",
                     draw_style=None,
                     radius_type=None,
                     bond_radius=None,
                     refine_atoms=None,
                     refine_bonds=None,
                     atom_scales=None):
        """Create a parent empty plus molecule data; return the molecule.

        Any style argument left as None falls back to the scene globals.
        """
        # create new empty that will be the parent for the molecule
        parent_ob = bpy.data.objects.new(name_mol, None)
        parent_ob.empty_draw_type = 'SPHERE'
        parent_ob.empty_draw_size = 0.3
        parent_ob.mb.type = 'PARENT'
        parent_ob.mb.parent = parent_ob
        # self.id_data is the scene this property group is attached to.
        self.id_data.objects.link(parent_ob)
        # now populate the molecule data
        mol = parent_ob.mb.molecule
        new_id = self.id_generator()
        mol.name = new_id
        mol.index = self.molecule_count
        self.molecule_count += 1
        mol.draw_style = draw_style or self.globals.draw_style
        mol.radius_type = radius_type or self.globals.radius_type
        mol.bond_radius = bond_radius or self.globals.bond_radius
        if refine_atoms:
            mol.refine_atoms = refine_atoms
        if refine_bonds:
            mol.refine_bonds = refine_bonds
        # Copy the (possibly global) per-style atom scale factors.
        for scale in (atom_scales or self.globals.atom_scales):
            new_scale = mol.atom_scales.add()
            new_scale.name = scale.name
            new_scale.val = scale.val
        mol.objects.parent = parent_ob
        return mol

    def remove_molecule(self, mol, only_if_empty=False):
        """only_if_empty: only remove if it has no atoms or bonds
        """
        if ((not mol.objects.atoms and not mol.objects.bonds)
                or not only_if_empty):
            parent = mol.objects.parent
            # Unlink and delete every molecule object first; the parent
            # empty goes last so children are not orphaned mid-loop.
            for ob in mol.objects.get_all_objects():
                if ob != parent:
                    self.id_data.objects.unlink(ob)
                    bpy.data.objects.remove(ob)
            if parent:
                if parent.name in self.id_data.objects:
                    self.id_data.objects.unlink(parent)
                bpy.data.objects.remove(parent)
def register():
    """Attach the MolBlend property groups to Blender's ID types."""
    bpy.types.Object.mb = PointerProperty(type=mb_object)
    bpy.types.Mesh.mb = PointerProperty(type=mb_mesh)
    bpy.types.Scene.mb = PointerProperty(type=mb_scene)
    bpy.types.Text.mb = PointerProperty(type=mb_text)
def unregister():
    """Remove every property group that register() added.

    Fix: bpy.types.Text.mb was registered in register() but never
    deleted here, leaking the property group across addon reloads; it
    is now removed symmetrically.
    """
    del bpy.types.Object.mb
    del bpy.types.Mesh.mb
    del bpy.types.Scene.mb
    del bpy.types.Text.mb
| floaltvater/molblend | mb_datastructure.py | Python | gpl-2.0 | 31,708 | [
"Quantum ESPRESSO"
] | bd950f8534b8572758751e6af0f28c04111dea35febc95ef8b640ee9f7bc97bf |
#!/usr/bin/env python
# encoding: utf-8
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from pyshorteners import Shortener, Shorteners
from pyshorteners.exceptions import ShorteningErrorException
import responses
import pytest
# Module-level fixture: an adf.ly shortener with dummy credentials plus
# the canonical shortened/expanded URL pair used by every test below.
s = Shortener(Shorteners.ADFLY, uid='TEST', key='TEST_KEY')
shorten = 'http://ad.fly/test'
expanded = 'http://www.test.com'
@responses.activate
def test_adfly_short_method():
    """A 200 reply shortens the URL and updates the shortener state."""
    # Register the exact GET request the shortener is expected to make.
    query = urlencode({
        'domain': 'adf.ly',
        'advert_type': 'int',  # int or banner
        'key': s.key,
        'uid': s.uid,
        'url': expanded,
    })
    responses.add(responses.GET, '{}?{}'.format(s.api_url, query),
                  body=shorten, match_querystring=True)

    result = s.short(expanded)
    assert result == shorten
    assert s.shorten == result
    assert s.expanded == expanded
@responses.activate
def test_adfly_short_method_bad_response():
    """A non-200 reply from the API raises ShorteningErrorException."""
    query = urlencode({
        'domain': 'adf.ly',
        'advert_type': 'int',  # int or banner
        'key': s.key,
        'uid': s.uid,
        'url': expanded,
    })
    responses.add(responses.GET, '{}?{}'.format(s.api_url, query),
                  body=shorten, status=400, match_querystring=True)

    with pytest.raises(ShorteningErrorException):
        s.short(expanded)
def test_adfly_bad_params():
    """Shortening without uid/key credentials raises TypeError."""
    unconfigured = Shortener(Shorteners.ADFLY)
    with pytest.raises(TypeError):
        unconfigured.short(expanded)
| Oire/pyshorteners | tests/test_adfly.py | Python | gpl-3.0 | 1,581 | [
"ADF"
] | 69423a1e707fd026ecee94a6ce3d5a2cefcbf14e99d715e22b4a0dd9fce25d87 |
#!/usr/bin/env python
import os
import sys
import numpy as np
import galsim
from .psfmaker import PSFMaker
from ..observation import Observation
class DESPSFMaker(PSFMaker):
    """
    make a DES-like PSF

    from esheldon's fork of the great3 code
    blessed by Mike and Aaron to roughly match DES

    M. R. Becker 2015
    E. Sheldon 2014
    """
    def __init__(self,seed=None,**kw):
        # All randomness (optics aberrations, atmospheric ellipticity)
        # flows from this seeded RNG, so a fixed seed reproduces the PSF.
        assert seed is not None,"Random seed must be given in DES psf maker!"
        self.rng = np.random.RandomState(seed)
        self.conf = {}
        self.conf.update(kw)

    def make_opt_psf(self,image_size=None,pixel_scale=None):
        """make DES optics PSF"""
        assert image_size is not None,"You must specify an image size!"
        assert pixel_scale is not None,"You must specify a pixel scale!"
        # Fixed telescope optics: lambda/D, fractional central
        # obscuration and the total RMS of the aberration budget.
        lam_over_diam = 0.036
        obscuration = 0.2
        rms_aberration = 0.26
        use_aber = ["defocus", "astig1", "astig2", "coma1", "coma2", "trefoil1", "trefoil2", "spher"]
        n_aber = len(use_aber)
        # Relative weight of each aberration term in the RMS budget.
        aber_weights = np.array((0.13, 0.13, 0.14, 0.06, 0.06, 0.05, 0.06, 0.03))
        aber_dict = {}
        tmp_vec = np.zeros(n_aber)
        for ind_ab in range(n_aber):
            # Gaussian draw scaled so the weights are normalized by the
            # vector norm of aber_weights.
            tmp_vec[ind_ab] = rms_aberration * aber_weights[ind_ab] *\
                self.rng.normal() / np.sqrt(np.sum(aber_weights**2))
            aber_dict[use_aber[ind_ab]] = tmp_vec[ind_ab]
        # Shrink the FFT padding when the postage stamp is smaller than
        # the padded PSF support.
        pad_factor = 1.5
        twoR = 2. * lam_over_diam / (
            0.005 * 0.5 * np.pi * np.pi * (1.-obscuration) )
        image_size_arcsec = image_size * pixel_scale
        if image_size_arcsec < twoR * pad_factor:
            pad_factor = image_size_arcsec / twoR
        psf = galsim.OpticalPSF(lam_over_diam,
                                obscuration=obscuration,
                                pad_factor=pad_factor*5.0,
                                suppress_warning=False,
                                max_size=image_size_arcsec,
                                **aber_dict)
        return psf

    def make_atmos_psf(self,atmos_psf_fwhm=None):
        """make DES atmos PSF"""
        assert atmos_psf_fwhm is not None,"You must specify atmos_psf_fwhm!"
        # Ellipticity modulus drawn uniformly in [0.01, 0.03] with a
        # uniformly random position angle.
        min_atmos_psf_e = np.sqrt(1.e-4)
        max_atmos_psf_e = np.sqrt(9.e-4)
        atmos_psf_e = self.rng.uniform()*(max_atmos_psf_e - min_atmos_psf_e) \
            + min_atmos_psf_e
        atmos_psf_beta = self.rng.uniform()*180.0
        # NOTE(review): the FWHM value is passed as the Gaussian *sigma*
        # here, not converted (fwhm = 2.355*sigma) -- confirm intended.
        atmos_psf = galsim.Gaussian(sigma=atmos_psf_fwhm)
        atmos_psf = atmos_psf.shear(e=atmos_psf_e,beta=atmos_psf_beta*galsim.degrees)
        return atmos_psf

    def make_psf(self,image_size=None,pixel_scale=None,atmos_psf_fwhm=None,full_output=False):
        """
        make DES PSF

        full_output: output all psf,opt_psf,atmos_psf
        """
        assert atmos_psf_fwhm is not None,"You must specify atmos_psf_fwhm!"
        assert image_size is not None,"You must specify an image size!"
        assert pixel_scale is not None,"You must specify a pixel scale!"
        opt_psf = self.make_opt_psf(image_size=image_size,pixel_scale=pixel_scale)
        atmos_psf = self.make_atmos_psf(atmos_psf_fwhm=atmos_psf_fwhm)
        # Total PSF is the convolution of atmosphere and optics.
        psf = galsim.Convolve(atmos_psf,opt_psf)
        if full_output:
            return psf,opt_psf,atmos_psf
        else:
            return psf

    def get_psf(self,pixel_scale=None,seeing=None,**kwargs):
        """
        produce a DES-like PSF

        pixel_scale: pixel scale in arcsec
        seeing: atmos seeing in arcsec
        psfmaker: dict of PSF-specific options
        psf: previous PSF from this function (optional)
            can be used to produce the same PSF model, but rendered with a shift in the center
        shift: shift in arcsec of PSF model center, input as [col,row]
        """
        # Fall back to the configuration given at construction time.
        if pixel_scale is None:
            key = 'pixel_scale'
            assert key in self.conf,"You must specify '%s' for the PSF!" % key
            pixel_scale = self.conf['pixel_scale']
        if seeing is None:
            key = 'seeing'
            assert key in self.conf,"You must specify '%s' for the PSF!" % key
            seeing = self.conf.get(key)
        assert 'psfmaker' in self.conf,"You must specify '%s' for the PSF!" % 'psfmaker'
        assert 'size' in self.conf['psfmaker'],"You must specify '%s' for the PSF!" % 'size'
        psf_size = self.conf['psfmaker']['size']
        # Reuse a previously built model if one was handed in, otherwise
        # draw a fresh random PSF.
        if 'psf' not in kwargs:
            psf = self.make_psf(image_size=psf_size, \
                                pixel_scale=pixel_scale, \
                                atmos_psf_fwhm=seeing, \
                                full_output=False)
        else:
            psf = kwargs['psf']['galsim_object']
        if 'shift' in kwargs:
            psf = psf.shift(kwargs['shift'][0],kwargs['shift'][1])
        psf_im = psf.drawImage(nx=psf_size,ny=psf_size,scale=pixel_scale)
        # Wrap the rendered stamp in an Observation, keeping both the
        # image and the galsim model for later re-rendering.
        p = Observation()
        p.image = psf_im.array.copy()
        p['galsim_image'] = psf_im
        p['galsim_object'] = psf
        return p
def test():
    """Visual smoke test: plot optics, atmosphere and combined PSF."""
    import matplotlib.pyplot as plt
    image_size=32
    pixel_scale=0.26
    atmos_psf_fwhm = 0.77
    # Log-abs stretch for display.
    sfunc=lambda x: np.log(np.abs(x))
    dpm = DESPSFMaker(seed=12345)
    psf,opt_psf,atmos_psf = dpm.make_psf(image_size=image_size,
                                         pixel_scale=pixel_scale,
                                         atmos_psf_fwhm=atmos_psf_fwhm,full_output=True)
    # The components are oversampled 10x relative to the observed stamp.
    # NOTE(review): mixes deprecated GSObject.draw with drawImage, and
    # passes float nx/ny (image_size*10.0) -- confirm against the galsim
    # version in use.
    psfims = [sfunc(opt_psf.draw(scale=pixel_scale/10.0,nx=image_size*10.0,ny=image_size*10.0).array),
              sfunc(atmos_psf.draw(scale=pixel_scale/10.0,nx=image_size*10.0,ny=image_size*10.0).array),
              sfunc(psf.drawImage(scale=pixel_scale,nx=image_size,ny=image_size).array)]
    # Shared color scale across the three panels.
    vmin = np.inf
    vmax = -np.inf
    for im in psfims:
        if np.min(im) < vmin:
            vmin = np.min(im)
        if np.max(im) > vmax:
            vmax = np.max(im)
    fig,axs = plt.subplots(1,3,figsize=(12.0,4.0))
    ax = axs[0]
    # NOTE(review): xyp is computed but never used in each panel.
    xyp = np.arange(image_size*10+1)*pixel_scale/10.0
    psfim = psfims[0]
    ax.imshow(psfim,extent=(0.0,image_size*pixel_scale,0.0,image_size*pixel_scale),vmin=vmin,vmax=vmax,aspect='equal')
    ax.set_title('optics PSF')
    ax.set_ylabel(r'$y\ [{\rm arcsec}]$')
    ax.set_xlabel(r'$x\ [{\rm arcsec}]$')
    ax = axs[1]
    xyp = np.arange(image_size*10+1)*pixel_scale/10.0
    psfim = psfims[1]
    ax.imshow(psfim,extent=(0.0,image_size*pixel_scale,0.0,image_size*pixel_scale),vmin=vmin,vmax=vmax,aspect='equal')
    ax.set_title('atmos PSF')
    ax.set_ylabel(r'$y\ [{\rm arcsec}]$')
    ax.set_xlabel(r'$x\ [{\rm arcsec}]$')
    ax = axs[2]
    xyp = np.arange(image_size+1)*pixel_scale
    psfim = psfims[2]
    ax.imshow(psfim,extent=(0.0,image_size*pixel_scale,0.0,image_size*pixel_scale),vmin=vmin,vmax=vmax,aspect='equal')
    ax.set_title('observed PSF w/ pixel window')
    ax.set_ylabel(r'$y\ [{\rm arcsec}]$')
    ax.set_xlabel(r'$x\ [{\rm arcsec}]$')
    #fig.tight_layout(rect=[0.0,0.03,1.0,0.97])
    plt.show()
| kstory8/egret | egret/psfmakers/despsfmaker.py | Python | bsd-3-clause | 7,218 | [
"Gaussian"
] | 7190a4d3cf0b47a4c93713bc7b4e56ff5c17b6772e09b6c8b22a951ed51bc311 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import pandas as pd
import numpy as np
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.chronos.data.utils.impute import impute_timeseries_dataframe, \
_last_impute_timeseries_dataframe, _const_impute_timeseries_dataframe, \
_linear_impute_timeseries_dataframe
def get_ugly_ts_df():
    """Build a 50x5 random DataFrame sprinkled with missing values.

    Roughly 40% of the cells are made missing, a datetime column is
    appended, and df['a'][0] is forced to NaN so every consumer can rely
    on column 'a' containing at least one hole.

    Fixes over the original:
    - The sequential mask assignments made the ``mask == 0`` bucket
      unreachable (``mask[mask < 0.4] = 1`` already overwrote every
      value below 0.2); np.digitize produces the intended three levels.
    - ``df['a'][0] = np.nan`` was chained assignment, which warns and is
      unreliable under pandas copy-on-write; use .loc instead.
    """
    data = np.random.random_sample((50, 5))
    mask = np.random.random_sample((50, 5))
    # Three buckets: 0 for mask < 0.2, 1 for [0.2, 0.4), 2 otherwise.
    levels = np.digitize(mask, [0.2, 0.4])
    data[levels == 0] = None    # stored as NaN in a float array
    data[levels == 1] = np.nan
    df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
    df.loc[0, 'a'] = np.nan  # make sure column 'a' has a N/A
    df["datetime"] = pd.date_range('1/1/2019', periods=50)
    return df
class TestImputeTimeSeries(ZooTestCase):
    """Unit tests for the timeseries imputation helpers."""

    def setup_method(self, method):
        # Fresh randomly-holed frame for every test.
        self.df = get_ugly_ts_df()

    def teardown_method(self, method):
        pass

    def test_impute_timeseries_dataframe(self):
        # Invalid datetime column / mode combinations must be rejected.
        for bad_kwargs in ({"dt_col": "z"},
                           {"dt_col": "datetime", "mode": "dummy"},
                           {"dt_col": "a"}):
            with pytest.raises(AssertionError):
                impute_timeseries_dataframe(self.df, **bad_kwargs)
        # Every supported mode fills all holes without mutating the
        # input frame.
        for mode in ("last", "const", "linear"):
            filled = impute_timeseries_dataframe(
                self.df, dt_col="datetime", mode=mode)
            assert self.df.isna().sum().sum() != 0
            assert filled.isna().sum().sum() == 0

    def test_last_impute_timeseries_dataframe(self):
        frame = pd.DataFrame({'data': [np.nan, np.nan, 1, np.nan, 2, 3]})
        imputed = _last_impute_timeseries_dataframe(frame)
        # Leading NaNs become 0; interior NaNs take the previous value.
        assert imputed['data'][0] == 0
        assert imputed['data'][1] == 0
        assert imputed['data'][3] == 1

    def test_const_impute_timeseries_dataframe(self):
        frame = pd.DataFrame({'data': [np.nan, 1, np.nan, 2, 3]})
        imputed = _const_impute_timeseries_dataframe(frame, 1)
        assert imputed['data'][0] == 1
        assert imputed['data'][2] == 1

    def test_linear_timeseries_dataframe(self):
        frame = pd.DataFrame({'data': [np.nan, 1, np.nan, 2, 3]})
        imputed = _linear_impute_timeseries_dataframe(frame)
        assert imputed['data'][0] == 1
        assert imputed['data'][2] == 1.5
"ORCA"
] | dfb2adb39d4faafe2c0df82f0794ac22228cf41bed3640257050fe077da4a09a |
# Orca
#
# Copyright 2008-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Utilities for obtaining tutorial utterances for objects. In general,
there probably should be a singleton instance of the TutorialGenerator
class. For those wishing to override the generators, however,
one can create a new instance and replace/extend the tutorial generators
as they see fit."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2008-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
from . import debug
from . import orca_state
from . import settings
from .orca_i18n import _ # for gettext support
class TutorialGenerator:
"""Takes accessible objects and produces a tutorial string to speak
for those objects. See the getTutorialString method, which is the
primary entry point. Subclasses can feel free to override/extend
the getTutorialGenerators instance field as they see fit."""
def __init__(self, script):
    """Create a tutorial generator bound to *script*.

    Arguments:
    - script: the script that created us; lets us query it for
      information while generating tutorial strings.
    """
    self._script = script
    # Most recently spoken tutorial message/role, used to avoid
    # repeating the same hint back-to-back.
    self.lastTutorial = ""
    self.lastRole = None
    # Role -> generator function.  Roles without an entry fall back
    # to the default generator.
    self.tutorialGenerators = {
        pyatspi.ROLE_CHECK_BOX: self._getTutorialForCheckBox,
        pyatspi.ROLE_COMBO_BOX: self._getTutorialForComboBox,
        pyatspi.ROLE_FRAME: self._getTutorialForFrame,
        pyatspi.ROLE_ICON: self._getTutorialForIcon,
        pyatspi.ROLE_LAYERED_PANE: self._getTutorialForLayeredPane,
        pyatspi.ROLE_LIST: self._getTutorialForList,
        pyatspi.ROLE_LIST_ITEM: self._getTutorialForListItem,
        pyatspi.ROLE_PAGE_TAB: self._getTutorialForPageTab,
        pyatspi.ROLE_PARAGRAPH: self._getTutorialForText,
        pyatspi.ROLE_PASSWORD_TEXT: self._getTutorialForText,
        pyatspi.ROLE_ENTRY: self._getTutorialForText,
        pyatspi.ROLE_PUSH_BUTTON: self._getTutorialForPushButton,
        pyatspi.ROLE_SPIN_BUTTON: self._getTutorialForSpinButton,
        pyatspi.ROLE_TABLE_CELL: self._getTutorialForTableCellRow,
        pyatspi.ROLE_TEXT: self._getTutorialForText,
        pyatspi.ROLE_TOGGLE_BUTTON: self._getTutorialForCheckBox,
        pyatspi.ROLE_RADIO_BUTTON: self._getTutorialForRadioButton,
        pyatspi.ROLE_MENU: self._getTutorialForMenu,
        pyatspi.ROLE_CHECK_MENU_ITEM: self._getTutorialForCheckBox,
        pyatspi.ROLE_MENU_ITEM: self._getTutorialForMenuItem,
        pyatspi.ROLE_RADIO_MENU_ITEM: self._getTutorialForCheckBox,
        pyatspi.ROLE_SLIDER: self._getTutorialForSlider,
    }
def _debugGenerator(self, generatorName, obj, alreadyFocused, utterances):
    """Emit LEVEL_FINER debug output describing one generator call.

    Arguments:
    - generatorName: the name of the generator
    - obj: the object being presented
    - alreadyFocused: False if object just received focus
    - utterances: the generated text
    """
    header = ["GENERATOR: %s" % generatorName,
              " obj = %s" % obj.name,
              " role = %s" % obj.getRoleName(),
              " alreadyFocused = %s" % alreadyFocused,
              " utterances:"]
    for line in header:
        debug.println(debug.LEVEL_FINER, line)
    for text in utterances:
        debug.println(debug.LEVEL_FINER,
                      " (%s)" % text)
def _getDefaultTutorial(
self, obj, alreadyFocused, forceTutorial, role=None):
"""The default tutorial generator returns the empty tutorial string
because We have no associated tutorial function for the object.
Arguments:
- obj: an Accessible
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
- role: A role that should be used instead of the Accessible's
possible role.
Returns the empty list []
"""
return []
def _getTutorialForCheckBox(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a check box.

    Arguments:
    - obj: the check box
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.
    """
    # Translators: this is a tip for the user on how to toggle a checkbox.
    msg = _("Press space to toggle.")
    utterances = []
    if forceTutorial or (not alreadyFocused
                         and self.lastTutorial != [msg]):
        utterances.append(msg)
    self._debugGenerator("_getTutorialForCheckBox",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForComboBox(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a combobox.

    Arguments:
    - obj: the combo box
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.
    """
    # Translators: this is a tip for the user on how to interact
    # with a combobox.
    msg = _("Press space to expand, and use up and down to select an item.")
    utterances = []
    if forceTutorial or (not alreadyFocused
                         and self.lastTutorial != [msg]):
        utterances.append(msg)
    self._debugGenerator("_getTutorialForComboBox",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForFrame(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a frame.

    Arguments:
    - obj: the frame
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.

    Fixes: dropped the unused displayed-name computation and narrowed
    the bare ``except:`` so KeyboardInterrupt/SystemExit propagate.
    """
    utterances = []
    # Translators: If this application has more than one unfocused alert or
    # dialog window, inform user of how to refocus these.
    childWindowsMsg = _("Press alt+f6 to give focus to child windows.")
    # If this application has more than one unfocused alert or
    # dialog window, tell user how to give them focus.
    try:
        alertAndDialogCount = \
            self._script.utilities.unfocusedAlertAndDialogCount(obj)
    except Exception:
        alertAndDialogCount = 0
    if alertAndDialogCount > 0:
        utterances.append(childWindowsMsg)
    self._debugGenerator("_getTutorialForFrame",
                         obj,
                         alreadyFocused,
                         utterances)
    return utterances
def _getTutorialForIcon(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for an icon.

    Arguments:
    - obj: the icon
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.
    """
    # Icons inside a layered pane (e.g. the desktop) take their
    # tutorial from the pane itself.
    if obj.parent.getRole() == pyatspi.ROLE_LAYERED_PANE:
        utterances = self._getTutorialForLayeredPane(
            obj.parent, alreadyFocused, forceTutorial)
    else:
        utterances = self._getDefaultTutorial(
            obj, alreadyFocused, forceTutorial)
    self._debugGenerator("_getTutorialForIcon",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForLayeredPane(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a layered pane.

    Arguments:
    - obj: the layered pane
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.

    Fixes: dropped the unused displayed-name computation and replaced
    the ``if ...: pass / else: utterances = []`` inversion with a
    direct negated condition.
    """
    utterances = []
    # Translators: this gives tips on how to navigate items in a
    # layered pane.
    msg = _("To move to items, use either " \
            "the arrow keys or type ahead searching.")
    utterances.append(msg)
    # Translators: this is the tutorial string for when first landing
    # on the desktop, describing how to access the system menus.
    desktopMsg = _("To get to the system menus press the alt+f1 key.")
    scriptName = self._script.name
    try:
        sibling = obj.parent.getChildAtIndex(0)
    except AttributeError:
        sibling = None
    # Only the desktop pane (first child of its parent in nautilus)
    # gets the system-menu hint.
    if 'nautilus' in scriptName and obj == sibling:
        utterances.append(desktopMsg)
    # Say nothing when the object is already focused with the same
    # tutorial just spoken and nothing forces a repeat.
    if not (forceTutorial
            or (not alreadyFocused and self.lastTutorial != utterances)):
        utterances = []
    self._debugGenerator("_getTutorialForLayeredPane",
                         obj,
                         alreadyFocused,
                         utterances)
    return utterances
def _getTutorialForList(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a list.

    Arguments:
    - obj: the list
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.
    """
    # Translators: this is the tutorial string when navigating lists.
    msg = _("Use up and down to select an item.")
    utterances = []
    if forceTutorial or (not alreadyFocused
                         and self.lastTutorial != [msg]):
        utterances.append(msg)
    self._debugGenerator("_getTutorialForList",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForListItem(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a listItem.

    Arguments:
    - obj: the listitem
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.
    """
    utterances = []
    # Translators: this represents the state of a node in a tree.
    # 'expanded' means the children are showing.
    # 'collapsed' means the children are not showing.
    # this string informs the user how to collapse the node.
    expandedMsg = _("To collapse, press shift plus left.")
    # Translators: this represents the state of a node in a tree.
    # 'expanded' means the children are showing.
    # 'collapsed' means the children are not showing.
    # this string informs the user how to expand the node.
    collapsedMsg = _("To expand, press shift plus right.")
    # If already in focus then the tree probably collapsed or expanded
    state = obj.getState()
    if state.contains(pyatspi.STATE_EXPANDABLE):
        msg = (expandedMsg if state.contains(pyatspi.STATE_EXPANDED)
               else collapsedMsg)
        if forceTutorial or self.lastTutorial != [msg]:
            utterances.append(msg)
    self._debugGenerator("_getTutorialForListItem",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForMenuItem(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a menu item

    Arguments:
    - obj: the menu item
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of utterances to be spoken for the object.
    """
    # Translators: this is the tutorial string for activating a menu item
    msg = _("To activate press return.")
    utterances = []
    if forceTutorial or (not alreadyFocused
                         and self.lastTutorial != [msg]):
        utterances.append(msg)
    self._debugGenerator("_getTutorialForMenuItem",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForText(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a text object.

    Arguments:
    - obj: the text component
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.
    """
    # Nothing to teach about non-editable text.
    if not obj.getState().contains(pyatspi.STATE_EDITABLE):
        return []
    # Translators: This is the tutorial string for when landing
    # on text fields.
    msg = _("Type in text.")
    utterances = []
    if (forceTutorial or not alreadyFocused) \
            and not self._script.utilities.isReadOnlyTextArea(obj):
        utterances.append(msg)
    self._debugGenerator("_getTutorialForText",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForPageTab(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a page tab.

    Arguments:
    - obj: the page tab
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of tutorial utterances to be spoken for the object.
    """
    # Translators: this is the tutorial string for landing
    # on a page tab, we are informing the
    # user how to navigate these.
    msg = _("Use left and right to view other tabs.")
    utterances = []
    # Unlike most generators, this one repeats even for an already
    # focused tab, as long as it was not the last thing spoken.
    if forceTutorial or self.lastTutorial != [msg]:
        utterances.append(msg)
    self._debugGenerator("_getTutorialForPageTabList",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForPushButton(self, obj, alreadyFocused, forceTutorial):
    """Get the tutorial string for a push button

    Arguments:
    - obj: the push button
    - alreadyFocused: False if object just received focus
    - forceTutorial: used for when whereAmI really needs the tutorial string

    Returns a list of utterances to be spoken for the object.
    """
    # Translators: this is the tutorial string for activating a push button.
    msg = _("To activate press space.")
    utterances = []
    if forceTutorial or (not alreadyFocused
                         and self.lastTutorial != [msg]):
        utterances.append(msg)
    self._debugGenerator("_getTutorialForPushButton",
                         obj, alreadyFocused, utterances)
    return utterances
def _getTutorialForSpinButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a spin button. If the object already has
focus, then no tutorial is given.
Arguments:
- obj: the spin button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for when landing
# on a spin button.
msg = _("Use up or down arrow to select value." \
" Or type in the desired numerical value.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForSpinButton",
obj,
alreadyFocused,
utterances)
return utterances
    def _getTutorialForTableCell(self, obj, alreadyFocused, forceTutorial):
        """Get the tutorial utterances for a single table cell
        Arguments:
        - obj: the table
        - alreadyFocused: False if object just received focus
        - forceTutorial: used for when whereAmI really needs the tutorial string
        Returns a list of utterances to be spoken for the object.
        """
        utterances = []
        # Translators: this represents the state of a node in a tree.
        # 'expanded' means the children are showing.
        # 'collapsed' means the children are not showing.
        # this string informs the user how to collapse the node.
        expandedMsg = _("To collapse, press shift plus left.")
        # Translators: this represents the state of a node in a tree.
        # 'expanded' means the children are showing.
        # 'collapsed' means the children are not showing.
        # this string informs the user how to expand the node.
        collapsedMsg = _("To expand, press shift plus right.")
        # If this table cell has 2 children and one of them has a
        # 'toggle' action and the other does not, then present this
        # as a checkbox where:
        # 1) we get the checked state from the cell with the 'toggle' action
        # 2) we get the label from the other cell.
        # See Orca bug #376015 for more details.
        #
        if obj.childCount == 2:
            cellOrder = []
            hasToggle = [ False, False ]
            for i, child in enumerate(obj):
                if self._script.utilities.hasMeaningfulToggleAction(child):
                    hasToggle[i] = True
                    # Stop at the first toggleable child; the checkbox
                    # presentation below only applies when exactly one of
                    # the two children toggles.
                    break
            if hasToggle[0] and not hasToggle[1]:
                cellOrder = [ 1, 0 ]
            elif not hasToggle[0] and hasToggle[1]:
                cellOrder = [ 0, 1 ]
            if cellOrder:
                for i in cellOrder:
                    # Don't speak the label if just the checkbox state has
                    # changed.
                    #
                    if alreadyFocused and not hasToggle[i]:
                        pass
                    else:
                        # Recurse on the child cell; it will be handled by
                        # the single-cell logic below (childCount != 2).
                        utterances.extend( \
                            self._getTutorialForTableCell(obj[i],
                                              alreadyFocused, forceTutorial))
                return utterances
        # [[[TODO: WDW - Attempt to infer the cell type. There's a
        # bunch of stuff we can do here, such as check the EXPANDABLE
        # state, check the NODE_CHILD_OF relation, etc. Logged as
        # bugzilla bug 319750.]]]
        #
        if self._script.utilities.hasMeaningfulToggleAction(obj):
            utterances = self._getTutorialForCheckBox(
                obj, alreadyFocused, forceTutorial)
        # For tree nodes, additionally explain how to expand or collapse,
        # avoiding immediate repetition unless forced.
        state = obj.getState()
        if state.contains(pyatspi.STATE_EXPANDABLE):
            if state.contains(pyatspi.STATE_EXPANDED):
                if self.lastTutorial != [expandedMsg] or forceTutorial:
                    utterances.append(expandedMsg)
            else:
                if self.lastTutorial != [collapsedMsg] or forceTutorial:
                    utterances.append(collapsedMsg)
        self._debugGenerator("_getTutorialForTableCell",
                             obj,
                             alreadyFocused,
                             utterances)
        return utterances
    def _getTutorialForTableCellRow(self, obj, alreadyFocused, forceTutorial):
        """Get the tutorial string for the active table cell in the table row.
        Arguments:
        - obj: the table
        - alreadyFocused: False if object just received focus
        - forceTutorial: used for when whereAmI really needs the tutorial string
        Returns a list of utterances to be spoken for the object.
        """
        utterances = []
        if (not alreadyFocused):
            # NOTE(review): a bare except also swallows programming errors;
            # `except NotImplementedError` is likely what is meant here.
            try:
                parent_table = obj.parent.queryTable()
            except:
                parent_table = None
            readFullRow = self._script.utilities.shouldReadFullRow(obj)
            if readFullRow and parent_table \
               and not self._script.utilities.isLayoutOnly(obj.parent):
                parent = obj.parent
                index = self._script.utilities.cellIndex(obj)
                row = parent_table.getRowAtIndex(index)
                column = parent_table.getColumnAtIndex(index)
                # This is an indication of whether we should speak all the
                # table cells (the user has moved focus up or down a row),
                # or just the current one (focus has moved left or right in
                # the same row).
                #
                speakAll = True
                if "lastRow" in self._script.pointOfReference and \
                   "lastColumn" in self._script.pointOfReference:
                    pointOfReference = self._script.pointOfReference
                    speakAll = (pointOfReference["lastRow"] != row) or \
                               ((row == 0 or row == parent_table.nRows-1) and \
                                pointOfReference["lastColumn"] == column)
                # NOTE(review): `speakAll` (and `parent`) are computed but
                # never used below -- every branch delegates to
                # _getTutorialForTableCell with the same arguments.  Looks
                # like dead code from an earlier refactor; confirm before
                # removing.
                utterances.extend(self._getTutorialForTableCell(obj,
                                                 alreadyFocused, forceTutorial))
            else:
                utterances = self._getTutorialForTableCell(obj,
                                                 alreadyFocused, forceTutorial)
        else:
            utterances = self._getTutorialForTableCell(obj, alreadyFocused, \
                                                       forceTutorial)
        self._debugGenerator("_getTutorialForTableCellRow",
                             obj,
                             alreadyFocused,
                             utterances)
        return utterances
def _getTutorialForRadioButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a radio button.
Arguments:
- obj: the radio button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user, how to navigate radiobuttons.
msg = _("Use arrow keys to change.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForRadioButton",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForMenu(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a menu.
Arguments:
- obj: the menu
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user, how to navigate menus.
mainMenuMsg = _("To navigate, press left or right arrow. " \
"To move through items press up or down arrow.")
# Translators: this is a tip for the user, how to
# navigate into sub menus.
subMenuMsg = _("To enter sub menu, press right arrow.")
# Checking if we are a submenu,
# we can't rely on our parent being just a menu.
if obj.parent.name != "" and obj.parent.__class__ == obj.__class__:
if (self.lastTutorial != [subMenuMsg]) or forceTutorial:
utterances.append(subMenuMsg)
else:
if (self.lastTutorial != [mainMenuMsg]) or forceTutorial:
utterances.append(mainMenuMsg)
self._debugGenerator("_getTutorialForMenu",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForSlider(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a slider. If the object already has
focus, then no tutorial is given.
Arguments:
- obj: the slider
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for when landing
# on a slider.
msg = _("To decrease press left arrow, to increase press right arrow." \
" To go to minimum press home, and for maximum press end.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForSlider",
obj,
alreadyFocused,
utterances)
return utterances
def _getBindingsForHandler(self, handlerName):
handler = self._script.inputEventHandlers.get(handlerName)
if not handler:
return None
bindings = self._script.keyBindings.getBindingsForHandler(handler)
if not bindings:
return None
binding = bindings[0]
return binding.asString()
def _getModeTutorial(self, obj, alreadyFocused, forceTutorial):
return []
    def getTutorial(self, obj, alreadyFocused, forceTutorial=False, role=None):
        """Get the tutorial for an Accessible object.  This will look
        first to the specific tutorial generators and if this
        does not exist then return the empty tutorial.
        This method is the primary method
        that external callers of this class should use.
        Arguments:
        - obj: the object
        - alreadyFocused: False if object just received focus
        - forceTutorial: used for when whereAmI really needs the tutorial string
        - role: Alternative role to use
        Returns a list of utterances to be spoken.
        """
        # Respect the user's preference to disable tutorial messages.
        if not settings.enableTutorialMessages:
            return []
        # Only produce tutorials for the object that currently has focus.
        if not (obj and obj == orca_state.locusOfFocus):
            return []
        utterances = []
        role = role or obj.getRole()
        # A mode-specific tutorial (see _getModeTutorial) takes precedence
        # over the role-based generators.
        msg = self._getModeTutorial(obj, alreadyFocused, forceTutorial)
        if not msg:
            if role in self.tutorialGenerators:
                generator = self.tutorialGenerators[role]
            else:
                generator = self._getDefaultTutorial
            msg = generator(obj, alreadyFocused, forceTutorial)
        # Suppress an exact repeat of the last tutorial for the same role,
        # unless the caller explicitly forced it.
        if msg == self.lastTutorial and role == self.lastRole \
           and not forceTutorial:
            msg = []
        if msg:
            utterances = [" ".join(msg)]
            # Remember what was produced so it is not repeated next time.
            self.lastTutorial = msg
            self.lastRole = role
        if forceTutorial:
            # A forced tutorial must not suppress the next natural one.
            self.lastTutorial = ""
            self.lastRole = None
        self._debugGenerator("getTutorial",
                             obj,
                             alreadyFocused,
                             utterances)
        return utterances
| chrys87/orca-beep | src/orca/tutorialgenerator.py | Python | lgpl-2.1 | 30,663 | [
"ORCA"
] | a94e338b4aff32a28abbd854535b8aa63362a7dbb0a641126df7a843dacb2a8b |
#!/usr/bin/python2.5
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates a Google Calendar with releases from a Pivotal Tracker project.
Dependencies:
gdata-python-client:
Download: http://code.google.com/p/gdata-python-client/downloads/list
Ubuntu package: python-gdata
beautifulsoup:
Download: http://www.crummy.com/software/BeautifulSoup/
Ubuntu package: python-beautifulsoup
To use:
- Create a ~/.tracker2gcal-auth.ini file containing:
[tracker]
username: username
password: password
[calendar]
username: username@google.com
password: password
- Get the calendar ID from the "Settings" pane of the calendar in Google
Calendar. Example:
google.com_t60bvmdcq9e2ai7el5lk00ns9s@group.calendar.google.com
- Get the Tracker Project ID from the URL of the Tracker UI.
- Run tracker2gcal:
tracker2gcal.py -t 1728 -c YOUR_CALENDAR_ID
"""
__author__ = 'dcoker@google.com (Doug Coker)'
import ConfigParser
import logging
import optparse
import os
import re
import sys
import time
import urllib
import atom
import atom.service
from BeautifulSoup import BeautifulStoneSoup
import gdata.calendar
import gdata.calendar.service
import gdata.service
import pytracker
import pytrackergoogle
# Log INFO and above to stderr for everything in this script.
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
# Matches a leading YYYY-MM-DD date prefix; used to sanity-check date strings.
_DATESTRING_RE = re.compile(r'^\d{4}-\d{2}-\d{2}')
# Default Tracker API endpoint, taken from the pytracker library.
_DEFAULT_TRACKER_BASE_API_URL = pytracker.DEFAULT_BASE_API_URL
def YMDToSeconds(ds):
  """Converts a 'YYYY-MM-DD' string to seconds since the epoch (local time)."""
  assert _DATESTRING_RE.match(ds)
  parsed = time.strptime(ds, '%Y-%m-%d')
  return time.mktime(parsed)
def YMDPlusOneDay(ds):
  """Returns the 'YYYY-MM-DD' date string one calendar day after ds.

  Bug fix: the previous implementation added 86400 seconds to local
  midnight, which yields the *same* day on 25-hour DST fall-back days
  (midnight + 86400s == 23:00 of the same date).  Calendar arithmetic
  with datetime.timedelta is immune to timezone transitions.
  """
  import datetime  # local import; keeps this fix self-contained
  assert _DATESTRING_RE.match(ds)
  day = datetime.datetime.strptime(ds, '%Y-%m-%d').date()
  return (day + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
class Calendar(object):
  """Wrapper for the Google Calendar GData API."""
  def __init__(self, calendar_id, auth):
    # calendar_id: e.g. "...@group.calendar.google.com" (see module docstring).
    # auth: (username, password) tuple, as returned by GetCredentials().
    self.calendar_id = calendar_id
    self.cal_client = gdata.calendar.service.CalendarService()
    self.cal_client.email = auth[0]
    self.cal_client.password = auth[1]
    self.cal_client.source = 'tracker2gcal'
    # ClientLogin; raises on bad credentials.
    self.cal_client.ProgrammaticLogin()
    self.feed = self._GetEventFeed()
  def _GetEventFeedUri(self):
    # The calendar id can contain '@' and '#', so it must be URL-escaped.
    return ('/calendar/feeds/%s/private/full' %
            urllib.quote_plus(self.calendar_id))
  def _GetBatchEventFeedUri(self):
    # Batch operations post to the event feed URI plus a '/batch' suffix.
    return self._GetEventFeedUri() + '/batch'
  def _GetEventFeed(self):
    return self.cal_client.GetCalendarEventFeed(uri=self._GetEventFeedUri())
  def Visit(self, filt, callback):
    """Visits all events in the calendar that satisfy filt with callback."""
    # Re-fetch the feed so we see the calendar's current state.
    events = self._GetEventFeed()
    for event in events.entry:
      if filt(event):
        callback(event)
  def DeleteEventVisitor(self, event):
    """A Visitor that deletes the event."""
    logging.info('deleting %s', event.title.text)
    self.cal_client.DeleteEvent(event.GetEditLink().href)
  def CreateForBatch(self, title, when, content=''):
    """Creates an Event for batch operation.
    Args:
      title: title of event
      when: date only, in %Y-%m-%d format (see YMDPlusOneDay/_DATESTRING_RE)
      content: content event body
    Returns:
      The populated CalendarEventEntry.
    """
    event = gdata.calendar.CalendarEventEntry()
    event.title = atom.Title(text=title)
    event.content = atom.Content(text=content)
    # All-day events span [when, when + 1 day).
    stop = YMDPlusOneDay(when)
    event.when.append(gdata.calendar.When(start_time=when, end_time=stop))
    event.batch_id = gdata.BatchId(text='insert-request')
    return event
  def GetEventFeedForBatch(self):
    """Returns an Event feed intended for batch operations."""
    return gdata.calendar.CalendarEventFeed()
  def RunBatch(self, event_feed):
    """Executes the adds in event_feed."""
    response_feed = self.cal_client.ExecuteBatch(
        event_feed,
        url=self._GetBatchEventFeedUri())
    # Log per-entry status; failures are reported but not raised.
    for entry in response_feed.entry:
      logging.info('id %s / status %s / reason %s',
                   entry.batch_id.text,
                   entry.batch_status.code,
                   entry.batch_status.reason)
def GetCredentials(parser, scope):
  """Reads (username, password) from one section of the auth config.

  Args:
    parser: a ConfigParser-like object supporting get(section, option).
    scope: the section name, e.g. 'calendar' or 'tracker'.

  Returns:
    A (username, password) tuple; both values must be non-empty.
  """
  username = parser.get(scope, 'username')
  password = parser.get(scope, 'password')
  assert username
  assert password
  return (username, password)
def main(opts):
  """Syncs Tracker release stories into the Google Calendar.

  Deletes the release events created by previous runs, then re-creates one
  all-day event per release story using a single batch insert.
  """
  parser = ConfigParser.RawConfigParser()
  parser.read(opts.credentials)
  cal_auth = GetCredentials(parser, 'calendar')
  tracker_auth = GetCredentials(parser, 'tracker')
  c = Calendar(opts.calendar_id, cal_auth)
  # Google-internal Tracker instances use a different auth scheme than the
  # public hosted service.
  if opts.tracker_base_api_url.find('.google.com') != -1:
    tracker_auth = pytrackergoogle.TrackerAtGoogleAuth(*tracker_auth)
  else:
    tracker_auth = pytracker.HostedTrackerAuth(*tracker_auth)
  t = pytracker.Tracker(opts.tracker_id, tracker_auth,
                        base_api_url=opts.tracker_base_api_url)
  # For now, we care only about type:release stories.
  # We could extend this to also include stories with
  # specific tags.
  def FilterForReleases(event):
    # Matches both the '[release, floating]' and '[release, hard]' suffixes
    # appended to event titles below.
    return event.title.text.find('[release') != -1
  # Remove events from previous runs so they can be re-created fresh.
  c.Visit(FilterForReleases, c.DeleteEventVisitor)
  xml = t.GetReleaseStoriesXml()
  soup = BeautifulStoneSoup(xml)
  batch = c.GetEventFeedForBatch()
  releases = soup.stories.findAll('story')
  logging.info('found %d releases', len(releases))
  for e in releases:
    url = e.url.contents[0]
    # can't use .name -- soup would return the tag name.
    title = e.find('name').contents[0]
    # The release date is computed by Tracker and is an estimate of story
    # completion.
    release_date = pytracker.TrackerDatetimeToYMD(
        e.iteration.finish.contents[0])
    suffix = '[release, floating]'
    calendar_date = release_date
    body = (url + '\n\n\n\n'
            '[This event was automatically created based on data from Tracker]')
    # Hard deadlines are special.
    if e.find('deadline'):
      scheduled_date = pytracker.TrackerDatetimeToYMD(e.deadline.contents[0])
      suffix = '[release, hard]'
      # Prefix the event with "SLIPPING" if release > deadline
      scheduled_secs = YMDToSeconds(scheduled_date)
      release_secs = YMDToSeconds(release_date)
      delta = release_secs - scheduled_secs
      if release_secs > scheduled_secs:
        title = 'SLIPPING %.1f days: %s' % (delta / 86400, title)
      # Hard-deadline events land on the deadline, not on the estimate.
      calendar_date = scheduled_date
    title = title + ' ' + suffix
    batch.AddInsert(entry=c.CreateForBatch(title, calendar_date, body))
    logging.info('%s: %s / %s', url, title, calendar_date)
  c.RunBatch(batch)
def ParseOpts():
  """Parses the command line arguments and returns the options object."""
  parser = optparse.OptionParser()
  default_credentials_file = os.path.join(os.environ['HOME'],
                                          '.tracker2gcal-auth.ini')
  parser.add_option('-u', '--credentials-file', dest='credentials',
                    help='file containing authentication details',
                    default=default_credentials_file)
  parser.add_option('-c', '--calendar-id', dest='calendar_id',
                    help='target calendar id', metavar='ID')
  parser.add_option('-t', '--tracker-id', dest='tracker_id',
                    help='tracker id', metavar='ID', type='int')
  parser.add_option('-b', '--tracker-base-api-url',
                    dest='tracker_base_api_url',
                    help='the base URL of the Tracker API (including trailing '
                         'slash).',
                    default=_DEFAULT_TRACKER_BASE_API_URL)
  options, _unused_args = parser.parse_args()
  # Validate; report every problem before bailing out with the usage text.
  have_errors = False
  if options.tracker_id is None:
    logging.error('Missing -t/--tracker-id option')
    have_errors = True
  # NOTE: the dots in these patterns are unescaped, so they match any
  # character -- harmless here, but stricter patterns would escape them.
  if not re.match(r'^https?://[^/]+.*/$', options.tracker_base_api_url):
    logging.error('-b/--tracker-base-api-url does not look like a valid URL.')
    have_errors = True
  if options.calendar_id is None:
    logging.error('Missing -c/--calendar-id option')
    have_errors = True
  elif not re.search(r'@group.calendar.google.com$', options.calendar_id):
    logging.error('%s does not look like a valid calendar ID.',
                  options.calendar_id)
    have_errors = True
  if have_errors:
    parser.print_help()
    sys.exit(1)
  return options
# Script entry point: parse flags, then sync Tracker releases to the calendar.
if __name__ == '__main__':
  main(ParseOpts())
| marcosdiez/pytracker | tracker2gcal.py | Python | apache-2.0 | 8,815 | [
"VisIt"
] | 6b8c181a0fd394c0a43aea0fd87ce0518b333383c3c87d74658e0e628a98a8f3 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in image_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.platform import test
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
@test_util.for_all_test_methods(test_util.disable_xla,
                                'align_corners=False not supported by XLA')
class ResizeNearestNeighborOpTest(test.TestCase):
  """Shape and gradient tests for image_ops.resize_nearest_neighbor."""
  # Gradient checks are run for these float dtypes.
  TYPES = [np.float32, np.float64]
  def testShapeIsCorrectAfterOp(self):
    """Resizing [1,2,2,1] to 4x6 yields [1,4,6,1], statically and at run."""
    in_shape = [1, 2, 2, 1]
    out_shape = [1, 4, 6, 1]
    for nptype in self.TYPES:
      x = np.arange(0, 4).reshape(in_shape).astype(nptype)
      with self.cached_session(use_gpu=True):
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                       out_shape[1:3])
        # Static shape inference should already know the output shape.
        self.assertEqual(out_shape, list(resize_out.get_shape()))
        resize_out = self.evaluate(resize_out)
        self.assertEqual(out_shape, list(resize_out.shape))
  @test_util.run_deprecated_v1
  def testGradFromResizeToLargerInBothDims(self):
    """Analytic gradient matches the numeric one when upsampling 2x3->4x6."""
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    for nptype in self.TYPES:
      x = np.arange(0, 6).reshape(in_shape).astype(nptype)
      with self.cached_session(use_gpu=True):
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                       out_shape[1:3])
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testGradFromResizeToSmallerInBothDims(self):
    """Analytic gradient matches the numeric one when downsampling 4x6->2x3."""
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    for nptype in self.TYPES:
      x = np.arange(0, 24).reshape(in_shape).astype(nptype)
      with self.cached_session(use_gpu=True):
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                       out_shape[1:3])
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testCompareGpuVsCpu(self):
    """CPU and GPU kernels must produce matching gradients."""
    in_shape = [1, 4, 6, 3]
    out_shape = [1, 8, 16, 3]
    for nptype in self.TYPES:
      x = np.arange(0, np.prod(in_shape)).reshape(in_shape).astype(nptype)
      for align_corners in [True, False]:
        # Compute the full jacobian on CPU, then on GPU, and compare.
        with self.cached_session(use_gpu=False):
          input_tensor = constant_op.constant(x, shape=in_shape)
          resize_out = image_ops.resize_nearest_neighbor(
              input_tensor, out_shape[1:3], align_corners=align_corners)
          grad_cpu = gradient_checker.compute_gradient(
              input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        with self.cached_session(use_gpu=True):
          input_tensor = constant_op.constant(x, shape=in_shape)
          resize_out = image_ops.resize_nearest_neighbor(
              input_tensor, out_shape[1:3], align_corners=align_corners)
          grad_gpu = gradient_checker.compute_gradient(
              input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        self.assertAllClose(grad_cpu, grad_gpu, rtol=1e-5, atol=1e-5)
class ResizeBilinearOpTest(test.TestCase):
  """Shape and gradient tests for image_ops.resize_bilinear."""

  def testShapeIsCorrectAfterOp(self):
    """The resized tensor has the requested static and dynamic shape."""
    src_shape = [1, 2, 2, 1]
    dst_shape = [1, 4, 6, 1]
    src = np.arange(0, 4).reshape(src_shape).astype(np.float32)
    with self.cached_session():
      src_tensor = constant_op.constant(src, shape=src_shape)
      dst_tensor = image_ops.resize_bilinear(src_tensor, dst_shape[1:3])
      self.assertEqual(dst_shape, list(dst_tensor.get_shape()))
      self.assertEqual(dst_shape, list(self.evaluate(dst_tensor).shape))

  @test_util.run_deprecated_v1
  def testGradFromResizeToLargerInBothDims(self):
    """Analytic gradient matches the numeric one when upsampling."""
    src_shape = [1, 2, 3, 1]
    dst_shape = [1, 4, 6, 1]
    src = np.arange(0, 6).reshape(src_shape).astype(np.float32)
    with self.cached_session():
      src_tensor = constant_op.constant(src, shape=src_shape)
      dst_tensor = image_ops.resize_bilinear(src_tensor, dst_shape[1:3])
      max_err = gradient_checker.compute_gradient_error(
          src_tensor, src_shape, dst_tensor, dst_shape, x_init_value=src)
      self.assertLess(max_err, 1e-3)

  @test_util.run_deprecated_v1
  def testGradFromResizeToSmallerInBothDims(self):
    """Analytic gradient matches the numeric one when downsampling."""
    src_shape = [1, 4, 6, 1]
    dst_shape = [1, 2, 3, 1]
    src = np.arange(0, 24).reshape(src_shape).astype(np.float32)
    with self.cached_session():
      src_tensor = constant_op.constant(src, shape=src_shape)
      dst_tensor = image_ops.resize_bilinear(src_tensor, dst_shape[1:3])
      max_err = gradient_checker.compute_gradient_error(
          src_tensor, src_shape, dst_tensor, dst_shape, x_init_value=src)
      self.assertLess(max_err, 1e-3)

  @test_util.run_deprecated_v1
  def testCompareGpuVsCpu(self):
    """CPU and GPU kernels must agree on the gradient."""
    src_shape = [2, 4, 6, 3]
    dst_shape = [2, 8, 16, 3]
    size = np.prod(src_shape)
    src = 1.0 / size * np.arange(0, size).reshape(src_shape).astype(np.float32)
    # Align corners will be deprecated for tf2.0 and the false version is not
    # supported by XLA.
    if test_util.is_xla_enabled():
      align_corner_options = [True]
    else:
      align_corner_options = [True, False]
    for align_corners in align_corner_options:
      grads = {}
      for on_gpu in (False, True):
        with self.cached_session(use_gpu=on_gpu):
          src_tensor = constant_op.constant(src, shape=src_shape)
          dst_tensor = image_ops.resize_bilinear(
              src_tensor, dst_shape[1:3], align_corners=align_corners)
          grads[on_gpu] = gradient_checker.compute_gradient(
              src_tensor, src_shape, dst_tensor, dst_shape, x_init_value=src)
      self.assertAllClose(grads[False], grads[True], rtol=1e-4, atol=1e-4)

  @test_util.run_deprecated_v1
  def testTypes(self):
    """The gradient exists and has the expected values for float16/32/64."""
    src_shape = [1, 4, 6, 1]
    dst_shape = [1, 2, 3, 1]
    src = np.arange(0, 24).reshape(src_shape)
    # Theoretical jacobian row sums: ones at the source pixels that are
    # sampled when downsampling 4x6 -> 2x3.
    one, zero = [1.], [0.]
    expected_grad = [[[one, zero, one, zero, one, zero],
                      [zero, zero, zero, zero, zero, zero],
                      [one, zero, one, zero, one, zero],
                      [zero, zero, zero, zero, zero, zero]]]
    with self.cached_session() as sess:
      for dtype in [np.float16, np.float32, np.float64]:
        src_tensor = constant_op.constant(src.astype(dtype), shape=src_shape)
        dst_tensor = image_ops.resize_bilinear(src_tensor, dst_shape[1:3])
        grad = sess.run(gradients_impl.gradients(dst_tensor, src_tensor))[0]
        self.assertAllEqual(src_shape, grad.shape)
        # Not using gradient_checker.compute_gradient as I didn't work out
        # the changes required to compensate for the lower precision of
        # float16 when computing the numeric jacobian.
        # Instead, we just test the theoretical jacobian.
        self.assertAllEqual(expected_grad, grad)
class ResizeBicubicOpTest(test.TestCase):
  """Shape and gradient tests for image_ops.resize_bicubic."""
  def testShapeIsCorrectAfterOp(self):
    """Output shape is as requested, both statically and after evaluation."""
    in_shape = [1, 2, 2, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 4).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3],
                                              align_corners=align_corners)
        self.assertEqual(out_shape, list(resize_out.get_shape()))
        resize_out = self.evaluate(resize_out)
        self.assertEqual(out_shape, list(resize_out.shape))
  @test_util.run_deprecated_v1
  def testGradFromResizeToLargerInBothDims(self):
    """Analytic gradient matches the numeric one when upsampling 2x3->4x6."""
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3],
                                              align_corners=align_corners)
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testGradFromResizeToSmallerInBothDims(self):
    """Analytic gradient matches the numeric one when downsampling 4x6->2x3."""
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    x = np.arange(0, 24).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3],
                                              align_corners=align_corners)
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testGradOnUnsupportedType(self):
    """For a uint8 input, gradients() is expected to return [None]."""
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    x = np.arange(0, 24).reshape(in_shape).astype(np.uint8)
    with self.cached_session():
      input_tensor = constant_op.constant(x, shape=in_shape)
      resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3])
      # NOTE(review): gradients() takes (ys, xs); as written this asks for
      # d(input)/d(resize_out), which is [None] regardless of dtype.  The
      # intent was probably gradients(resize_out, [input_tensor]) -- confirm.
      grad = gradients_impl.gradients(input_tensor, [resize_out])
      self.assertEqual([None], grad)
class ScaleAndTranslateOpTest(test.TestCase):
  """Gradient tests for image_ops.scale_and_translate."""
  @test_util.run_deprecated_v1
  def testGrads(self):
    """Numeric vs. analytic gradients across kernels/scales/translations."""
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    kernel_types = [
        'lanczos1', 'lanczos3', 'lanczos5', 'gaussian', 'box', 'triangle',
        'keyscubic', 'mitchellcubic'
    ]
    # Includes identity, shrinking, and growing scales, and translations
    # that push the sampled region partially or fully off the image.
    scales = [(1.0, 1.0), (0.37, 0.47), (2.1, 2.1)]
    translations = [(0.0, 0.0), (3.14, 1.19), (2.1, 3.1), (100.0, 200.0)]
    for scale in scales:
      for translation in translations:
        for kernel_type in kernel_types:
          for antialias in [True, False]:
            with self.cached_session():
              input_tensor = constant_op.constant(x, shape=in_shape)
              scale_and_translate_out = image_ops.scale_and_translate(
                  input_tensor,
                  out_shape[1:3],
                  scale=constant_op.constant(scale),
                  translation=constant_op.constant(translation),
                  kernel_type=kernel_type,
                  antialias=antialias)
              err = gradient_checker.compute_gradient_error(
                  input_tensor,
                  in_shape,
                  scale_and_translate_out,
                  out_shape,
                  x_init_value=x)
              self.assertLess(err, 1e-3)
  def testIdentityGrads(self):
    """Tests that Gradients for 1.0 scale should be ones for some kernels."""
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    # Only these kernels are expected to have all-ones gradients at scale
    # 1.0 with no translation (per the docstring above).
    kernel_types = ['lanczos1', 'lanczos3', 'lanczos5', 'triangle', 'keyscubic']
    scale = (1.0, 1.0)
    translation = (0.0, 0.0)
    antialias = True
    for kernel_type in kernel_types:
      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        with backprop.GradientTape() as tape:
          tape.watch(input_tensor)
          scale_and_translate_out = image_ops.scale_and_translate(
              input_tensor,
              out_shape[1:3],
              scale=constant_op.constant(scale),
              translation=constant_op.constant(translation),
              kernel_type=kernel_type,
              antialias=antialias)
        grad = tape.gradient(scale_and_translate_out, input_tensor)[0]
        grad_v = self.evaluate(grad)
        self.assertAllClose(np.ones_like(grad_v), grad_v)
class CropAndResizeOpTest(test.TestCase):
  """Shape and gradient tests for image_ops.crop_and_resize."""
  def testShapeIsCorrectAfterOp(self):
    """Cropping 2 boxes to 4x5 from a [2,3,4,2] batch yields [2,4,5,2]."""
    batch = 2
    image_height = 3
    image_width = 4
    crop_height = 4
    crop_width = 5
    depth = 2
    num_boxes = 2
    image_shape = [batch, image_height, image_width, depth]
    crop_size = [crop_height, crop_width]
    crops_shape = [num_boxes, crop_height, crop_width, depth]
    image = np.arange(0, batch * image_height * image_width *
                      depth).reshape(image_shape).astype(np.float32)
    # Boxes are normalized [y1, x1, y2, x2]; box_ind selects which image in
    # the batch each box crops from.
    boxes = np.array([[0, 0, 1, 1], [.1, .2, .7, .8]], dtype=np.float32)
    box_ind = np.array([0, 1], dtype=np.int32)
    with self.session(use_gpu=True) as sess:
      crops = image_ops.crop_and_resize(
          constant_op.constant(
              image, shape=image_shape),
          constant_op.constant(
              boxes, shape=[num_boxes, 4]),
          constant_op.constant(
              box_ind, shape=[num_boxes]),
          constant_op.constant(
              crop_size, shape=[2]))
      self.assertEqual(crops_shape, list(crops.get_shape()))
      crops = self.evaluate(crops)
      self.assertEqual(crops_shape, list(crops.shape))
  def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples):
    """Generate samples that are far enough from a set of anchor points.
    We generate uniform samples in [low, high], then reject those that are less
    than radius away from any point in anchors. We stop after we have accepted
    num_samples samples.
    Args:
      low: The lower end of the interval.
      high: The upper end of the interval.
      anchors: A list of length num_crops with anchor points to avoid.
      radius: Distance threshold for the samples from the anchors.
      num_samples: How many samples to produce.
    Returns:
      samples: A list of length num_samples with the accepted samples.
    """
    self.assertTrue(low < high)
    self.assertTrue(radius >= 0)
    num_anchors = len(anchors)
    # Make sure that at least half of the interval is not forbidden.
    self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low))
    anchors = np.reshape(anchors, num_anchors)
    samples = []
    # Rejection sampling; terminates with high probability because at least
    # half of the interval lies outside all exclusion zones (checked above).
    while len(samples) < num_samples:
      sample = np.random.uniform(low, high)
      if np.all(np.fabs(sample - anchors) > radius):
        samples.append(sample)
    return samples
  @test_util.run_deprecated_v1
  def testGradRandomBoxes(self):
    """Test that the gradient is correct for randomly generated boxes.
    The mapping is piecewise differentiable with respect to the box coordinates.
    The points where the function is not differentiable are those which are
    mapped to image pixels, i.e., the normalized y coordinates in
    np.linspace(0, 1, image_height) and normalized x coordinates in
    np.linspace(0, 1, image_width). Make sure that the box coordinates are
    sufficiently far away from those rectangular grid centers that are points of
    discontinuity, so that the finite difference Jacobian is close to the
    computed one.
    """
    np.random.seed(1)  # Make it reproducible.
    delta = 1e-3
    radius = 2 * delta
    low, high = -0.5, 1.5  # Also covers the case of extrapolation.
    image_height = 4
    # Sweep a variety of small image/crop/batch geometries.
    for image_width in range(1, 3):
      for crop_height in range(1, 3):
        for crop_width in range(2, 4):
          for depth in range(1, 3):
            for num_boxes in range(1, 3):
              batch = num_boxes
              image_shape = [batch, image_height, image_width, depth]
              crop_size = [crop_height, crop_width]
              crops_shape = [num_boxes, crop_height, crop_width, depth]
              boxes_shape = [num_boxes, 4]
              image = np.arange(0, batch * image_height * image_width *
                                depth).reshape(image_shape).astype(np.float32)
              boxes = []
              for _ in range(num_boxes):
                # pylint: disable=unbalanced-tuple-unpacking
                y1, y2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_height), radius, 2)
                x1, x2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_width), radius, 2)
                # pylint: enable=unbalanced-tuple-unpacking
                boxes.append([y1, x1, y2, x2])
              boxes = np.array(boxes, dtype=np.float32)
              box_ind = np.arange(batch, dtype=np.int32)
              with self.cached_session(use_gpu=True):
                image_tensor = constant_op.constant(image, shape=image_shape)
                boxes_tensor = constant_op.constant(boxes, shape=[num_boxes, 4])
                box_ind_tensor = constant_op.constant(
                    box_ind, shape=[num_boxes])
                crops = image_ops.crop_and_resize(
                    image_tensor,
                    boxes_tensor,
                    box_ind_tensor,
                    constant_op.constant(
                        crop_size, shape=[2]))
                # Check gradients w.r.t. both the image and the boxes.
                err = gradient_checker.compute_gradient_error(
                    [image_tensor, boxes_tensor], [image_shape, boxes_shape],
                    crops,
                    crops_shape,
                    delta=delta,
                    x_init_value=[image, boxes])
                self.assertLess(err, 2e-3)
@test_util.run_all_in_graph_and_eager_modes
class RGBToHSVOpTest(test.TestCase):
  """Tests for the rgb_to_hsv op: output shape and gradient correctness."""

  # Dtypes exercised by the shape test below.
  TYPES = [np.float32, np.float64]

  def testShapeIsCorrectAfterOp(self):
    """rgb_to_hsv preserves the input shape for every supported dtype."""
    in_shape = [2, 20, 30, 3]
    out_shape = [2, 20, 30, 3]
    for nptype in self.TYPES:
      x = np.random.randint(0, high=255, size=[2, 20, 30, 3]).astype(nptype)
      with self.cached_session(use_gpu=True):
        rgb_input_tensor = constant_op.constant(x, shape=in_shape)
        hsv_out = gen_image_ops.rgb_to_hsv(rgb_input_tensor)
        # Static (graph-time) shape check.
        self.assertEqual(out_shape, list(hsv_out.get_shape()))
        hsv_out = self.evaluate(hsv_out)
        # Dynamic (run-time) shape check.
        self.assertEqual(out_shape, list(hsv_out.shape))

  def testRGBToHSVGradSimpleCase(self):
    """Analytical gradient matches the numerical one on a smooth input."""

    def f(x):
      return gen_image_ops.rgb_to_hsv(x)

    # Building a simple input tensor to avoid any discontinuity
    x = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8,
                                                     0.9]]).astype(np.float32)
    rgb_input_tensor = constant_op.constant(x, shape=x.shape)
    # Computing Analytical and Numerical gradients of f(x)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        f, [rgb_input_tensor])
    self.assertAllClose(numerical, analytical, atol=1e-4)

  def testRGBToHSVGradRandomCase(self):
    """Gradient check on a random batched input (fixed seed)."""

    def f(x):
      return gen_image_ops.rgb_to_hsv(x)

    np.random.seed(0)
    # Building a simple input tensor to avoid any discontinuity
    x = np.random.rand(1, 5, 5, 3).astype(np.float32)
    rgb_input_tensor = constant_op.constant(x, shape=x.shape)
    # Computing Analytical and Numerical gradients of f(x)
    self.assertLess(
        gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(f, [rgb_input_tensor])), 1e-4)

  def testRGBToHSVGradSpecialCaseRGreatest(self):
    """Gradient agrees with a closed-form reference when R > G > B."""
    # This test tests a specific subset of the input space
    # with a dummy function implemented with native TF operations.
    in_shape = [2, 10, 20, 3]

    def f(x):
      return gen_image_ops.rgb_to_hsv(x)

    def f_dummy(x):
      # This dummy function is a implementation of RGB to HSV using
      # primitive TF functions for one particular case when R>G>B.
      r = x[..., 0]
      g = x[..., 1]
      b = x[..., 2]
      # Since MAX = r and MIN = b, we get the following h,s,v values.
      v = r
      s = 1 - math_ops.div_no_nan(b, r)
      h = 60 * math_ops.div_no_nan(g - b, r - b)
      h = h / 360
      return array_ops.stack([h, s, v], axis=-1)

    # Building a custom input tensor where R>G>B
    x_reds = np.ones((in_shape[0], in_shape[1], in_shape[2])).astype(np.float32)
    x_greens = 0.5 * np.ones(
        (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32)
    x_blues = 0.2 * np.ones(
        (in_shape[0], in_shape[1], in_shape[2])).astype(np.float32)
    x = np.stack([x_reds, x_greens, x_blues], axis=-1)
    rgb_input_tensor = constant_op.constant(x, shape=in_shape)
    # Computing Analytical and Numerical gradients of f(x)
    analytical, numerical = gradient_checker_v2.compute_gradient(
        f, [rgb_input_tensor])
    # Computing Analytical and Numerical gradients of f_dummy(x)
    analytical_dummy, numerical_dummy = gradient_checker_v2.compute_gradient(
        f_dummy, [rgb_input_tensor])
    self.assertAllClose(numerical, analytical, atol=1e-4)
    self.assertAllClose(analytical_dummy, analytical, atol=1e-4)
    self.assertAllClose(numerical_dummy, numerical, atol=1e-4)
if __name__ == "__main__":
  # Run under the TensorFlow test runner when executed as a script.
  test.main()
| chemelnucfin/tensorflow | tensorflow/python/ops/image_grad_test.py | Python | apache-2.0 | 21,897 | [
"Gaussian"
] | 923d9819957a01f2bc12f7e163e760822bfa9b77d677347319cf76c3b182175f |
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for SearchIO exonerate-text indexing."""
import os
import unittest
from search_tests_common import CheckIndex
class ExonerateTextIndexCases(CheckIndex):
    """Index tests for SearchIO parsing of exonerate plain-text output."""

    # Format name passed through to SearchIO by CheckIndex.check_index.
    fmt = 'exonerate-text'

    def test_exn_22_m_est2genome(self):
        """Test exonerate-text indexing, single"""
        filename = os.path.join('Exonerate', 'exn_22_m_est2genome.exn')
        self.check_index(filename, self.fmt)

    def test_exn_22_q_multiple(self):
        """Test exonerate-text indexing, multiple queries"""
        # Docstring previously said "single" (copy-paste error); this file
        # contains multiple queries, as its name indicates.
        filename = os.path.join('Exonerate', 'exn_22_q_multiple.exn')
        self.check_index(filename, self.fmt)
if __name__ == "__main__":
    # verbosity=2 prints each test's docstring as it runs.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_SearchIO_exonerate_text_index.py | Python | gpl-2.0 | 954 | [
"Biopython"
] | 0b85279bcc6c0f1e43520761c1969cf48bc628bc0d3054d50f4b69f5d894f49d |
import os
from setuptools import setup, find_packages
import multiprocessing
# Enumerate the bundled template and example files.
# NOTE(review): `templates` and `examples` are computed but never passed to
# setup(); presumably include_package_data/MANIFEST.in does the actual
# inclusion -- confirm before removing.
template_files = os.listdir(os.path.join(os.path.dirname(__file__), 'citation_reporter', 'templates'))
templates = [os.path.join('templates', template) for template in template_files]
example_files = os.listdir(os.path.join(os.path.dirname(__file__), 'citation_reporter', 'examples'))
examples = [os.path.join('examples', example) for example in example_files]

setup(name='citation_reporter',
      version='0.2.0',
      # Command-line and web entry points installed onto PATH.
      scripts=[
          'scripts/citation_reporter_cli.py',
          'scripts/citation_reporter_web.py'
      ],
      test_suite='nose.collector',
      tests_require=[
          'nose',
          'mock'
      ],
      install_requires=[
          'biopython',
          'boltons',
          'Flask',
          'PyYAML',
          'requests'
      ],
      include_package_data=True,
      # NOTE(review): package_data keys are normally *package names* (e.g.
      # 'citation_reporter'), not directory labels -- these entries likely
      # have no effect; verify against the setuptools documentation.
      package_data={
          'templates': 'citation_reporter/templates/*',
          'examples': 'citation_reporter/examples/*'
      },
      packages=find_packages(),
      zip_safe=False
      )
| sanger-pathogens/citation_reporter | setup.py | Python | gpl-3.0 | 1,057 | [
"Biopython"
] | 05f5797e5adac2352594365067d014844809bb954c3dbbeb355ec2400d559750 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
    """Switch Staff.key to a non-editable, unique UUID field."""

    dependencies = [
        ('visit', '0039_auto_20150723_0019'),
    ]

    operations = [
        migrations.AlterField(
            model_name='staff',
            name='key',
            # uuid.uuid4 (the callable, not a call) generates a fresh key per
            # row; unique=True enforces uniqueness at the database level.
            field=models.UUIDField(default=uuid.uuid4, unique=True, editable=False),
        ),
    ]
| koebbe/homeworks | visit/migrations/0040_auto_20150723_0020.py | Python | mit | 442 | [
"VisIt"
] | ded38b14e06456b4eed2af476d1ce87680bc91135885c3e721a09035a43a2773 |
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Maintainer: Rudolf Streif (rstreif@jaguarlandrover.com)
"""
| dvthiriez/rvi_backend | config/__init__.py | Python | mpl-2.0 | 288 | [
"Jaguar"
] | f675540eee93df6ff2f7af4b0fd5d11cfe6eb4197fe3cb39c14d3527218e5408 |
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
###########################################################################
# #
# ESPResSo++ Benchmark Python script for a polymer melt #
# #
###########################################################################
import sys
import time
import espresso
import mpi4py.MPI as MPI
import logging
from espresso import Real3D, Int3D
from espresso.tools import lammps, gromacs
from espresso.tools import decomp
from espresso.tools import timers
# simulation parameters (nvt = False is nve)
steps = 1000
rc = 1.12  # pair interaction cutoff
skin = 0.3  # Verlet-list skin
nvt = True
# NOTE(review): `timestep` is never used below -- integrator.dt is assigned
# explicitly later; presumably a leftover, confirm before removing.
timestep = 0.01

# run with "tlj tfene tcos" to activate tabulated potentials
tabfileLJ = "pot-lj.txt"
tabfileFENE = "pot-fene.txt"
tabfileCosine = "pot-cosine.txt"
spline = 2  # spline interpolation type (1, 2, 3)

######################################################################
## IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ##
######################################################################
sys.stdout.write('Setting up simulation ...\n')
# read the LAMMPS-format start configuration (bonds, angles, coords, box)
bonds, angles, x, y, z, Lx, Ly, Lz = lammps.read('espressopp_polymer_melt.start')
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
system = espresso.System()
system.rng = espresso.esutil.RNG(54321)  # fixed seed for reproducibility
system.bc = espresso.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
# spatial domain decomposition over the available MPI ranks
nodeGrid = decomp.nodeGrid(comm.size,size,rc,skin)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
system.storage = espresso.storage.DomainDecomposition(system, nodeGrid, cellGrid)

# add particles to the system and then decompose
for pid in range(num_particles):
    system.storage.addParticle(pid + 1, Real3D(x[pid], y[pid], z[pid]))
system.storage.decompose()
# Lennard-Jones with Verlet list
vl = espresso.VerletList(system, cutoff = rc + system.skin)
potTabLJ = espresso.interaction.Tabulated(itype=spline, filename=tabfileLJ, cutoff=rc)
potLJ = espresso.interaction.LennardJones(sigma=1.0, epsilon=1.0, cutoff=rc, shift=False)
if sys.argv.count("tlj") > 0:
    # command-line flag "tlj" selects the tabulated LJ potential
    print('tabulated potential from file %s' % potTabLJ.filename)
    interLJ = espresso.interaction.VerletListTabulated(vl)
    interLJ.setPotential(type1 = 0, type2 = 0, potential = potTabLJ)
else:
    interLJ = espresso.interaction.VerletListLennardJones(vl)
    interLJ.setPotential(type1 = 0, type2 = 0, potential = potLJ)
system.addInteraction(interLJ)

# FENE bonds
fpl = espresso.FixedPairList(system.storage)
fpl.addBonds(bonds)
potTabFENE = espresso.interaction.Tabulated(itype=spline, filename=tabfileFENE)
potFENE = espresso.interaction.FENE(K=30.0, r0=0.0, rMax=1.5)
if sys.argv.count("tfene") > 0:
    # command-line flag "tfene" selects the tabulated FENE potential
    print('tabulated potential from file %s' % potTabFENE.filename)
    interFENE = espresso.interaction.FixedPairListTabulated(system, fpl)
    interFENE.setPotential(type1 = 0, type2 = 0, potential = potTabFENE)
else:
    interFENE = espresso.interaction.FixedPairListFENE(system, fpl)
    interFENE.setPotential(type1 = 0, type2 = 0, potential = potFENE)
system.addInteraction(interFENE)

# Cosine with FixedTriple list
ftl = espresso.FixedTripleList(system.storage)
ftl.addTriples(angles)
potTabCosine = espresso.interaction.TabulatedAngular(itype=spline, filename = tabfileCosine)
potCosine = espresso.interaction.Cosine(K=1.5, theta0=3.1415926)
if sys.argv.count("tcos") > 0:
    # command-line flag "tcos" selects the tabulated angular potential
    print('tabulated potential from file %s' % potTabCosine.filename)
    interCosine = espresso.interaction.FixedTripleListTabulatedAngular(system, ftl)
    interCosine.setPotential(type1 = 0, type2 = 0, potential = potTabCosine)
else:
    interCosine = espresso.interaction.FixedTripleListCosine(system, ftl)
    interCosine.setPotential(type1 = 0, type2 = 0, potential = potCosine)
system.addInteraction(interCosine)
# integrator
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = 0.003
if(nvt):
    # Langevin thermostat turns the NVE integrator into NVT
    langevin = espresso.integrator.Langevin(system)
    langevin.gamma = 1.0
    langevin.temperature = 1.0
    integrator.langevin = langevin
    # NOTE(review): the dt = 0.003 above is overwritten here for NVT runs
    integrator.dt = 0.01

# print simulation parameters
print ''
print 'number of particles =', num_particles
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'nvt =', nvt
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''

# analysis
configurations = espresso.analysis.Configurations(system)
configurations.gather()
temperature = espresso.analysis.Temperature(system)
pressure = espresso.analysis.Pressure(system)
pressureTensor = espresso.analysis.PressureTensor(system)
fmt = '%5d %8.4f %10.5f %8.5f %12.3f %12.3f %12.3f %12.3f %12.3f\n'
# observables before the production run
T = temperature.compute()
P = pressure.compute()
Pij = pressureTensor.compute()
Ek = 0.5 * T * (3 * num_particles)  # kinetic energy via equipartition
Ep = interLJ.computeEnergy()
Eb = interFENE.computeEnergy()
Ea = interCosine.computeEnergy()
Etotal = Ek + Ep + Eb + Ea
sys.stdout.write(' step T P Pxy etotal ekinetic epair ebond eangle\n')
sys.stdout.write(fmt % (0, T, P, Pij[3], Etotal, Ek, Ep, Eb, Ea))

# production run; time.clock() measures CPU time (Python 2 era API)
start_time = time.clock()
integrator.run(steps)
end_time = time.clock()

# observables after the run
T = temperature.compute()
P = pressure.compute()
Pij = pressureTensor.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interLJ.computeEnergy()
Eb = interFENE.computeEnergy()
Ea = interCosine.computeEnergy()
Etotal = Ek + Ep + Eb + Ea
sys.stdout.write(fmt % (steps, T, P, Pij[3], Etotal, Ek, Ep, Eb, Ea))
sys.stdout.write('\n')

# print timings and neighbor list information
timers.show(integrator.getTimers(), precision=2)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
| govarguz/espressopp | bench/polymer_melt/espressopp/espressopp_polymer_melt_tabulated.py | Python | gpl-3.0 | 6,896 | [
"ESPResSo",
"Gromacs",
"LAMMPS"
] | 0fce6120917ed4a85405a0710458d53d70654fd484f4d8423435c3f00e03877e |
"""Example creating minimal model.
This demonstrates just the very core SBML functionality.
"""
from pathlib import Path
import libsbml
from sbmlutils import EXAMPLES_DIR
from sbmlutils.cytoscape import visualize_sbml
from sbmlutils.examples import templates
from sbmlutils.factory import *
from sbmlutils.metadata import *
class U(Units):
    """Unit definitions used throughout the model."""

    min = UnitDefinition("min")
    mmole = UnitDefinition("mmole")
    m2 = UnitDefinition("m2", "meter^2")
    mM = UnitDefinition("mM", "mmole/liter")
    mmole_per_min = UnitDefinition("mmole_per_min", "mmole/min")
    l_per_min = UnitDefinition("l_per_min", "l/min")
_m = Model(
sid="complete_model",
packages=["distrib", "fbc"],
notes="""
# Complete model
Example demonstrating more complete information in SBML model.
Showcasing combination of multiple features.
"""
+ templates.terms_of_use,
creators=templates.creators,
units=U,
model_units=ModelUnits(
time=U.min,
extent=U.mmole,
substance=U.mmole,
length=U.meter,
area=U.m2,
volume=U.liter,
),
)
_m.compartments = [
Compartment(
sid="cell",
metaId="meta_cell",
value=1.0,
# unit support
unit=U.liter,
spatialDimensions=3,
constant=True,
# annotation and sbo support
sboTerm=SBO.PHYSICAL_COMPARTMENT,
annotations=[(BQB.IS, "ncit/C48694")],
# provenance via notes
notes="""
Overall cell compartment with volume set to an arbitrary value of 1.0.
""",
# uncertainties
uncertainties=[
Uncertainty(
formula="normal(1.0, 0.1)",
uncertParameters=[
UncertParameter(type=libsbml.DISTRIB_UNCERTTYPE_MEAN, value=1.0),
UncertParameter(
type=libsbml.DISTRIB_UNCERTTYPE_STANDARDDEVIATION, value=0.1
),
],
uncertSpans=[
UncertSpan(
type=libsbml.DISTRIB_UNCERTTYPE_RANGE,
valueLower=0.2,
valueUpper=3.0,
),
],
)
],
),
]
# Two species: glucose (S1) and its phosphorylated product G6P (S2).
_m.species = [
    Species(
        sid="S1",
        metaId="meta_S1",
        name="glucose",
        compartment="cell",
        # clean handling of amounts vs. concentrations
        initialConcentration=10.0,
        substanceUnit=U.mmole,
        hasOnlySubstanceUnits=False,
        # additional information via FBC
        sboTerm=SBO.SIMPLE_CHEMICAL,
        chemicalFormula="C6H12O6",
        charge=0,
        annotations=[
            (BQB.IS, "chebi/CHEBI:4167"),
            (BQB.IS, "inchikey/WQZGKKKJIJFFOK-GASJEMHNSA-N"),
        ],
        notes="Species represents D-glucopyranose.",
    ),
    Species(
        sid="S2",
        metaId="meta_S2",
        name="glucose 6-phosphate",
        initialConcentration=10.0,
        compartment="cell",
        substanceUnit=U.mmole,
        hasOnlySubstanceUnits=False,
        sboTerm=SBO.SIMPLE_CHEMICAL,
        chemicalFormula="C6H11O9P",
        charge=0,
        annotations=[(BQB.IS, "chebi/CHEBI:58225")],
    ),
]
# Kinetic constant for the hexokinase reaction defined below.
_m.parameters = [
    Parameter(
        sid="k1",
        value=0.1,
        constant=True,
        unit=U.l_per_min,
        sboTerm=SBO.KINETIC_CONSTANT,
    ),
]
# Single reaction: irreversible conversion of S1 (glucose) to S2 (G6P).
_m.reactions = [
    Reaction(
        sid="J0",
        name="hexokinase",
        equation="S1 -> S2",
        # reactions should have compartment set for layouts
        compartment="cell",
        formula=("k1 * S1", U.mmole_per_min),  # [liter/min]* [mmole/liter]
        pars=[
            Parameter(
                sid="J0_lb",
                value=0.0,
                constant=True,
                unit=U.mmole_per_min,
                name="lower flux bound J0",
            ),
            Parameter(
                sid="J0_ub",
                value=1000.0,
                constant=True,
                unit=U.mmole_per_min,
                name="upper flux bound J0",
            ),
        ],
        # additional fbc information (here used for constraint testing)
        lowerFluxBound="J0_lb",
        upperFluxBound="J0_ub",
        # NOTE(review): implicit string concatenation below drops the space
        # between the two sentences of the note.
        notes="Simplified hexokinase reaction ignoring ATP, ADP cofactors."
        "Reaction is not mass and charge balanced.",
        annotations=[(BQB.IS, "uniprot/P17710")],
    ),
]
# Flux-bound constraints on reaction J0.
# NOTE(review): the second constraint reads "J0 >= J0_ub" although its id
# names it an upper bound; an upper bound would normally be "J0 <= J0_ub".
# The reaction comment says these are "used for constraint testing", so this
# may be deliberate -- confirm before changing.
_m.constraints = [
    Constraint("J0_lb_constraint", math="J0 >= J0_lb"),
    Constraint("J0_ub_constraint", math="J0 >= J0_ub"),
]
def create(tmp: bool = False) -> FactoryResult:
    """Create model.

    :param tmp: write the output to a temporary directory instead of
        ``EXAMPLES_DIR`` (useful for tests)
    :return: the ``FactoryResult`` describing the generated SBML file(s)
    """
    return create_model(
        models=_m,
        output_dir=EXAMPLES_DIR,
        # now unit valid model
        units_consistency=True,
        tmp=tmp,
    )
if __name__ == "__main__":
    fac_result = create()
    # NOTE(review): presumably requires a running Cytoscape instance with an
    # SBML visualization app to render -- verify before running headless.
    visualize_sbml(sbml_path=fac_result.sbml_path)
| matthiaskoenig/sbmlutils | src/sbmlutils/examples/complete_model.py | Python | lgpl-3.0 | 4,944 | [
"Cytoscape"
] | ed69b9a1c4b4fcac4340c3f62ff286d9e0c61985419257e0e39fae41f8720e32 |
#!/usr/bin/env python
"""Find the functions in a module missing type annotations.
To use it run
./functions_missing_types.py <module>
and it will print out a list of functions in the module that don't
have types.
"""
import argparse
import ast
import importlib
import os
# Root of the NumPy source tree (the parent directory of this tools/ dir).
# The previous expression, os.path.dirname(os.path.join(os.path.abspath(__file__), "..")),
# resolved to the path of this *file*: dirname merely strips the trailing
# ".." component instead of normalizing it, so the stub lookup in
# find_missing() below could never find a real directory.
NUMPY_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(__file__), "..",
))
# Technically "public" functions (they don't start with an underscore)
# that we don't want to include.
# Maps module name -> set of attribute names ignored when diffing the
# runtime namespace against the stubs in find_missing().
EXCLUDE_LIST = {
    "numpy": {
        # Stdlib modules in the namespace by accident
        "absolute_import",
        "division",
        "print_function",
        "warnings",
        "sys",
        "os",
        "math",
        # Accidentally public, deprecated, or shouldn't be used
        "Tester",
        "alen",
        "add_docstring",
        "add_newdoc",
        "add_newdoc_ufunc",
        "core",
        "compat",
        "fastCopyAndTranspose",
        "get_array_wrap",
        "int_asbuffer",
        "numarray",
        "oldnumeric",
        "safe_eval",
        "set_numeric_ops",
        "test",
        "typeDict",
        # Builtins
        "bool",
        "complex",
        "float",
        "int",
        "long",
        "object",
        "str",
        "unicode",
        # More standard names should be preferred
        "alltrue",  # all
        "sometrue",  # any
    }
}
class FindAttributes(ast.NodeVisitor):
    """Collect the names of top-level attributes/functions/classes in a stubs AST.

    Walk a parsed stubs module with ``visit``; afterwards ``attributes``
    holds every recorded top-level name.  See e.g.
    https://greentreesnakes.readthedocs.io/en/latest/index.html
    for more information on working with Python's ast.
    """

    def __init__(self):
        # Names discovered so far; filled in by the visit_* hooks below.
        self.attributes = set()

    def visit_FunctionDef(self, node):
        # A module-level __getattr__ is a typing artifact, not a real member.
        # generic_visit is deliberately not called so nested functions are
        # ignored: only top-level functions count.
        name = node.name
        if name != "__getattr__":
            self.attributes.add(name)

    def visit_ClassDef(self, node):
        # Record public classes only; do not descend into the class body.
        if not node.name.startswith("_"):
            self.attributes.add(node.name)

    def visit_AnnAssign(self, node):
        # Annotated top-level assignment, e.g. ``pi: float = 3.14``.
        self.attributes.add(node.target.id)
def find_missing(module_name):
    """Print the public names of *module_name* that its stubs do not cover."""
    stub_file = os.path.join(
        NUMPY_ROOT,
        module_name.replace(".", os.sep),
        "__init__.pyi",
    )

    module = importlib.import_module(module_name)
    # Runtime public surface: every dir() entry without a leading underscore.
    runtime_names = {
        name for name in dir(module) if not name.startswith("_")
    }

    if os.path.isfile(stub_file):
        with open(stub_file) as handle:
            stub_tree = ast.parse(handle.read())
        visitor = FindAttributes()
        visitor.visit(stub_tree)
        stub_names = visitor.attributes
    else:
        # No stubs for this module yet.
        stub_names = set()

    ignored = EXCLUDE_LIST.get(module_name, set())
    print("\n".join(sorted(runtime_names - stub_names - ignored)))
def main():
    """Entry point: report the un-stubbed names of the requested module."""
    parser = argparse.ArgumentParser()
    parser.add_argument("module")
    find_missing(parser.parse_args().module)


if __name__ == "__main__":
    main()
| simongibbons/numpy | tools/functions_missing_types.py | Python | bsd-3-clause | 3,211 | [
"VisIt"
] | 9b8428ffb8f0539def9937a36e3d50528cc626eb01bb8b37b867e778193b7705 |
import logging
import os
import pysam
import subprocess
class AnnotationError(Exception):
    """Raised when an annotation record has too few tab-delimited fields."""
    pass
def ensureIndexed(bedPath, preset="bed", trySorting=True):
    """Ensure *bedPath* is bgzf-compressed and tabix-indexed.

    Compresses and indexes the file in place (creating ``.gz`` and
    ``.gz.tbi`` files next to it) when needed, validates the column count
    of the first record, and returns the path of the compressed file.

    :param bedPath: path to a sorted BED or GFF/GTF file (optionally .gz)
    :param preset: tabix preset, "bed" or "gff"
    :param trySorting: currently unused; kept for interface compatibility
        (see the commented-out sortFile helper below)
    :raises AnnotationError: if the first record has too few columns
    """
    if not bedPath.endswith(".gz"):
        if not os.path.exists(bedPath + ".gz"):
            logging.info("bgzf compressing {}".format(bedPath))
            pysam.tabix_compress(bedPath, bedPath + ".gz")
        if not os.path.exists(bedPath + ".gz"):
            raise Exception("Failed to create compress {preset} file for {file}; make sure the {preset} file is "
                            "sorted and the directory is writeable".format(preset=preset, file=bedPath))
        bedPath += ".gz"
    if not os.path.exists(bedPath + ".tbi"):
        logging.info("creating tabix index for {}".format(bedPath))
        pysam.tabix_index(bedPath, preset=preset)
        if not os.path.exists(bedPath + ".tbi"):
            raise Exception("Failed to create tabix index file for {file}; make sure the {preset} file is "
                            "sorted and the directory is writeable".format(preset=preset, file=bedPath))

    # Peek at the first record to validate the column count.  next() is used
    # instead of the Python 2-only .next() method so this also works on
    # Python 3.
    line = next(pysam.Tabixfile(bedPath).fetch())
    if len(line.strip().split("\t")) < 6 and preset == "bed":
        raise AnnotationError("BED files need to have at least 6 (tab-delimited) fields (including "
                              "chrom, start, end, name, score, strand; score is unused)")
    # BUG FIX: this check previously tested ``preset == "bed"`` (copy-paste
    # error), so GFF files were never validated and 6-8 column BED files
    # raised the wrong error message.
    if len(line.strip().split("\t")) < 9 and preset == "gff":
        raise AnnotationError("GFF/GTF files need to have at least 9 tab-delimited fields")

    return bedPath
# def sortFile(uncompressedPath, preset):
# if preset == "bed":
# fields = {"chrom":0, "start":1, "end":2}
# elif preset == "gff":
# fields = {"chrom":0, "start":3, "end":4}
# sortCommand = "sort -k{chrom}V -k{start}n -k{end}n".format(**fields)
# tabixCommand = "{sort} {path} | bgzip > {path}.gz".format(sort=sortCommand, path=uncompressedPath)
# logging.info("Trying to sort input annotation file with command:")
# logging.info(" {}".format(tabixCommand))
# subprocess.check_call(tabixCommand, shell=True)
| gatoravi/svviz | src/svviz/tabix.py | Python | mit | 2,083 | [
"pysam"
] | bea6b69d391e49e08f17fc4871aa84ee8e89c7ff96052feede00874fa1a1d1d4 |
import simtk.openmm as mm
import simtk.unit as u
import numpy as np
class Unpacker(object):
    """Convert an OpenMM ``State`` to a flat numpy array and back.

    The flat layout is: positions (n*3), velocities (n*3), forces (n*3),
    time (1), kinetic energy (1), potential energy (1), box vectors (9),
    then the context parameters.  Units are stripped on packing; the unit
    objects are remembered from the template state given to ``__init__``.
    """

    def __init__(self, state):
        # The template state fixes the atom count, the units and the
        # parameter ordering used for all later conversions.
        positions, velocities, forces, time, kinetic_energy, potential_energy, boxes, parameters = self.state_to_tuple(state)
        self.kinetic_energy_unit = kinetic_energy.unit
        self.potential_energy_unit = potential_energy.unit
        self.forces_unit = forces.unit
        self.positions_unit = positions.unit
        self.velocities_unit = velocities.unit
        #self.parameters_unit = dict((key, val.unit) for key, val in parameters.items())
        self.boxes_unit = boxes.unit
        self.time_unit = time.unit
        self.parameters_order = parameters.keys()
        self.n_atoms = len(positions)
        # Total flattened length, accumulated section by section.
        self.N = self.n_atoms * 3 * 3  # Positions, velocities, forces
        self.N += 1  # Time
        self.N += 2  # Potential and Kinetic Energy
        self.N += 9  # Box
        self.N += len(parameters)  # Other shit
        # Offsets at which np.split() cuts the flat array into its pieces.
        self.split_indices = [self.n_atoms * 3, self.n_atoms * 3 * 2, self.n_atoms * 3 * 3]
        self.split_indices.append(self.n_atoms * 3 * 3 + 1)  # time
        self.split_indices.append(self.n_atoms * 3 * 3 + 2)  # KE
        self.split_indices.append(self.n_atoms * 3 * 3 + 3)  # PE
        self.split_indices.append(self.n_atoms * 3 * 3 + 12)  # boxes
        #self.split_indices.append(self.n_atoms * 3 * 3 + 12 + 1 + len(parameters)) # Don't need an index for the last entries, e.g. parameters

    def state_to_tuple(self, state):
        """Pull the individual (unit-bearing) components out of *state*."""
        time = state.getTime()
        kinetic_energy = state.getKineticEnergy()
        potential_energy = state.getPotentialEnergy()
        positions = state.getPositions(asNumpy=True)
        velocities = state.getVelocities(asNumpy=True)
        forces = state.getForces(asNumpy=True)
        boxes = state.getPeriodicBoxVectors(asNumpy=True)
        parameters = state.getParameters()
        return positions, velocities, forces, time, kinetic_energy, potential_energy, boxes, parameters

    def state_to_array(self, state):
        """Flatten *state* into a unitless 1-D array of length ``self.N``."""
        positions, velocities, forces, time, kinetic_energy, potential_energy, boxes, parameters = self.state_to_tuple(state)
        arr = np.zeros(self.N)
        # The views returned by np.split share memory with arr, so writing
        # into them fills the flat array in place.
        positions_arr, velocities_arr, forces_arr, time_arr, kinetic_energy_arr, potential_energy_arr, boxes_arr, parameters_arr = np.split(arr, self.split_indices)
        positions_arr[:] = (positions / self.positions_unit).flat
        velocities_arr[:] = (velocities / self.velocities_unit).flat
        forces_arr[:] = (forces / self.forces_unit).flat
        time_arr[:] = time / self.time_unit
        kinetic_energy_arr[:] = kinetic_energy / self.kinetic_energy_unit
        potential_energy_arr[:] = potential_energy / self.potential_energy_unit
        boxes_arr[:] = (boxes / self.boxes_unit).flat
        parameters_arr[:] = parameters["MonteCarloPressure"]  # HACK HARDCODED, fix me later
        return arr

    def array_to_state(self, array):
        """Rebuild an ``mm.State`` from a flat array made by state_to_array.

        NOTE(review): values are handed to ``mm.State`` without re-attaching
        units (the commented-out u.Quantity lines were abandoned) --
        presumably the constructor assumes the default unit system; confirm
        against the OpenMM version in use.
        """
        #state = mm.State(self, energy=energy, coordList=None, velList=None, forceList=None, periodicBoxVectorsList=None, paramMap=None)
        positions_arr, velocities_arr, forces_arr, time_arr, kinetic_energy_arr, potential_energy_arr, boxes_arr, parameters_arr = np.split(array, self.split_indices)
        kinetic_energy = kinetic_energy_arr[0]  # * self.kinetic_energy_unit
        potential_energy = potential_energy_arr[0]  # * self.potential_energy_unit
        energy = (kinetic_energy, potential_energy)
        positions = positions_arr.reshape((self.n_atoms, 3))
        #positions = u.Quantity(map(lambda x: tuple(x), positions), self.positions_unit)
        positions = map(lambda x: tuple(x), positions)
        velocities = velocities_arr.reshape((self.n_atoms, 3))
        #velocities = u.Quantity(map(lambda x: tuple(x), velocities), self.velocities_unit)
        velocities = map(lambda x: tuple(x), velocities)
        forces = forces_arr.reshape((self.n_atoms, 3))
        #forces = u.Quantity(map(lambda x: tuple(x), forces), self.forces_unit)
        forces = map(lambda x: tuple(x), forces)
        boxes = boxes_arr.reshape((3, 3))
        #boxes = u.Quantity(map(lambda x: tuple(x), boxes), self.boxes_unit)
        boxes = map(lambda x: tuple(x), boxes)
        params = dict(MonteCarloPressure=parameters_arr[0])
        time = time_arr[0]  # * self.time_unit
        state = mm.State(simTime=time, paramMap=params, energy=energy, coordList=positions, velList=velocities, forceList=forces, periodicBoxVectorsList=boxes)
        return state
| kyleabeauchamp/DBayes | dbayes/unpacking.py | Python | gpl-2.0 | 4,727 | [
"OpenMM"
] | 69c4945f1bf0c61e750cdf11c1d45478751dc9369608d572b3c83cac24bf79a5 |
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
try:
    from django.contrib.staticfiles.handlers import StaticFilesHandler
    # staticfiles support is only enabled when the app is installed.
    USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
except ImportError, e:
    # Older Django without contrib.staticfiles (Python 2 except syntax;
    # the bound exception `e` is unused).
    USE_STATICFILES = False
def null_technical_500_response(request, exc_type, exc_value, tb):
    # Re-raise instead of rendering Django's technical 500 page so the
    # Werkzeug debugger receives the exception (Python 2 raise syntax).
    raise exc_type, exc_value, tb
class Command(BaseCommand):
    """runserver variant that serves via Werkzeug with its debugger enabled."""

    option_list = BaseCommand.option_list + (
        make_option('--noreload', action='store_false', dest='use_reloader', default=True,
            help='Tells Django to NOT use the auto-reloader.'),
        make_option('--browser', action='store_true', dest='open_browser',
            help='Tells Django to open a browser.'),
        make_option('--adminmedia', dest='admin_media_path', default='',
            help='Specifies the directory from which to serve admin media.'),
        make_option('--threaded', action='store_true', dest='threaded',
            help='Run in multithreaded mode.'),
    )
    if USE_STATICFILES:
        # static-file options only make sense when contrib.staticfiles exists
        option_list += (
            make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
                help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
            make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
                help='Allows serving static files even if DEBUG is False.'),
        )
    help = "Starts a lightweight Web server for development."
    args = '[optional port number, or ipaddr:port]'

    # Validation is called explicitly each time the server is reloaded.
    requires_model_validation = False

    def handle(self, addrport='', *args, **options):
        """Parse the address/port, build the WSGI handler stack and serve it."""
        import django
        # NOTE(review): `run` and `WSGIServerException` appear unused below.
        from django.core.servers.basehttp import run, WSGIServerException
        try:
            from django.core.servers.basehttp import AdminMediaHandler
            USE_ADMINMEDIAHANDLER = True
        except ImportError:
            # AdminMediaHandler was removed in later Django versions.
            USE_ADMINMEDIAHANDLER = False
        from django.core.handlers.wsgi import WSGIHandler
        try:
            from werkzeug import run_simple, DebuggedApplication
        except ImportError:
            raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")

        # usurp django's handler so exceptions reach the Werkzeug debugger
        from django.views import debug
        debug.technical_500_response = null_technical_500_response

        if args:
            raise CommandError('Usage is runserver %s' % self.args)
        if not addrport:
            addr = ''
            port = '8000'
        else:
            try:
                addr, port = addrport.split(':')
            except ValueError:
                # bare port given without an address
                addr, port = '', addrport
        if not addr:
            addr = '127.0.0.1'

        if not port.isdigit():
            raise CommandError("%r is not a valid port number." % port)

        threaded = options.get('threaded', False)
        use_reloader = options.get('use_reloader', True)
        open_browser = options.get('open_browser', False)
        quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'

        def inner_run():
            # The actual server loop; re-run by the reloader on code changes.
            print "Validating models..."
            self.validate(display_num_errors=True)
            print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
            print "Development server is running at http://%s:%s/" % (addr, port)
            print "Using the Werkzeug debugger (http://werkzeug.pocoo.org/)"
            print "Quit the server with %s." % quit_command
            path = options.get('admin_media_path', '')
            if not path:
                # prefer the static/ location used by newer Django layouts
                admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
                if os.path.isdir(admin_media_path):
                    path = admin_media_path
                else:
                    path = os.path.join(django.__path__[0], 'contrib/admin/media')
            handler = WSGIHandler()
            if USE_ADMINMEDIAHANDLER:
                handler = AdminMediaHandler(handler, path)
            if USE_STATICFILES:
                use_static_handler = options.get('use_static_handler', True)
                insecure_serving = options.get('insecure_serving', False)
                if use_static_handler and (settings.DEBUG or insecure_serving):
                    handler = StaticFilesHandler(handler)
            if open_browser:
                import webbrowser
                url = "http://%s:%s/" % (addr, port)
                webbrowser.open(url)
            # Werkzeug's simple server with the interactive debugger enabled.
            run_simple(addr, int(port), DebuggedApplication(handler, True),
                use_reloader=use_reloader, use_debugger=True, threaded=threaded)
        inner_run()
| rtucker-mozilla/inventory | vendor-local/src/django-extensions/django_extensions/management/commands/runserver_plus.py | Python | bsd-3-clause | 4,895 | [
"VisIt"
] | 016578c3721273f5021a1fcd543efc40d19455668e23ecad228a6af2b19062ae |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/export.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import codecs
import collections
import copy
import csv
import datetime
import itertools
import logging
import os
import re
import shutil
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from king_phisher import archive
from king_phisher import errors
from king_phisher import ipaddress
from king_phisher import serializers
from king_phisher import utilities
from boltons import iterutils
import dateutil.tz
import geojson
from smoke_zephyr.utilities import escape_single_quote
from smoke_zephyr.utilities import unescape_single_quote
from smoke_zephyr.utilities import parse_case_snake_to_camel
import xlsxwriter
# Names exported when this module is wildcard-imported.
__all__ = (
	'campaign_to_xml',
	'convert_value',
	'liststore_export',
	'liststore_to_csv',
	'liststore_to_xlsx_worksheet',
	'message_data_to_kpm'
)
# Maps message configuration keys to the member names used to store the
# corresponding files inside a KPM archive.
KPM_ARCHIVE_FILES = {
	'attachment_file': 'message_attachment.bin',
	'target_file': 'target_file.csv'
}
# Matches {{ inline_image('path') }} template tags; group 1 captures the
# quoted path argument (single or double quoted, backslash escapes allowed).
KPM_INLINE_IMAGE_REGEXP = re.compile(r"""{{\s*inline_image\(\s*(('(?:[^'\\]|\\.)+')|("(?:[^"\\]|\\.)+"))\s*\)\s*}}""")
logger = logging.getLogger('KingPhisher.Client.export')
# Options controlling XLSX worksheet rendering: an iterable of per-column
# widths and a title placed in a merged cell above the table.
XLSXWorksheetOptions = collections.namedtuple('XLSXWorksheetOptions', ('column_widths', 'title'))
def message_template_to_kpm(template):
	"""
	Rewrite *template* so that each inline_image tag references only the base
	name of its file, collecting the original file paths along the way.

	:param str template: The message template text to process.
	:return: The rewritten template and the list of referenced file paths.
	:rtype: tuple
	"""
	referenced_files = []
	position = 0
	while True:
		match = KPM_INLINE_IMAGE_REGEXP.search(template, position)
		if match is None:
			break
		# strip the surrounding quote characters before unescaping the path
		image_path = unescape_single_quote(match.group(1)[1:-1])
		referenced_files.append(image_path)
		replacement = "{{{{ inline_image('{0}') }}}}".format(escape_single_quote(os.path.basename(image_path)))
		template = template[:match.start()] + replacement + template[match.end():]
		# resume scanning immediately after the rewritten tag
		position = match.start() + len(replacement)
	return template, referenced_files
def message_template_from_kpm(template, files):
	"""
	Rewrite *template*, replacing the base name referenced by each
	inline_image tag with the matching full path from *files*. Tags whose
	base name has no corresponding entry in *files* are left untouched.

	:param str template: The message template text to process.
	:param list files: The file paths available for restoring the tags.
	:return: The processed template.
	:rtype: str
	"""
	path_by_basename = dict(zip(map(os.path.basename, files), files))
	position = 0
	while True:
		match = KPM_INLINE_IMAGE_REGEXP.search(template, position)
		if match is None:
			break
		base_name = unescape_single_quote(match.group(1)[1:-1])
		full_path = path_by_basename.get(base_name)
		if not full_path:
			# no known file for this tag, skip past it unchanged
			position = match.end()
			continue
		replacement = "{{{{ inline_image('{0}') }}}}".format(escape_single_quote(full_path))
		template = template[:match.start()] + replacement + template[match.end():]
		# resume scanning immediately after the rewritten tag
		position = match.start() + len(replacement)
	return template
def convert_value(table_name, key, value):
	"""
	Perform any conversions necessary to neatly display the data in XML format.

	:param str table_name: The table name that the key and value pair are from.
	:param str key: The data key.
	:param value: The data value to convert.
	:return: The converted value.
	:rtype: str
	"""
	# datetimes are rendered in ISO-8601 form, everything else other than
	# None is rendered via str()
	if isinstance(value, datetime.datetime):
		return value.isoformat()
	return None if value is None else str(value)
def campaign_to_xml(rpc, campaign_id, xml_file, encoding='utf-8'):
	"""
	Load all information for a particular campaign and dump it to an XML file.

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str xml_file: The destination file for the XML data.
	:param str encoding: The encoding to use for strings.
	"""
	tzutc = dateutil.tz.tzutc()
	root = ET.Element('king_phisher')
	# Generate export metadata
	metadata = ET.SubElement(root, 'metadata')
	serializers.to_elementtree_subelement(
		metadata,
		'timestamp',
		datetime.datetime.utcnow().replace(tzinfo=tzutc),
		attrib={'utc': 'true'}
	)
	# the export schema version
	serializers.to_elementtree_subelement(metadata, 'version', '1.3')
	campaign = ET.SubElement(root, 'campaign')
	logger.info('gathering campaign information for export')
	# number of table rows fetched per GraphQL request
	page_size = 1000
	try:
		campaign_info = rpc.graphql_find_file('get_campaign_export.graphql', id=campaign_id, page=page_size)['db']['campaign']
	except errors.KingPhisherGraphQLQueryError as error:
		logger.error('graphql error: ' + error.message)
		raise
	# write the top-level campaign attributes; the table data is paged
	# through separately below
	for key, value in campaign_info.items():
		if key in ('landingPages', 'messages', 'visits', 'credentials', 'deaddropDeployments', 'deaddropConnections'):
			continue
		if isinstance(value, datetime.datetime):
			# mark timestamps as UTC so the export is timezone-unambiguous
			value = value.replace(tzinfo=tzutc)
		serializers.to_elementtree_subelement(campaign, key, value)
	table_elements = {}
	# page through the campaign's tables until every one is exhausted
	while True:
		cursor = None  # set later if any table hasNextPage
		for table_name in ('landing_pages', 'messages', 'visits', 'credentials', 'deaddrop_deployments', 'deaddrop_connections'):
			gql_table_name = parse_case_snake_to_camel(table_name, upper_first=False)
			if campaign_info[gql_table_name]['pageInfo']['hasNextPage']:
				# NOTE(review): a single cursor variable is shared by all tables
				# here (the last table with hasNextPage wins) - this assumes the
				# GraphQL query advances all tables together; confirm
				cursor = campaign_info[gql_table_name]['pageInfo']['endCursor']
			table = campaign_info[gql_table_name]['edges']
			if table_name not in table_elements:
				table_elements[table_name] = ET.SubElement(campaign, table_name)
			for node in table:
				# each row element uses the singular form of the table name
				table_row_element = ET.SubElement(table_elements[table_name], table_name[:-1])
				for key, value in node['node'].items():
					if isinstance(value, datetime.datetime):
						value = value.replace(tzinfo=tzutc)
					serializers.to_elementtree_subelement(table_row_element, key, value)
		if cursor is None:
			break
		campaign_info = rpc.graphql_find_file('get_campaign_export.graphql', id=campaign_id, cursor=cursor, page=page_size)['db']['campaign']
	logger.info('completed processing campaign information for export')
	# pretty-print the assembled document before writing it out
	document = minidom.parseString(ET.tostring(root))
	with open(xml_file, 'wb') as file_h:
		file_h.write(document.toprettyxml(indent='  ', encoding=encoding))
	logger.info('campaign export complete')
def campaign_credentials_to_msf_txt(rpc, campaign_id, target_file):
	"""
	Export credentials into a format that can easily be used with Metasploit's
	USERPASS_FILE option.

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str target_file: The destination file for the credential data.
	"""
	# one "username password" pair per line, as expected by USERPASS_FILE
	with open(target_file, 'w') as out_file:
		for edge in _get_graphql_campaign_credentials(rpc, campaign_id):
			cred = edge['node']
			out_file.write("{0} {1}\n".format(cred['username'], cred['password']))
def _get_graphql_campaign_credentials(rpc, campaign_id):
	# Fetch the campaign's credential edges via GraphQL. This query does not
	# paginate; it relies on the server returning every edge in one response.
	results = rpc.graphql("""\
	query getCampaignCredentials($campaign: String!) {
		db {
			campaign(id: $campaign) {
				credentials {
					edges {
						node {
							username
							password
						}
					}
				}
			}
		}
	}""", {'campaign': campaign_id})
	return results['db']['campaign']['credentials']['edges']
def campaign_visits_to_geojson(rpc, campaign_id, geojson_file):
	"""
	Export the geo location information for all the visits of a campaign into
	the `GeoJSON <http://geojson.org/>`_ format.

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str geojson_file: The destination file for the GeoJSON data.
	"""
	ips_for_georesolution = {}
	ip_counter = collections.Counter()
	for visit_node in _get_graphql_campaign_visits(rpc, campaign_id):
		visit = visit_node['node']
		ip_counter.update((visit['ip'],))
		visitor_ip = ipaddress.ip_address(visit['ip'])
		# only public IPv4 addresses are candidates for geo resolution
		if not isinstance(visitor_ip, ipaddress.IPv4Address):
			continue
		if visitor_ip.is_loopback or visitor_ip.is_private:
			continue
		# track the earliest firstSeen value for each unique address
		if not visitor_ip in ips_for_georesolution:
			ips_for_georesolution[visitor_ip] = visit['firstSeen']
		elif ips_for_georesolution[visitor_ip] > visit['firstSeen']:
			ips_for_georesolution[visitor_ip] = visit['firstSeen']
	# resolve addresses in order of their earliest visit
	ips_for_georesolution = [ip for (ip, _) in sorted(ips_for_georesolution.items(), key=lambda x: x[1])]
	locations = {}
	# look up addresses in batches of 50 per RPC call
	for ip_addresses in iterutils.chunked(ips_for_georesolution, 50):
		locations.update(rpc.geoip_lookup_multi(ip_addresses))
	points = []
	for ip, location in locations.items():
		# skip addresses which did not resolve to usable coordinates
		if not (location.coordinates and location.coordinates[0] and location.coordinates[1]):
			continue
		points.append(geojson.Feature(geometry=location, properties={'count': ip_counter[ip], 'ip-address': ip}))
	feature_collection = geojson.FeatureCollection(points)
	with open(geojson_file, 'w') as file_h:
		serializers.JSON.dump(feature_collection, file_h, pretty=True)
def _get_graphql_campaign_visits(rpc, campaign_id):
	# Fetch the campaign's visit edges (first seen timestamp and source IP)
	# via GraphQL. This query does not paginate; it relies on the server
	# returning every edge in one response.
	results = rpc.graphql("""\
	query getCampaignVisits($campaign: String!) {
		db {
			campaign(id: $campaign) {
				visits {
					edges {
						node {
							firstSeen
							ip
						}
					}
				}
			}
		}
	}""", {'campaign': campaign_id})
	return results['db']['campaign']['visits']['edges']
def message_data_from_kpm(target_file, dest_dir, encoding='utf-8'):
	"""
	Retrieve the stored details describing a message from a previously exported
	file.

	:param str target_file: The file to load as a message archive.
	:param str dest_dir: The directory to extract data and attachment files to.
	:param str encoding: The encoding to use for strings.
	:return: The restored details from the message config.
	:rtype: dict
	"""
	if not archive.is_archive(target_file):
		logger.warning('the file is not recognized as a valid archive')
		raise errors.KingPhisherInputValidationError('file is not in the correct format')
	kpm = archive.ArchiveFile(target_file, 'r')
	# NOTE(review): member names are filtered using os.path.sep; archive member
	# names conventionally use '/' - confirm behavior where os.path.sep != '/'
	attachment_member_names = [n for n in kpm.file_names if n.startswith('attachments' + os.path.sep)]
	attachments = []
	# the message configuration member is mandatory for a valid archive
	if not kpm.has_file('message_config.json'):
		logger.warning('the kpm archive is missing the message_config.json file')
		raise errors.KingPhisherInputValidationError('data is missing from the message archive')
	message_config = kpm.get_json('message_config.json')
	# this setting is not restored from an archive
	message_config.pop('company_name', None)
	if attachment_member_names:
		# extract every attachment member into dest_dir/attachments
		attachment_dir = os.path.join(dest_dir, 'attachments')
		if not os.path.isdir(attachment_dir):
			os.mkdir(attachment_dir)
		for file_name in attachment_member_names:
			arcfile_h = kpm.get_file(file_name)
			file_path = os.path.join(attachment_dir, os.path.basename(file_name))
			with open(file_path, 'wb') as file_h:
				shutil.copyfileobj(arcfile_h, file_h)
			attachments.append(file_path)
		logger.debug("extracted {0} attachment file{1} from the archive".format(len(attachments), 's' if len(attachments) > 1 else ''))
	# restore the well-known archive members referenced by the configuration
	for config_name, file_name in KPM_ARCHIVE_FILES.items():
		if not file_name in kpm.file_names:
			# a missing member is only an error when the configuration
			# references it
			if config_name in message_config:
				logger.warning("the kpm archive is missing the {0} file".format(file_name))
				raise errors.KingPhisherInputValidationError('data is missing from the message archive')
			continue
		if not message_config.get(config_name):
			logger.warning("the kpm message configuration is missing the {0} setting".format(config_name))
			raise errors.KingPhisherInputValidationError('data is missing from the message archive')
		arcfile_h = kpm.get_file(file_name)
		file_path = os.path.join(dest_dir, os.path.basename(message_config[config_name]))
		with open(file_path, 'wb') as file_h:
			shutil.copyfileobj(arcfile_h, file_h)
		# point the configuration at the newly extracted location
		message_config[config_name] = file_path
	if 'message_content.html' in kpm.file_names:
		if 'html_file' not in message_config:
			logger.warning('the kpm message configuration is missing the html_file setting')
			raise errors.KingPhisherInputValidationError('data is missing from the message archive')
		arcfile_h = kpm.get_file('message_content.html')
		file_path = os.path.join(dest_dir, os.path.basename(message_config['html_file']))
		with open(file_path, 'wb') as file_h:
			# rewrite inline_image tags so they reference the extracted files
			file_h.write(message_template_from_kpm(arcfile_h.read().decode(encoding), attachments).encode(encoding))
		message_config['html_file'] = file_path
	elif 'html_file' in message_config:
		logger.warning('the kpm archive is missing the message_content.html file')
		raise errors.KingPhisherInputValidationError('data is missing from the message archive')
	kpm.close()
	return message_config
def message_data_to_kpm(message_config, target_file, encoding='utf-8'):
	"""
	Save details describing a message to the target file.

	:param dict message_config: The message details from the client configuration.
	:param str target_file: The file to write the data to.
	:param str encoding: The encoding to use for strings.
	"""
	# operate on a shallow copy so the caller's configuration is not modified
	message_config = copy.copy(message_config)
	kpm = archive.ArchiveFile(target_file, 'w')
	# archive the well-known files referenced by the configuration
	for config_name, file_name in KPM_ARCHIVE_FILES.items():
		if os.access(message_config.get(config_name, ''), os.R_OK):
			kpm.add_file(file_name, message_config[config_name])
			# only store the base name, full paths are machine-specific
			message_config[config_name] = os.path.basename(message_config[config_name])
			continue
		# the file is configured but unreadable, drop the setting
		if len(message_config.get(config_name, '')):
			logger.info("the specified {0} '{1}' is not readable, the setting will be removed".format(config_name, message_config[config_name]))
		if config_name in message_config:
			del message_config[config_name]
	if os.access(message_config.get('html_file', ''), os.R_OK):
		with codecs.open(message_config['html_file'], 'r', encoding=encoding) as file_h:
			template = file_h.read()
		message_config['html_file'] = os.path.basename(message_config['html_file'])
		# rewrite inline_image tags to base names and collect the file paths
		template, attachments = message_template_to_kpm(template)
		logger.debug("identified {0} attachment file{1} to be archived".format(len(attachments), 's' if len(attachments) > 1 else ''))
		kpm.add_data('message_content.html', template)
		for attachment in attachments:
			# silently skip unreadable attachments
			if os.access(attachment, os.R_OK):
				kpm.add_file(os.path.join('attachments', os.path.basename(attachment)), attachment)
	else:
		if len(message_config.get('html_file', '')):
			logger.info("the specified html_file '{0}' is not readable, the setting will be removed".format(message_config['html_file']))
		if 'html_file' in message_config:
			del message_config['html_file']
	kpm.add_data('message_config.json', serializers.JSON.dumps(message_config, pretty=True))
	kpm.close()
	return
def _split_columns(columns):
	# Resolve a mapping of store column ids to display names into two parallel
	# tuples: (display names, column ids). An OrderedDict keeps its insertion
	# order while a plain dict is ordered by its sorted column ids.
	if isinstance(columns, collections.OrderedDict):
		ordered_ids = tuple(columns.keys())
	else:
		ordered_ids = tuple(sorted(columns.keys()))
	return tuple(columns[cid] for cid in ordered_ids), ordered_ids
def liststore_export(store, columns, cb_write, cb_write_args, row_offset=0, write_columns=True):
	"""
	A function to facilitate writing values from a list store to an arbitrary
	callback for exporting to different formats. The callback will be called
	with the row number, the column values and the additional arguments
	specified in *\\*cb_write_args*.

	.. code-block:: python

	  cb_write(row, column_values, *cb_write_args).

	:param store: The store to export the information from.
	:type store: :py:class:`Gtk.ListStore`
	:param dict columns: A dictionary mapping store column ids to the value names.
	:param function cb_write: The callback function to be called for each row of data.
	:param tuple cb_write_args: Additional arguments to pass to *cb_write*.
	:param int row_offset: A modifier value to add to the row numbers passed to *cb_write*.
	:param bool write_columns: Write the column names to the export.
	:return: The number of rows that were written.
	:rtype: int
	"""
	column_names, store_columns = _split_columns(columns)
	if write_columns:
		# the header always occupies row zero
		cb_write(0, column_names, *cb_write_args)
	rows_written = 0
	store_iter = store.get_iter_first()
	while store_iter:
		values = collections.deque()
		for store_column in store_columns:
			cell_value = store.get_value(store_iter, store_column)
			if isinstance(cell_value, datetime.datetime):
				cell_value = utilities.format_datetime(cell_value)
			values.append(cell_value)
		rows_written += 1
		cb_write(rows_written + row_offset, values, *cb_write_args)
		store_iter = store.iter_next(store_iter)
	return rows_written
def _csv_write(row, columns, writer):
	# Adapter for liststore_export; the row number is unused because the csv
	# writer always appends rows sequentially.
	values = tuple(columns)
	writer.writerow(values)
def liststore_to_csv(store, target_file, columns):
	"""
	Write the contents of a :py:class:`Gtk.ListStore` to a csv file.

	:param store: The store to export the information from.
	:type store: :py:class:`Gtk.ListStore`
	:param str target_file: The destination file for the CSV data.
	:param dict columns: A dictionary mapping store column ids to the value names.
	:return: The number of rows that were written.
	:rtype: int
	"""
	# use a context manager so the file handle is closed even when the export
	# raises part way through (previously the handle leaked on error)
	with open(target_file, 'w') as target_file_h:
		writer = csv.writer(target_file_h, quoting=csv.QUOTE_ALL)
		return liststore_export(store, columns, _csv_write, (writer,))
def _xlsx_write(row, columns, worksheet, row_format=None):
	# Write each value into consecutive cells of the given worksheet row.
	column = 0
	for text in columns:
		worksheet.write(row, column, text, row_format)
		column += 1
def liststore_to_xlsx_worksheet(store, worksheet, columns, title_format, xlsx_options=None):
	"""
	Write the contents of a :py:class:`Gtk.ListStore` to an XLSX workseet.

	:param store: The store to export the information from.
	:type store: :py:class:`Gtk.ListStore`
	:param worksheet: The destination sheet for the store's data.
	:type worksheet: :py:class:`xlsxwriter.worksheet.Worksheet`
	:param dict columns: A dictionary mapping store column ids to the value names.
	:param title_format: The formatting applied to the optional title cell.
	:type title_format: :py:class:`xlsxwriter.format.Format`
	:param xlsx_options: A collection of additional options for formatting the Excel Worksheet.
	:type xlsx_options: :py:class:`.XLSXWorksheetOptions`
	:return: The number of rows that were written.
	:rtype: int
	"""
	utilities.assert_arg_type(worksheet, xlsxwriter.worksheet.Worksheet, 2)
	utilities.assert_arg_type(columns, dict, 3)
	utilities.assert_arg_type(title_format, xlsxwriter.format.Format, 4)
	utilities.assert_arg_type(xlsx_options, (type(None), XLSXWorksheetOptions), 5)
	# apply column widths: a uniform default of 30 or the configured values
	if xlsx_options is None:
		worksheet.set_column(0, len(columns), 30)
	else:
		for column, width in enumerate(xlsx_options.column_widths):
			worksheet.set_column(column, column, width)
	column_names, _ = _split_columns(columns)
	# when a title is configured, reserve the top two rows for it
	if xlsx_options is None:
		start_row = 0
	else:
		start_row = 2
		worksheet.merge_range(0, 0, 0, len(column_names) - 1, xlsx_options.title, title_format)
	# data rows are written without headers; the table below supplies them
	row_count = liststore_export(store, columns, _xlsx_write, (worksheet,), row_offset=start_row, write_columns=False)
	if not row_count:
		# an empty table cannot be added, write plain column headers instead
		column_ = 0
		for column_name in column_names:
			worksheet.write(start_row, column_, column_name)
			column_ += 1
		return row_count
	options = {
		'columns': list({'header': column_name} for column_name in column_names),
		'style': 'Table Style Medium 1'
	}
	worksheet.add_table(start_row, 0, row_count + start_row, len(column_names) - 1, options=options)
	# keep the header row visible while scrolling
	worksheet.freeze_panes(1 + start_row, 0)
	return row_count
| guitarmanj/king-phisher | king_phisher/client/export.py | Python | bsd-3-clause | 20,188 | [
"VisIt"
] | 022ab9e955332fab9997ba20c387e6a961b94d9d50c8d977817ff4cdb2960b92 |
import logging
import time
import functools
import urllib
import random
try:
import simplejson as json
except ImportError:
import json # pyflakes.ignore
import tornado.ioloop
import tornado.httpclient
from backoff_timer import BackoffTimer
from client import Client
import nsq
import async
class Reader(Client):
"""
Reader provides high-level functionality for building robust NSQ consumers in Python
on top of the async module.
Reader receives messages over the specified ``topic/channel`` and calls ``message_handler``
for each message (up to ``max_tries``).
Multiple readers can be instantiated in a single process (to consume from multiple
topics/channels at once).
Supports various hooks to modify behavior when heartbeats are received, to temporarily
disable the reader, and pre-process/validate messages.
When supplied a list of ``nsqlookupd`` addresses, it will periodically poll those
addresses to discover new producers of the specified ``topic``.
It maintains a sufficient RDY count based on the # of producers and your configured
``max_in_flight``.
Handlers should be defined as shown in the examples below. The handler receives a
:class:`nsq.Message` object that has instance methods :meth:`nsq.Message.finish`,
:meth:`nsq.Message.requeue`, and :meth:`nsq.Message.touch` to respond to ``nsqd``.
When messages are not responded to explicitly, it is responsible for sending
``FIN`` or ``REQ`` commands based on return value of ``message_handler``. When
re-queueing, it will backoff from processing additional messages for an increasing
delay (calculated exponentially based on consecutive failures up to ``max_backoff_duration``).
Synchronous example::
import nsq
def handler(message):
print message
return True
r = nsq.Reader(message_handler=handler,
lookupd_http_addresses=['http://127.0.0.1:4161'],
topic='nsq_reader', channel='asdf', lookupd_poll_interval=15)
nsq.run()
Asynchronous example::
import nsq
buf = []
def process_message(message):
global buf
message.enable_async()
# cache the message for later processing
buf.append(message)
if len(buf) >= 3:
for msg in buf:
print msg
msg.finish()
buf = []
else:
print 'deferring processing'
r = nsq.Reader(message_handler=process_message,
lookupd_http_addresses=['http://127.0.0.1:4161'],
topic='nsq_reader', channel='async', max_in_flight=9)
nsq.run()
:param message_handler: the callable that will be executed for each message received
:param topic: specifies the desired NSQ topic
:param channel: specifies the desired NSQ channel
:param name: a string that is used for logging messages (defaults to 'topic:channel')
:param nsqd_tcp_addresses: a sequence of string addresses of the nsqd instances this reader
should connect to
:param lookupd_http_addresses: a sequence of string addresses of the nsqlookupd instances this
reader should query for producers of the specified topic
:param max_tries: the maximum number of attempts the reader will make to process a message after
which messages will be automatically discarded
:param max_in_flight: the maximum number of messages this reader will pipeline for processing.
this value will be divided evenly amongst the configured/discovered nsqd producers
:param lookupd_poll_interval: the amount of time in seconds between querying all of the supplied
nsqlookupd instances. a random amount of time based on thie value will be initially
introduced in order to add jitter when multiple readers are running
:param lookupd_poll_jitter: The maximum fractional amount of jitter to add to the
lookupd pool loop. This helps evenly distribute requests even if multiple consumers
restart at the same time.
:param low_rdy_idle_timeout: the amount of time in seconds to wait for a message from a producer
when in a state where RDY counts are re-distributed (ie. max_in_flight < num_producers)
:param max_backoff_duration: the maximum time we will allow a backoff state to last in seconds
:param \*\*kwargs: passed to :class:`nsq.AsyncConn` initialization
"""
    def __init__(self, topic, channel, message_handler=None, name=None,
            nsqd_tcp_addresses=None, lookupd_http_addresses=None,
            max_tries=5, max_in_flight=1, lookupd_poll_interval=60,
            low_rdy_idle_timeout=10, max_backoff_duration=128, lookupd_poll_jitter=0.3,
            **kwargs):
        super(Reader, self).__init__(**kwargs)
        # validate arguments up front so misconfiguration fails fast
        assert isinstance(topic, (str, unicode)) and len(topic) > 0
        assert isinstance(channel, (str, unicode)) and len(channel) > 0
        assert isinstance(max_in_flight, int) and max_in_flight > 0
        assert isinstance(max_backoff_duration, (int, float)) and max_backoff_duration > 0
        assert isinstance(name, (str, unicode, None.__class__))
        assert isinstance(lookupd_poll_interval, int)
        assert isinstance(lookupd_poll_jitter, float)
        assert lookupd_poll_jitter >= 0 and lookupd_poll_jitter <= 1
        # normalize a single address string into a list
        if nsqd_tcp_addresses:
            if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
                assert isinstance(nsqd_tcp_addresses, (str, unicode))
                nsqd_tcp_addresses = [nsqd_tcp_addresses]
        else:
            nsqd_tcp_addresses = []
        if lookupd_http_addresses:
            if not isinstance(lookupd_http_addresses, (list, set, tuple)):
                assert isinstance(lookupd_http_addresses, (str, unicode))
                lookupd_http_addresses = [lookupd_http_addresses]
            # shuffle so multiple readers do not all query the same lookupd first
            random.shuffle(lookupd_http_addresses)
        else:
            lookupd_http_addresses = []
        # at least one source of nsqd producers is required
        assert nsqd_tcp_addresses or lookupd_http_addresses
        self.name = name or (topic + ':' + channel)
        self.message_handler = None
        if message_handler:
            self.set_message_handler(message_handler)
        self.topic = topic
        self.channel = channel
        self.nsqd_tcp_addresses = nsqd_tcp_addresses
        self.lookupd_http_addresses = lookupd_http_addresses
        self.lookupd_query_index = 0  # round-robin index into lookupd addresses
        self.max_tries = max_tries
        self.max_in_flight = max_in_flight
        self.low_rdy_idle_timeout = low_rdy_idle_timeout
        self.total_rdy = 0  # aggregate RDY currently granted across all conns
        self.need_rdy_redistributed = False
        self.lookupd_poll_interval = lookupd_poll_interval
        self.lookupd_poll_jitter = lookupd_poll_jitter
        self.random_rdy_ts = time.time()
        # passed through to each AsyncConn created by this reader
        self.conn_kwargs = kwargs
        self.backoff_timer = BackoffTimer(0, max_backoff_duration)
        self.backoff_timeout = None
        self.backoff_block = False
        self.conns = {}
        # per-address timestamps used to throttle reconnect attempts
        self.connection_attempts = {}
        self.http_client = tornado.httpclient.AsyncHTTPClient(io_loop=self.io_loop)
        # will execute when run() is called (for all Reader instances)
        self.io_loop.add_callback(self._run)
    def _run(self):
        # deferred startup, scheduled from __init__ and executed once the
        # IOLoop starts
        assert self.message_handler, "you must specify the Reader's message_handler"
        logging.info('[%s] starting reader for %s/%s...', self.name, self.topic, self.channel)
        # connect directly to any statically configured nsqd instances
        for addr in self.nsqd_tcp_addresses:
            address, port = addr.split(':')
            self.connect_to_nsqd(address, int(port))
        # periodically rebalance RDY across the open connections
        tornado.ioloop.PeriodicCallback(self._redistribute_rdy_state,
                                        5 * 1000,
                                        io_loop=self.io_loop).start()
        if not self.lookupd_http_addresses:
            return
        # trigger the first lookup query manually
        self.query_lookupd()
        periodic = tornado.ioloop.PeriodicCallback(self.query_lookupd,
                                                   self.lookupd_poll_interval * 1000,
                                                   io_loop=self.io_loop)
        # randomize the time we start this poll loop so that all
        # consumers don't query at exactly the same time
        delay = random.random() * self.lookupd_poll_interval * self.lookupd_poll_jitter
        self.io_loop.add_timeout(time.time() + delay, periodic.start)
    def set_message_handler(self, message_handler):
        """
        Assigns the callback method to be executed for each message received

        :param message_handler: a callable that takes a single argument
        """
        # reject non-callables now rather than failing on the first message
        assert callable(message_handler), 'message_handler must be callable'
        self.message_handler = message_handler
def _connection_max_in_flight(self):
return max(1, self.max_in_flight / max(1, len(self.conns)))
def is_starved(self):
"""
Used to identify when buffered messages should be processed and responded to.
When max_in_flight > 1 and you're batching messages together to perform work
is isn't possible to just compare the len of your list of buffered messages against
your configured max_in_flight (because max_in_flight may not be evenly divisible
by the number of producers you're connected to, ie. you might never get that many
messages... it's a *max*).
Example::
def message_handler(self, nsq_msg, reader):
# buffer messages
if reader.is_starved():
# perform work
reader = nsq.Reader(...)
reader.set_message_handler(functools.partial(message_handler, reader=reader))
nsq.run()
"""
for conn in self.conns.itervalues():
if conn.in_flight > 0 and conn.in_flight >= (conn.last_rdy * 0.85):
return True
return False
    def _on_message(self, conn, message, **kwargs):
        # Entry point for a connection's 'message' event; isolate handler
        # failures so an exception cannot tear down the connection's IO loop.
        try:
            self._handle_message(conn, message)
        except Exception:
            logging.exception('[%s:%s] failed to handle_message() %r', conn.id, self.name, message)
    def _handle_message(self, conn, message):
        # receiving a message consumes one unit of the aggregate RDY count
        self.total_rdy = max(self.total_rdy - 1, 0)
        rdy_conn = conn
        if len(self.conns) > self.max_in_flight:
            # if all connections aren't getting RDY
            # occasionally randomize which connection gets RDY
            time_since_random_rdy = time.time() - self.random_rdy_ts
            if time_since_random_rdy > 30:
                self.random_rdy_ts = time.time()
                conns_with_no_rdy = [c for c in self.conns.itervalues() if not c.rdy]
                # NOTE(review): random.choice raises IndexError if every
                # connection currently has RDY > 0 - confirm this state is
                # unreachable when len(conns) > max_in_flight
                rdy_conn = random.choice(conns_with_no_rdy)
                if rdy_conn is not conn:
                    logging.info('[%s:%s] redistributing RDY to %s',
                        conn.id, self.name, rdy_conn.id)
        self._maybe_update_rdy(rdy_conn)
        success = False
        try:
            # user hooks: preprocess -> validate -> max_tries check -> handler
            pre_processed_message = self.preprocess_message(message)
            if not self.validate_message(pre_processed_message):
                return message.finish()
            if message.attempts > self.max_tries:
                self.giving_up(message)
                return message.finish()
            success = self.process_message(message)
        except Exception:
            logging.exception('[%s:%s] uncaught exception while handling message %s body:%r',
                conn.id, self.name, message.id, message.body)
            if not message.has_responded():
                return message.requeue()
        # in synchronous mode the handler's return value decides FIN vs REQ;
        # async handlers respond explicitly via the message object
        if not message.is_async() and not message.has_responded():
            assert success is not None, 'ambiguous return value for synchronous mode'
            if success:
                return message.finish()
            return message.requeue()
def _maybe_update_rdy(self, conn):
if self.backoff_timer.get_interval():
return
if conn.rdy <= 1 or conn.rdy < int(conn.last_rdy * 0.25):
self._send_rdy(conn, self._connection_max_in_flight())
def _finish_backoff_block(self):
self.backoff_timeout = None
self.backoff_block = False
# test the waters after finishing a backoff round
# if we have no connections, this will happen when a new connection gets RDY 1
if self.conns:
conn = random.choice(self.conns.values())
logging.info('[%s:%s] testing backoff state with RDY 1', conn.id, self.name)
self._send_rdy(conn, 1)
# for tests
return conn
def _on_backoff_resume(self, success, **kwargs):
start_backoff_interval = self.backoff_timer.get_interval()
if success:
self.backoff_timer.success()
elif not self.backoff_block:
self.backoff_timer.failure()
self._enter_continue_or_exit_backoff(start_backoff_interval)
    def _enter_continue_or_exit_backoff(self, start_backoff_interval):
        # Take care of backoff in the appropriate cases.  When this
        # happens, we set a failure on the backoff timer and set the RDY count to zero.
        # Once the backoff time has expired, we allow *one* of the connections let
        # a single message through to test the water.  This will continue until we
        # reach no backoff in which case we go back to the normal RDY count.
        current_backoff_interval = self.backoff_timer.get_interval()

        # do nothing while a backoff block is already in progress
        if self.backoff_block:
            return

        # we're out of backoff completely, return to full blast for all conns
        if start_backoff_interval and not current_backoff_interval:
            rdy = self._connection_max_in_flight()
            logging.info('[%s] backoff complete, resuming normal operation (%d connections)',
                self.name, len(self.conns))
            for c in self.conns.values():
                self._send_rdy(c, rdy)
            return

        # enter or continue a backoff iteration
        if current_backoff_interval:
            self._start_backoff_block()
def _start_backoff_block(self):
self.backoff_block = True
backoff_interval = self.backoff_timer.get_interval()
logging.info('[%s] backing off for %0.2f seconds (%d connections)',
self.name, backoff_interval, len(self.conns))
for c in self.conns.values():
self._send_rdy(c, 0)
self.backoff_timeout = self.io_loop.add_timeout(time.time() + backoff_interval,
self._finish_backoff_block)
    def _rdy_retry(self, conn, value):
        # A previously scheduled RDY retry has fired; clear the timeout handle
        # and attempt the state change again.
        conn.rdy_timeout = None
        self._send_rdy(conn, value)
    def _send_rdy(self, conn, value):
        # cancel any RDY retry already pending for this connection
        if conn.rdy_timeout:
            self.io_loop.remove_timeout(conn.rdy_timeout)
            conn.rdy_timeout = None
        # while the reader is disabled, defer the state change and retry in 15s
        if value and self.disabled():
            logging.info('[%s:%s] disabled, delaying RDY state change', conn.id, self.name)
            rdy_retry_callback = functools.partial(self._rdy_retry, conn, value)
            conn.rdy_timeout = self.io_loop.add_timeout(time.time() + 15, rdy_retry_callback)
            return
        # clamp to the per-connection maximum advertised by nsqd
        if value > conn.max_rdy_count:
            value = conn.max_rdy_count
        # never let the aggregate RDY exceed the configured max_in_flight
        if (self.total_rdy + value) > self.max_in_flight:
            if not conn.rdy:
                # if we're going from RDY 0 to non-0 and we couldn't because
                # of the configured max in flight, try again
                rdy_retry_callback = functools.partial(self._rdy_retry, conn, value)
                conn.rdy_timeout = self.io_loop.add_timeout(time.time() + 5, rdy_retry_callback)
            return
        if conn.send_rdy(value):
            # keep the aggregate count in sync with what was actually sent
            self.total_rdy = max(self.total_rdy - conn.rdy + value, 0)
    def connect_to_nsqd(self, host, port):
        """
        Adds a connection to ``nsqd`` at the specified address.

        :param host: the address to connect to
        :param port: the port to connect to
        """
        assert isinstance(host, (str, unicode))
        assert isinstance(port, int)
        # the connection object is constructed (and its event handlers bound)
        # before the duplicate/throttle checks because conn.id is needed for
        # those checks
        conn = async.AsyncConn(host, port, **self.conn_kwargs)
        conn.on('identify', self._on_connection_identify)
        conn.on('identify_response', self._on_connection_identify_response)
        conn.on('error', self._on_connection_error)
        conn.on('close', self._on_connection_close)
        conn.on('ready', self._on_connection_ready)
        conn.on('message', self._on_message)
        conn.on('heartbeat', self._on_heartbeat)
        conn.on('backoff', functools.partial(self._on_backoff_resume, success=False))
        conn.on('resume', functools.partial(self._on_backoff_resume, success=True))
        # already connected (or connecting) to this destination
        if conn.id in self.conns:
            return
        # only attempt to re-connect once every 10s per destination
        # this throttles reconnects to failed endpoints
        now = time.time()
        last_connect_attempt = self.connection_attempts.get(conn.id)
        if last_connect_attempt and last_connect_attempt > now - 10:
            return
        self.connection_attempts[conn.id] = now
        logging.info('[%s:%s] connecting to nsqd', conn.id, self.name)
        conn.connect()
        # the new connection object is returned for callers and tests
        return conn
def _on_connection_ready(self, conn, **kwargs):
    """
    Fired once a connection completes its handshake: subscribe to the
    configured topic/channel, register the connection, and hand out an
    initial RDY count.

    :param conn: the connection that became ready
    """
    conn.send(nsq.subscribe(self.topic, self.channel))
    # re-check to make sure another connection didn't beat this one done
    if conn.id in self.conns:
        # fix: message previously read "anothermatching" (missing space)
        logging.warning(
            '[%s:%s] connected to NSQ but another matching connection already exists',
            conn.id, self.name)
        conn.close()
        return
    if conn.max_rdy_count < self.max_in_flight:
        logging.warning(
            '[%s:%s] max RDY count %d < reader max in flight %d, truncation possible',
            conn.id, self.name, conn.max_rdy_count, self.max_in_flight)
    self.conns[conn.id] = conn
    # we send an initial RDY of 1 up to our configured max_in_flight
    # this resolves two cases:
    # 1. `max_in_flight >= num_conns` ensuring that no connections are ever
    #    *initially* starved since redistribute won't apply
    # 2. `max_in_flight < num_conns` ensuring that we never exceed max_in_flight
    #    and rely on the fact that redistribute will handle balancing RDY across conns
    if not self.backoff_timer.get_interval() or len(self.conns) == 1:
        # only send RDY 1 if we're not in backoff (some other conn
        # should be testing the waters)
        # (but always send it if we're the first)
        self._send_rdy(conn, 1)
def _on_connection_close(self, conn, **kwargs):
    # Clean up accounting for a closed connection, trigger RDY
    # redistribution when needed, and schedule a reconnect when we are
    # not discovering nsqd instances through lookupd.
    if conn.id in self.conns:
        del self.conns[conn.id]
    # reclaim whatever RDY count the closed connection still held
    self.total_rdy = max(self.total_rdy - conn.rdy, 0)
    logging.warning('[%s:%s] connection closed', conn.id, self.name)
    if (conn.rdy_timeout or conn.rdy) and \
            (len(self.conns) == self.max_in_flight or self.backoff_timer.get_interval()):
        # we're toggling out of (normal) redistribution cases and this conn
        # had a RDY count...
        #
        # trigger RDY redistribution to make sure this RDY is moved
        # to a new connection
        self.need_rdy_redistributed = True
    # cancel any deferred RDY change still pending for this dead connection
    if conn.rdy_timeout:
        self.io_loop.remove_timeout(conn.rdy_timeout)
        conn.rdy_timeout = None
    if not self.lookupd_http_addresses:
        # automatically reconnect to nsqd addresses when not using lookupd
        logging.info('[%s:%s] attempting to reconnect in 15s', conn.id, self.name)
        reconnect_callback = functools.partial(self.connect_to_nsqd,
                                               host=conn.host, port=conn.port)
        self.io_loop.add_timeout(time.time() + 15, reconnect_callback)
def query_lookupd(self):
    """
    Trigger a query of the configured ``nsq_lookupd_http_addresses``.

    One endpoint is queried per call, advancing round-robin through the
    configured list; ``_finish_query_lookupd`` handles the async response.
    """
    index = self.lookupd_query_index
    self.lookupd_query_index = (index + 1) % len(self.lookupd_http_addresses)
    endpoint = self.lookupd_http_addresses[index]
    lookupd_url = '%s/lookup?topic=%s' % (endpoint, urllib.quote(self.topic))
    request = tornado.httpclient.HTTPRequest(lookupd_url, method='GET',
                                             connect_timeout=1, request_timeout=2)
    callback = functools.partial(self._finish_query_lookupd, lookupd_url=lookupd_url)
    self.http_client.fetch(request, callback=callback)
def _finish_query_lookupd(self, response, lookupd_url):
    # Callback for the async lookupd HTTP request: validate the response
    # and connect to every producer currently registered for our topic.
    if response.error:
        logging.warning('[%s] lookupd %s query error: %s',
                        self.name, lookupd_url, response.error)
        return
    try:
        lookup_data = json.loads(response.body)
    except ValueError:
        logging.warning('[%s] lookupd %s failed to parse JSON: %r',
                        self.name, lookupd_url, response.body)
        return
    # lookupd embeds its HTTP status code in the JSON envelope
    if lookup_data['status_code'] != 200:
        logging.warning('[%s] lookupd %s responded with %d',
                        self.name, lookupd_url, lookup_data['status_code'])
        return
    for producer in lookup_data['data']['producers']:
        # TODO: this can be dropped for 1.0
        address = producer.get('broadcast_address', producer.get('address'))
        assert address
        # connect_to_nsqd de-duplicates and throttles internally
        self.connect_to_nsqd(address, producer['tcp_port'])
def _redistribute_rdy_state(self):
    # We redistribute RDY counts in a few cases:
    #
    # 1. our # of connections exceeds our configured max_in_flight
    # 2. we're in backoff mode (but not in a current backoff block)
    # 3. something out-of-band has set the need_rdy_redistributed flag (connection closed
    #    that was about to get RDY during backoff)
    #
    # At a high level, we're trying to mitigate stalls related to low-volume
    # producers when we're unable (by configuration or backoff) to provide a RDY count
    # of (at least) 1 to all of our connections.
    if self.disabled() or self.backoff_block:
        return
    if len(self.conns) > self.max_in_flight:
        self.need_rdy_redistributed = True
        logging.debug('redistributing RDY state (%d conns > %d max_in_flight)',
                      len(self.conns), self.max_in_flight)
    backoff_interval = self.backoff_timer.get_interval()
    if backoff_interval and len(self.conns) > 1:
        self.need_rdy_redistributed = True
        logging.debug('redistributing RDY state (%d backoff interval and %d conns > 1)',
                      backoff_interval, len(self.conns))
    if self.need_rdy_redistributed:
        self.need_rdy_redistributed = False
        # first set RDY 0 to all connections that have not received a message within
        # a configurable timeframe (low_rdy_idle_timeout).
        for conn_id, conn in self.conns.iteritems():
            last_message_duration = time.time() - conn.last_msg_timestamp
            logging.debug('[%s:%s] rdy: %d (last message received %.02fs)',
                          conn.id, self.name, conn.rdy, last_message_duration)
            if conn.rdy > 0 and last_message_duration > self.low_rdy_idle_timeout:
                logging.info('[%s:%s] idle connection, giving up RDY count', conn.id, self.name)
                self._send_rdy(conn, 0)
        if backoff_interval:
            # while backing off, aim for a total RDY of 1 across all conns
            max_in_flight = 1 - self.total_rdy
        else:
            max_in_flight = self.max_in_flight - self.total_rdy
        # randomly walk the list of possible connections and send RDY 1 (up to our
        # calculated "max_in_flight"). We only need to send RDY 1 because in both
        # cases described above your per connection RDY count would never be higher.
        #
        # We also don't attempt to avoid the connections who previously might have had RDY 1
        # because it would be overly complicated and not actually worth it (ie. given enough
        # redistribution rounds it doesn't matter).
        possible_conns = self.conns.values()
        while possible_conns and max_in_flight:
            max_in_flight -= 1
            conn = possible_conns.pop(random.randrange(len(possible_conns)))
            logging.info('[%s:%s] redistributing RDY', conn.id, self.name)
            self._send_rdy(conn, 1)
        # for tests
        # NOTE(review): 'conn' is unbound here if the while loop above never
        # executed (no possible conns or max_in_flight <= 0) -- confirm that
        # only tests rely on this return value
        return conn
#
# subclass overridable
#
def process_message(self, message):
    """
    Dispatch a received message to the configured ``message_handler``.

    Subclass and override to customize how your message handlers are
    invoked.

    :param message: the :class:`nsq.Message` received
    """
    handler = self.message_handler
    return handler(message)
def giving_up(self, message):
    """
    Hook invoked when a message has exceeded ``max_tries`` attempts.

    Subclass and override to persist or otherwise handle such messages
    (e.g. writing them to disk).

    :param message: the :class:`nsq.Message` received
    """
    msg_id = message.id
    body = message.body
    logging.warning('[%s] giving up on message %s after max tries %d %r',
                    self.name, msg_id, self.max_tries, body)
def disabled(self):
    """
    Hook consulted during RDY handling to decide whether this Reader is
    currently paused.

    Subclass and override (e.g. examine a file on disk or a cache key) to
    pause message flow during deploys and the like. The base
    implementation never disables the reader.
    """
    return False
def validate_message(self, message):
    """Hook to reject a message before processing; the base implementation accepts everything."""
    return True
def preprocess_message(self, message):
    """Hook to transform a message before validation/processing; identity by default."""
    return message
| pombredanne/pynsq | nsq/reader.py | Python | mit | 26,239 | [
"BLAST"
] | fd26db185b4cf2a522630d2bb7d3b0b049193191652ef6c78b8e9039e55e5976 |
import h5py
import sys
import numpy as np
"""
sl files use an lz4 compression filter for which a plugin needs to be installed
Mac:
sudo port install hdf5-lz4-plugin
Ubuntu: (http://gisaxs.com/index.php/HDF5)
sudo add-apt-repository ppa:eugenwintersberger/pni
sudo apt-get update
sudo apt-get install hdf5-plugin-lz4
Note that the filter isn't actually registered until the first dataset is accessed, so it won't be reported by h5py until then.
Remember to restart terminal running python
Whilst h5py claims to look in the default directory for filters I had to add the environment variable:
os.environ['HDF5_PLUGIN_PATH'] = "/opt/local/lib/hdf5"
"""
class slFile():
def __init__(self, input_filename, region_name=""):
self.region_name = region_name
self.load_file(input_filename)
def _check_file_version(self):
self.file_version = self.sl['Version'][0]
print 'sl file version: {}'.format(self.file_version)
if not self.file_version in range(16, 23):
raise ValueError('File version {} out of range.'.format(self.file_version))
def _get_spotlist(self):
### get root groups from input data
if self.region_name == "":
self.spotlist = range(self.sl['SpectraGroups']['InitialMeasurement']['images'].shape[1])
else:
print self.region_name
region_name = self.sl['Regions'].visit(self.find_name)
if region_name == None:
raise ValueError("Requested region {} not found".format(self.region_name))
self.spotlist = self.sl['Regions'][region_name]['SpotList']
self.spotlist = np.asarray(self.spotlist)
def _get_spectragroup(self):
self.initialMeasurement = self.sl['SpectraGroups']['InitialMeasurement']
self.Mzs = np.asarray(self.initialMeasurement['SamplePositions'][
'SamplePositions']) # we don't write this but will use it for peak detection
self.spectra = self.initialMeasurement['spectra']
def _get_coordinates(self):
### Get Coordinates for spotlist
self.coords = np.asarray(self.sl['Registrations']['0']['Coordinates'])
if np.shape(self.coords)[0] != 3:
self.coords = self.coords.T
if np.shape(self.coords)[0] != 3:
raise ValueError('coords second dimension should be 3 {}'.format(np.shape(self.coords)))
def load_file(self, input_filename):
# get a handle on the file
self.sl = h5py.File(input_filename, 'r') # Readonly, file must exist
self._check_file_version()
self._get_spotlist()
self._get_spectragroup()
self._get_coordinates()
def get_spectrum(self, index):
intensities = np.asarray(self.spectra[index, :])
return self.Mzs, intensities
def find_name(self, name):
if 'name' in self.sl['Regions'][name].attrs.keys():
if self.sl['Regions'][name].attrs['name'] == self.region_name:
assert isinstance(name, object)
return name
def centroid_imzml(input_filename, output_filename, step=[], apodization=False, w_size=10, min_intensity=1e-5,
                   region_name="", prevent_duplicate_pixels=False):
    # write a file to imzml format (centroided)
    """
    Convert an sl file into a centroided imzML file.

    :param input_filename: path of the source .sl file
    :param output_filename: path of the .imzML file to write
    :param step: [x, y, z] pixel pitch; estimated from the coordinates when
        empty (NOTE(review): mutable default is rebound, not mutated, but []
        as a default is fragile -- consider None)
    :param apodization: if True, smooth each spectrum before centroiding
    :param w_size: NOTE(review): currently unused -- presumably intended as
        an apodization window size; confirm
    :type min_intensity: float
    :param min_intensity: minimum intensity passed to the gradient centroid detector
    :param region_name: restrict conversion to a named region ("" = whole file)
    :param prevent_duplicate_pixels: if True, write only one spectrum per
        unique pixel coordinate
    """
    from pyimzml.ImzMLWriter import ImzMLWriter
    from pyMSpec.centroid_detection import gradient
    sl = slFile(input_filename, region_name=region_name)
    mz_dtype = sl.Mzs.dtype
    int_dtype = sl.get_spectrum(0)[1].dtype
    # Convert coords to index -> kinda hacky
    coords = np.asarray(sl.coords.copy()).T.round(5)
    coords -= np.amin(coords, axis=0)
    if step == []:  # have a guess
        # median spacing of the unique coordinate values along each axis
        step = np.array([np.median(np.diff(np.unique(coords[sl.spotlist, i]))) for i in range(3)])
        step[np.isnan(step)] = 1
        print 'estimated pixel size: {} x {}'.format(step[0], step[1])
    coords = coords / np.reshape(step, (3,)).T
    coords = coords.round().astype(int)
    ncol, nrow, _ = np.amax(coords, axis=0) + 1
    print 'new image size: {} x {}'.format(nrow, ncol)
    if prevent_duplicate_pixels:
        # view each coordinate row as one opaque byte record so np.unique
        # de-duplicates whole rows
        b = np.ascontiguousarray(coords).view(np.dtype((np.void, coords.dtype.itemsize * coords.shape[1])))
        _, coord_idx = np.unique(b, return_index=True)
        print np.shape(sl.spotlist), np.shape(coord_idx)
        print "original number of spectra: {}".format(len(coords))
    else:
        coord_idx = range(len(coords))
    n_total = len(coord_idx)
    print 'spectra to write: {}'.format(n_total)
    with ImzMLWriter(output_filename, mz_dtype=mz_dtype, intensity_dtype=int_dtype) as imzml:
        done = 0
        for key in sl.spotlist:
            if all((prevent_duplicate_pixels, key not in coord_idx)):  # skip duplicate pixels
                # print 'skip {}'.format(key)
                continue
            mzs, intensities = sl.get_spectrum(key)
            if apodization:
                from pyMSpec import smoothing
                # todo - add to processing list in imzml
                mzs, intensities = smoothing.apodization(mzs, intensities)
            mzs_c, intensities_c, _ = gradient(mzs, intensities, min_intensity=min_intensity)
            pos = coords[key]
            # mirror the y index so the written image is not flipped vertically
            pos = (pos[0], nrow - 1 - pos[1], pos[2])
            imzml.addSpectrum(mzs_c, intensities_c, pos)
            done += 1
            if done % 1000 == 0:
                print "[%s] progress: %.1f%%" % (input_filename, float(done) * 100.0 / n_total)
    print "finished!"
def centroid_IMS(input_filename, output_filename, instrumentInfo={}, sharedDataInfo={}):
    """
    Convert an sl file into a centroided IMS ``.hdf5`` file.

    :param input_filename: path of the source .sl file
    :param output_filename: path of the .hdf5 file to write
    :param instrumentInfo: attributes copied onto the instrument parameters group
    :param sharedDataInfo: attributes copied onto the sample group
    NOTE(review): the mutable default dicts are only read here, but {}
    defaults are fragile -- consider None.
    """
    from pyMS.centroid_detection import gradient
    # write out a IMS_centroid.hdf5 file
    sl = slFile(input_filename)
    n_total = np.shape(sl.spectra)[0]
    with h5py.File(output_filename, 'w') as f_out:
        ### make root groups for output data
        spectral_data = f_out.create_group('spectral_data')
        spatial_data = f_out.create_group('spatial_data')  # NOTE(review): created but never populated
        shared_data = f_out.create_group('shared_data')
        ### populate common variables - can hardcode as I know what these are for h5 data
        # parameters
        instrument_parameters_1 = shared_data.create_group('instrument_parameters/001')
        if instrumentInfo != {}:
            for tag in instrumentInfo:
                instrument_parameters_1.attrs[tag] = instrumentInfo[tag]
        # ROIs
        # todo - determine and propagate all ROIs
        roi_1 = shared_data.create_group('regions_of_interest/001')
        roi_1.attrs['name'] = 'root region'
        roi_1.attrs['parent'] = ''
        # Sample
        sample_1 = shared_data.create_group('samples/001')
        if sharedDataInfo != {}:
            for tag in sharedDataInfo:
                sample_1.attrs[tag] = sharedDataInfo[tag]
        done = 0
        for key in range(0, n_total):
            mzs, intensities = sl.get_spectrum(key)
            mzs_c, intensities_c, _ = gradient(mzs, intensities)
            this_spectrum = spectral_data.create_group(str(key))
            _ = this_spectrum.create_dataset('centroid_mzs', data=np.float32(mzs_c), compression="gzip",
                                             compression_opts=9)
            # intensities
            _ = this_spectrum.create_dataset('centroid_intensities', data=np.float32(intensities_c), compression="gzip",
                                             compression_opts=9)
            # coordinates
            _ = this_spectrum.create_dataset('coordinates',
                                             data=(sl.coords[0, key], sl.coords[1, key], sl.coords[2, key]))
            ## link to shared parameters
            # ROI
            this_spectrum['ROIs/001'] = h5py.SoftLink('/shared_data/regions_of_interest/001')
            # Sample
            this_spectrum['samples/001'] = h5py.SoftLink('/shared_data/samples/001')
            # Instrument config
            this_spectrum['instrument_parameters'] = h5py.SoftLink('/shared_data/instrument_parameters/001')
            done += 1
            if done % 1000 == 0:
                print "[%s] progress: %.1f%%" % (input_filename, float(done) * 100.0 / n_total)
    print "finished!"
if __name__ == '__main__':
    # CLI usage: python sl.py <input.sl>
    # strips the 3-char ".sl" suffix and writes <input>.imzML alongside
    centroid_imzml(sys.argv[1], sys.argv[1][:-3] + ".imzML")
| andy-d-palmer/pyIMS | pyImagingMSpec/convert/sl.py | Python | apache-2.0 | 8,335 | [
"VisIt"
] | 88afb3c628ba60e6360fd2315cf3ed0a462dbe7ecddb5ee7bc01d55127bc809f |
#
# Parse tree nodes for expressions
#
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object)
import sys
import copy
import operator
from Errors import error, warning, warn_once, InternalError, CompileError
from Errors import hold_errors, release_errors, held_errors, report_error
from Code import UtilityCode, TempitaUtilityCode
import StringEncoding
import Naming
import Nodes
from Nodes import Node
import PyrexTypes
from PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type, cython_memoryview_ptr_type
import TypeSlots
from Builtin import list_type, tuple_type, set_type, dict_type, \
unicode_type, str_type, bytes_type, type_type
import Builtin
import Symtab
from Cython import Utils
from Annotate import AnnotationItem
from Cython.Compiler import Future
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
class NotConstant(object):
    """Singleton sentinel marking an expression whose value is not a compile-time constant."""

    _obj = None  # the one shared instance, created lazily

    def __new__(cls):
        obj = NotConstant._obj
        if obj is None:
            obj = super(NotConstant, cls).__new__(cls)
            NotConstant._obj = obj
        return obj

    def __repr__(self):
        return "<NOT CONSTANT>"
# shared sentinel instances used throughout this module
not_a_constant = NotConstant()
constant_value_not_set = object()  # marks "constant_result has never been computed"
# error messages when coercing from key[0] to key[1]
# (looked up by find_coercion_error(); pairs missing from this table fall
# back to the caller-supplied default message)
coercion_error_dict = {
    # string related errors
    (Builtin.unicode_type, Builtin.bytes_type) : "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
    (Builtin.unicode_type, Builtin.str_type) : "Cannot convert Unicode string to 'str' implicitly. This is not portable and requires explicit encoding.",
    (Builtin.unicode_type, PyrexTypes.c_char_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
    (Builtin.unicode_type, PyrexTypes.c_uchar_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
    (Builtin.bytes_type, Builtin.unicode_type) : "Cannot convert 'bytes' object to unicode implicitly, decoding required",
    (Builtin.bytes_type, Builtin.str_type) : "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
    (Builtin.bytes_type, PyrexTypes.c_py_unicode_ptr_type) : "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
    (Builtin.str_type, Builtin.unicode_type) : "str objects do not support coercion to unicode, use a unicode string literal instead (u'')",
    (Builtin.str_type, Builtin.bytes_type) : "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
    (Builtin.str_type, PyrexTypes.c_char_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
    (Builtin.str_type, PyrexTypes.c_uchar_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
    (Builtin.str_type, PyrexTypes.c_py_unicode_ptr_type) : "'str' objects do not support coercion to C types (use 'unicode'?).",
    (PyrexTypes.c_char_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
    (PyrexTypes.c_uchar_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
}
def find_coercion_error(type_tuple, default, env):
    # Pick the most specific error message for coercing type_tuple[0] to
    # type_tuple[1]; fall back to 'default' when no special-cased message
    # applies, or when the c_string_encoding directive makes the char*
    # coercion legal.
    err = coercion_error_dict.get(type_tuple)
    if err is None:
        return default
    elif ((PyrexTypes.c_char_ptr_type in type_tuple or PyrexTypes.c_uchar_ptr_type in type_tuple)
          and env.directives['c_string_encoding']):
        if type_tuple[1].is_pyobject:
            return default
        elif env.directives['c_string_encoding'] in ('ascii', 'default'):
            return default
        else:
            return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
    else:
        return err
def default_str_type(env):
    """Map the active ``c_string_type`` directive to the matching builtin type (None when unset/unknown)."""
    c_string_type = env.directives['c_string_type']
    if c_string_type == 'bytes':
        return bytes_type
    if c_string_type == 'str':
        return str_type
    if c_string_type == 'unicode':
        return unicode_type
    return None
def check_negative_indices(*nodes):
    """
    Raise a warning on nodes that are known to have negative numeric values.
    Used to find (potential) bugs inside of "wraparound=False" sections.
    """
    for node in nodes:
        # skip nodes without a numeric constant result
        # (``long`` here: this module targets Python 2)
        if (node is None
                or not isinstance(node.constant_result, (int, float, long))):
            continue
        if node.constant_result < 0:
            warning(node.pos,
                    "the result of using negative indices inside of "
                    "code sections marked as 'wraparound=False' is "
                    "undefined", level=1)
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
# result_code/temp_result can safely be set to None
result_ctype = None
type = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
# --- default flag/attribute values shared by all expression nodes ---
is_sequence_constructor = 0
is_string_literal = 0
is_attribute = 0
saved_subexpr_nodes = None  # cache for subexpr_nodes()
is_temp = 0
is_target = 0
is_starred = 0
constant_result = constant_value_not_set
# whether this node with a memoryview type should be broadcast
memslice_broadcast = False
# child_attrs is derived from the per-class 'subexprs' list
child_attrs = property(fget=operator.attrgetter('subexprs'))
def not_implemented(self, method_name):
    # Report an abstract method that a concrete subclass failed to override.
    print_call_chain(method_name, "not implemented") ###
    raise InternalError(
        "%s.%s not implemented" %
        (self.__class__.__name__, method_name))

def is_lvalue(self):
    # Can this expression appear on the left of an assignment? (default: no)
    return 0

def is_addressable(self):
    # An lvalue whose address may be taken (memoryview slices excluded).
    return self.is_lvalue() and not self.type.is_memoryviewslice

def is_ephemeral(self):
    # An ephemeral node is one whose result is in
    # a Python temporary and we suspect there are no
    # other references to it. Certain operations are
    # disallowed on such values, since they are
    # likely to result in a dangling pointer.
    return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
    """Collect this node's sub-expression nodes (per the 'subexprs' class attribute), flattening list-valued attributes and skipping None."""
    collected = []
    for attr_name in self.subexprs:
        value = getattr(self, attr_name)
        if value is None:
            continue
        if type(value) is list:
            collected.extend(value)
        else:
            collected.append(value)
    return collected
def result(self):
    # C code fragment holding this node's result: the allocated temp name,
    # or the computed expression when no temp is used.
    if self.is_temp:
        return self.temp_code
    else:
        return self.calculate_result_code()

def result_as(self, type=None):
    # Return the result code cast to the specified C type.
    if (self.is_temp and self.type.is_pyobject and
            type != py_object_type):
        # Allocated temporaries are always PyObject *, which may not
        # reflect the actual type (e.g. an extension type)
        return typecast(type, py_object_type, self.result())
    return typecast(type, self.ctype(), self.result())

def py_result(self):
    # Return the result code cast to PyObject *.
    return self.result_as(py_object_type)

def ctype(self):
    # Return the native C type of the result (i.e. the
    # C type of the result_code expression).
    return self.result_ctype or self.type
def get_constant_c_result_code(self):
    # Return the constant value of this node as a result code
    # string, or None if the node is not constant. This method
    # can be called when the constant result code is required
    # before the code generation phase.
    #
    # The return value is a string that can represent a simple C
    # value, a constant C name or a constant C expression. If the
    # node type depends on Python code, this must return None.
    return None

def calculate_constant_result(self):
    # Calculate the constant compile time result value of this
    # expression and store it in ``self.constant_result``. Does
    # nothing by default, thus leaving ``self.constant_result``
    # unknown. If valid, the result can be an arbitrary Python
    # value.
    #
    # This must only be called when it is assured that all
    # sub-expressions have a valid constant_result value. The
    # ConstantFolding transform will do this.
    pass

def has_constant_result(self):
    # True once calculate_constant_result() produced an actual value
    # (i.e. neither "never computed" nor "known non-constant").
    return self.constant_result is not constant_value_not_set and \
        self.constant_result is not not_a_constant

def compile_time_value(self, denv):
    # Return value of compile-time expression, or report error.
    error(self.pos, "Invalid compile-time expression")

def compile_time_value_error(self, e):
    # Report an exception raised while evaluating a compile-time expression.
    error(self.pos, "Error in compile-time expression: %s: %s" % (
        e.__class__.__name__, e))
# ------------- Declaration Analysis ----------------

def analyse_target_declaration(self, env):
    # Default: expressions are not assignable/deletable targets.
    error(self.pos, "Cannot assign to or delete this")

# ------------- Expression Analysis ----------------

def analyse_const_expression(self, env):
    # Called during the analyse_declarations phase of a
    # constant expression. Analyses the expression's type,
    # checks whether it is a legal const expression,
    # and determines its value.
    node = self.analyse_types(env)
    node.check_const()
    return node

def analyse_expressions(self, env):
    # Convenience routine performing both the Type
    # Analysis and Temp Allocation phases for a whole
    # expression.
    return self.analyse_types(env)

def analyse_target_expression(self, env, rhs):
    # Convenience routine performing both the Type
    # Analysis and Temp Allocation phases for the LHS of
    # an assignment.
    return self.analyse_target_types(env)

def analyse_boolean_expression(self, env):
    # Analyse expression and coerce to a boolean.
    node = self.analyse_types(env)
    bool = node.coerce_to_boolean(env)
    return bool

def analyse_temp_boolean_expression(self, env):
    # Analyse boolean expression and coerce result into
    # a temporary. This is used when a branch is to be
    # performed on the result and we won't have an
    # opportunity to ensure disposal code is executed
    # afterwards. By forcing the result into a temporary,
    # we ensure that all disposal has been done by the
    # time we get the result.
    node = self.analyse_types(env)
    return node.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------

def type_dependencies(self, env):
    # Returns the list of entries whose types must be determined
    # before the type of self can be inferred.
    if hasattr(self, 'type') and self.type is not None:
        return ()
    # aggregate the (tuple-valued) dependencies of all sub-expressions
    return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())

def infer_type(self, env):
    # Attempt to deduce the type of self.
    # Differs from analyse_types as it avoids unnecessary
    # analysis of subexpressions, but can assume everything
    # in self.type_dependencies() has been resolved.
    if hasattr(self, 'type') and self.type is not None:
        return self.type
    elif hasattr(self, 'entry') and self.entry is not None:
        return self.entry.type
    else:
        self.not_implemented("infer_type")

def nonlocally_immutable(self):
    # Returns whether this variable is a safe reference, i.e.
    # can't be modified as part of globals or closures.
    return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
# --------------- Type Analysis ------------------

def analyse_as_module(self, env):
    # If this node can be interpreted as a reference to a
    # cimported module, return its scope, else None.
    return None

def analyse_as_type(self, env):
    # If this node can be interpreted as a reference to a
    # type, return that type, else None.
    return None

def analyse_as_extension_type(self, env):
    # If this node can be interpreted as a reference to an
    # extension type or builtin type, return its type, else None.
    return None

def analyse_types(self, env):
    # Abstract: concrete node classes must implement type analysis.
    self.not_implemented("analyse_types")

def analyse_target_types(self, env):
    # Type analysis for assignment targets; defaults to plain analysis.
    return self.analyse_types(env)

def nogil_check(self, env):
    # By default, any expression based on Python objects is
    # prevented in nogil environments. Subtypes must override
    # this if they can work without the GIL.
    if self.type and self.type.is_pyobject:
        self.gil_error()

def gil_assignment_check(self, env):
    # Assigning Python objects requires holding the GIL.
    if env.nogil and self.type.is_pyobject:
        error(self.pos, "Assignment of Python object not allowed without gil")

def check_const(self):
    # Default: expressions are not legal constant expressions.
    self.not_const()
    return False

def not_const(self):
    error(self.pos, "Not allowed in a constant expression")

def check_const_addr(self):
    # Default: expressions are not constant-address lvalues.
    self.addr_not_const()
    return False

def addr_not_const(self):
    error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------

def result_in_temp(self):
    # Return true if result is in a temporary owned by
    # this node or one of its subexpressions. Overridden
    # by certain nodes which can share the result of
    # a subnode.
    return self.is_temp

def target_code(self):
    # Return code fragment for use as LHS of a C assignment.
    return self.calculate_result_code()

def calculate_result_code(self):
    # Abstract: concrete node classes must produce their result expression.
    self.not_implemented("calculate_result_code")

# def release_target_temp(self, env):
#    # Release temporaries used by LHS of an assignment.
#    self.release_subexpr_temps(env)

def allocate_temp_result(self, code):
    # Allocate (exactly once) the temporary that will hold this node's result.
    if self.temp_code:
        raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
    type = self.type
    if not type.is_void:
        if type.is_pyobject:
            # Python-object temps are uniformly typed PyObject *
            type = PyrexTypes.py_object_type
        self.temp_code = code.funcstate.allocate_temp(
            type, manage_ref=self.use_managed_ref)
    else:
        self.temp_code = None

def release_temp_result(self, code):
    # Release this node's temp back to the function state; raises on
    # double-release or release-without-allocate (unless the result is unused).
    if not self.temp_code:
        if not self.result_is_used:
            # not used anyway, so ignore if not set up
            return
        if self.old_temp:
            raise RuntimeError("temp %s released multiple times in %s" % (
                self.old_temp, self.__class__.__name__))
        else:
            raise RuntimeError("no temp, but release requested in %s" % (
                self.__class__.__name__))
    code.funcstate.release_temp(self.temp_code)
    self.old_temp = self.temp_code
    self.temp_code = None
    # ---------------- Code Generation -----------------
    def make_owned_reference(self, code):
        """
        If result is a pyobject, make sure we own a reference to it.
        If the result is in a temp, it is already a new reference.
        """
        if self.type.is_pyobject and not self.result_in_temp():
            code.put_incref(self.result(), self.ctype())
    def make_owned_memoryviewslice(self, code):
        """
        Make sure we own the reference to this memoryview slice.
        """
        if not self.result_in_temp():
            code.put_incref_memoryviewslice(self.result(),
                have_gil=self.in_nogil_context)
    def generate_evaluation_code(self, code):
        """Generate C code evaluating this node and its subexpressions."""
        code.mark_pos(self.pos)
        # Generate code to evaluate this node and
        # its sub-expressions, and dispose of any
        # temporary results of its sub-expressions.
        self.generate_subexpr_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
        self.generate_result_code(code)
        if self.is_temp:
            # If we are temp we do not need to wait until this node is disposed
            # before disposing children.
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)
    def generate_subexpr_evaluation_code(self, code):
        """Evaluate all subexpressions of this node."""
        for node in self.subexpr_nodes():
            node.generate_evaluation_code(code)
    def generate_result_code(self, code):
        # Abstract: concrete nodes must generate code computing their result.
        self.not_implemented("generate_result_code")
    def generate_disposal_code(self, code):
        """Drop this node's result: decref temps, or dispose of subexpressions."""
        if self.is_temp:
            if self.result():
                if self.type.is_pyobject:
                    code.put_decref_clear(self.result(), self.ctype())
                elif self.type.is_memoryviewslice:
                    code.put_xdecref_memoryviewslice(
                        self.result(), have_gil=not self.in_nogil_context)
        else:
            # Already done if self.is_temp
            self.generate_subexpr_disposal_code(code)
    def generate_subexpr_disposal_code(self, code):
        # Generate code to dispose of temporary results
        # of all sub-expressions.
        for node in self.subexpr_nodes():
            node.generate_disposal_code(code)
    def generate_post_assignment_code(self, code):
        """After the result was assigned away, null out temps without decref
        (ownership was transferred to the assignment target)."""
        if self.is_temp:
            if self.type.is_pyobject:
                code.putln("%s = 0;" % self.result())
            elif self.type.is_memoryviewslice:
                code.putln("%s.memview = NULL;" % self.result())
                code.putln("%s.data = NULL;" % self.result())
        else:
            self.generate_subexpr_disposal_code(code)
    def generate_assignment_code(self, rhs, code):
        # Stub method for nodes which are not legal as
        # the LHS of an assignment. An error will have
        # been reported earlier.
        pass
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        # Stub method for nodes that are not legal as
        # the argument of a del statement. An error
        # will have been reported earlier.
        pass
    def free_temps(self, code):
        """Release this node's temp (if any), else recurse into subexpressions."""
        if self.is_temp:
            if not self.type.is_void:
                self.release_temp_result(code)
        else:
            self.free_subexpr_temps(code)
    def free_subexpr_temps(self, code):
        """Release the temps of all subexpressions."""
        for sub in self.subexpr_nodes():
            sub.free_temps(code)
    def generate_function_definitions(self, env, code):
        # Default: expression nodes define no nested functions.
        pass
    # ---------------- Annotation ---------------------
    def annotate(self, code):
        """Recursively annotate subexpressions for HTML output."""
        for node in self.subexpr_nodes():
            node.annotate(code)
    # ----------------- Coercion ----------------------
    def coerce_to(self, dst_type, env):
        # Coerce the result so that it can be assigned to
        # something of type dst_type. If processing is necessary,
        # wraps this node in a coercion node and returns that.
        # Otherwise, returns this node unchanged.
        #
        # This method is called during the analyse_expressions
        # phase of the src_node's processing.
        #
        # Note that subclasses that override this (especially
        # ConstNodes) must not (re-)set their own .type attribute
        # here. Since expression nodes may turn up in different
        # places in the tree (e.g. inside of CloneNodes in cascaded
        # assignments), this method must return a new node instance
        # if it changes the type.
        #
        src = self
        src_type = self.type
        if self.check_for_coercion_error(dst_type, env):
            return self
        # Coerce through the base type of C++ references.
        if dst_type.is_reference and not src_type.is_reference:
            dst_type = dst_type.ref_base_type
        if src_type.is_const:
            src_type = src_type.const_base_type
        if src_type.is_fused or dst_type.is_fused:
            # See if we are coercing a fused function to a pointer to a
            # specialized function
            if (src_type.is_cfunction and not dst_type.is_fused and
                    dst_type.is_ptr and dst_type.base_type.is_cfunction):
                dst_type = dst_type.base_type
                for signature in src_type.get_all_specialized_function_types():
                    if signature.same_as(dst_type):
                        src.type = signature
                        src.entry = src.type.entry
                        src.entry.used = True
                        return self
            if src_type.is_fused:
                error(self.pos, "Type is not specialized")
            else:
                error(self.pos, "Cannot coerce to a type that is not specialized")
            self.type = error_type
            return self
        if self.coercion_type is not None:
            # This is purely for error checking purposes!
            node = NameNode(self.pos, name='', type=self.coercion_type)
            node.coerce_to(dst_type, env)
        if dst_type.is_memoryviewslice:
            import MemoryView
            if not src.type.is_memoryviewslice:
                if src.type.is_pyobject:
                    src = CoerceToMemViewSliceNode(src, dst_type, env)
                elif src.type.is_array:
                    src = CythonArrayNode.from_carray(src, env).coerce_to(
                        dst_type, env)
                elif not src_type.is_error:
                    error(self.pos,
                          "Cannot convert '%s' to memoryviewslice" %
                          (src_type,))
            elif not MemoryView.src_conforms_to_dst(
                    src.type, dst_type, broadcast=self.memslice_broadcast):
                if src.type.dtype.same_as(dst_type.dtype):
                    msg = "Memoryview '%s' not conformable to memoryview '%s'."
                    tup = src.type, dst_type
                else:
                    msg = "Different base types for memoryviews (%s, %s)"
                    tup = src.type.dtype, dst_type.dtype
                error(self.pos, msg % tup)
        elif dst_type.is_pyobject:
            if not src.type.is_pyobject:
                if dst_type is bytes_type and src.type.is_int:
                    src = CoerceIntToBytesNode(src, env)
                else:
                    src = CoerceToPyTypeNode(src, env, type=dst_type)
            if not src.type.subtype_of(dst_type):
                # A runtime type test is needed unless the value is known None.
                if src.constant_result is not None:
                    src = PyTypeTestNode(src, dst_type, env)
        elif src.type.is_pyobject:
            src = CoerceFromPyTypeNode(dst_type, src, env)
        elif (dst_type.is_complex
              and src_type != dst_type
              and dst_type.assignable_from(src_type)):
            src = CoerceToComplexNode(src, dst_type, env)
        else: # neither src nor dst are py types
            # Added the string comparison, since for c types that
            # is enough, but Cython gets confused when the types are
            # in different pxi files.
            if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
                self.fail_assignment(dst_type)
        return src
    def fail_assignment(self, dst_type):
        """Emit the generic "cannot assign" coercion error for dst_type."""
        error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
    def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
        """Check the (src, dst) type pair against the known-bad coercion table.

        Returns True if an error was reported (from the table, or the
        default/fail message), False otherwise.
        """
        if fail and not default:
            default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
        message = find_coercion_error((self.type, dst_type), default, env)
        if message is not None:
            error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
            return True
        if fail:
            self.fail_assignment(dst_type)
            return True
        return False
    def coerce_to_pyobject(self, env):
        """Shortcut: coerce to the generic Python object type."""
        return self.coerce_to(PyrexTypes.py_object_type, env)
def coerce_to_boolean(self, env):
# Coerce result to something acceptable as
# a boolean value.
# if it's constant, calculate the result now
if self.has_constant_result():
bool_value = bool(self.constant_result)
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
type = self.type
if type.is_enum or type.is_error:
return self
elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
return CoerceToBooleanNode(self, env)
else:
error(self.pos, "Type '%s' not acceptable as a boolean" % type)
return self
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.type.is_int:
return self
else:
return self.coerce_to(PyrexTypes.c_long_type, env)
def coerce_to_temp(self, env):
# Ensure that the result is in a temporary.
if self.result_in_temp():
return self
else:
return CoerceToTempNode(self, env)
def coerce_to_simple(self, env):
# Ensure that the result is simple (see is_simple).
if self.is_simple():
return self
else:
return self.coerce_to_temp(env)
    def is_simple(self):
        # A node is simple if its result is something that can
        # be referred to without performing any operations, e.g.
        # a constant, local var, C global var, struct member
        # reference, or temporary.
        return self.result_in_temp()
    def may_be_none(self):
        """Conservatively decide whether the value could be None at runtime.

        Non-Python (and non-memoryview) types can never be None; otherwise
        a known constant result decides, and anything else defaults to True.
        """
        if self.type and not (self.type.is_pyobject or
                              self.type.is_memoryviewslice):
            return False
        if self.constant_result not in (not_a_constant, constant_value_not_set):
            return self.constant_result is not None
        return True
    def as_cython_attribute(self):
        # Default: this node does not name a 'cython.*' attribute.
        return None
    def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
        # Wraps the node in a NoneCheckNode if it is not known to be
        # not-None (e.g. because it is a Python literal).
        if self.may_be_none():
            return NoneCheckNode(self, error, message, format_args)
        else:
            return self
@classmethod
def from_node(cls, node, **kwargs):
"""Instantiate this node class from another node, properly
copying over all attributes that one would forget otherwise.
"""
attributes = "cf_state cf_maybe_null cf_is_null".split()
for attr_name in attributes:
if attr_name in kwargs:
continue
try:
value = getattr(node, attr_name)
except AttributeError:
pass
else:
kwargs[attr_name] = value
return cls(node.pos, **kwargs)
class AtomicExprNode(ExprNode):
    # Abstract base class for expression nodes which have
    # no sub-expressions.
    subexprs = []
    # Override to optimize -- we know we have no children
    def generate_subexpr_evaluation_code(self, code):
        # No subexpressions, nothing to evaluate.
        pass
    def generate_subexpr_disposal_code(self, code):
        # No subexpressions, nothing to dispose.
        pass
class PyConstNode(AtomicExprNode):
    # Abstract base class for constant Python values
    # (e.g. None, Ellipsis) that are referenced by a fixed
    # C name rather than generated code.
    is_literal = 1
    type = py_object_type
    def is_simple(self):
        return 1
    def may_be_none(self):
        # Subclasses representing None override this.
        return False
    def analyse_types(self, env):
        return self
    def calculate_result_code(self):
        # The C-level name of the constant (e.g. "Py_None").
        return self.value
    def generate_result_code(self, code):
        # Nothing to compute; the constant already exists.
        pass
class NoneNode(PyConstNode):
    # The constant value None
    is_none = 1
    value = "Py_None"
    constant_result = None
    nogil_check = None
    def compile_time_value(self, denv):
        return None
    def may_be_none(self):
        # This node *is* None.
        return True
class EllipsisNode(PyConstNode):
    # The literal '...' in a subscript list.
    value = "Py_Ellipsis"
    constant_result = Ellipsis
    def compile_time_value(self, denv):
        return Ellipsis
class ConstNode(AtomicExprNode):
    # Abstract base type for literal constant nodes.
    #
    # value     string      C code fragment
    is_literal = 1
    nogil_check = None
    def is_simple(self):
        return 1
    def nonlocally_immutable(self):
        # Literals can never be rebound from elsewhere.
        return 1
    def may_be_none(self):
        return False
    def analyse_types(self, env):
        return self # Types are held in class variables
    def check_const(self):
        # Literals are valid in constant expressions.
        return True
    def get_constant_c_result_code(self):
        return self.calculate_result_code()
    def calculate_result_code(self):
        return str(self.value)
    def generate_result_code(self, code):
        # Nothing to compute for a literal.
        pass
class BoolNode(ConstNode):
    type = PyrexTypes.c_bint_type
    # The constant value True or False
    def calculate_constant_result(self):
        self.constant_result = self.value
    def compile_time_value(self, denv):
        return self.value
    def calculate_result_code(self):
        # Emit as C "1"/"0" rather than the Python repr.
        return str(int(self.value))
class NullNode(ConstNode):
    # The C NULL pointer constant.
    type = PyrexTypes.c_null_ptr_type
    value = "NULL"
    constant_result = 0
    def get_constant_c_result_code(self):
        return self.value
class CharNode(ConstNode):
    # A single C character literal.
    type = PyrexTypes.c_char_type
    def calculate_constant_result(self):
        self.constant_result = ord(self.value)
    def compile_time_value(self, denv):
        return ord(self.value)
    def calculate_result_code(self):
        # Escape so the character is valid inside C single quotes.
        return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
    # An integer literal (C or Python, decided by find_suitable_type_for_value).
    #
    # unsigned     ""  or "U"
    # longness     ""  or "L" or "LL"
    # is_c_literal   True/False/None   creator considers this a C integer literal
    unsigned = ""
    longness = ""
    is_c_literal = None # unknown
    def __init__(self, pos, **kwds):
        ExprNode.__init__(self, pos, **kwds)
        if 'type' not in kwds:
            # No explicit type given: infer one from the literal's value/suffix.
            self.type = self.find_suitable_type_for_value()
    def find_suitable_type_for_value(self):
        """Pick a C int type, C long, or Python object type for the literal."""
        if self.constant_result is constant_value_not_set:
            try:
                self.calculate_constant_result()
            except ValueError:
                pass
        # we ignore 'is_c_literal = True' and instead map signed 32bit
        # integers as C long values
        if self.is_c_literal or \
                self.constant_result in (constant_value_not_set, not_a_constant) or \
                self.unsigned or self.longness == 'LL':
            # clearly a C literal
            rank = (self.longness == 'LL') and 2 or 1
            suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
            if self.type:
                suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
        else:
            # C literal or Python literal - split at 32bit boundary
            if -2**31 <= self.constant_result < 2**31:
                if self.type and self.type.is_int:
                    suitable_type = self.type
                else:
                    suitable_type = PyrexTypes.c_long_type
            else:
                # Too large for 32 bits: keep it as a Python int object.
                suitable_type = PyrexTypes.py_object_type
        return suitable_type
    def coerce_to(self, dst_type, env):
        """Re-type the literal for dst_type, returning a new node if needed."""
        if self.type is dst_type:
            return self
        elif dst_type.is_float:
            if self.constant_result is not not_a_constant:
                return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
                                 constant_result=float(self.constant_result))
            else:
                return FloatNode(self.pos, value=self.value, type=dst_type,
                                 constant_result=not_a_constant)
        if dst_type.is_numeric and not dst_type.is_complex:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = dst_type, is_c_literal = True,
                           unsigned=self.unsigned, longness=self.longness)
            return node
        elif dst_type.is_pyobject:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = PyrexTypes.py_object_type, is_c_literal = False,
                           unsigned=self.unsigned, longness=self.longness)
        else:
            # FIXME: not setting the type here to keep it working with
            # complex numbers. Should they be special cased?
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           unsigned=self.unsigned, longness=self.longness)
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def coerce_to_boolean(self, env):
        # An int literal in boolean context just changes its C type to bint.
        return IntNode(
            self.pos, value=self.value,
            type = PyrexTypes.c_bint_type,
            unsigned=self.unsigned, longness=self.longness)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            # pre-allocate a Python version of the number
            plain_integer_string = self.value_as_c_integer_string(plain_digits=True)
            self.result_code = code.get_py_num(plain_integer_string, self.longness)
        else:
            self.result_code = self.get_constant_c_result_code()
    def get_constant_c_result_code(self):
        # Literal text plus any U/L/LL suffixes.
        return self.value_as_c_integer_string() + self.unsigned + self.longness
    def value_as_c_integer_string(self, plain_digits=False):
        """Return the literal as a C-compatible integer string.

        Python 3 octal ('0o123') and binary ('0b101') notations are not valid
        C and are rewritten (or fully evaluated when plain_digits is set).
        """
        value = self.value
        if isinstance(value, basestring) and len(value) > 2:
            # must convert C-incompatible Py3 oct/bin notations
            if value[1] in 'oO':
                if plain_digits:
                    value = int(value[2:], 8)
                else:
                    value = value[0] + value[2:] # '0o123' => '0123'
            elif value[1] in 'bB':
                value = int(value[2:], 2)
            elif plain_digits and value[1] in 'xX':
                value = int(value[2:], 16)
        return str(value)
    def calculate_result_code(self):
        return self.result_code
    def calculate_constant_result(self):
        self.constant_result = Utils.str_to_number(self.value)
    def compile_time_value(self, denv):
        return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
    # A C double literal.
    type = PyrexTypes.c_double_type
    def calculate_constant_result(self):
        self.constant_result = float(self.value)
    def compile_time_value(self, denv):
        return float(self.value)
    def calculate_result_code(self):
        """Emit the literal, mapping nan/inf to portable C expressions."""
        strval = self.value
        assert isinstance(strval, (str, unicode))
        # Normalise via repr(float(...)) to recognise nan/inf spellings.
        cmpval = repr(float(strval))
        if cmpval == 'nan':
            return "(Py_HUGE_VAL * 0)"
        elif cmpval == 'inf':
            return "Py_HUGE_VAL"
        elif cmpval == '-inf':
            return "(-Py_HUGE_VAL)"
        else:
            return strval
class BytesNode(ConstNode):
    # A char* or bytes literal
    #
    # value      BytesLiteral
    is_string_literal = True
    # start off as Python 'bytes' to support len() in O(1)
    type = bytes_type
    def calculate_constant_result(self):
        self.constant_result = self.value
    def as_sliced_node(self, start, stop, step=None):
        """Return a new BytesNode for a constant slice of this literal."""
        value = StringEncoding.BytesLiteral(self.value[start:stop:step])
        value.encoding = self.value.encoding
        return BytesNode(
            self.pos, value=value, constant_result=value)
    def compile_time_value(self, denv):
        return self.value
    def analyse_as_type(self, env):
        """Try to interpret the literal as a C type name (for casts etc.).

        Falls back to parsing "sizeof(<value>)" as a tree fragment and
        extracting the type from the resulting SizeofTypeNode; returns None
        if the content is not a type.
        """
        type = PyrexTypes.parse_basic_type(self.value)
        if type is not None:
            return type
        from TreeFragment import TreeFragment
        pos = (self.pos[0], self.pos[1], self.pos[2]-7)
        declaration = TreeFragment(u"sizeof(%s)" % self.value, name=pos[0].filename, initial_pos=pos)
        sizeof_node = declaration.root.stats[0].expr
        sizeof_node = sizeof_node.analyse_types(env)
        if isinstance(sizeof_node, SizeofTypeNode):
            return sizeof_node.arg_type
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
    def coerce_to_boolean(self, env):
        # This is special because testing a C char* for truth directly
        # would yield the wrong result.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
    def coerce_to(self, dst_type, env):
        """Coerce the bytes literal to int (single char), bytes or char*."""
        if self.type == dst_type:
            return self
        if dst_type.is_int:
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character string literals can be coerced into ints.")
                return self
            if dst_type.is_unicode_char:
                error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
                return self
            return CharNode(self.pos, value=self.value)
        node = BytesNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            if dst_type in (py_object_type, Builtin.bytes_type):
                node.type = Builtin.bytes_type
            else:
                self.check_for_coercion_error(dst_type, env, fail=True)
            return node
        elif dst_type == PyrexTypes.c_char_ptr_type:
            node.type = dst_type
            return node
        elif dst_type == PyrexTypes.c_uchar_ptr_type:
            # Emit as char* and cast, since the literal itself is char-based.
            node.type = PyrexTypes.c_char_ptr_type
            return CastNode(node, PyrexTypes.c_uchar_ptr_type)
        elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
            node.type = dst_type
            return node
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_string_const(self.value)
    def get_constant_c_result_code(self):
        return None # FIXME
    def calculate_result_code(self):
        return self.result_code
class UnicodeNode(ConstNode):
    # A Py_UNICODE* or unicode literal
    #
    # value        EncodedString
    # bytes_value  BytesLiteral   the literal parsed as bytes string
    #                             ('-3' unicode literals only)
    is_string_literal = True
    bytes_value = None
    type = unicode_type
    def calculate_constant_result(self):
        self.constant_result = self.value
    def as_sliced_node(self, start, stop, step=None):
        """Return a new UnicodeNode for a constant slice, or None if unsafe."""
        if StringEncoding.string_contains_surrogates(self.value[:stop]):
            # this is unsafe as it may give different results in different runtimes
            return None
        value = StringEncoding.EncodedString(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.bytes_value is not None:
            bytes_value = StringEncoding.BytesLiteral(
                self.bytes_value[start:stop:step])
            bytes_value.encoding = self.bytes_value.encoding
        else:
            bytes_value = None
        return UnicodeNode(
            self.pos, value=value, bytes_value=bytes_value,
            constant_result=value)
    def coerce_to(self, dst_type, env):
        """Coerce the unicode literal to Py_UCS4, C string or Python object."""
        if dst_type is self.type:
            pass
        elif dst_type.is_unicode_char:
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character Unicode string literals or surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
                return self
            int_value = ord(self.value)
            return IntNode(self.pos, type=dst_type, value=str(int_value), constant_result=int_value)
        elif not dst_type.is_pyobject:
            if dst_type.is_string and self.bytes_value is not None:
                # special case: '-3' enforced unicode literal used in a C char* context
                return BytesNode(self.pos, value=self.bytes_value).coerce_to(dst_type, env)
            if dst_type.is_pyunicode_ptr:
                node = UnicodeNode(self.pos, value=self.value)
                node.type = dst_type
                return node
            error(self.pos, "Unicode literals do not support coercion to C types other than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* (for strings).")
        elif dst_type is not py_object_type:
            if not self.check_for_coercion_error(dst_type, env):
                self.fail_assignment(dst_type)
        return self
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
        ## or (len(self.value) == 2
        ##     and (0xD800 <= self.value[0] <= 0xDBFF)
        ##     and (0xDC00 <= self.value[1] <= 0xDFFF))
    def coerce_to_boolean(self, env):
        # Fold to the constant truth value of the literal.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
    def contains_surrogates(self):
        return StringEncoding.string_contains_surrogates(self.value)
    def generate_evaluation_code(self, code):
        """Emit the string constant; surrogate-containing strings are built
        at module init from a Py_UNICODE[] since UTF-8 round-tripping is
        not portable for them."""
        if self.type.is_pyobject:
            if self.contains_surrogates():
                # surrogates are not really portable and cannot be
                # decoded by the UTF-8 codec in Py3.3
                self.result_code = code.get_py_const(py_object_type, 'ustring_')
                data_cname = code.get_pyunicode_ptr_const(self.value)
                code = code.get_cached_constants_writer()
                code.mark_pos(self.pos)
                code.putln(
                    "%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
                        self.result_code,
                        data_cname,
                        data_cname,
                        code.error_goto_if_null(self.result_code, self.pos)))
                code.putln("#if CYTHON_PEP393_ENABLED")
                code.put_error_if_neg(
                    self.pos, "PyUnicode_READY(%s)" % self.result_code)
                code.putln("#endif")
            else:
                self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_pyunicode_ptr_const(self.value)
    def calculate_result_code(self):
        return self.result_code
    def compile_time_value(self, env):
        return self.value
class StringNode(PyConstNode):
    # A Python str object, i.e. a byte string in Python 2.x and a
    # unicode string in Python 3.x
    #
    # value          BytesLiteral (or EncodedString with ASCII content)
    # unicode_value  EncodedString or None
    # is_identifier  boolean
    type = str_type
    is_string_literal = True
    is_identifier = None
    unicode_value = None
    def calculate_constant_result(self):
        self.constant_result = self.value
    def as_sliced_node(self, start, stop, step=None):
        """Return a new StringNode for a constant slice, or None if unsafe."""
        value = type(self.value)(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.unicode_value is not None:
            if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
                # this is unsafe as it may give different results in different runtimes
                return None
            unicode_value = StringEncoding.EncodedString(
                self.unicode_value[start:stop:step])
        else:
            unicode_value = None
        return StringNode(
            self.pos, value=value, unicode_value=unicode_value,
            constant_result=value, is_identifier=self.is_identifier)
    def coerce_to(self, dst_type, env):
        """Coerce the str literal; non-Python targets go through BytesNode."""
        if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
            # if dst_type is Builtin.bytes_type:
            #     # special case: bytes = 'str literal'
            #     return BytesNode(self.pos, value=self.value)
            if not dst_type.is_pyobject:
                return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
            self.check_for_coercion_error(dst_type, env, fail=True)
        return self
    def can_coerce_to_char_literal(self):
        return not self.is_identifier and len(self.value) == 1
    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(
            self.value, identifier=self.is_identifier, is_str=True,
            unicode_value=self.unicode_value)
    def get_constant_c_result_code(self):
        # A str object has no C-level constant representation.
        return None
    def calculate_result_code(self):
        return self.result_code
    def compile_time_value(self, env):
        return self.value
class IdentifierStringNode(StringNode):
    # A special str value that represents an identifier (bytes in Py2,
    # unicode in Py3).
    is_identifier = True
class ImagNode(AtomicExprNode):
    # Imaginary number literal
    #
    # value   float    imaginary part
    type = PyrexTypes.c_double_complex_type
    def calculate_constant_result(self):
        self.constant_result = complex(0.0, self.value)
    def compile_time_value(self, denv):
        return complex(0.0, self.value)
    def analyse_types(self, env):
        # Make sure the C complex helper code is available.
        self.type.create_declaration_utility_code(env)
        return self
    def may_be_none(self):
        return False
    def coerce_to(self, dst_type, env):
        """Re-type for dst_type; Python targets get a temp PyComplex object."""
        if self.type is dst_type:
            return self
        node = ImagNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            node.is_temp = 1
            node.type = PyrexTypes.py_object_type
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return AtomicExprNode.coerce_to(node, dst_type, env)
    gil_message = "Constructing complex number"
    def calculate_result_code(self):
        if self.type.is_pyobject:
            return self.result()
        else:
            # C complex constructed from (real, imag) parts.
            return "%s(0, %r)" % (self.type.from_parts, float(self.value))
    def generate_result_code(self, code):
        if self.type.is_pyobject:
            code.putln(
                "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
                    self.result(),
                    float(self.value),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
    # C++ new statement
    #
    # cppclass    node    c++ class to create
    type = None
    def infer_type(self, env):
        """Resolve the target C++ class and its constructor.

        Declares a default no-argument constructor if none exists, and
        sets self.type to the constructor's function type.
        """
        type = self.cppclass.analyse_as_type(env)
        if type is None or not type.is_cpp_class:
            error(self.pos, "new operator can only be applied to a C++ class")
            self.type = error_type
            return
        self.cpp_check(env)
        constructor = type.scope.lookup(u'<init>')
        if constructor is None:
            # Synthesise a default constructor (may raise C++ exceptions).
            func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
            type.scope.declare_cfunction(u'<init>', func_type, self.pos)
            constructor = type.scope.lookup(u'<init>')
        self.class_type = type
        self.entry = constructor
        self.type = constructor.type
        return self.type
    def analyse_types(self, env):
        if self.type is None:
            self.infer_type(env)
        return self
    def may_be_none(self):
        return False
    def generate_result_code(self, code):
        pass
    def calculate_result_code(self):
        return "new " + self.class_type.declaration_code("")
class NameNode(AtomicExprNode):
# Reference to a local or global variable name.
#
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
# cf_is_null boolean Is uninitialized before this node
# cf_maybe_null boolean Maybe uninitialized before this node
# allow_null boolean Don't raise UnboundLocalError
# nogil boolean Whether it is used in a nogil context
is_name = True
is_cython_module = False
cython_attribute = None
lhs_of_first_assignment = False # TODO: remove me
is_used_as_rvalue = 0
entry = None
type_entry = None
cf_maybe_null = True
cf_is_null = False
allow_null = False
nogil = False
    def as_cython_attribute(self):
        # Set when this name refers to a 'cython.*' attribute.
        return self.cython_attribute
    def type_dependencies(self, env):
        """Return the entries whose types must be inferred before this node's."""
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is not None and self.entry.type.is_unspecified:
            return (self.entry,)
        else:
            return ()
    def infer_type(self, env):
        """Infer this name's type from its symbol-table entry."""
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is None or self.entry.type is unspecified_type:
            return py_object_type
        elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
                self.name == self.entry.type.name:
            # Unfortunately the type attribute of type objects
            # is used for the pointer to the type they represent.
            return type_type
        elif self.entry.type.is_cfunction:
            if self.entry.scope.is_builtin_scope:
                # special case: optimised builtin functions must be treated as Python objects
                return py_object_type
            else:
                # special case: referring to a C function must return its pointer
                return PyrexTypes.CPtrType(self.entry.type)
        else:
            return self.entry.type
    def compile_time_value(self, denv):
        """Look the name up in the compile-time environment (DEF constants)."""
        try:
            return denv.lookup(self.name)
        except KeyError:
            error(self.pos, "Compile-time name '%s' not defined" % self.name)
    def get_constant_c_result_code(self):
        # Python objects have no constant C representation; C entries do.
        if not self.entry or self.entry.type.is_pyobject:
            return None
        return self.entry.cname
    def coerce_to(self, dst_type, env):
        # If coercing to a generic pyobject and this is a builtin
        # C function with a Python equivalent, manufacture a NameNode
        # referring to the Python builtin.
        #print "NameNode.coerce_to:", self.name, dst_type ###
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction:
                var_entry = entry.as_variable
                if var_entry:
                    if var_entry.is_builtin and var_entry.is_const:
                        # Use the cached builtin object for the name.
                        var_entry = env.declare_builtin(var_entry.name, self.pos)
                    node = NameNode(self.pos, name = self.name)
                    node.entry = var_entry
                    node.analyse_rvalue_entry(env)
                    return node
        return super(NameNode, self).coerce_to(dst_type, env)
    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module.
        # Returns the module scope, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.as_module:
            return entry.as_module
        return None
    def analyse_as_type(self, env):
        """Try to interpret this name as a type; return the type or None."""
        if self.cython_attribute:
            type = PyrexTypes.parse_basic_type(self.cython_attribute)
        else:
            type = PyrexTypes.parse_basic_type(self.name)
        if type:
            return type
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            return entry.type
        else:
            return None
    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type.
        # Returns the extension type, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            if entry.type.is_extension_type or entry.type.is_builtin_type:
                return entry.type
        return None
    def analyse_target_declaration(self, env):
        """Declare the name in env if it is assigned to but not yet known."""
        if not self.entry:
            self.entry = env.lookup_here(self.name)
        if not self.entry:
            if env.directives['warn.undeclared']:
                warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
            if env.directives['infer_types'] != False:
                # Leave the type open for the type inferencer.
                type = unspecified_type
            else:
                type = py_object_type
            self.entry = env.declare_var(self.name, type, self.pos)
        if self.entry.is_declared_generic:
            self.result_ctype = py_object_type
    def analyse_types(self, env):
        """Resolve the name as an rvalue; falls back to declaring a builtin."""
        self.initialized_check = env.directives['initializedcheck']
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if not self.entry:
            self.entry = env.declare_builtin(self.name, self.pos)
        if not self.entry:
            self.type = PyrexTypes.error_type
            return self
        entry = self.entry
        if entry:
            entry.used = 1
            if entry.type.is_buffer:
                import Buffer
                Buffer.used_buffer_aux_vars(entry)
        self.analyse_rvalue_entry(env)
        return self
    def analyse_target_types(self, env):
        """Resolve the name as an assignment target and validate lvalue-ness."""
        self.analyse_entry(env)
        if (not self.is_lvalue() and self.entry.is_cfunction and
                self.entry.fused_cfunction and self.entry.as_variable):
            # We need this for the fused 'def' TreeFragment
            self.entry = self.entry.as_variable
            self.type = self.entry.type
        if self.type.is_const:
            error(self.pos, "Assignment to const '%s'" % self.name)
        if self.type.is_reference:
            error(self.pos, "Assignment to reference '%s'" % self.name)
        if not self.is_lvalue():
            error(self.pos, "Assignment to non-lvalue '%s'"
                % self.name)
            self.type = PyrexTypes.error_type
        self.entry.used = 1
        if self.entry.type.is_buffer:
            import Buffer
            Buffer.used_buffer_aux_vars(self.entry)
        return self
    def analyse_rvalue_entry(self, env):
        #print "NameNode.analyse_rvalue_entry:", self.name ###
        #print "Entry:", self.entry.__dict__ ###
        self.analyse_entry(env)
        entry = self.entry
        if entry.is_declared_generic:
            self.result_ctype = py_object_type
        if entry.is_pyglobal or entry.is_builtin:
            if entry.is_builtin and entry.is_const:
                # Cached builtins need no temp.
                self.is_temp = 0
            else:
                self.is_temp = 1
            self.is_used_as_rvalue = 1
        elif entry.type.is_memoryviewslice:
            self.is_temp = False
            self.is_used_as_rvalue = True
            self.use_managed_ref = True
        return self
    def nogil_check(self, env):
        """Check that reading this name is legal without the GIL."""
        self.nogil = True
        if self.is_used_as_rvalue:
            entry = self.entry
            if entry.is_builtin:
                if not entry.is_const: # cached builtins are ok
                    self.gil_error()
            elif entry.is_pyglobal:
                self.gil_error()
        elif self.entry.type.is_memoryviewslice:
            if self.cf_is_null or self.cf_maybe_null:
                import MemoryView
                MemoryView.err_if_nogil_initialized_check(self.pos, env)
    gil_message = "Accessing Python global or builtin"
    def analyse_entry(self, env):
        """Adopt the type from the (validated) symbol-table entry."""
        #print "NameNode.analyse_entry:", self.name ###
        self.check_identifier_kind()
        entry = self.entry
        type = entry.type
        self.type = type
    def check_identifier_kind(self):
        # Check that this is an appropriate kind of name for use in an
        # expression. Also finds the variable entry associated with
        # an extension type.
        entry = self.entry
        if entry.is_type and entry.type.is_extension_type:
            self.type_entry = entry
        if not (entry.is_const or entry.is_variable
                or entry.is_builtin or entry.is_cfunction
                or entry.is_cpp_class):
            if self.entry.as_variable:
                self.entry = self.entry.as_variable
            else:
                error(self.pos,
                      "'%s' is not a constant, variable or function identifier" % self.name)
    def is_simple(self):
        # A name is always cheap to re-evaluate:
        # If it's not a C variable, it'll be in a temp.
        return 1
    def may_be_none(self):
        """Return whether this name can evaluate to None, based on the
        control-flow assignments recorded in self.cf_state."""
        if self.cf_state and self.type and (self.type.is_pyobject or
                                            self.type.is_memoryviewslice):
            # guard against infinite recursion on self-dependencies
            if getattr(self, '_none_checking', False):
                # self-dependency - either this node receives a None
                # value from *another* node, or it can not reference
                # None at this point => safe to assume "not None"
                return False
            self._none_checking = True
            # evaluate control flow state to see if there were any
            # potential None values assigned to the node so far
            may_be_none = False
            for assignment in self.cf_state:
                if assignment.rhs.may_be_none():
                    may_be_none = True
                    break
            del self._none_checking
            return may_be_none
        # no control-flow info: defer to the generic answer
        return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
if ExprNode.nonlocally_immutable(self):
return True
entry = self.entry
if not entry or entry.in_closure:
return False
return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
    def calculate_target_results(self, env):
        # Nothing to do: as an assignment target, a name writes straight
        # into its entry's storage.
        pass
def check_const(self):
entry = self.entry
if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
self.not_const()
return False
return True
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
self.addr_not_const()
return False
return True
def is_lvalue(self):
return self.entry.is_variable and \
not self.entry.type.is_array and \
not self.entry.is_readonly
def is_addressable(self):
return self.entry.is_variable and not self.type.is_memoryviewslice
    def is_ephemeral(self):
        # Name nodes are never ephemeral, even if the
        # result is in a temporary.
        return 0
def calculate_result_code(self):
entry = self.entry
if not entry:
return "<error>" # There was an error earlier
return entry.cname
    def generate_result_code(self, code):
        """Emit C code that loads this name's value into self.result().

        Cached const builtins need no code at all; class-body attributes,
        builtins and module globals are looked up at runtime; local and
        closure variables may only need an unbound-access check (their
        value is read in place via calculate_result_code()).
        """
        assert hasattr(self, 'entry')
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if entry.is_builtin and entry.is_const:
            return # Lookup already cached
        elif entry.is_pyclass_attr:
            # name used inside a Python class body: look it up in the class
            # namespace first, falling back to the module globals/builtins
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.is_builtin:
                namespace = Naming.builtins_cname
            else: # entry.is_pyglobal
                namespace = entry.scope.namespace_cname
            if not self.cf_is_null:
                code.putln(
                    '%s = PyObject_GetItem(%s, %s);' % (
                        self.result(),
                        namespace,
                        interned_cname))
                code.putln('if (unlikely(!%s)) {' % self.result())
                code.putln('PyErr_Clear();')
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetModuleGlobalName(%s);' % (
                    self.result(),
                    interned_cname))
            if not self.cf_is_null:
                code.putln("}")
            code.putln(code.error_goto_if_null(self.result(), self.pos))
            code.put_gotref(self.py_result())
        elif entry.is_builtin:
            # runtime builtins lookup (not cached as const)
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetBuiltinName(%s); %s' % (
                    self.result(),
                    interned_cname,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif entry.is_pyglobal:
            # module global or class-scope name
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.scope.is_module_scope:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
                code.putln(
                    '%s = __Pyx_GetModuleGlobalName(%s); %s' % (
                        self.result(),
                        interned_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            else:
                # FIXME: is_pyglobal is also used for class namespace
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
                code.putln(
                    '%s = __Pyx_GetNameInClass(%s, %s); %s' % (
                        self.result(),
                        entry.scope.namespace_cname,
                        interned_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
            # Raise UnboundLocalError for objects and memoryviewslices
            raise_unbound = (
                (self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
            null_code = entry.type.check_for_null_code(entry.cname)
            memslice_check = entry.type.is_memoryviewslice and self.initialized_check
            if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
                code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
    def generate_assignment_code(self, rhs, code):
        """Emit C code assigning 'rhs' to this name.

        Python globals / class attributes go through a dict or attribute
        setter; C-level storage is written directly, with reference
        counting (decref of the old value, giveref for externally visible
        references) handled according to the control-flow null state.
        """
        #print "NameNode.generate_assignment_code:", self.name ###
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
            and not self.lhs_of_first_assignment and not rhs.in_module_scope):
            error(self.pos, "Literal list must be assigned to pointer at time of declaration")
        # is_pyglobal seems to be True for module level-globals only.
        # We use this to access class->tp_dict if necessary.
        if entry.is_pyglobal:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            namespace = self.entry.scope.namespace_cname
            if entry.is_member:
                # if the entry is a member we have to cheat: SetAttr does not work
                # on types, so we create a descriptor which is then added to tp_dict
                setter = 'PyDict_SetItem'
                namespace = '%s->tp_dict' % namespace
            elif entry.scope.is_module_scope:
                setter = 'PyDict_SetItem'
                namespace = Naming.moddict_cname
            elif entry.is_pyclass_attr:
                setter = 'PyObject_SetItem'
            else:
                assert False, repr(entry)
            code.put_error_if_neg(
                self.pos,
                '%s(%s, %s, %s)' % (
                    setter,
                    namespace,
                    interned_cname,
                    rhs.py_result()))
            if debug_disposal_code:
                print("NameNode.generate_assignment_code:")
                print("...generating disposal code for %s" % rhs)
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
            if entry.is_member:
                # in Py2.6+, we need to invalidate the method cache
                code.putln("PyType_Modified(%s);" %
                           entry.scope.parent_type.typeptr_cname)
        else:
            # assignment to C-level storage (local, closure, cglobal, ...)
            if self.type.is_memoryviewslice:
                self.generate_acquire_memoryviewslice(rhs, code)
            elif self.type.is_buffer:
                # Generate code for doing the buffer release/acquisition.
                # This might raise an exception in which case the assignment (done
                # below) will not happen.
                #
                # The reason this is not in a typetest-like node is because the
                # variables that the acquired buffer info is stored to is allocated
                # per entry and coupled with it.
                self.generate_acquire_buffer(rhs, code)
            assigned = False
            if self.type.is_pyobject:
                #print "NameNode.generate_assignment_code: to", self.name ###
                #print "...from", rhs ###
                #print "...LHS type", self.type, "ctype", self.ctype() ###
                #print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
                if self.use_managed_ref:
                    rhs.make_owned_reference(code)
                    is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
                    if is_external_ref:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xgotref(self.py_result())
                            else:
                                code.put_gotref(self.py_result())
                    assigned = True
                    if entry.is_cglobal:
                        # globals always hold a value => plain decref-and-set
                        code.put_decref_set(
                            self.result(), rhs.result_as(self.ctype()))
                    else:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xdecref_set(
                                    self.result(), rhs.result_as(self.ctype()))
                            else:
                                code.put_decref_set(
                                    self.result(), rhs.result_as(self.ctype()))
                        else:
                            # known-unbound: plain store below, no decref
                            assigned = False
                    if is_external_ref:
                        code.put_giveref(rhs.py_result())
            if not self.type.is_memoryviewslice:
                if not assigned:
                    code.putln('%s = %s;' % (
                        self.result(), rhs.result_as(self.ctype())))
                if debug_disposal_code:
                    print("NameNode.generate_assignment_code:")
                    print("...generating post-assignment code for %s" % rhs)
                rhs.generate_post_assignment_code(code)
            elif rhs.result_in_temp():
                rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
    def generate_acquire_memoryviewslice(self, rhs, code):
        """
        Slices, coercions from objects, return values etc are new references.
        We have a borrowed reference in case of dst = src
        """
        # delegate the actual slice copy/acquisition to the MemoryView helper
        import MemoryView
        MemoryView.put_acquire_memoryviewslice(
            lhs_cname=self.result(),
            lhs_type=self.type,
            lhs_pos=self.pos,
            rhs=rhs,
            code=code,
            have_gil=not self.in_nogil_context,
            first_assignment=self.cf_is_null)
    def generate_acquire_buffer(self, rhs, code):
        """Emit the buffer release/re-acquisition code for assigning
        'rhs' to this buffer-typed name."""
        # rhstmp is only used in case the rhs is a complicated expression leading to
        # the object, to avoid repeating the same C expression for every reference
        # to the rhs. It does NOT hold a reference.
        pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
        if pretty_rhs:
            rhstmp = rhs.result_as(self.ctype())
        else:
            rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
            code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
        import Buffer
        Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
                                    is_initialized=not self.lhs_of_first_assignment,
                                    pos=self.pos, code=code)
        if not pretty_rhs:
            # clear and release the unmanaged temp again
            code.putln("%s = 0;" % rhstmp)
            code.funcstate.release_temp(rhstmp)
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        """Emit C code for 'del name'.

        Class-body attributes are removed from the class namespace,
        module globals via attribute deletion on the module, and local
        Python objects / memoryview slices by decref-and-clear.  With
        ignore_nonexisting=True a missing name is silently tolerated.
        """
        if self.entry is None:
            return # There was an error earlier
        elif self.entry.is_pyclass_attr:
            namespace = self.entry.scope.namespace_cname
            interned_cname = code.intern_identifier(self.entry.name)
            if ignore_nonexisting:
                key_error_code = 'PyErr_Clear(); else'
            else:
                # minor hack: fake a NameError on KeyError
                key_error_code = (
                    '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
                    self.entry.name)
            code.putln(
                'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
                ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
                ' %s '
                '}' % (namespace, interned_cname,
                       key_error_code,
                       code.error_goto(self.pos)))
        elif self.entry.is_pyglobal:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            interned_cname = code.intern_identifier(self.entry.name)
            del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                Naming.module_cname, interned_cname)
            if ignore_nonexisting:
                code.putln('if (unlikely(%s < 0)) { if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s }' % (
                    del_code,
                    code.error_goto(self.pos)))
            else:
                code.put_error_if_neg(self.pos, del_code)
        elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
            if not self.cf_is_null:
                if self.cf_maybe_null and not ignore_nonexisting:
                    code.put_error_if_unbound(self.pos, self.entry)
                if self.entry.type.is_pyobject:
                    if self.entry.in_closure:
                        # generator
                        if ignore_nonexisting and self.cf_maybe_null:
                            code.put_xgotref(self.result())
                        else:
                            code.put_gotref(self.result())
                    if ignore_nonexisting and self.cf_maybe_null:
                        code.put_xdecref(self.result(), self.ctype())
                    else:
                        code.put_decref(self.result(), self.ctype())
                    code.putln('%s = NULL;' % self.result())
                else:
                    # memoryview slice: release the buffer reference
                    code.put_xdecref_memoryviewslice(self.entry.cname,
                                                     have_gil=not self.nogil)
        else:
            error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
code.annotate(pos, AnnotationItem('py_call', 'python function', size=len(self.name)))
else:
code.annotate(pos, AnnotationItem('c_call', 'c function', size=len(self.name)))
class BackquoteNode(ExprNode):
    """Python 2 backquote expression: `arg`, i.e. repr(arg).

    arg   ExprNode -- expression whose repr() is taken
    """
    type = py_object_type
    subexprs = ['arg']
    gil_message = "Backquote expression"

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env)
        self.arg = arg.coerce_to_pyobject(env)
        self.is_temp = 1
        return self

    def calculate_constant_result(self):
        self.constant_result = repr(self.arg.constant_result)

    def generate_result_code(self, code):
        # repr() at the C level; PyObject_Repr returns a new reference
        result = self.result()
        code.putln(
            "%s = PyObject_Repr(%s); %s" % (
                result,
                self.arg.py_result(),
                code.error_goto_if_null(result, self.pos)))
        code.put_gotref(self.py_result())
class ImportNode(ExprNode):
    #  Used as part of import statement implementation.
    #  Implements result =
    #       __import__(module_name, globals(), None, name_list, level)
    #
    #  module_name   StringNode            dotted name of module. Empty module
    #                       name means importing the parent package according
    #                       to level
    #  name_list     ListNode or None      list of names to be imported
    #  level         int                   relative import level:
    #                       -1: attempt both relative import and absolute import;
    #                        0: absolute import;
    #                       >0: the number of parent directories to search
    #                           relative to the current module.
    #                     None: decide the level according to language level and
    #                           directives
    type = py_object_type
    subexprs = ['module_name', 'name_list']
    def analyse_types(self, env):
        """Pick the import level from the active directives when not set,
        and coerce module name / name list to Python objects."""
        if self.level is None:
            # without absolute_import, Py2 semantics (-1: try both) apply
            if (env.directives['py2_import'] or
                Future.absolute_import not in env.global_scope().context.future_directives):
                self.level = -1
            else:
                self.level = 0
        module_name = self.module_name.analyse_types(env)
        self.module_name = module_name.coerce_to_pyobject(env)
        if self.name_list:
            name_list = self.name_list.analyse_types(env)
            self.name_list = name_list.coerce_to_pyobject(env)
        self.is_temp = 1
        env.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
        return self
    gil_message = "Python import"
    def generate_result_code(self, code):
        # emit the __Pyx_Import() call; "0" stands for a NULL name list
        if self.name_list:
            name_list_code = self.name_list.py_result()
        else:
            name_list_code = "0"
        code.putln(
            "%s = __Pyx_Import(%s, %s, %d); %s" % (
                self.result(),
                self.module_name.py_result(),
                name_list_code,
                self.level,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
    #  Used as part of for statement implementation.
    #
    #  Implements result = iter(sequence)
    #
    #  sequence   ExprNode
    type = py_object_type
    iter_func_ptr = None        # C temp holding tp_iternext, for generic iterables
    counter_cname = None        # C temp for the index when iterating list/tuple
    cpp_iterator_cname = None   # C temp pointing at a non-name C++ sequence
    reversed = False    # currently only used for list/tuple types (see Optimize.py)
    subexprs = ['sequence']
    def analyse_types(self, env):
        """Decide the iteration strategy: C array (transformed later),
        C++ begin()/end() iteration, or generic Python iteration."""
        self.sequence = self.sequence.analyse_types(env)
        if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
                not self.sequence.type.is_string:
            # C array iteration will be transformed later on
            self.type = self.sequence.type
        elif self.sequence.type.is_cpp_class:
            self.analyse_cpp_types(env)
        else:
            self.sequence = self.sequence.coerce_to_pyobject(env)
            if self.sequence.type is list_type or \
                   self.sequence.type is tuple_type:
                # known list/tuple: inject an explicit None check up front
                self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
        self.is_temp = 1
        return self
    gil_message = "Iterating over Python object"
    # signature of a tp_iternext slot: PyObject *(*)(PyObject *)
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))
    def type_dependencies(self, env):
        return self.sequence.type_dependencies(env)
    def infer_type(self, env):
        """Infer the iterator's type: the sequence itself for C arrays,
        begin()'s return type for C++ classes, else a Python object."""
        sequence_type = self.sequence.infer_type(env)
        if sequence_type.is_array or sequence_type.is_ptr:
            return sequence_type
        elif sequence_type.is_cpp_class:
            begin = sequence_type.scope.lookup("begin")
            if begin is not None:
                return begin.type.return_type
        elif sequence_type.is_pyobject:
            return sequence_type
        return py_object_type
    def analyse_cpp_types(self, env):
        """Validate that the C++ sequence supports begin()/end() and that
        the iterator type has operator!=, operator++ and operator*."""
        sequence_type = self.sequence.type
        if sequence_type.is_ptr:
            sequence_type = sequence_type.base_type
        begin = sequence_type.scope.lookup("begin")
        end = sequence_type.scope.lookup("end")
        if (begin is None
            or not begin.type.is_cfunction
            or begin.type.args):
            error(self.pos, "missing begin() on %s" % self.sequence.type)
            self.type = error_type
            return
        if (end is None
            or not end.type.is_cfunction
            or end.type.args):
            error(self.pos, "missing end() on %s" % self.sequence.type)
            self.type = error_type
            return
        iter_type = begin.type.return_type
        if iter_type.is_cpp_class:
            if env.lookup_operator_for_types(
                    self.pos,
                    "!=",
                    [iter_type, end.type.return_type]) is None:
                error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
                error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
                error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            self.type = iter_type
        elif iter_type.is_ptr:
            if not (iter_type == end.type.return_type):
                error(self.pos, "incompatible types for begin() and end()")
            self.type = iter_type
        else:
            error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
            self.type = error_type
            return
    def generate_result_code(self, code):
        """Emit iterator setup: begin() for C++, indexed fast path for
        (possible) list/tuple, PyObject_GetIter() for everything else."""
        sequence_type = self.sequence.type
        if sequence_type.is_cpp_class:
            if self.sequence.is_name:
                # safe: C++ won't allow you to reassign to class references
                begin_func = "%s.begin" % self.sequence.result()
            else:
                sequence_type = PyrexTypes.c_ptr_type(sequence_type)
                self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
                code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
                begin_func = "%s->begin" % self.cpp_iterator_cname
            # TODO: Limit scope.
            code.putln("%s = %s();" % (self.result(), begin_func))
            return
        if sequence_type.is_array or sequence_type.is_ptr:
            raise InternalError("for in carray slice not transformed")
        is_builtin_sequence = sequence_type is list_type or \
                              sequence_type is tuple_type
        if not is_builtin_sequence:
            # reversed() not currently optimised (see Optimize.py)
            assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
        self.may_be_a_sequence = not sequence_type.is_builtin_type
        if self.may_be_a_sequence:
            # runtime type check chooses between index and iternext path
            code.putln(
                "if (PyList_CheckExact(%s) || PyTuple_CheckExact(%s)) {" % (
                    self.sequence.py_result(),
                    self.sequence.py_result()))
        if is_builtin_sequence or self.may_be_a_sequence:
            self.counter_cname = code.funcstate.allocate_temp(
                PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            if self.reversed:
                # start at the last index and count down
                if sequence_type is list_type:
                    init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
                else:
                    init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
            else:
                init_value = '0'
            code.putln(
                "%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
                    self.result(),
                    self.sequence.py_result(),
                    self.result(),
                    self.counter_cname,
                    init_value
                    ))
        if not is_builtin_sequence:
            self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
            if self.may_be_a_sequence:
                # NULL iter_func_ptr marks the list/tuple fast path
                code.putln("%s = NULL;" % self.iter_func_ptr)
                code.putln("} else {")
                code.put("%s = -1; " % self.counter_cname)
            code.putln("%s = PyObject_GetIter(%s); %s" % (
                self.result(),
                self.sequence.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            # cache tp_iternext to avoid repeated slot lookups in the loop
            code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (self.iter_func_ptr, self.py_result()))
        if self.may_be_a_sequence:
            code.putln("}")
    def generate_next_sequence_item(self, test_name, result_name, code):
        """Emit the indexed item fetch for a list/tuple ('test_name' is
        'List' or 'Tuple'), bounds check included."""
        assert self.counter_cname, "internal error: counter_cname temp not prepared"
        code.putln(
            "if (%s >= Py%s_GET_SIZE(%s)) break;" % (
                self.counter_cname,
                test_name,
                self.py_result()))
        if self.reversed:
            inc_dec = '--'
        else:
            inc_dec = '++'
        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        code.putln(
            "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
                result_name,
                test_name,
                self.py_result(),
                self.counter_cname,
                result_name,
                self.counter_cname,
                inc_dec,
                # use the error label to avoid C compiler warnings if we only use it below
                code.error_goto_if_neg('0', self.pos)
                ))
        code.putln("#else")
        code.putln(
            "%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
                result_name,
                self.py_result(),
                self.counter_cname,
                self.counter_cname,
                inc_dec,
                code.error_goto_if_null(result_name, self.pos)))
        code.putln("#endif")
    def generate_iter_next_result_code(self, result_name, code):
        """Emit the per-iteration item fetch for the surrounding loop:
        C++ dereference+increment, direct list/tuple indexing, or a
        tp_iternext call with StopIteration handling."""
        sequence_type = self.sequence.type
        if self.reversed:
            code.putln("if (%s < 0) break;" % self.counter_cname)
        if sequence_type.is_cpp_class:
            if self.cpp_iterator_cname:
                end_func = "%s->end" % self.cpp_iterator_cname
            else:
                end_func = "%s.end" % self.sequence.result()
            # TODO: Cache end() call?
            code.putln("if (!(%s != %s())) break;" % (
                self.result(),
                end_func))
            code.putln("%s = *%s;" % (
                result_name,
                self.result()))
            code.putln("++%s;" % self.result())
            return
        elif sequence_type is list_type:
            self.generate_next_sequence_item('List', result_name, code)
            return
        elif sequence_type is tuple_type:
            self.generate_next_sequence_item('Tuple', result_name, code)
            return
        if self.may_be_a_sequence:
            # emit "if list {...} else if tuple {...} else " dispatch
            for test_name in ('List', 'Tuple'):
                code.putln("if (!%s && Py%s_CheckExact(%s)) {" % (
                    self.iter_func_ptr, test_name, self.py_result()))
                self.generate_next_sequence_item(test_name, result_name, code)
                code.put("} else ")
        code.putln("{")
        code.putln(
            "%s = %s(%s);" % (
                result_name,
                self.iter_func_ptr,
                self.py_result()))
        code.putln("if (unlikely(!%s)) {" % result_name)
        code.putln("if (PyErr_Occurred()) {")
        code.putln("if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
        code.putln("break;")
        code.putln("}")
        code.put_gotref(result_name)
        code.putln("}")
    def free_temps(self, code):
        # release the unmanaged helper temps allocated in generate_result_code()
        if self.counter_cname:
            code.funcstate.release_temp(self.counter_cname)
        if self.iter_func_ptr:
            code.funcstate.release_temp(self.iter_func_ptr)
            self.iter_func_ptr = None
        if self.cpp_iterator_cname:
            code.funcstate.release_temp(self.cpp_iterator_cname)
        ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
    #  Used as part of for statement implementation.
    #  Implements result = iterator.next()
    #  Created during analyse_types phase.
    #  The iterator is not owned by this node.
    #
    #  iterator   IteratorNode
    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator
    def type_dependencies(self, env):
        return self.iterator.type_dependencies(env)
    def infer_type(self, env, iterator_type = None):
        """Infer the element type produced by one iteration step."""
        if iterator_type is None:
            iterator_type = self.iterator.infer_type(env)
        if iterator_type.is_ptr or iterator_type.is_array:
            return iterator_type.base_type
        elif iterator_type.is_cpp_class:
            # element type is what operator*() yields, stripped of
            # reference and const qualifiers
            item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
            if item_type.is_reference:
                item_type = item_type.ref_base_type
            if item_type.is_const:
                item_type = item_type.const_base_type
            return item_type
        else:
            # Avoid duplication of complicated logic.
            fake_index_node = IndexNode(self.pos,
                                        base=self.iterator.sequence,
                                        index=IntNode(self.pos, value='0'))
            return fake_index_node.infer_type(env)
    def analyse_types(self, env):
        self.type = self.infer_type(env, self.iterator.type)
        self.is_temp = 1
        return self
    def generate_result_code(self, code):
        # the iterator node owns the fetch logic; we just receive the item
        self.iterator.generate_iter_next_result_code(self.result(), code)
class WithExitCallNode(ExprNode):
    # The __exit__() call of a 'with' statement.  Used in both the
    # except and finally clauses.
    # with_stat  WithStatNode                the surrounding 'with' statement
    # args       TupleNode or ResultStatNode the exception info tuple
    subexprs = ['args']
    def analyse_types(self, env):
        # result is the C boolean truth value of __exit__()'s return
        self.args = self.args.analyse_types(env)
        self.type = PyrexTypes.c_bint_type
        self.is_temp = True
        return self
    def generate_result_code(self, code):
        """Emit the __exit__(*args) call, clearing the exit function
        reference so it cannot be called twice, and convert the result
        to a C boolean."""
        if isinstance(self.args, TupleNode):
            # call only if it was not already called (and decref-cleared)
            code.putln("if (%s) {" % self.with_stat.exit_var)
        result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.putln("%s = PyObject_Call(%s, %s, NULL);" % (
            result_var,
            self.with_stat.exit_var,
            self.args.result()))
        # drop the __exit__ reference before error handling so it is
        # cleared on both the success and the error path
        code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
        code.putln(code.error_goto_if_null(result_var, self.pos))
        code.put_gotref(result_var)
        code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
        code.put_decref_clear(result_var, type=py_object_type)
        code.put_error_if_neg(self.pos, self.result())
        code.funcstate.release_temp(result_var)
        if isinstance(self.args, TupleNode):
            code.putln("}")
class ExcValueNode(AtomicExprNode):
    """Synthetic node, created during the analyse_types phase of an
    ExceptClauseNode, that reads the currently handled exception value.

    It has no result temp of its own: the C variable holding the value
    is injected later via set_var() and referenced directly.
    """
    type = py_object_type

    def __init__(self, pos, env):
        # 'env' is accepted for signature compatibility but not needed
        ExprNode.__init__(self, pos)

    def analyse_types(self, env):
        # nothing to analyse - the type is always a Python object
        return self

    def set_var(self, var):
        # record the C name that holds the exception value
        self.var = var

    def calculate_result_code(self):
        return self.var

    def generate_result_code(self, code):
        # value already lives in self.var; no code to emit
        pass
class TempNode(ExprNode):
    """Node created during the analyse_types phase of some nodes to hold
    a temporary value.

    Note: One must call "allocate" and "release" on the node during code
    generation to get/release the temp.  This is because the temp result
    is often used outside of the regular cycle.
    """
    subexprs = []

    def __init__(self, pos, type, env=None):
        # 'env' is unused but kept for call-site compatibility
        ExprNode.__init__(self, pos)
        self.type = type
        if type.is_pyobject:
            self.result_ctype = py_object_type
        self.is_temp = 1

    def analyse_types(self, env):
        return self

    def analyse_target_declaration(self, env):
        pass

    def generate_result_code(self, code):
        # the value is produced by whoever writes into the temp
        pass

    def allocate(self, code):
        self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)

    def release(self, code):
        code.funcstate.release_temp(self.temp_cname)
        self.temp_cname = None

    def result(self):
        try:
            return self.temp_cname
        except AttributeError:
            # Only a missing 'temp_cname' attribute means allocate() was
            # never called; the previous bare 'except:' also swallowed
            # unrelated errors.
            assert False, "Remember to call allocate/release on TempNode"
            raise

    # Do not participate in normal temp alloc/dealloc:
    def allocate_temp_result(self, code):
        pass

    def release_temp_result(self, code):
        pass
class PyTempNode(TempNode):
    #  TempNode holding a Python value.
    def __init__(self, pos, env):
        # delegate with the generic Python object type
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
    """Expression node standing for a raw C name, bypassing the usual
    temp machinery.  The cname may be given at construction time or
    injected later via set_cname()."""
    subexprs = []

    def __init__(self, pos, type=None, cname=None):
        ExprNode.__init__(self, pos, type=type)
        if cname is not None:
            self.cname = cname

    def set_cname(self, cname):
        # late injection of the C name to reference
        self.cname = cname

    def analyse_types(self, env):
        # type was fixed at construction; nothing to infer
        return self

    def result(self):
        return self.cname

    def generate_result_code(self, code):
        # the result is the raw C name itself; nothing to emit
        pass
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
    """
    Note: this is disabled and not a valid directive at this moment

    Implements cython.parallel.threadsavailable(). If we are called from the
    sequential part of the application, we need to call omp_get_max_threads(),
    and in the parallel part we can just call omp_get_num_threads()
    """
    type = PyrexTypes.c_int_type

    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self

    def generate_result_code(self, code):
        # BUG FIX: the branches were swapped relative to the documented
        # semantics above - inside a parallel region omp_get_num_threads()
        # returns the team size, while in the sequential part it always
        # returns 1 and omp_get_max_threads() must be used instead.
        code.putln("#ifdef _OPENMP")
        code.putln("if (omp_in_parallel()) %s = omp_get_num_threads();" %
                                                            self.temp_code)
        code.putln("else %s = omp_get_max_threads();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 1;" % self.temp_code)
        code.putln("#endif")

    def result(self):
        return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
    """
    Implements cython.parallel.threadid()
    """
    type = PyrexTypes.c_int_type
    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self
    def generate_result_code(self, code):
        # without OpenMP there is only one thread, so the id is 0
        code.putln("#ifdef _OPENMP")
        code.putln("%s = omp_get_thread_num();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 0;" % self.temp_code)
        code.putln("#endif")
    def result(self):
        return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class IndexNode(ExprNode):
# Sequence indexing.
#
# base ExprNode
# index ExprNode
# indices [ExprNode]
# is_buffer_access boolean Whether this is a buffer access.
#
# indices is used on buffer access, index on non-buffer access.
# The former contains a clean list of index parameters, the
# latter whatever Python object is needed for index access.
#
# is_fused_index boolean Whether the index is used to specialize a
# c(p)def function
subexprs = ['base', 'index', 'indices']
indices = None
is_fused_index = False
# Whether we're assigning to a buffer (in that case it needs to be
# writable)
writable_needed = False
# Whether we are indexing or slicing a memoryviewslice
memslice_index = False
memslice_slice = False
is_memslice_copy = False
memslice_ellipsis_noop = False
warned_untyped_idx = False
# set by SingleAssignmentNode after analyse_types()
is_memslice_scalar_assignment = False
def __init__(self, pos, index, **kw):
ExprNode.__init__(self, pos, index=index, **kw)
self._index = index
def calculate_constant_result(self):
self.constant_result = \
self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception, e:
self.compile_time_value_error(e)
def is_ephemeral(self):
return self.base.is_ephemeral()
def is_simple(self):
if self.is_buffer_access or self.memslice_index:
return False
elif self.memslice_slice:
return True
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def analyse_target_declaration(self, env):
pass
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if base_type.is_cpp_class:
if isinstance(self.index, TupleNode):
template_values = self.index.args
else:
template_values = [self.index]
import Nodes
type_node = Nodes.TemplatedTypeNode(
pos = self.pos,
positional_args = template_values,
keyword_args = None)
return type_node.analyse(env, base_type = base_type)
else:
return PyrexTypes.CArrayType(base_type, int(self.index.compile_time_value(env)))
return None
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
base_type = self.base.infer_type(env)
if isinstance(self.index, SliceNode):
# slicing!
if base_type.is_string:
# sliced C strings must coerce to Python
return bytes_type
elif base_type.is_pyunicode_ptr:
# sliced Py_UNICODE* strings must coerce to Python
return unicode_type
elif base_type in (unicode_type, bytes_type, str_type, list_type, tuple_type):
# slicing these returns the same type
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
index_type = self.index.infer_type(env)
if index_type and index_type.is_int or isinstance(self.index, IntNode):
# indexing!
if base_type is unicode_type:
# Py_UCS4 will automatically coerce to a unicode string
# if required, so this is safe. We only infer Py_UCS4
# when the index is a C integer type. Otherwise, we may
# need to use normal Python item access, in which case
# it's faster to return the one-char unicode string than
# to receive it, throw it away, and potentially rebuild it
# on a subsequent PyObject coercion.
return PyrexTypes.c_py_ucs4_type
elif base_type is str_type:
# always returns str - Py2: bytes, Py3: unicode
return base_type
elif isinstance(self.base, BytesNode):
#if env.global_scope().context.language_level >= 3:
# # inferring 'char' can be made to work in Python 3 mode
# return PyrexTypes.c_char_type
# Py2/3 return different types on indexing bytes objects
return py_object_type
elif base_type.is_ptr or base_type.is_array:
return base_type.base_type
if base_type.is_cpp_class:
class FakeOperand:
def __init__(self, **kwds):
self.__dict__.update(kwds)
operands = [
FakeOperand(pos=self.pos, type=base_type),
FakeOperand(pos=self.pos, type=index_type),
]
index_func = env.lookup_operator('[]', operands)
if index_func is not None:
return index_func.type.return_type
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
# these types always returns their own type on Python indexing/slicing
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
def analyse_types(self, env):
return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
                                 analyse_base=True):
    """Shared analysis for read ('getting') and write ('setting') access.

    Dispatches on the base type: memoryview copy/slice/index, buffer
    access, Python object indexing, pointer/array indexing, C++
    operator[], or indexing a fused cdef function with types.
    Returns self (possibly with coerced children), or self with
    error_type set on failure.
    """
    # Note: This might be cleaned up by having IndexNode
    # parsed in a saner way and only construct the tuple if
    # needed.

    # Note that this function must leave IndexNode in a cloneable state.
    # For buffers, self.index is packed out on the initial analysis, and
    # when cloning self.indices is copied.
    self.is_buffer_access = False
    # a[...] = b
    self.is_memslice_copy = False
    # incomplete indexing, Ellipsis indexing or slicing
    self.memslice_slice = False
    # integer indexing
    self.memslice_index = False

    if analyse_base:
        self.base = self.base.analyse_types(env)

    if self.base.type.is_error:
        # Do not visit child tree if base is undeclared to avoid confusing
        # error messages
        self.type = PyrexTypes.error_type
        return self

    is_slice = isinstance(self.index, SliceNode)

    if not env.directives['wraparound']:
        if is_slice:
            check_negative_indices(self.index.start, self.index.stop)
        else:
            check_negative_indices(self.index)

    # Potentially overflowing index value.
    if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
        self.index = self.index.coerce_to_pyobject(env)

    is_memslice = self.base.type.is_memoryviewslice

    # Handle the case where base is a literal char* (and we expect a string, not an int)
    if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
        if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
            self.base = self.base.coerce_to_pyobject(env)

    skip_child_analysis = False
    buffer_access = False

    # Normalise the index expression(s) into a flat list.
    if self.indices:
        indices = self.indices
    elif isinstance(self.index, TupleNode):
        indices = self.index.args
    else:
        indices = [self.index]

    if (is_memslice and not self.indices and
            isinstance(self.index, EllipsisNode)):
        # Memoryviewslice copying
        self.is_memslice_copy = True

    elif is_memslice:
        # memoryviewslice indexing or slicing
        import MemoryView

        skip_child_analysis = True
        # None entries introduce new axes ("newaxis" semantics).
        newaxes = [newaxis for newaxis in indices if newaxis.is_none]
        have_slices, indices = MemoryView.unellipsify(indices,
                                                      newaxes,
                                                      self.base.type.ndim)

        self.memslice_index = (not newaxes and
                               len(indices) == self.base.type.ndim)
        axes = []

        index_type = PyrexTypes.c_py_ssize_t_type
        new_indices = []

        if len(indices) - len(newaxes) > self.base.type.ndim:
            self.type = error_type
            error(indices[self.base.type.ndim].pos,
                  "Too many indices specified for type %s" %
                  self.base.type)
            return self

        axis_idx = 0
        for i, index in enumerate(indices[:]):
            index = index.analyse_types(env)
            # Only real (non-newaxis) indices consume an axis of the base.
            if not index.is_none:
                access, packing = self.base.type.axes[axis_idx]
                axis_idx += 1

            if isinstance(index, SliceNode):
                self.memslice_slice = True
                if index.step.is_none:
                    axes.append((access, packing))
                else:
                    # a stepped slice breaks contiguity of the axis
                    axes.append((access, 'strided'))

                # Coerce start, stop and step to temps of the right type
                for attr in ('start', 'stop', 'step'):
                    value = getattr(index, attr)
                    if not value.is_none:
                        value = value.coerce_to(index_type, env)
                        #value = value.coerce_to_temp(env)
                        setattr(index, attr, value)
                        new_indices.append(value)

            elif index.is_none:
                self.memslice_slice = True
                new_indices.append(index)
                axes.append(('direct', 'strided'))

            elif index.type.is_int or index.type.is_pyobject:
                if index.type.is_pyobject and not self.warned_untyped_idx:
                    warning(index.pos, "Index should be typed for more "
                                       "efficient access", level=2)
                    # class-level flag: warn only once per compilation
                    IndexNode.warned_untyped_idx = True

                self.memslice_index = True
                index = index.coerce_to(index_type, env)
                indices[i] = index
                new_indices.append(index)

            else:
                self.type = error_type
                error(index.pos, "Invalid index for memoryview specified")
                return self

        # any slice in the subscript turns the whole access into slicing
        self.memslice_index = self.memslice_index and not self.memslice_slice
        self.original_indices = indices
        # All indices with all start/stop/step for slices.
        # We need to keep this around
        self.indices = new_indices
        self.env = env

    elif self.base.type.is_buffer:
        # Buffer indexing: only full-dimensional, all-integer subscripts
        if len(indices) == self.base.type.ndim:
            buffer_access = True
            skip_child_analysis = True
            for x in indices:
                x = x.analyse_types(env)
                if not x.type.is_int:
                    buffer_access = False

        if buffer_access and not self.base.type.is_memoryviewslice:
            assert hasattr(self.base, "entry") # Must be a NameNode-like node

    # On cloning, indices is cloned. Otherwise, unpack index into indices
    assert not (buffer_access and isinstance(self.index, CloneNode))

    self.nogil = env.nogil

    if buffer_access or self.memslice_index:
        #if self.base.type.is_memoryviewslice and not self.base.is_name:
        #    self.base = self.base.coerce_to_temp(env)
        self.base = self.base.coerce_to_simple(env)

        self.indices = indices
        self.index = None
        self.type = self.base.type.dtype
        self.is_buffer_access = True
        self.buffer_type = self.base.type #self.base.entry.type

        if getting and self.type.is_pyobject:
            self.is_temp = True

        if setting and self.base.type.is_memoryviewslice:
            self.base.type.writable_needed = True
        elif setting:
            if not self.base.entry.type.writable:
                error(self.pos, "Writing to readonly buffer")
            else:
                self.writable_needed = True
                if self.base.type.is_buffer:
                    self.base.entry.buffer_aux.writable_needed = True

    elif self.is_memslice_copy:
        self.type = self.base.type
        if getting:
            self.memslice_ellipsis_noop = True
        else:
            self.memslice_broadcast = True

    elif self.memslice_slice:
        self.index = None
        self.is_temp = True
        self.use_managed_ref = True

        if not MemoryView.validate_axes(self.pos, axes):
            self.type = error_type
            return self

        self.type = PyrexTypes.MemoryViewSliceType(
                        self.base.type.dtype, axes)

        if (self.base.type.is_memoryviewslice and not
                self.base.is_name and not
                self.base.result_in_temp()):
            self.base = self.base.coerce_to_temp(env)

        if setting:
            self.memslice_broadcast = True

    else:
        base_type = self.base.type

        fused_index_operation = base_type.is_cfunction and base_type.is_fused
        if not fused_index_operation:
            if isinstance(self.index, TupleNode):
                self.index = self.index.analyse_types(
                    env, skip_children=skip_child_analysis)
            elif not skip_child_analysis:
                self.index = self.index.analyse_types(env)
            self.original_index_type = self.index.type

        if base_type.is_unicode_char:
            # we infer Py_UNICODE/Py_UCS4 for unicode strings in some
            # cases, but indexing must still work for them
            if self.index.constant_result in (0, -1):
                # FIXME: we know that this node is redundant -
                # currently, this needs to get handled in Optimize.py
                pass
            self.base = self.base.coerce_to_pyobject(env)
            base_type = self.base.type
        if base_type.is_pyobject:
            if self.index.type.is_int:
                # Fast path: constant non-negative int index into a
                # list/tuple read can skip the temp (and bounds/wraparound
                # handling) entirely.
                if (not setting
                    and (base_type in (list_type, tuple_type))
                    and (not self.index.type.signed
                         or not env.directives['wraparound']
                         or (isinstance(self.index, IntNode) and
                             self.index.has_constant_result() and self.index.constant_result >= 0))
                    and not env.directives['boundscheck']):
                    self.is_temp = 0
                else:
                    self.is_temp = 1
                self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
            else:
                self.index = self.index.coerce_to_pyobject(env)
                self.is_temp = 1
            if self.index.type.is_int and base_type is unicode_type:
                # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
                # if required, so this is fast and safe
                self.type = PyrexTypes.c_py_ucs4_type
            elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
                self.type = base_type
            else:
                if base_type in (list_type, tuple_type, dict_type):
                    # do the None check explicitly (not in a helper) to allow optimising it away
                    self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
                self.type = py_object_type
        else:
            if base_type.is_ptr or base_type.is_array:
                self.type = base_type.base_type
                if is_slice:
                    self.type = base_type
                elif self.index.type.is_pyobject:
                    self.index = self.index.coerce_to(
                        PyrexTypes.c_py_ssize_t_type, env)
                elif not self.index.type.is_int:
                    error(self.pos,
                          "Invalid index type '%s'" %
                          self.index.type)
            elif base_type.is_cpp_class:
                function = env.lookup_operator("[]", [self.base, self.index])
                if function is None:
                    error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
                    self.type = PyrexTypes.error_type
                    self.result_code = "<error>"
                    return self
                func_type = function.type
                if func_type.is_ptr:
                    func_type = func_type.base_type
                self.index = self.index.coerce_to(func_type.args[0].type, env)
                self.type = func_type.return_type
                if setting and not func_type.return_type.is_reference:
                    error(self.pos, "Can't set non-reference result '%s'" % self.type)
            elif fused_index_operation:
                self.parse_indexed_fused_cdef(env)
            else:
                error(self.pos,
                      "Attempting to index non-array type '%s'" %
                      base_type)
                self.type = PyrexTypes.error_type

    self.wrap_in_nonecheck_node(env, getting)
    return self
def wrap_in_nonecheck_node(self, env, getting):
    """Wrap the base in a None-check node when the 'nonecheck' directive
    is active and the base expression may be None."""
    if not env.directives['nonecheck']:
        return
    if not self.base.may_be_none():
        return
    # Pick the error message that matches the kind of access.
    if not self.base.type.is_memoryviewslice:
        msg = "'NoneType' object is not subscriptable"
    elif self.is_memslice_copy and not getting:
        msg = "Cannot assign to None memoryview slice"
    elif self.memslice_slice:
        msg = "Cannot slice None memoryview slice"
    else:
        msg = "Cannot index None memoryview slice"
    self.base = self.base.as_none_safe_node(msg)
def parse_indexed_fused_cdef(self, env):
    """
    Interpret fused_cdef_func[specific_type1, ...]

    Note that if this method is called, we are an indexed cdef function
    with fused argument types, and this IndexNode will be replaced by the
    NameNode with specific entry just after analysis of expressions by
    AnalyseExpressionsTransform.
    """
    self.type = PyrexTypes.error_type

    self.is_fused_index = True

    base_type = self.base.type
    specific_types = []
    positions = []

    # Collect the types named in the subscript (single name/attribute or
    # a tuple of them); anything else is marked invalid with False.
    if self.index.is_name or self.index.is_attribute:
        positions.append(self.index.pos)
        specific_types.append(self.index.analyse_as_type(env))
    elif isinstance(self.index, TupleNode):
        for arg in self.index.args:
            positions.append(arg.pos)
            specific_type = arg.analyse_as_type(env)
            specific_types.append(specific_type)
    else:
        specific_types = [False]

    if not Utils.all(specific_types):
        # Not every subscript entry named a type.
        self.index = self.index.analyse_types(env)

        if not self.base.entry.as_variable:
            error(self.pos, "Can only index fused functions with types")
        else:
            # A cpdef function indexed with Python objects
            self.base.entry = self.entry = self.base.entry.as_variable
            self.base.type = self.type = self.entry.type

            self.base.is_temp = True
            self.is_temp = True

            self.entry.used = True

        self.is_fused_index = False
        return

    for i, type in enumerate(specific_types):
        specific_types[i] = type.specialize_fused(env)

    fused_types = base_type.get_fused_types()
    if len(specific_types) > len(fused_types):
        return error(self.pos, "Too many types specified")
    elif len(specific_types) < len(fused_types):
        t = fused_types[len(specific_types)]
        return error(self.pos, "Not enough types specified to specialize "
                               "the function, %s is still fused" % t)

    # See if our index types form valid specializations
    for pos, specific_type, fused_type in zip(positions,
                                              specific_types,
                                              fused_types):
        if not Utils.any([specific_type.same_as(t)
                              for t in fused_type.types]):
            return error(pos, "Type not in fused type")

        if specific_type is None or specific_type.is_error:
            return

    fused_to_specific = dict(zip(fused_types, specific_types))
    type = base_type.specialize(fused_to_specific)

    if type.is_fused:
        # Only partially specific, this is invalid
        error(self.pos,
              "Index operation makes function only partially specific")
    else:
        # Fully specific, find the signature with the specialized entry
        for signature in self.base.type.get_all_specialized_function_types():
            if type.same_as(signature):
                self.type = signature

                if self.base.is_attribute:
                    # Pretend to be a normal attribute, for cdef extension
                    # methods
                    self.entry = signature.entry
                    self.is_attribute = True
                    self.obj = self.base.obj

                self.type.entry.used = True
                self.base.type = signature
                self.base.entry = signature.entry

                break
        else:
            # This is a bug
            raise InternalError("Couldn't find the right signature")
# Message used when this node requires the GIL but none is held.
gil_message = "Indexing Python object"

def nogil_check(self, env):
    """Validate this index access in a nogil context.

    Buffer/memoryview accesses are allowed without the GIL, but warn
    about bounds checking (which needs the GIL to raise) and reject
    object-dtype element access.
    """
    if self.is_buffer_access or self.memslice_index or self.memslice_slice:
        if not self.memslice_slice and env.directives['boundscheck']:
            # error(self.pos, "Cannot check buffer index bounds without gil; "
            #                 "use boundscheck(False) directive")
            warning(self.pos, "Use boundscheck(False) for faster access",
                    level=1)
        if self.type.is_pyobject:
            error(self.pos, "Cannot access buffer with object dtype without gil")
        return
    # Plain Python indexing needs the GIL: fall back to the generic error.
    super(IndexNode, self).nogil_check(env)
def check_const_addr(self):
    # The element has a constant address only when the base does and the
    # index itself is a compile-time constant.
    base_is_const = self.base.check_const_addr()
    return base_is_const and self.index.check_const()
def is_lvalue(self):
    # Assignable unless the element is itself an array (C forbids
    # assigning to whole arrays), i.e. base is an array of arrays.
    if not (self.type.is_ptr or self.type.is_array):
        return True
    return not self.base.type.base_type.is_array
def calculate_result_code(self):
    """Return the C expression for this index access (non-temp cases)."""
    if self.is_buffer_access:
        return "(*%s)" % self.buffer_ptr_code
    elif self.is_memslice_copy:
        return self.base.result()
    elif self.base.type is list_type:
        # direct macro access; only reached when analysis allowed
        # skipping bounds/wraparound handling
        return "PyList_GET_ITEM(%s, %s)" % (self.base.result(), self.index.result())
    elif self.base.type is tuple_type:
        return "PyTuple_GET_ITEM(%s, %s)" % (self.base.result(), self.index.result())
    elif (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
        # NOTE(review): this branch reports the error but falls through
        # returning None — presumably acceptable because error_type
        # propagation stops code generation; confirm before changing.
        error(self.pos, "Invalid use of pointer slice")
    else:
        return "(%s[%s])" % (
            self.base.result(), self.index.result())
def extra_index_params(self, code):
    """Build the trailing argument string for the __Pyx_*ItemInt helpers.

    For integer indices this passes the index type's size (adjusted for
    unsigned types), its to-Python converter, and flags for list-ness,
    wraparound and boundscheck; for object indices it is empty.
    """
    if self.index.type.is_int:
        if self.original_index_type.signed:
            size_adjustment = ""
        else:
            # unsigned types of the same size need one extra bit in Python
            size_adjustment = "+1"
        is_list = self.base.type is list_type
        # wraparound is only needed for possibly-negative signed indices
        # (note: `long` here - this file is Python 2 code)
        wraparound = (
            bool(code.globalstate.directives['wraparound']) and
            self.original_index_type.signed and
            not (isinstance(self.index.constant_result, (int, long))
                 and self.index.constant_result >= 0))
        boundscheck = bool(code.globalstate.directives['boundscheck'])
        return ", sizeof(%s)%s, %s, %d, %d, %d" % (
            self.original_index_type.declaration_code(""),
            size_adjustment,
            self.original_index_type.to_py_function,
            is_list, wraparound, boundscheck)
    else:
        return ""
def generate_subexpr_evaluation_code(self, code):
    # Evaluate the base first, then either the single index expression
    # or, for buffer/memoryview accesses, every unpacked index.
    self.base.generate_evaluation_code(code)
    targets = [self.index] if self.indices is None else self.indices
    for node in targets:
        node.generate_evaluation_code(code)
def generate_subexpr_disposal_code(self, code):
    # Dispose of the base, then of whichever index form is in use.
    self.base.generate_disposal_code(code)
    targets = self.indices if self.indices is not None else [self.index]
    for node in targets:
        node.generate_disposal_code(code)
def free_subexpr_temps(self, code):
    # Release temporaries of the base and of whichever index form is in use.
    self.base.free_temps(code)
    targets = self.indices if self.indices is not None else [self.index]
    for node in targets:
        node.free_temps(code)
def generate_result_code(self, code):
    """Emit the C code that computes this index access into the result temp."""
    if self.is_buffer_access or self.memslice_index:
        buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
        if self.type.is_pyobject:
            # is_temp is True, so must pull out value and incref it.
            # NOTE: object temporary results for nodes are declared
            #       as PyObject *, so we need a cast
            code.putln("%s = (PyObject *) *%s;" % (self.temp_code,
                                                   self.buffer_ptr_code))
            code.putln("__Pyx_INCREF((PyObject*)%s);" % self.temp_code)

    elif self.memslice_slice:
        self.put_memoryviewslice_slice_code(code)

    elif self.is_temp:
        if self.type.is_pyobject:
            if self.index.type.is_int:
                index_code = self.index.result()
                # pick the specialized integer-getitem helper
                if self.base.type is list_type:
                    function = "__Pyx_GetItemInt_List"
                elif self.base.type is tuple_type:
                    function = "__Pyx_GetItemInt_Tuple"
                else:
                    function = "__Pyx_GetItemInt"
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
            else:
                index_code = self.index.py_result()
                if self.base.type is dict_type:
                    function = "__Pyx_PyDict_GetItem"
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
                else:
                    function = "PyObject_GetItem"
            code.putln(
                "%s = %s(%s, %s%s); if (!%s) %s" % (
                    self.result(),
                    function,
                    self.base.py_result(),
                    index_code,
                    self.extra_index_params(code),
                    self.result(),
                    code.error_goto(self.pos)))
            code.put_gotref(self.py_result())
        elif self.type.is_unicode_char and self.base.type is unicode_type:
            # unicode[int] -> Py_UCS4; helper returns (Py_UCS4)-1 on error
            assert self.index.type.is_int
            index_code = self.index.result()
            function = "__Pyx_GetItemInt_Unicode"
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
            code.putln(
                "%s = %s(%s, %s%s); if (unlikely(%s == (Py_UCS4)-1)) %s;" % (
                    self.result(),
                    function,
                    self.base.py_result(),
                    index_code,
                    self.extra_index_params(code),
                    self.result(),
                    code.error_goto(self.pos)))
def generate_setitem_code(self, value_code, code):
    """Emit C code assigning value_code to this Python-object index target."""
    if self.index.type.is_int:
        function = "__Pyx_SetItemInt"
        index_code = self.index.result()
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
    else:
        index_code = self.index.py_result()
        if self.base.type is dict_type:
            function = "PyDict_SetItem"
        # It would seem that we could specialized lists/tuples, but that
        # shouldn't happen here.
        # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
        # index instead of an object, and bad conversion here would give
        # the wrong exception. Also, tuples are supposed to be immutable,
        # and raise a TypeError when trying to set their entries
        # (PyTuple_SetItem() is for creating new tuples from scratch).
        else:
            function = "PyObject_SetItem"
    code.putln(
        "if (%s(%s, %s, %s%s) < 0) %s" % (
            function,
            self.base.py_result(),
            index_code,
            value_code,
            self.extra_index_params(code),
            code.error_goto(self.pos)))
def generate_buffer_setitem_code(self, rhs, code, op=""):
    """Emit C code storing rhs into a buffer element (op allows '+=' etc.).

    Used from generate_assignment_code and InPlaceAssignmentNode.
    """
    buffer_entry, ptrexpr = self.buffer_lookup_code(code)

    if self.buffer_type.dtype.is_pyobject:
        # Must manage refcounts. Decref what is already there
        # and incref what we put in.
        ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
                                           manage_ref=False)
        rhs_code = rhs.result()
        code.putln("%s = %s;" % (ptr, ptrexpr))
        code.put_gotref("*%s" % ptr)
        code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
            rhs_code, ptr))
        code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
        code.put_giveref("*%s" % ptr)
        code.funcstate.release_temp(ptr)
    else:
        # Simple case
        code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_assignment_code(self, rhs, code):
    """Emit C code for 'self = rhs', dispatching on the access kind."""
    # Scalar memslice assignment / memslice slicing need the full node
    # evaluated (the slice itself is a value); other cases only need the
    # subexpressions.
    generate_evaluation_code = (self.is_memslice_scalar_assignment or
                                self.memslice_slice)
    if generate_evaluation_code:
        self.generate_evaluation_code(code)
    else:
        self.generate_subexpr_evaluation_code(code)

    if self.is_buffer_access or self.memslice_index:
        self.generate_buffer_setitem_code(rhs, code)
    elif self.is_memslice_scalar_assignment:
        self.generate_memoryviewslice_assign_scalar_code(rhs, code)
    elif self.memslice_slice or self.is_memslice_copy:
        self.generate_memoryviewslice_setslice_code(rhs, code)
    elif self.type.is_pyobject:
        self.generate_setitem_code(rhs.py_result(), code)
    else:
        code.putln(
            "%s = %s;" % (
                self.result(), rhs.result()))

    # Dispose of whatever we evaluated above, then of the rhs.
    if generate_evaluation_code:
        self.generate_disposal_code(code)
    else:
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
    rhs.generate_disposal_code(code)
    rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
    """Emit C code for 'del self' (Python object bases only)."""
    self.generate_subexpr_evaluation_code(code)
    #if self.type.is_pyobject:
    if self.index.type.is_int:
        function = "__Pyx_DelItemInt"
        index_code = self.index.result()
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
    else:
        index_code = self.index.py_result()
        if self.base.type is dict_type:
            function = "PyDict_DelItem"
        else:
            function = "PyObject_DelItem"
    code.putln(
        "if (%s(%s, %s%s) < 0) %s" % (
            function,
            self.base.py_result(),
            index_code,
            self.extra_index_params(code),
            code.error_goto(self.pos)))
    self.generate_subexpr_disposal_code(code)
    self.free_subexpr_temps(code)
def buffer_entry(self):
    """Return a Buffer/MemoryView entry wrapper for the base expression."""
    import Buffer, MemoryView

    # unwrap a None-check wrapper to reach the real base
    base = self.base
    if self.base.is_nonecheck:
        base = base.arg

    if base.is_name:
        entry = base.entry
    else:
        # SimpleCallNode is_simple is not consistent with coerce_to_simple
        assert base.is_simple() or base.is_temp
        cname = base.result()
        # synthesize a throwaway entry so the lookup helpers have one
        entry = Symtab.Entry(cname, cname, self.base.type, self.base.pos)

    if entry.type.is_buffer:
        buffer_entry = Buffer.BufferEntry(entry)
    else:
        buffer_entry = MemoryView.MemoryViewSliceBufferEntry(entry)

    return buffer_entry
def buffer_lookup_code(self, code):
    "ndarray[1, 2, 3] and memslice[1, 2, 3]"
    # Assign indices to temps (so each index is evaluated exactly once)
    index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False)
                   for i in self.indices]

    for temp, index in zip(index_temps, self.indices):
        code.putln("%s = %s;" % (temp, index.result()))

    # Generate buffer access code using these temps
    import Buffer
    buffer_entry = self.buffer_entry()

    if buffer_entry.type.is_buffer:
        negative_indices = buffer_entry.type.negative_indices
    else:
        # memoryview slices use the buffer defaults
        negative_indices = Buffer.buffer_defaults['negative_indices']

    return buffer_entry, Buffer.put_buffer_lookup_code(
        entry=buffer_entry,
        index_signeds=[i.type.signed for i in self.indices],
        index_cnames=index_temps,
        directives=code.globalstate.directives,
        pos=self.pos, code=code,
        negative_indices=negative_indices,
        in_nogil_context=self.in_nogil_context)
def put_memoryviewslice_slice_code(self, code):
    "memslice[:]"
    buffer_entry = self.buffer_entry()
    have_gil = not self.in_nogil_context

    # Python 2/3 compatible way to advance an iterator explicitly.
    if sys.version_info < (3,):
        def next_(it):
            return it.next()
    else:
        next_ = next

    # Re-attach the analysed start/stop/step values (flattened into
    # self.indices during analysis) back onto the original slice nodes,
    # consuming self.indices in order.
    have_slices = False
    it = iter(self.indices)
    for index in self.original_indices:
        is_slice = isinstance(index, SliceNode)
        have_slices = have_slices or is_slice
        if is_slice:
            if not index.start.is_none:
                index.start = next_(it)
            if not index.stop.is_none:
                index.stop = next_(it)
            if not index.step.is_none:
                index.step = next_(it)
        else:
            next_(it)

    # every flattened index value must have been consumed
    assert not list(it)

    buffer_entry.generate_buffer_slice_code(code, self.original_indices,
                                            self.result(),
                                            have_gil=have_gil,
                                            have_slices=have_slices,
                                            directives=code.globalstate.directives)
def generate_memoryviewslice_setslice_code(self, rhs, code):
    "memslice1[...] = memslice2 or memslice1[:] = memslice2"
    import MemoryView
    MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
def generate_memoryviewslice_assign_scalar_code(self, rhs, code):
    "memslice1[...] = 0.0 or memslice1[:] = 0.0"
    import MemoryView
    MemoryView.assign_scalar(self, rhs, code)
class SliceIndexNode(ExprNode):
# 2-element slice indexing
#
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
# slice ExprNode or None constant slice object
subexprs = ['base', 'start', 'stop', 'slice']
slice = None
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, str_type, unicode_type,
list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception, e:
self.compile_time_value_error(e)
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, getting=False)
# when assigning, we must accept any Python type
if node.type.is_pyobject:
node.type = py_object_type
return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
if self.start:
self.start = self.start.coerce_to(c_int, env)
if self.stop:
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
def coerce_to(self, dst_type, env):
if ((self.base.type.is_string or self.base.type.is_cpp_string)
and dst_type in (bytes_type, str_type, unicode_type)):
if dst_type is not bytes_type and not env.directives['c_string_encoding']:
error(self.pos,
"default encoding required for conversion from '%s' to '%s'" %
(self.base.type, dst_type))
self.type = dst_type
return super(SliceIndexNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
if not self.type.is_pyobject:
error(self.pos,
"Slicing is not currently supported for '%s'." % self.type)
return
base_result = self.base.result()
result = self.result()
start_code = self.start_code()
stop_code = self.stop_code()
if self.base.type.is_string:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_char_ptr_type:
base_result = '((const char*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
result,
self.type.name.title(),
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
result,
self.type.name.title(),
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type.is_pyunicode_ptr:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
base_result = '((const Py_UNICODE*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
result,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
result,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type is unicode_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
code.putln(
"%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
result,
base_result,
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
elif self.type is py_object_type:
code.globalstate.use_utility_code(self.get_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.putln(
"%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
result,
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound']),
code.error_goto_if_null(result, self.pos)))
else:
if self.base.type is list_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyList_GetSlice'
elif self.base.type is tuple_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyTuple_GetSlice'
else:
cfunc = '__Pyx_PySequence_GetSlice'
code.putln(
"%s = %s(%s, %s, %s); %s" % (
result,
cfunc,
self.base.py_result(),
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
code.put_gotref(self.py_result())
def generate_assignment_code(self, rhs, code):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
rhs.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
else:
start_offset = ''
if self.start:
start_offset = self.start_code()
if start_offset == '0':
start_offset = ''
else:
start_offset += '+'
if rhs.type.is_array:
array_length = rhs.type.size
self.generate_slice_guard_code(code, array_length)
else:
error(self.pos,
"Slice assignments from pointers are not yet supported.")
# FIXME: fix the array size according to start/stop
array_length = self.base.type.size
for i in range(array_length):
code.putln("%s[%s%s] = %s[%d];" % (
self.base.result(), start_offset, i,
rhs.result(), i))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if not self.base.type.is_pyobject:
error(self.pos,
"Deleting slices is only supported for Python types, not '%s'." % self.type)
return
self.generate_subexpr_evaluation_code(code)
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def get_slice_config(self):
has_c_start, c_start, py_start = False, '0', 'NULL'
if self.start:
has_c_start = not self.start.type.is_pyobject
if has_c_start:
c_start = self.start.result()
else:
py_start = '&%s' % self.start.py_result()
has_c_stop, c_stop, py_stop = False, '0', 'NULL'
if self.stop:
has_c_stop = not self.stop.type.is_pyobject
if has_c_stop:
c_stop = self.stop.result()
else:
py_stop = '&%s' % self.stop.py_result()
py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
return (has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice)
def generate_slice_guard_code(self, code, target_size):
if not self.base.type.is_array:
return
slice_size = self.base.type.size
start = stop = None
if self.stop:
stop = self.stop.result()
try:
stop = int(stop)
if stop < 0:
slice_size = self.base.type.size + stop
else:
slice_size = stop
stop = None
except ValueError:
pass
if self.start:
start = self.start.result()
try:
start = int(start)
if start < 0:
start = self.base.type.size + start
slice_size -= start
start = None
except ValueError:
pass
check = None
if slice_size < 0:
if target_size > 0:
error(self.pos, "Assignment to empty slice.")
elif start is None and stop is None:
# we know the exact slice length
if target_size != slice_size:
error(self.pos, "Assignment to slice of wrong length, expected %d, got %d" % (
slice_size, target_size))
elif start is not None:
if stop is None:
stop = slice_size
check = "(%s)-(%s)" % (stop, start)
else: # stop is not None:
check = stop
if check:
code.putln("if (unlikely((%s) != %d)) {" % (check, target_size))
code.putln('PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length, expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d", (Py_ssize_t)%d, (Py_ssize_t)(%s));' % (
target_size, check))
code.putln(code.error_goto(self.pos))
code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
    def calculate_result_code(self):
        # self.result() is not used for this node, but the ExprNode
        # protocol requires the method to exist; return a placeholder
        # that would be obviously wrong if it ever leaked into output.
        return "<unused>"
class SliceNode(ExprNode):
    # start:stop:step in subscript list
    #
    # start ExprNode
    # stop ExprNode
    # step ExprNode
    subexprs = ['start', 'stop', 'step']
    type = py_object_type
    is_temp = 1
    def calculate_constant_result(self):
        # Fold a slice of constant bounds into a Python slice object.
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)
    def compile_time_value(self, denv):
        """Evaluate the slice at compile time; report failures as
        compile-time evaluation errors."""
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_types(self, env):
        """Analyse and coerce all three bounds to Python objects.

        If every bound is a literal, the slice itself becomes a cached
        module-level constant instead of a per-use temporary.
        """
        start = self.start.analyse_types(env)
        stop = self.stop.analyse_types(env)
        step = self.step.analyse_types(env)
        self.start = start.coerce_to_pyobject(env)
        self.stop = stop.coerce_to_pyobject(env)
        self.step = step.coerce_to_pyobject(env)
        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            self.is_literal = True
            self.is_temp = False
        return self
    gil_message = "Constructing Python slice object"
    def calculate_result_code(self):
        # result_code is set by generate_result_code() below.
        return self.result_code
    def generate_result_code(self, code):
        if self.is_literal:
            # literal slice => build it once into a cached module constant,
            # writing the construction code into the constants section
            self.result_code = code.get_py_const(py_object_type, 'slice_', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
        code.putln(
            "%s = PySlice_New(%s, %s, %s); %s" % (
                self.result(),
                self.start.py_result(),
                self.stop.py_result(),
                self.step.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if self.is_literal:
            # module-level constant keeps its own reference
            code.put_giveref(self.py_result())
    def __deepcopy__(self, memo):
        """
        There is a copy bug in python 2.4 for slice objects,
        so deep-copy the child nodes explicitly.
        """
        return SliceNode(
            self.pos,
            start=copy.deepcopy(self.start, memo),
            stop=copy.deepcopy(self.stop, memo),
            step=copy.deepcopy(self.step, memo),
            is_temp=self.is_temp,
            is_literal=self.is_literal,
            constant_result=self.constant_result)
class CallNode(ExprNode):
    # Abstract base class for the different kinds of call nodes.
    #
    # may_return_none  bool or None  tri-state override for may_be_none():
    #                                None means "use the default behaviour".
    # allow overriding the default 'may_be_none' behaviour
    may_return_none = None
    def infer_type(self, env):
        """Infer the result type of the call from the callee's inferred
        type, without fully analysing this node."""
        function = self.function
        func_type = function.infer_type(env)
        if isinstance(self.function, NewExprNode):
            # C++ 'new T(...)' yields a pointer to T
            return PyrexTypes.CPtrType(self.function.class_type)
        if func_type.is_ptr:
            func_type = func_type.base_type
        if func_type.is_cfunction:
            return func_type.return_type
        elif func_type is type_type:
            # calling a type object => instance of that type (special
            # cases for builtins that construct their own instance)
            if function.is_name and function.entry and function.entry.type:
                result_type = function.entry.type
                if result_type.is_extension_type:
                    return result_type
                elif result_type.is_builtin_type:
                    if function.entry.name == 'float':
                        # float() is special-cased to a C double
                        return PyrexTypes.c_double_type
                    elif function.entry.name in Builtin.types_that_construct_their_instance:
                        return result_type
        return py_object_type
    def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code merged in to handle the
        # the case of function overloading.
        return self.function.type_dependencies(env)
    def is_simple(self):
        # C function calls could be considered simple, but they may
        # have side-effects that may hit when multiple operations must
        # be effected in order, e.g. when constructing the argument
        # sequence for a function call or comparing values.
        return False
    def may_be_none(self):
        # honour an explicit override, otherwise defer to ExprNode
        if self.may_return_none is not None:
            return self.may_return_none
        return ExprNode.may_be_none(self)
    def analyse_as_type_constructor(self, env):
        """If the callee names a struct/union or C++ class type, rewrite
        this call into the corresponding construction and return True;
        otherwise fall through (returning None, i.e. falsy)."""
        type = self.function.analyse_as_type(env)
        if type and type.is_struct_or_union:
            args, kwds = self.explicit_args_kwds()
            items = []
            for arg, member in zip(args, type.scope.var_entries):
                items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
            if kwds:
                items += kwds.key_value_pairs
            self.key_value_pairs = items
            # mutate this node in place into a DictNode struct literal
            self.__class__ = DictNode
            self.analyse_types(env)    # FIXME
            self.coerce_to(type, env)
            return True
        elif type and type.is_cpp_class:
            # rewrite into a call of the C++ constructor
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            constructor = type.scope.lookup("<init>")
            self.function = RawCNameExprNode(self.function.pos, constructor.type)
            self.function.entry = constructor
            self.function.set_cname(type.declaration_code(""))
            self.analyse_c_function_call(env)
            self.type = type
            return True
    def is_lvalue(self):
        # only calls returning a C++ reference are assignable
        return self.type.is_reference
    def nogil_check(self, env):
        func_type = self.function_type()
        if func_type.is_pyobject:
            self.gil_error()
        elif not getattr(func_type, 'nogil', False):
            self.gil_error()
    gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
wrapper_call = False
has_optional_args = False
nogil = False
analysed = False
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
try:
return function(*args)
except Exception, e:
self.compile_time_value_error(e)
def analyse_as_type(self, env):
attr = self.function.as_cython_attribute()
if attr == 'pointer':
if len(self.args) != 1:
error(self.args.pos, "only one type allowed.")
else:
type = self.args[0].analyse_as_type(env)
if not type:
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
def explicit_args_kwds(self):
return self.args, None
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
if self.analysed:
return self
self.analysed = True
self.function.is_called = 1
self.function = self.function.analyse_types(env)
function = self.function
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
self.self = function.obj
function.obj = CloneNode(self.self)
func_type = self.function_type()
if func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env)
self.args = None
if func_type is Builtin.type_type and function.is_name and \
function.entry and \
function.entry.is_builtin and \
function.entry.name in Builtin.types_that_construct_their_instance:
# calling a builtin type that returns a specific object type
if function.entry.name == 'float':
# the following will come true later on in a transform
self.type = PyrexTypes.c_double_type
self.result_ctype = PyrexTypes.c_double_type
else:
self.type = Builtin.builtin_types[function.entry.name]
self.result_ctype = py_object_type
self.may_return_none = False
elif function.is_name and function.type_entry:
# We are calling an extension type constructor. As
# long as we do not support __new__(), the result type
# is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
self.analyse_c_function_call(env)
return self
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary. If the function has fused
# arguments, return the specific type.
func_type = self.function.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
def analyse_c_function_call(self, env):
if self.function.type is error_type:
self.type = error_type
return
if self.self:
args = [self.self] + self.args
else:
args = self.args
if self.function.type.is_cpp_class:
overloaded_entry = self.function.type.scope.lookup("operator()")
if overloaded_entry is None:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
elif hasattr(self.function, 'entry'):
overloaded_entry = self.function.entry
elif (isinstance(self.function, IndexNode) and
self.function.is_fused_index):
overloaded_entry = self.function.type.entry
else:
overloaded_entry = None
if overloaded_entry:
if self.function.type.is_fused:
functypes = self.function.type.get_all_specialized_function_types()
alternatives = [f.entry for f in functypes]
else:
alternatives = overloaded_entry.all_alternatives()
entry = PyrexTypes.best_match(args, alternatives, self.pos, env)
if not entry:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
entry.used = True
self.function.entry = entry
self.function.type = entry.type
func_type = self.function_type()
else:
entry = None
func_type = self.function_type()
if not func_type.is_cfunction:
error(self.pos, "Calling non-function type '%s'" % func_type)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
# Check no. of args
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(args)
if func_type.optional_arg_count and expected_nargs != actual_nargs:
self.has_optional_args = 1
self.is_temp = 1
# check 'self' argument
if entry and entry.is_cmethod and func_type.args:
formal_arg = func_type.args[0]
arg = args[0]
if formal_arg.not_none:
if self.self:
self.self = self.self.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error='PyExc_AttributeError',
format_args=[entry.name])
else:
# unbound method
arg = arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[entry.name, formal_arg.type.name])
if self.self:
if formal_arg.accept_builtin_subtypes:
arg = CMethodSelfCloneNode(self.self)
else:
arg = CloneNode(self.self)
arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
elif formal_arg.type.is_builtin_type:
# special case: unbound methods of builtins accept subtypes
arg = arg.coerce_to(formal_arg.type, env)
if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
arg.exact_builtin_type = False
args[0] = arg
# Coerce arguments
some_args_in_temps = False
for i in xrange(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
arg = arg.as_none_safe_node(
"cannot pass None into a C function argument that is declared 'not None'")
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if i == 0 and self.self is not None:
# a method's cloned "self" argument is ok
pass
elif arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
args[i] = arg
# handle additional varargs parameters
for i in xrange(max_nargs, actual_nargs):
arg = args[i]
if arg.type.is_pyobject:
arg_ctype = arg.type.default_coerced_ctype()
if arg_ctype is None:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
else:
args[i] = arg = arg.coerce_to(arg_ctype, env)
if arg.is_temp and i > 0:
some_args_in_temps = True
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in xrange(actual_nargs-1):
if i == 0 and self.self is not None:
continue # self is ok
arg = args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0 or i == 1 and self.self is not None: # skip first arg
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
self.args[:] = args
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
self.type = PyrexTypes.CPtrType(self.function.class_type)
else:
self.type = func_type.return_type
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
if self.type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
elif func_type.exception_value is not None \
or func_type.exception_check:
self.is_temp = 1
elif self.type.is_memoryviewslice:
self.is_temp = 1
# func_type.exception_check = True
# Called in 'nogil' context?
self.nogil = env.nogil
if (self.nogil and
func_type.exception_check and
func_type.exception_check != '+'):
env.use_utility_code(pyerr_occurred_withgil_utility_code)
# C++ exception handler
if func_type.exception_check == '+':
if func_type.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
def calculate_result_code(self):
return self.c_call_code()
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
return "<error>"
formal_args = func_type.args
arg_list_code = []
args = list(zip(formal_args, self.args))
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(self.args)
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
return result
def generate_result_code(self, code):
func_type = self.function_type()
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
code.globalstate.use_utility_code(self.function.entry.utility_code)
if func_type.is_pyobject:
arg_code = self.arg_tuple.py_result()
code.putln(
"%s = PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
self.function.py_result(),
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
actual_nargs = len(self.args)
expected_nargs = len(func_type.args) - func_type.optional_arg_count
self.opt_arg_struct = code.funcstate.allocate_temp(
func_type.op_arg_struct.base_type, manage_ref=True)
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
Naming.pyrex_prefix + "n",
len(self.args) - expected_nargs))
args = list(zip(func_type.args, self.args))
for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
func_type.opt_arg_cname(formal_arg.name),
actual_arg.result_as(formal_arg.type)))
exc_checks = []
if self.type.is_pyobject and self.is_temp:
exc_checks.append("!%s" % self.result())
elif self.type.is_memoryviewslice:
assert self.is_temp
exc_checks.append(self.type.error_condition(self.result()))
else:
exc_val = func_type.exception_value
exc_check = func_type.exception_check
if exc_val is not None:
exc_checks.append("%s == %s" % (self.result(), exc_val))
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
if self.result():
lhs = "%s = " % self.result()
if self.is_temp and self.type.is_pyobject:
#return_type = self.type # func_type.return_type
#print "SimpleCallNode.generate_result_code: casting", rhs, \
# "from", return_type, "to pyobject" ###
rhs = typecast(py_object_type, self.type, rhs)
else:
lhs = ""
if func_type.exception_check == '+':
if func_type.exception_value is None:
raise_py_exception = "__Pyx_CppExn2PyErr();"
elif func_type.exception_value.type.is_pyobject:
raise_py_exception = 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
func_type.exception_value.entry.cname,
func_type.exception_value.entry.cname)
else:
raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % func_type.exception_value.entry.cname
code.putln("try {")
code.putln("%s%s;" % (lhs, rhs))
code.putln("} catch(...) {")
if self.nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(raise_py_exception)
if self.nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
else:
if exc_checks:
goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
else:
goto_error = ""
code.putln("%s%s; %s" % (lhs, rhs, goto_error))
if self.type.is_pyobject and self.result():
code.put_gotref(self.py_result())
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
class InlinedDefNodeCallNode(CallNode):
    # Inline call to defnode: calls the underlying C implementation
    # of a Python def function directly, bypassing the Python call
    # protocol.
    #
    # function PyCFunctionNode
    # function_name NameNode
    # args [ExprNode]
    subexprs = ['args', 'function_name']
    is_temp = 1
    type = py_object_type
    function = None
    function_name = None
    def can_be_inlined(self):
        """Inlining only works for plain positional signatures with an
        exactly matching number of arguments (no */** parameters)."""
        func_type= self.function.def_node
        if func_type.star_arg or func_type.starstar_arg:
            return False
        if len(func_type.args) != len(self.args):
            return False
        return True
    def analyse_types(self, env):
        """Analyse and coerce the arguments to the declared parameter
        types, keeping evaluation order safe (see comments below)."""
        self.function_name = self.function_name.analyse_types(env)
        self.args = [ arg.analyse_types(env) for arg in self.args ]
        func_type = self.function.def_node
        actual_nargs = len(self.args)
        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(actual_nargs):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            self.args[i] = arg
        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0:
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        return self
    def generate_result_code(self, code):
        """Emit a direct C call to the def function's implementation."""
        arg_code = [self.function_name.py_result()]
        func_type = self.function.def_node
        for arg, proto_arg in zip(self.args, func_type.args):
            if arg.type.is_pyobject:
                arg_code.append(arg.result_as(proto_arg.type))
            else:
                arg_code.append(arg.result())
        arg_code = ', '.join(arg_code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                self.function.def_node.entry.pyfunc_cname,
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
    # Reference to a C-API function, identified directly by its C name.
    # Has no subexpressions and needs no analysis.
    subexprs = []
    def __init__(self, pos, py_name, cname, func_type, utility_code=None):
        # Forward everything to the base constructor as attributes.
        ExprNode.__init__(
            self, pos,
            name=py_name,
            cname=cname,
            type=func_type,
            utility_code=utility_code)
    def analyse_types(self, env):
        # Fully typed at construction time - nothing to analyse.
        return self
    def calculate_result_code(self):
        # The function is referenced directly by its C name.
        return self.cname
    def generate_result_code(self, code):
        utility = self.utility_code
        if utility:
            code.globalstate.use_utility_code(utility)
class PythonCapiCallNode(SimpleCallNode):
    # Python C-API Function call (only created in transforms).
    #
    # C-API functions in CPython almost never return None, so the
    # default for may_return_none is False.  Set it to True for calls
    # where that assumption does not hold (or to None to inherit the
    # generic behaviour).
    may_return_none = False
    def __init__(self, pos, function_name, func_type,
                 utility_code=None, py_name=None, **kwargs):
        return_type = func_type.return_type
        self.type = return_type
        self.result_ctype = return_type
        self.function = PythonCapiFunctionNode(
            pos, py_name, function_name, func_type,
            utility_code=utility_code)
        # Invoke the base constructor last so that explicit keyword
        # arguments can override the attributes constructed above.
        SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
    # General Python function call, including keyword,
    # * and ** arguments.
    #
    # function ExprNode
    # positional_args ExprNode Tuple of positional arguments
    # keyword_args ExprNode or None Dict of keyword arguments
    type = py_object_type
    subexprs = ['function', 'positional_args', 'keyword_args']
    nogil_check = Node.gil_error
    def compile_time_value(self, denv):
        """Evaluate the call at compile time; report any exception as a
        compile-time evaluation error."""
        function = self.function.compile_time_value(denv)
        positional_args = self.positional_args.compile_time_value(denv)
        keyword_args = self.keyword_args.compile_time_value(denv)
        try:
            return function(*positional_args, **keyword_args)
        except Exception, e:
            self.compile_time_value_error(e)
    def explicit_args_kwds(self):
        # only explicit tuple/dict literals can be unpacked statically
        if (self.keyword_args and not isinstance(self.keyword_args, DictNode) or
            not isinstance(self.positional_args, TupleNode)):
            raise CompileError(self.pos,
                'Compile-time keyword arguments must be explicit.')
        return self.positional_args.args, self.keyword_args
    def analyse_types(self, env):
        """Analyse the call; C functions called with keywords may be
        rewritten into a SimpleCallNode (see map_to_simple_call_node)."""
        if self.analyse_as_type_constructor(env):
            return self
        self.function = self.function.analyse_types(env)
        if not self.function.type.is_pyobject:
            if self.function.type.is_error:
                self.type = error_type
                return self
            if hasattr(self.function, 'entry'):
                # try to map the keyword call onto the C signature
                node = self.map_to_simple_call_node()
                if node is not None and node is not self:
                    return node.analyse_types(env)
                elif self.function.entry.as_variable:
                    # fall back to calling the Python wrapper object
                    self.function = self.function.coerce_to_pyobject(env)
                elif node is self:
                    error(self.pos,
                          "Non-trivial keyword arguments and starred "
                          "arguments not allowed in cdef functions.")
                else:
                    # error was already reported
                    pass
            else:
                self.function = self.function.coerce_to_pyobject(env)
        if self.keyword_args:
            self.keyword_args = self.keyword_args.analyse_types(env)
        self.positional_args = self.positional_args.analyse_types(env)
        self.positional_args = \
            self.positional_args.coerce_to_pyobject(env)
        function = self.function
        if function.is_name and function.type_entry:
            # We are calling an extension type constructor. As long
            # as we do not support __new__(), the result type is clear
            self.type = function.type_entry.type
            self.result_ctype = py_object_type
            self.may_return_none = False
        else:
            self.type = py_object_type
        self.is_temp = 1
        return self
    def map_to_simple_call_node(self):
        """
        Tries to map keyword arguments to declared positional arguments.
        Returns self to try a Python call, None to report an error
        or a SimpleCallNode if the mapping succeeds.
        """
        if not isinstance(self.positional_args, TupleNode):
            # has starred argument
            return self
        if not isinstance(self.keyword_args, DictNode):
            # keywords come from arbitrary expression => nothing to do here
            return self
        function = self.function
        entry = getattr(function, 'entry', None)
        if not entry:
            return self
        function_type = entry.type
        if function_type.is_ptr:
            function_type = function_type.base_type
        if not function_type.is_cfunction:
            return self
        pos_args = self.positional_args.args
        kwargs = self.keyword_args
        declared_args = function_type.args
        if entry.is_cmethod:
            declared_args = declared_args[1:] # skip 'self'
        if len(pos_args) > len(declared_args):
            error(self.pos, "function call got too many positional arguments, "
                            "expected %d, got %s" % (len(declared_args),
                                                     len(pos_args)))
            return None
        # names already consumed by positional arguments
        matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
                             if arg.name ])
        unmatched_args = declared_args[len(pos_args):]
        matched_kwargs_count = 0
        args = list(pos_args)
        # check for duplicate keywords
        seen = set(matched_args)
        has_errors = False
        for arg in kwargs.key_value_pairs:
            name = arg.key.value
            if name in seen:
                error(arg.pos, "argument '%s' passed twice" % name)
                has_errors = True
                # continue to report more errors if there are any
            seen.add(name)
        # match keywords that are passed in order
        for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
            name = arg.key.value
            if decl_arg.name == name:
                matched_args.add(name)
                matched_kwargs_count += 1
                args.append(arg.value)
            else:
                break
        # match keyword arguments that are passed out-of-order, but keep
        # the evaluation of non-simple arguments in order by moving them
        # into temps
        from Cython.Compiler.UtilNodes import EvalWithTempExprNode, LetRefNode
        temps = []
        if len(kwargs.key_value_pairs) > matched_kwargs_count:
            unmatched_args = declared_args[len(args):]
            keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
                              for i, arg in enumerate(kwargs.key_value_pairs) ])
            first_missing_keyword = None
            for decl_arg in unmatched_args:
                name = decl_arg.name
                if name not in keywords:
                    # missing keyword argument => either done or error
                    if not first_missing_keyword:
                        first_missing_keyword = name
                    continue
                elif first_missing_keyword:
                    if entry.as_variable:
                        # we might be able to convert the function to a Python
                        # object, which then allows full calling semantics
                        # with default values in gaps - currently, we only
                        # support optional arguments at the end
                        return self
                    # wasn't the last keyword => gaps are not supported
                    error(self.pos, "C function call is missing "
                                    "argument '%s'" % first_missing_keyword)
                    return None
                pos, arg = keywords[name]
                matched_args.add(name)
                matched_kwargs_count += 1
                if arg.value.is_simple():
                    args.append(arg.value)
                else:
                    # non-simple value => evaluate it at its original
                    # position via a temp reference
                    temp = LetRefNode(arg.value)
                    assert temp.is_simple()
                    args.append(temp)
                    temps.append((pos, temp))
            if temps:
                # may have to move preceding non-simple args into temps
                final_args = []
                new_temps = []
                first_temp_arg = temps[0][-1]
                for arg_value in args:
                    if arg_value is first_temp_arg:
                        break # done
                    if arg_value.is_simple():
                        final_args.append(arg_value)
                    else:
                        temp = LetRefNode(arg_value)
                        new_temps.append(temp)
                        final_args.append(temp)
                if new_temps:
                    args = final_args
                temps = new_temps + [ arg for i,arg in sorted(temps) ]
        # check for unexpected keywords
        for arg in kwargs.key_value_pairs:
            name = arg.key.value
            if name not in matched_args:
                has_errors = True
                error(arg.pos,
                      "C function got unexpected keyword argument '%s'" %
                      name)
        if has_errors:
            # error was reported already
            return None
        # all keywords mapped to positional arguments
        # if we are missing arguments, SimpleCallNode will figure it out
        node = SimpleCallNode(self.pos, function=function, args=args)
        for temp in temps[::-1]:
            # wrap the call so the temps are evaluated in source order
            node = EvalWithTempExprNode(temp, node)
        return node
    def generate_result_code(self, code):
        """Emit a PyObject_Call() with positional tuple and keyword dict."""
        if self.type.is_error: return
        if self.keyword_args:
            kwargs = self.keyword_args.py_result()
        else:
            kwargs = 'NULL'
        code.putln(
            "%s = PyObject_Call(%s, %s, %s); %s" % (
                self.result(),
                self.function.py_result(),
                self.positional_args.py_result(),
                kwargs,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
# Convert argument to tuple. Used for normalising
# the * argument of a function call.
#
# arg ExprNode
subexprs = ['arg']
def calculate_constant_result(self):
self.constant_result = tuple(self.arg.constant_result)
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
return tuple(arg)
except Exception, e:
self.compile_time_value_error(e)
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.type = tuple_type
self.is_temp = 1
return self
def may_be_none(self):
return False
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
code.putln(
"%s = PySequence_Tuple(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AttributeNode(ExprNode):
# obj.attribute
#
# obj ExprNode
# attribute string
# needs_none_check boolean Used if obj is an extension type.
# If set to True, it is known that the type is not None.
#
# Used internally:
#
# is_py_attr boolean Is a Python getattr operation
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
is_attribute = 1
subexprs = ['obj']
type = PyrexTypes.error_type
entry = None
is_called = 0
needs_none_check = True
is_memslice_transpose = False
def as_cython_attribute(self):
if (isinstance(self.obj, NameNode) and
self.obj.is_cython_module and not
self.attribute == u"parallel"):
return self.attribute
cy = self.obj.as_cython_attribute()
if cy:
return "%s.%s" % (cy, self.attribute)
return None
    def coerce_to(self, dst_type, env):
        """Coerce to dst_type; cpdef functions get special handling.

        If coercing to a generic pyobject and this is a cpdef function
        we can create the corresponding attribute (via the function's
        Python wrapper entry) instead of a generic coercion.
        """
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction and entry.as_variable:
                # must be a cpdef function
                self.is_temp = 1
                self.entry = entry.as_variable
                self.analyse_as_python_attribute(env)
                return self
        return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
return
self.constant_result = getattr(self.obj.constant_result, attr)
    def compile_time_value(self, denv):
        """Evaluate the attribute access at compile time.

        Dunder attributes are rejected outright; any runtime failure is
        reported as a compile-time evaluation error.
        """
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            error(self.pos,
                  "Invalid attribute name '%s' in compile-time expression" % attr)
            return None
        obj = self.obj.compile_time_value(denv)
        try:
            return getattr(obj, attr)
        except Exception, e:
            self.compile_time_value_error(e)
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
    def infer_type(self, env):
        """Infer the type of the attribute access without analysing.

        Mirrors the lookup order of analyse_types(): cimported module
        attribute first, then unbound C method, then ordinary attribute.
        """
        # FIXME: this is way too redundant with analyse_types()
        node = self.analyse_as_cimported_attribute_node(env, target=False)
        if node is not None:
            return node.entry.type
        node = self.analyse_as_unbound_cmethod_node(env)
        if node is not None:
            return node.entry.type
        obj_type = self.obj.infer_type(env)
        self.analyse_attribute(env, obj_type=obj_type)
        if obj_type.is_builtin_type and self.type.is_cfunction:
            # special case: C-API replacements for C methods of
            # builtin types cannot be inferred as C functions as
            # that would prevent their use as bound methods
            return py_object_type
        return self.type
    def analyse_target_declaration(self, env):
        # Nothing to declare: assigning to an attribute never creates
        # a new name in the current scope.
        pass
    def analyse_target_types(self, env):
        """Analyse in assignment-target context and validate that the
        attribute is actually assignable."""
        node = self.analyse_types(env, target = 1)
        if node.type.is_const:
            error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
        if not node.is_lvalue():
            # NOTE(review): the message reports self.type while the check
            # uses node.is_lvalue(); presumably node is self here - confirm
            # whether the two can ever differ.
            error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
        return node
    def analyse_types(self, env, target = 0):
        """Analyse the attribute access, trying in order: cimported
        module attribute, unbound C method (rvalue context only), and
        finally an ordinary (Python or C struct) attribute."""
        self.initialized_check = env.directives['initializedcheck']
        node = self.analyse_as_cimported_attribute_node(env, target)
        if node is None and not target:
            node = self.analyse_as_unbound_cmethod_node(env)
        if node is None:
            # the ordinary-attribute fallback always yields a node
            node = self.analyse_as_ordinary_attribute_node(env, target)
            assert node is not None
        if node.entry:
            node.entry.used = True
        if node.is_attribute:
            # insert a None check on the object where required
            node.wrap_obj_in_nonecheck(env)
        return node
def analyse_as_cimported_attribute_node(self, env, target):
    # Try to interpret this as a reference to an imported
    # C const, type, var or function.  If successful, returns
    # a corresponding NameNode; otherwise returns None.
    # (NOTE(review): older comments claimed it mutates self and
    # returns 1/0 — the code below returns a node or None.)
    module_scope = self.obj.analyse_as_module(env)
    if module_scope:
        entry = module_scope.lookup_here(self.attribute)
        if entry and (
                entry.is_cglobal or entry.is_cfunction
                or entry.is_type or entry.is_const):
            return self.as_name_node(env, entry, target)
    return None
def analyse_as_unbound_cmethod_node(self, env):
    # Try to interpret this as a reference to an unbound
    # C method of an extension type or builtin type.  If successful,
    # creates a corresponding NameNode and returns it, otherwise
    # returns None.
    type = self.obj.analyse_as_extension_type(env)
    if type:
        entry = type.scope.lookup_here(self.attribute)
        if entry and entry.is_cmethod:
            if type.is_builtin_type:
                if not self.is_called:
                    # must handle this as Python object
                    return None
                ubcm_entry = entry
            else:
                # Create a temporary entry describing the C method
                # as an ordinary function, addressed through the vtable.
                ubcm_entry = Symtab.Entry(entry.name,
                                          "%s->%s" % (type.vtabptr_cname, entry.cname),
                                          entry.type)
                ubcm_entry.is_cfunction = 1
                ubcm_entry.func_cname = entry.func_cname
                ubcm_entry.is_unbound_cmethod = 1
            return self.as_name_node(env, ubcm_entry, target=False)
    return None
def analyse_as_type(self, env):
    # "mod.Name": look the attribute up as a type in the module scope.
    mod = self.obj.analyse_as_module(env)
    if mod:
        return mod.lookup_type(self.attribute)
    # Nested type reference ("SomeType.attr"), except on string literals.
    if self.obj.is_string_literal:
        return None
    base = self.obj.analyse_as_type(env)
    if base:
        scope = getattr(base, 'scope', None)
        if scope is not None:
            return scope.lookup_type(self.attribute)
    return None
def analyse_as_extension_type(self, env):
    # Try to interpret this as a reference to an extension type
    # in a cimported module.  Returns the extension type, or None.
    mod = self.obj.analyse_as_module(env)
    if not mod:
        return None
    entry = mod.lookup_here(self.attribute)
    if not (entry and entry.is_type):
        return None
    found = entry.type
    if found.is_extension_type or found.is_builtin_type:
        return found
    return None
def analyse_as_module(self, env):
    # "pkg.mod" where the base is itself a cimported module: return
    # the scope of the referenced submodule, or None.
    outer = self.obj.analyse_as_module(env)
    if not outer:
        return None
    entry = outer.lookup_here(self.attribute)
    if entry and entry.as_module:
        return entry.as_module
    return None
def as_name_node(self, env, entry, target):
    # Create a corresponding NameNode from this node and complete the
    # analyse_types phase for it (target or rvalue context).
    node = NameNode.from_node(self, name=self.attribute, entry=entry)
    if target:
        node = node.analyse_target_types(env)
    else:
        node = node.analyse_rvalue_entry(env)
    node.entry.used = 1
    return node
def analyse_as_ordinary_attribute_node(self, env, target):
    # Fallback interpretation: a plain attribute of an analysed object
    # (either a Python attribute or a C struct/extension-type member).
    self.obj = self.obj.analyse_types(env)
    self.analyse_attribute(env)
    if self.entry and self.entry.is_cmethod and not self.is_called:
#            error(self.pos, "C method can only be called")
        pass
    ## Reference to C array turns into pointer to first element.
    #while self.type.is_array:
    #    self.type = self.type.element_ptr_type()
    if self.is_py_attr:
        if not target:
            self.is_temp = 1
            self.result_ctype = py_object_type
    elif target and self.obj.type.is_builtin_type:
        error(self.pos, "Assignment to an immutable object field")
    #elif self.type.is_memoryviewslice and not target:
    #    self.is_temp = True
    return self
def analyse_attribute(self, env, obj_type = None):
    # Look up attribute and set self.type and self.member.
    # When 'obj_type' is passed in, we are in type-inference mode and
    # must not mutate self.obj (immutable_obj below).
    immutable_obj = obj_type is not None # used during type inference
    self.is_py_attr = 0
    self.member = self.attribute
    if obj_type is None:
        if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
            self.obj = self.obj.coerce_to_pyobject(env)
        obj_type = self.obj.type
    else:
        if obj_type.is_string or obj_type.is_pyunicode_ptr:
            obj_type = py_object_type
    # Select the C member access operator for the generated code.
    if obj_type.is_ptr or obj_type.is_array:
        obj_type = obj_type.base_type
        self.op = "->"
    elif obj_type.is_extension_type or obj_type.is_builtin_type:
        self.op = "->"
    else:
        self.op = "."
    if obj_type.has_attributes:
        if obj_type.attributes_known():
            if (obj_type.is_memoryviewslice and not
                    obj_type.scope.lookup_here(self.attribute)):
                if self.attribute == 'T':
                    # Memoryview transpose: same slice type, new temp.
                    self.is_memslice_transpose = True
                    self.is_temp = True
                    self.use_managed_ref = True
                    self.type = self.obj.type
                    return
                else:
                    obj_type.declare_attribute(self.attribute, env, self.pos)
            entry = obj_type.scope.lookup_here(self.attribute)
            if entry and entry.is_member:
                entry = None
        else:
            error(self.pos,
                  "Cannot select attribute of incomplete type '%s'"
                  % obj_type)
            self.type = PyrexTypes.error_type
            return
        self.entry = entry
        if entry:
            if obj_type.is_extension_type and entry.name == "__weakref__":
                error(self.pos, "Illegal use of special attribute __weakref__")
            # def methods need the normal attribute lookup
            # because they do not have struct entries
            # fused function go through assignment synthesis
            # (foo = pycfunction(foo_func_obj)) and need to go through
            # regular Python lookup as well
            if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
                self.type = entry.type
                self.member = entry.cname
                return
            else:
                # If it's not a variable or C method, it must be a Python
                # method of an extension type, so we treat it like a Python
                # attribute.
                pass
    # If we get here, the base object is not a struct/union/extension
    # type, or it is an extension type and the attribute is either not
    # declared or is declared as a Python method. Treat it as a Python
    # attribute reference.
    self.analyse_as_python_attribute(env, obj_type, immutable_obj)
def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
    # Treat this as a runtime Python attribute lookup; the result is
    # always a generic Python object.
    if obj_type is None:
        obj_type = self.obj.type
    # mangle private '__*' Python attributes used inside of a class
    self.attribute = env.mangle_class_private_name(self.attribute)
    self.member = self.attribute
    self.type = py_object_type
    self.is_py_attr = 1
    if not obj_type.is_pyobject and not obj_type.is_error:
        if obj_type.can_coerce_to_pyobject(env):
            if not immutable_obj:
                self.obj = self.obj.coerce_to_pyobject(env)
        elif (obj_type.is_cfunction and self.obj.is_name
              and self.obj.entry.as_variable
              and self.obj.entry.as_variable.type.is_pyobject):
            # might be an optimised builtin function => unpack it
            if not immutable_obj:
                self.obj = self.obj.coerce_to_pyobject(env)
        else:
            error(self.pos,
                  "Object of type '%s' has no attribute '%s'" %
                  (obj_type, self.attribute))
def wrap_obj_in_nonecheck(self, env):
    # Under the 'nonecheck' directive, wrap the base object in a node
    # that raises AttributeError if it is None at runtime.
    if not env.directives['nonecheck']:
        return
    msg = None
    format_args = ()
    if (self.obj.type.is_extension_type and self.needs_none_check and not
            self.is_py_attr):
        msg = "'NoneType' object has no attribute '%s'"
        format_args = (self.attribute,)
    elif self.obj.type.is_memoryviewslice:
        if self.is_memslice_transpose:
            msg = "Cannot transpose None memoryview slice"
        else:
            entry = self.obj.type.scope.lookup_here(self.attribute)
            if entry:
                # copy/is_c_contig/shape/strides etc
                msg = "Cannot access '%s' attribute of None memoryview slice"
                format_args = (entry.name,)
    if msg:
        self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
                                              format_args=format_args)
def nogil_check(self, env):
    # Python attribute access needs the GIL; memoryview attributes may
    # additionally need an initialization check under 'initializedcheck'.
    if self.is_py_attr:
        self.gil_error()
    elif self.type.is_memoryviewslice:
        import MemoryView
        MemoryView.err_if_nogil_initialized_check(self.pos, env, 'attribute')

gil_message = "Accessing Python attribute"
def is_simple(self):
    # Simple when the value sits in a temp or the base expression is
    # itself simple; without a base object, defer to NameNode.
    if not self.obj:
        return NameNode.is_simple(self)
    return self.result_in_temp() or self.obj.is_simple()
def is_lvalue(self):
    # C arrays cannot be assigned to as a whole; any other attribute
    # with a base object is assignable.  Defer to NameNode otherwise.
    if not self.obj:
        return NameNode.is_lvalue(self)
    return not self.type.is_array
def is_ephemeral(self):
    # The attribute value is only as long-lived as its base object.
    if not self.obj:
        return NameNode.is_ephemeral(self)
    return self.obj.is_ephemeral()
def calculate_result_code(self):
    # Build the C expression string for this attribute access.
    #print "AttributeNode.calculate_result_code:", self.member ###
    #print "...obj node =", self.obj, "code", self.obj.result() ###
    #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
    obj = self.obj
    obj_code = obj.result_as(obj.type)
    #print "...obj_code =", obj_code ###
    if self.entry and self.entry.is_cmethod:
        if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
            if self.entry.final_func_cname:
                # final methods are called directly, bypassing the vtable
                return self.entry.final_func_cname
            if self.type.from_fused:
                # If the attribute was specialized through indexing, make
                # sure to get the right fused name, as our entry was
                # replaced by our parent index node
                # (AnalyseExpressionsTransform)
                self.member = self.entry.cname
            return "((struct %s *)%s%s%s)->%s" % (
                obj.type.vtabstruct_cname, obj_code, self.op,
                obj.type.vtabslot_cname, self.member)
        elif self.result_is_used:
            return self.member
        # Generating no code at all for unused access to optimised builtin
        # methods fixes the problem that some optimisations only exist as
        # macros, i.e. there is no function pointer to them, so we would
        # generate invalid C code here.
        return
    elif obj.type.is_complex:
        return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
    else:
        if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
            # accessing a field of a builtin type, need to cast better than result_as() does
            obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
        return "%s%s%s" % (obj_code, self.op, self.member)
def generate_result_code(self, code):
    # Emit the C code that evaluates this attribute access.
    if self.is_py_attr:
        # runtime Python attribute lookup
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
        code.putln(
            '%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s' % (
                self.result(),
                self.obj.py_result(),
                code.intern_identifier(self.attribute),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    elif self.type.is_memoryviewslice:
        if self.is_memslice_transpose:
            # transpose the slice
            for access, packing in self.type.axes:
                if access == 'ptr':
                    error(self.pos, "Transposing not supported for slices "
                                    "with indirect dimensions")
                    return
            code.putln("%s = %s;" % (self.result(), self.obj.result()))
            if self.obj.is_name or (self.obj.is_attribute and
                                    self.obj.is_memslice_transpose):
                code.put_incref_memoryviewslice(self.result(), have_gil=True)
            T = "__pyx_memslice_transpose(&%s) == 0"
            code.putln(code.error_goto_if(T % self.result(), self.pos))
        elif self.initialized_check:
            code.putln(
                'if (unlikely(!%s.memview)) {'
                'PyErr_SetString(PyExc_AttributeError,'
                '"Memoryview is not initialized");'
                '%s'
                '}' % (self.result(), code.error_goto(self.pos)))
    else:
        # result_code contains what is needed, but we may need to insert
        # a check and raise an exception
        if self.obj.type.is_extension_type:
            pass
        elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
            # C method implemented as function call with utility code
            code.globalstate.use_utility_code(self.entry.utility_code)
def generate_disposal_code(self, code):
    if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
        # mirror condition for putting the memview incref here:
        if self.obj.is_name or (self.obj.is_attribute and
                                self.obj.is_memslice_transpose):
            code.put_xdecref_memoryviewslice(
                self.result(), have_gil=True)
    else:
        ExprNode.generate_disposal_code(self, code)
def generate_assignment_code(self, rhs, code):
    # Emit C code that assigns 'rhs' to this attribute, handling the
    # Python-attribute, C-complex and plain C-member cases separately.
    self.obj.generate_evaluation_code(code)
    if self.is_py_attr:
        # runtime setattr on a Python object
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
        code.put_error_if_neg(self.pos,
            '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
                self.obj.py_result(),
                code.intern_identifier(self.attribute),
                rhs.py_result()))
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
    elif self.obj.type.is_complex:
        code.putln("__Pyx_SET_C%s(%s, %s);" % (
            self.member.upper(),
            self.obj.result_as(self.obj.type),
            rhs.result_as(self.ctype())))
    else:
        select_code = self.result()
        if self.type.is_pyobject and self.use_managed_ref:
            # hand the reference over to the struct slot, dropping the
            # old value's reference first
            rhs.make_owned_reference(code)
            code.put_giveref(rhs.py_result())
            code.put_gotref(select_code)
            code.put_decref(select_code, self.ctype())
        elif self.type.is_memoryviewslice:
            import MemoryView
            MemoryView.put_assign_to_memviewslice(
                select_code, rhs, rhs.result(), self.type, code)
        if not self.type.is_memoryviewslice:
            code.putln(
                "%s = %s;" % (
                    select_code,
                    rhs.result_as(self.ctype())))
                    #rhs.result()))
        rhs.generate_post_assignment_code(code)
        rhs.free_temps(code)
    self.obj.generate_disposal_code(code)
    self.obj.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
    # Emit C code for "del obj.attr".  Only Python attributes (and
    # properties providing __del__) can be deleted.
    self.obj.generate_evaluation_code(code)
    if self.is_py_attr or (self.entry.scope.is_property_scope
                           and u'__del__' in self.entry.scope.entries):
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
        code.put_error_if_neg(self.pos,
            '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                self.obj.py_result(),
                code.intern_identifier(self.attribute)))
    else:
        error(self.pos, "Cannot delete C attribute of extension type")
    self.obj.generate_disposal_code(code)
    self.obj.free_temps(code)
def annotate(self, code):
    # Mark the source range as a Python or C attribute access for the
    # annotated-HTML output.
    if self.is_py_attr:
        kind, label = 'py_attr', 'python attribute'
    else:
        kind, label = 'c_attr', 'c attribute'
    code.annotate(self.pos, AnnotationItem(kind, label, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredTargetNode(ExprNode):
    # A starred expression like "*a".
    #
    # Only allowed inside sequence assignment targets, e.g.
    #
    #   a, *b = (1,2,3,4)  =>  a = 1 ; b = [2,3,4]
    #
    # It is removed during type analysis; anywhere else it triggers
    # an error.
    #
    # target          ExprNode    the wrapped target expression

    subexprs = ['target']
    is_starred = 1
    type = py_object_type
    is_temp = 1

    def __init__(self, pos, target):
        ExprNode.__init__(self, pos)
        self.target = target

    def analyse_declarations(self, env):
        # Reaching this outside of an assignment target is an error.
        error(self.pos, "can use starred expression only as assignment target")
        self.target.analyse_declarations(env)

    def analyse_types(self, env):
        # Reaching this outside of an assignment target is an error.
        error(self.pos, "can use starred expression only as assignment target")
        self.target = self.target.analyse_types(env)
        self.type = self.target.type
        return self

    def analyse_target_declaration(self, env):
        self.target.analyse_target_declaration(env)

    def analyse_target_types(self, env):
        node = self.target.analyse_target_types(env)
        self.target = node
        self.type = node.type
        return self

    def calculate_result_code(self):
        return ""

    def generate_result_code(self, code):
        pass
class SequenceNode(ExprNode):
    # Base class for list and tuple constructor nodes.
    # Contains common code for performing sequence unpacking.
    #
    # args                    [ExprNode]
    # unpacked_items          [ExprNode] or None
    # coerced_unpacked_items  [ExprNode] or None
    # mult_factor             ExprNode     the integer number of content repetitions ([1,2]*3)

    subexprs = ['args', 'mult_factor']

    is_sequence_constructor = 1
    unpacked_items = None
    mult_factor = None

    def compile_time_value_list(self, denv):
        # Evaluate all element expressions at compile time.
        return [arg.compile_time_value(denv) for arg in self.args]

    def replace_starred_target_node(self):
        # replace a starred node in the targets by the contained expression
        self.starred_assignment = False
        args = []
        for arg in self.args:
            if arg.is_starred:
                if self.starred_assignment:
                    error(arg.pos, "more than 1 starred expression in assignment")
                self.starred_assignment = True
                arg = arg.target
                arg.is_starred = True
            args.append(arg)
        self.args = args

    def analyse_target_declaration(self, env):
        self.replace_starred_target_node()
        for arg in self.args:
            arg.analyse_target_declaration(env)

    def analyse_types(self, env, skip_children=False):
        # All items become Python objects; the repetition factor stays a
        # C integer when it can.
        for i in range(len(self.args)):
            arg = self.args[i]
            if not skip_children: arg = arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        if self.mult_factor:
            self.mult_factor = self.mult_factor.analyse_types(env)
            if not self.mult_factor.type.is_int:
                self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
        self.is_temp = 1
        # not setting self.type here, subtypes do this
        return self

    def may_be_none(self):
        # A sequence constructor always produces a new object, never None.
        return False

    def analyse_target_types(self, env):
        # Prepare unpacking: one temp per target plus a coerced view of it.
        if self.mult_factor:
            error(self.pos, "can't assign to multiplied sequence")
        self.unpacked_items = []
        self.coerced_unpacked_items = []
        self.any_coerced_items = False
        for i, arg in enumerate(self.args):
            arg = self.args[i] = arg.analyse_target_types(env)
            if arg.is_starred:
                # the starred target collects the remaining items as a list
                if not arg.type.assignable_from(Builtin.list_type):
                    error(arg.pos,
                          "starred target must have Python object (list) type")
                if arg.type is py_object_type:
                    arg.type = Builtin.list_type
            unpacked_item = PyTempNode(self.pos, env)
            coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
            if unpacked_item is not coerced_unpacked_item:
                self.any_coerced_items = True
            self.unpacked_items.append(unpacked_item)
            self.coerced_unpacked_items.append(coerced_unpacked_item)
        self.type = py_object_type
        return self

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_sequence_packing_code(self, code, target=None, plain=False):
        # Emit C code that builds the tuple/list into 'target'; 'plain'
        # suppresses application of the repetition factor.
        if target is None:
            target = self.result()
        size_factor = c_mult = ''
        mult_factor = None

        if self.mult_factor and not plain:
            mult_factor = self.mult_factor
            if mult_factor.type.is_int:
                c_mult = mult_factor.result()
                if isinstance(mult_factor.constant_result, (int,long)) \
                       and mult_factor.constant_result > 0:
                    size_factor = ' * %s' % mult_factor.constant_result
                else:
                    # negative factors yield an empty sequence
                    size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)

        if self.type is Builtin.tuple_type and self.is_literal and not c_mult:
            # use PyTuple_Pack() to avoid generating huge amounts of one-time code
            code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
                target,
                len(self.args),
                ', '.join([ arg.py_result() for arg in self.args ]),
                code.error_goto_if_null(target, self.pos)))
            code.put_gotref(target)
        else:
            # build the tuple/list step by step, potentially multiplying it as we go
            if self.type is Builtin.list_type:
                create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
            elif self.type is Builtin.tuple_type:
                create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
            else:
                raise InternalError("sequence packing for unexpected type %s" % self.type)
            arg_count = len(self.args)
            code.putln("%s = %s(%s%s); %s" % (
                target, create_func, arg_count, size_factor,
                code.error_goto_if_null(target, self.pos)))
            code.put_gotref(target)

            if c_mult:
                # FIXME: can't use a temp variable here as the code may
                # end up in the constant building function.  Temps
                # currently don't work there.
                #counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
                counter = Naming.quick_temp_cname
                code.putln('{ Py_ssize_t %s;' % counter)
                if arg_count == 1:
                    offset = counter
                else:
                    offset = '%s * %s' % (counter, arg_count)
                code.putln('for (%s=0; %s < %s; %s++) {' % (
                    counter, counter, c_mult, counter
                    ))
            else:
                offset = ''

            # SET_ITEM steals a reference, so own one per stored item
            for i in xrange(arg_count):
                arg = self.args[i]
                if c_mult or not arg.result_in_temp():
                    code.put_incref(arg.result(), arg.ctype())
                code.putln("%s(%s, %s, %s);" % (
                    set_item_func,
                    target,
                    (offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
                    arg.py_result()))
                code.put_giveref(arg.py_result())

            if c_mult:
                code.putln('}')
                #code.funcstate.release_temp(counter)
                code.putln('}')

        if mult_factor is not None and mult_factor.type.is_pyobject:
            # Python-object repetition factor: multiply the finished sequence.
            code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
                Naming.quick_temp_cname, target, mult_factor.py_result(),
                code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
                ))
            code.put_gotref(Naming.quick_temp_cname)
            code.put_decref(target, py_object_type)
            code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
            code.putln('}')

    def generate_subexpr_disposal_code(self, code):
        if self.mult_factor and self.mult_factor.type.is_int:
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        else:
            # We call generate_post_assignment_code here instead
            # of generate_disposal_code, because values were stored
            # in the tuple using a reference-stealing operation.
            for arg in self.args:
                arg.generate_post_assignment_code(code)
            # Should NOT call free_temps -- this is invoked by the default
            # generate_evaluation_code which will do that.
            if self.mult_factor:
                self.mult_factor.generate_disposal_code(code)

    def generate_assignment_code(self, rhs, code):
        # Sequence unpacking assignment: "a, b(, *c) = rhs".
        if self.starred_assignment:
            self.generate_starred_assignment_code(rhs, code)
        else:
            self.generate_parallel_assignment_code(rhs, code)

        for item in self.unpacked_items:
            item.release(code)
        rhs.free_temps(code)

    # C function pointer type of a tp_iternext slot.
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))

    def generate_parallel_assignment_code(self, rhs, code):
        # Need to work around the fact that generate_evaluation_code
        # allocates the temps in a rather hacky way -- the assignment
        # is evaluated twice, within each if-block.
        for item in self.unpacked_items:
            item.allocate(code)
        special_unpack = (rhs.type is py_object_type
                          or rhs.type in (tuple_type, list_type)
                          or not rhs.type.is_builtin_type)
        long_enough_for_a_loop = len(self.unpacked_items) > 3

        if special_unpack:
            self.generate_special_parallel_unpacking_code(
                code, rhs, use_loop=long_enough_for_a_loop)
        code.putln("{")
        self.generate_generic_parallel_unpacking_code(
            code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
        code.putln("}")

        for value_node in self.coerced_unpacked_items:
            value_node.generate_evaluation_code(code)
        for i in range(len(self.args)):
            self.args[i].generate_assignment_code(
                self.coerced_unpacked_items[i], code)

    def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
        # Fast path: rhs is an exact tuple/list of exactly the right size.
        tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
        list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
        sequence_type_test = '1'
        if rhs.type is list_type:
            sequence_types = ['List']
            if rhs.may_be_none():
                sequence_type_test = list_check
        elif rhs.type is tuple_type:
            sequence_types = ['Tuple']
            if rhs.may_be_none():
                sequence_type_test = tuple_check
        else:
            sequence_types = ['Tuple', 'List']
            sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)

        code.putln("if (%s) {" % sequence_type_test)
        code.putln("PyObject* sequence = %s;" % rhs.py_result())

        # list/tuple => check size
        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        code.putln("Py_ssize_t size = Py_SIZE(sequence);")
        code.putln("#else")
        code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
        code.putln("#endif")
        code.putln("if (unlikely(size != %d)) {" % len(self.args))
        code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
        code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
            len(self.args), len(self.args)))
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
        code.putln(code.error_goto(self.pos))
        code.putln("}")

        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        # unpack items from list/tuple in unrolled loop (can't fail)
        if len(sequence_types) == 2:
            code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
        for i, item in enumerate(self.unpacked_items):
            code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
                item.result(), sequence_types[0], i))
        if len(sequence_types) == 2:
            code.putln("} else {")
            for i, item in enumerate(self.unpacked_items):
                code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
                    item.result(), sequence_types[1], i))
            code.putln("}")
        for item in self.unpacked_items:
            code.put_incref(item.result(), item.ctype())
        code.putln("#else")
        # in non-CPython, use the PySequence protocol (which can fail)
        if not use_loop:
            for i, item in enumerate(self.unpacked_items):
                code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
                    item.result(), i,
                    code.error_goto_if_null(item.result(), self.pos)))
                code.put_gotref(item.result())
        else:
            code.putln("Py_ssize_t i;")
            code.putln("PyObject** temps[%s] = {%s};" % (
                len(self.unpacked_items),
                ','.join(['&%s' % item.result() for item in self.unpacked_items])))
            code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
            code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
                code.error_goto_if_null('item', self.pos)))
            code.put_gotref('item')
            code.putln("*(temps[i]) = item;")
            code.putln("}")
        code.putln("#endif")
        rhs.generate_disposal_code(code)

        if rhs.type is tuple_type:
            # if not a tuple: None => save some code by generating the error directly
            code.putln("} else if (1) {")
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
            code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
        code.putln("} else")

    def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
        # Generic iterator-protocol unpacking, used as the fallback/else
        # branch after the special-cased tuple/list path.
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
        code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!

        if use_loop:
            code.putln("PyObject** temps[%s] = {%s};" % (
                len(self.unpacked_items),
                ','.join(['&%s' % item.result() for item in unpacked_items])))

        iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln(
            "%s = PyObject_GetIter(%s); %s" % (
                iterator_temp,
                rhs.py_result(),
                code.error_goto_if_null(iterator_temp, self.pos)))
        code.put_gotref(iterator_temp)
        rhs.generate_disposal_code(code)

        # cache the tp_iternext slot to avoid repeated lookups
        iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
        code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
            iternext_func, iterator_temp))

        unpacking_error_label = code.new_label('unpacking_failed')
        unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
        if use_loop:
            code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
            code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
            code.put_goto(unpacking_error_label)
            code.put_gotref("item")
            code.putln("*(temps[index]) = item;")
            code.putln("}")
        else:
            for i, item in enumerate(unpacked_items):
                code.put(
                    "index = %d; %s = %s; if (unlikely(!%s)) " % (
                        i,
                        item.result(),
                        unpack_code,
                        item.result()))
                code.put_goto(unpacking_error_label)
                code.put_gotref(item.py_result())

        if terminate:
            # make sure the iterator is exhausted after the last item
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
                unpack_code,
                len(unpacked_items)))
            code.putln("%s = NULL;" % iternext_func)
            code.put_decref_clear(iterator_temp, py_object_type)

        unpacking_done_label = code.new_label('unpacking_done')
        code.put_goto(unpacking_done_label)

        code.put_label(unpacking_error_label)
        # report the number of successfully unpacked values on failure
        code.put_decref_clear(iterator_temp, py_object_type)
        code.putln("%s = NULL;" % iternext_func)
        code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
        code.putln(code.error_goto(self.pos))
        code.put_label(unpacking_done_label)

        code.funcstate.release_temp(iternext_func)
        if terminate:
            code.funcstate.release_temp(iterator_temp)
            iterator_temp = None
        return iterator_temp

    def generate_starred_assignment_code(self, rhs, code):
        # "a, *b, c = rhs": unpack the fixed left-hand items, collect
        # the rest into a list, then move the fixed right-hand items
        # back out of that list.
        for i, arg in enumerate(self.args):
            if arg.is_starred:
                starred_target = self.unpacked_items[i]
                unpacked_fixed_items_left = self.unpacked_items[:i]
                unpacked_fixed_items_right = self.unpacked_items[i+1:]
                break
        else:
            assert False

        iterator_temp = None
        if unpacked_fixed_items_left:
            for item in unpacked_fixed_items_left:
                item.allocate(code)
            code.putln('{')
            iterator_temp = self.generate_generic_parallel_unpacking_code(
                code, rhs, unpacked_fixed_items_left,
                use_loop=True, terminate=False)
            for i, item in enumerate(unpacked_fixed_items_left):
                value_node = self.coerced_unpacked_items[i]
                value_node.generate_evaluation_code(code)
            code.putln('}')

        starred_target.allocate(code)
        target_list = starred_target.result()
        code.putln("%s = PySequence_List(%s); %s" % (
            target_list,
            iterator_temp or rhs.py_result(),
            code.error_goto_if_null(target_list, self.pos)))
        code.put_gotref(target_list)
        if iterator_temp:
            code.put_decref_clear(iterator_temp, py_object_type)
            code.funcstate.release_temp(iterator_temp)
        else:
            rhs.generate_disposal_code(code)

        if unpacked_fixed_items_right:
            code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
            length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
            code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
            code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
                len(unpacked_fixed_items_left), length_temp,
                code.error_goto(self.pos)))
            code.putln('}')

            # pop the trailing fixed items off the end of the list
            for item in unpacked_fixed_items_right[::-1]:
                item.allocate(code)
            for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
                                                        self.coerced_unpacked_items[::-1])):
                code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
                code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                # resize the list the hard way
                code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
                code.putln('#else')
                code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                code.putln('#endif')
                code.put_gotref(item.py_result())
                coerced_arg.generate_evaluation_code(code)

            code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
            sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
                sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
                code.error_goto_if_null(sublist_temp, self.pos)))
            code.put_gotref(sublist_temp)
            code.funcstate.release_temp(length_temp)
            code.put_decref(target_list, py_object_type)
            code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
            code.putln('#else')
            code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
            code.funcstate.release_temp(sublist_temp)
            code.putln('#endif')

        for i, arg in enumerate(self.args):
            arg.generate_assignment_code(self.coerced_unpacked_items[i], code)

    def annotate(self, code):
        for arg in self.args:
            arg.annotate(code)
        if self.unpacked_items:
            for arg in self.unpacked_items:
                arg.annotate(code)
            for arg in self.coerced_unpacked_items:
                arg.annotate(code)
class TupleNode(SequenceNode):
    # Tuple constructor.

    type = tuple_type
    is_partly_literal = False

    gil_message = "Constructing Python tuple"

    def analyse_types(self, env, skip_children=False):
        # Empty tuples and tuples of literals become cached constants;
        # a literal tuple with a non-constant factor is "partly literal".
        if len(self.args) == 0:
            node = self
            node.is_temp = False
            node.is_literal = True
        else:
            node = SequenceNode.analyse_types(self, env, skip_children)
            for child in node.args:
                if not child.is_literal:
                    break
            else:
                if not node.mult_factor or node.mult_factor.is_literal and \
                       isinstance(node.mult_factor.constant_result, (int, long)):
                    node.is_temp = False
                    node.is_literal = True
                else:
                    if not node.mult_factor.type.is_pyobject:
                        node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
                    node.is_temp = True
                    node.is_partly_literal = True
        return node

    def is_simple(self):
        # either temp or constant => always simple
        return True

    def nonlocally_immutable(self):
        # either temp or constant => always safe
        return True

    def calculate_result_code(self):
        if len(self.args) > 0:
            return self.result_code
        else:
            return Naming.empty_tuple

    def calculate_constant_result(self):
        self.constant_result = tuple([
                arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = self.compile_time_value_list(denv)
        try:
            return tuple(values)
        except Exception, e:
            self.compile_time_value_error(e)

    def generate_operation_code(self, code):
        if len(self.args) == 0:
            # result_code is Naming.empty_tuple
            return
        if self.is_partly_literal:
            # underlying tuple is const, but factor is not
            tuple_target = code.get_py_const(py_object_type, 'tuple_', cleanup_level=2)
            const_code = code.get_cached_constants_writer()
            const_code.mark_pos(self.pos)
            self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
            const_code.put_giveref(tuple_target)
            code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
                self.result(), tuple_target, self.mult_factor.py_result(),
                code.error_goto_if_null(self.result(), self.pos)
                ))
            code.put_gotref(self.py_result())
        elif self.is_literal:
            # non-empty cached tuple => result is global constant,
            # creation code goes into separate code writer
            self.result_code = code.get_py_const(py_object_type, 'tuple_', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
            self.generate_sequence_packing_code(code)
            code.put_giveref(self.py_result())
        else:
            self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
    #  List constructor.
    #
    #  obj_conversion_errors    [PyrexError]   used internally
    #  original_args            [ExprNode]     used internally

    obj_conversion_errors = []
    type = list_type
    in_module_scope = False

    gil_message = "Constructing Python list"

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer non-object list arrays.
        return list_type

    def analyse_expressions(self, env):
        node = SequenceNode.analyse_expressions(self, env)
        return node.coerce_to_pyobject(env)

    def analyse_types(self, env):
        # Errors are held back during analysis: the list literal may later
        # be coerced to a C array or struct, in which case Python-object
        # conversion errors are irrelevant and must not be reported.
        hold_errors()
        self.original_args = list(self.args)
        node = SequenceNode.analyse_types(self, env)
        node.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        if env.is_module_scope:
            self.in_module_scope = True
        return node

    def coerce_to(self, dst_type, env):
        # A list literal can become a Python list, a C array (from the
        # original, un-coerced element nodes) or a struct value.
        if dst_type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.obj_conversion_errors = []
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        elif self.mult_factor:
            error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
        elif dst_type.is_ptr and dst_type.base_type is not PyrexTypes.c_void_type:
            base_type = dst_type.base_type
            self.type = PyrexTypes.CArrayType(base_type, len(self.args))
            for i in range(len(self.original_args)):
                arg = self.args[i]
                if isinstance(arg, CoerceToPyTypeNode):
                    # strip the object coercion and coerce the raw value instead
                    arg = arg.arg
                self.args[i] = arg.coerce_to(base_type, env)
        elif dst_type.is_struct:
            # NOTE(review): message text says "Too may" — typo in the
            # user-visible error string, left unchanged here.
            if len(self.args) > len(dst_type.scope.var_entries):
                error(self.pos, "Too may members for '%s'" % dst_type)
            else:
                if len(self.args) < len(dst_type.scope.var_entries):
                    warning(self.pos, "Too few members for '%s'" % dst_type, 1)
                for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
                    if isinstance(arg, CoerceToPyTypeNode):
                        arg = arg.arg
                    self.args[i] = arg.coerce_to(member.type, env)
            self.type = dst_type
        else:
            self.type = error_type
            error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        return self

    def as_tuple(self):
        # Build an equivalent TupleNode (used by optimisations).
        t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, list):
            t.constant_result = tuple(self.constant_result)
        return t

    def allocate_temp_result(self, code):
        if self.type.is_array and self.in_module_scope:
            self.temp_code = code.funcstate.allocate_temp(
                self.type, manage_ref=False, static=True)
        else:
            SequenceNode.allocate_temp_result(self, code)

    def release_temp_result(self, env):
        if self.type.is_array:
            # To be valid C++, we must allocate the memory on the stack
            # manually and be sure not to reuse it for something else.
            pass
        else:
            SequenceNode.release_temp_result(self, env)

    def calculate_constant_result(self):
        if self.mult_factor:
            raise ValueError()  # may exceed the compile time memory
        self.constant_result = [
            arg.constant_result for arg in self.args]

    def compile_time_value(self, denv):
        l = self.compile_time_value_list(denv)
        if self.mult_factor:
            l *= self.mult_factor.compile_time_value(denv)
        return l

    def generate_operation_code(self, code):
        # Emit C code to build the list, the C array or the struct value,
        # depending on what this node was coerced to.
        if self.type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.generate_sequence_packing_code(code)
        elif self.type.is_array:
            for i, arg in enumerate(self.args):
                code.putln("%s[%s] = %s;" % (
                                self.result(),
                                i,
                                arg.result()))
        elif self.type.is_struct:
            for arg, member in zip(self.args, self.type.scope.var_entries):
                code.putln("%s.%s = %s;" % (
                        self.result(),
                        member.cname,
                        arg.result()))
        else:
            raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
    # Abstract base class for ExprNodes that have their own local
    # scope, such as generator expressions.
    #
    # expr_scope    Scope   the inner scope of the expression

    subexprs = []
    expr_scope = None

    # does this node really have a local scope, e.g. does it leak loop
    # variables or not?  non-leaking Py3 behaviour is default, except
    # for list comprehensions where the behaviour differs in Py2 and
    # Py3 (set in Parsing.py based on parser context)
    has_local_scope = True

    def init_scope(self, outer_scope, expr_scope=None):
        # Attach the inner scope: the given one, a fresh generator
        # expression scope, or none at all (variables leak outwards).
        if expr_scope is not None:
            self.expr_scope = expr_scope
        elif self.has_local_scope:
            self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
        else:
            self.expr_scope = None

    def analyse_declarations(self, env):
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        # this is called with the expr_scope as env
        pass

    def analyse_types(self, env):
        # no recursion here, the children will be analysed separately below
        return self

    def analyse_scoped_expressions(self, env):
        # this is called with the expr_scope as env
        return self

    def generate_evaluation_code(self, code):
        # set up local variables and free their references on exit
        generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
        if not self.has_local_scope or not self.expr_scope.var_entries:
            # no local variables => delegate, done
            generate_inner_evaluation_code(code)
            return

        code.putln('{ /* enter inner scope */')
        py_entries = []
        for entry in self.expr_scope.var_entries:
            if not entry.in_closure:
                code.put_var_declaration(entry)
                if entry.type.is_pyobject and entry.used:
                    py_entries.append(entry)
        if not py_entries:
            # no local Python references => no cleanup required
            generate_inner_evaluation_code(code)
            code.putln('} /* exit inner scope */')
            return

        # must free all local Python references at each exit point
        old_loop_labels = tuple(code.new_loop_labels())
        old_error_label = code.new_error_label()

        generate_inner_evaluation_code(code)

        # normal (non-error) exit: decref the scope's Python locals
        for entry in py_entries:
            code.put_var_decref(entry)

        # error/loop body exit points: for every label actually used in
        # the inner code, insert cleanup and jump on to the outer label
        exit_scope = code.new_label('exit_scope')
        code.put_goto(exit_scope)
        for label, old_label in ([(code.error_label, old_error_label)] +
                                 list(zip(code.get_loop_labels(), old_loop_labels))):
            if code.label_used(label):
                code.put_label(label)
                for entry in py_entries:
                    code.put_var_decref(entry)
                code.put_goto(old_label)
        code.put_label(exit_scope)
        code.putln('} /* exit inner scope */')

        # restore the outer labels for subsequent code
        code.set_loop_labels(old_loop_labels)
        code.error_label = old_error_label
class ComprehensionNode(ScopedExprNode):
    # A list/set/dict comprehension.  The 'loop' child generates the
    # items; 'append' (set in analyse_declarations) adds them to the
    # result container.

    child_attrs = ["loop"]

    is_temp = True

    def infer_type(self, env):
        return self.type

    def analyse_declarations(self, env):
        self.append.target = self  # this is used in the PyList_Append of the inner loop
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def analyse_types(self, env):
        # loop is analysed here only when the comprehension leaks its
        # variables (no local scope, Py2 list-comp behaviour)
        if not self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def analyse_scoped_expressions(self, env):
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_operation_code(self, code):
        # create the empty result container, then run the loop that fills it
        if self.type is Builtin.list_type:
            create_code = 'PyList_New(0)'
        elif self.type is Builtin.set_type:
            create_code = 'PySet_New(NULL)'
        elif self.type is Builtin.dict_type:
            create_code = 'PyDict_New()'
        else:
            raise InternalError("illegal type for comprehension: %s" % self.type)
        code.putln('%s = %s; %s' % (
            self.result(), create_code,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
        self.loop.generate_execution_code(code)

    def annotate(self, code):
        self.loop.annotate(code)
class ComprehensionAppendNode(Node):
    # Appends the loop item to a list/set comprehension's result.
    # Need to be careful to avoid infinite recursion:
    # target must not be in child_attrs/subexprs

    child_attrs = ['expr']
    target = None

    type = PyrexTypes.c_int_type

    def analyse_expressions(self, env):
        self.expr = self.expr.analyse_expressions(env)
        if not self.expr.type.is_pyobject:
            self.expr = self.expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        # pick the C append function for the target container type
        if self.target.type is list_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
            function = "__Pyx_ListComp_Append"
        elif self.target.type is set_type:
            function = "PySet_Add"
        else:
            raise InternalError(
                "Invalid type for comprehension node: %s" % self.target.type)

        self.expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
            function,
            self.target.result(),
            self.expr.result()
            ), self.pos))
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
    # Sets one key/value pair in a dict comprehension's result dict.

    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        self.key_expr = self.key_expr.analyse_expressions(env)
        if not self.key_expr.type.is_pyobject:
            self.key_expr = self.key_expr.coerce_to_pyobject(env)
        self.value_expr = self.value_expr.analyse_expressions(env)
        if not self.value_expr.type.is_pyobject:
            self.value_expr = self.value_expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        # key is evaluated before value, matching Python semantics
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result()
            ), self.pos))
        self.key_expr.generate_disposal_code(code)
        self.key_expr.free_temps(code)
        self.value_expr.generate_disposal_code(code)
        self.value_expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.key_expr.generate_function_definitions(env, code)
        self.value_expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.key_expr.annotate(code)
        self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ScopedExprNode):
    # An inlined generator expression for which the result is
    # calculated inside of the loop.  This will only be created by
    # transforms when replacing builtin calls on generator
    # expressions.
    #
    # loop           ForStatNode      the for-loop, not containing any YieldExprNodes
    # result_node    ResultRefNode    the reference to the result value temp
    # orig_func      String           the name of the builtin function this node replaces

    child_attrs = ["loop"]
    loop_analysed = False
    type = py_object_type

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def may_be_none(self):
        return False

    def annotate(self, code):
        self.loop.annotate(code)

    def infer_type(self, env):
        return self.result_node.infer_type(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop_analysed = True
            self.loop = self.loop.analyse_expressions(env)
        self.type = self.result_node.type
        self.is_temp = True
        return self

    def analyse_scoped_expressions(self, env):
        self.loop_analysed = True
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def coerce_to(self, dst_type, env):
        if self.orig_func == 'sum' and dst_type.is_numeric and not self.loop_analysed:
            # We can optimise by dropping the aggregation variable and
            # the add operations into C.  This can only be done safely
            # before analysing the loop body, after that, the result
            # reference type will have infected expressions and
            # assignments.
            self.result_node.type = self.type = dst_type
            return self
        return super(InlinedGeneratorExpressionNode, self).coerce_to(dst_type, env)

    def generate_result_code(self, code):
        # the loop writes into this node's result temp via result_node
        self.result_node.result_code = self.result()
        self.loop.generate_execution_code(code)
class SetNode(ExprNode):
    #  Set constructor.

    type = set_type

    subexprs = ['args']

    gil_message = "Constructing Python set"

    def analyse_types(self, env):
        for i in range(len(self.args)):
            arg = self.args[i]
            arg = arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        self.type = set_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        return False

    def calculate_constant_result(self):
        self.constant_result = set([
            arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        # report (not raise) failures, e.g. unhashable elements
        values = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return set(values)
        except Exception, e:
            self.compile_time_value_error(e)

    def generate_evaluation_code(self, code):
        # elements are evaluated, added and disposed of one at a time
        code.globalstate.use_utility_code(Builtin.py_set_utility_code)
        self.allocate_temp_result(code)
        code.putln(
            "%s = PySet_New(0); %s" % (
                self.result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        for arg in self.args:
            arg.generate_evaluation_code(code)
            code.put_error_if_neg(
                self.pos,
                "PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
            arg.generate_disposal_code(code)
            arg.free_temps(code)
class DictNode(ExprNode):
    #  Dictionary constructor.
    #
    #  key_value_pairs       [DictItemNode]
    #  exclude_null_values   [boolean]          Do not add NULL values to dict
    #
    #  obj_conversion_errors [PyrexError]   used internally

    subexprs = ['key_value_pairs']
    is_temp = 1
    exclude_null_values = False
    type = dict_type

    obj_conversion_errors = []

    @classmethod
    def from_pairs(cls, pos, pairs):
        # convenience constructor from (key, value) node pairs
        return cls(pos, key_value_pairs=[
            DictItemNode(pos, key=k, value=v) for k, v in pairs])

    def calculate_constant_result(self):
        self.constant_result = dict([
            item.constant_result for item in self.key_value_pairs])

    def compile_time_value(self, denv):
        # report (not raise) failures, e.g. unhashable keys
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
            for item in self.key_value_pairs]
        try:
            return dict(pairs)
        except Exception, e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer struct constructors.
        return dict_type

    def analyse_types(self, env):
        # Hold errors back: a dict literal may later be coerced to a
        # struct/union, making object-conversion errors irrelevant.
        hold_errors()
        self.key_value_pairs = [ item.analyse_types(env)
                                 for item in self.key_value_pairs ]
        self.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        # A dict literal can become a Python dict or initialise a C
        # struct/union by field name.
        if dst_type.is_pyobject:
            self.release_errors()
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        elif dst_type.is_struct_or_union:
            self.type = dst_type
            if not dst_type.is_struct and len(self.key_value_pairs) != 1:
                error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
            elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
                warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
            for item in self.key_value_pairs:
                if isinstance(item.key, CoerceToPyTypeNode):
                    item.key = item.key.arg
                if not item.key.is_string_literal:
                    error(item.key.pos, "Invalid struct field identifier")
                    item.key = StringNode(item.key.pos, value="<error>")
                else:
                    key = str(item.key.value)  # converts string literals to unicode in Py3
                    member = dst_type.scope.lookup_here(key)
                    if not member:
                        error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
                    else:
                        value = item.value
                        if isinstance(value, CoerceToPyTypeNode):
                            value = value.arg
                        item.value = value.coerce_to(member.type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        return self

    def release_errors(self):
        # report the errors held back during analyse_types
        for err in self.obj_conversion_errors:
            report_error(err)
        self.obj_conversion_errors = []

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        #  Custom method used here because key-value
        #  pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        if self.type.is_pyobject:
            self.release_errors()
            code.putln(
                "%s = PyDict_New(); %s" % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        for item in self.key_value_pairs:
            item.generate_evaluation_code(code)
            if self.type.is_pyobject:
                if self.exclude_null_values:
                    code.putln('if (%s) {' % item.value.py_result())
                code.put_error_if_neg(self.pos,
                    "PyDict_SetItem(%s, %s, %s)" % (
                        self.result(),
                        item.key.py_result(),
                        item.value.py_result()))
                if self.exclude_null_values:
                    code.putln('}')
            else:
                # struct/union target: assign the field directly
                code.putln("%s.%s = %s;" % (
                        self.result(),
                        item.key.value,
                        item.value.result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        for item in self.key_value_pairs:
            item.annotate(code)
class DictItemNode(ExprNode):
    """A single key/value pair inside a DictNode.

    key    ExprNode
    value  ExprNode
    """
    subexprs = ['key', 'value']
    nogil_check = None  # the parent DictNode performs the nogil check

    def calculate_constant_result(self):
        """Constant-fold to a (key, value) pair."""
        key_const = self.key.constant_result
        value_const = self.value.constant_result
        self.constant_result = (key_const, value_const)

    def analyse_types(self, env):
        """Analyse both sub-expressions, then coerce both to Python objects."""
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        self.value = self.value.coerce_to_pyobject(env)
        return self

    def generate_evaluation_code(self, code):
        for subexpr in (self.key, self.value):
            subexpr.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        for subexpr in (self.key, self.value):
            subexpr.generate_disposal_code(code)

    def free_temps(self, code):
        for subexpr in (self.key, self.value):
            subexpr.free_temps(code)

    def __iter__(self):
        # supports "key, value = item" unpacking
        return iter((self.key, self.value))
class SortedDictKeysNode(ExprNode):
    """Builds a sorted list of a mapping's keys, e.g. for dir()."""
    subexprs = ['arg']
    is_temp = True

    def __init__(self, arg):
        ExprNode.__init__(self, arg.pos, arg=arg)
        self.type = Builtin.list_type

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env)
        if arg.type is Builtin.dict_type:
            # a known dict gets a None check before the PyDict_Keys() call
            arg = arg.as_none_safe_node(
                "'NoneType' object is not iterable")
        self.arg = arg
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        is_dict = self.arg.type is Builtin.dict_type
        keys_func = 'PyDict_Keys' if is_dict else 'PyMapping_Keys'
        code.putln('%s = %s(%s); %s' % (
            self.result(), keys_func, self.arg.py_result(),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        code.put_error_if_neg(
            self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
    """Mixin that tracks the defining module name and the dotted
    qualified name ('__qualname__') of a class or function node."""

    def set_qualified_name(self, env, self_name):
        """Record the module name and build the qualified name by walking
        outwards through all enclosing non-module scopes."""
        self.module_name = env.global_scope().qualified_name
        name_parts = [self_name]
        scope = env
        while scope and not scope.is_module_scope:
            if scope.is_closure_scope:
                name_parts.append('<locals>')
            name_parts.append(scope.name)
            scope = scope.parent_scope
        name_parts.reverse()
        self.qualname = StringEncoding.EncodedString('.'.join(name_parts))

    def get_py_mod_name(self, code):
        """Return the interned C string constant for the module name."""
        return code.get_py_string_const(
            self.module_name, identifier=True)

    def get_py_qualified_name(self, code):
        """Return the interned C string constant for the qualified name."""
        return code.get_py_string_const(
            self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs a class object given
    #  a name, tuple of bases and class dictionary.
    #
    #  name         EncodedString      Name of the class
    #  bases        ExprNode           Base class tuple
    #  dict         ExprNode           Class dict (not owned by this node)
    #  doc          ExprNode or None   Doc string
    #  module_name  EncodedString      Name of defining module

    subexprs = ['bases', 'doc']

    def analyse_types(self, env):
        self.bases = self.bases.analyse_types(env)
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
        #TODO(craig,haoyu) This should be moved to a better place
        self.set_qualified_name(env, self.name)
        return self

    def may_be_none(self):
        return True

    gil_message = "Constructing Python class"

    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)

        if self.doc:
            # set __doc__ in the class dict before creating the class
            code.put_error_if_neg(self.pos,
                'PyDict_SetItemString(%s, "__doc__", %s)' % (
                    self.dict.py_result(),
                    self.doc.py_result()))
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        code.putln(
            '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
                self.result(),
                self.bases.py_result(),
                self.dict.py_result(),
                cname,
                qualname,
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
    #  Helper class used in the implementation of Python3+
    #  class definitions. Constructs a class object given
    #  a name, tuple of bases and class dictionary.
    #
    #  name         EncodedString      Name of the class
    #  dict         ExprNode           Class dict (not owned by this node)
    #  module_name  EncodedString      Name of defining module

    subexprs = []

    def analyse_types(self, env):
        self.type = py_object_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        return True

    gil_message = "Constructing Python class"

    def generate_result_code(self, code):
        # metaclass, bases and mkw are shared sub-nodes evaluated by the
        # surrounding class-creation statement, not owned by this node
        code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
        cname = code.intern_identifier(self.name)
        code.putln(
            '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s); %s' % (
                self.result(),
                self.metaclass.result(),
                cname,
                self.bases.py_result(),
                self.dict.py_result(),
                self.mkw.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class KeywordArgsNode(ExprNode):
    #  Helper class for keyword arguments.
    #
    #  starstar_arg      DictNode
    #  keyword_args      [DictItemNode]

    subexprs = ['starstar_arg', 'keyword_args']
    is_temp = 1
    type = dict_type

    def calculate_constant_result(self):
        # merge ** dict and explicit keywords; duplicates are an error
        result = dict(self.starstar_arg.constant_result)
        for item in self.keyword_args:
            key, value = item.constant_result
            if key in result:
                raise ValueError("duplicate keyword argument found: %s" % key)
            result[key] = value
        self.constant_result = result

    def compile_time_value(self, denv):
        result = self.starstar_arg.compile_time_value(denv)
        pairs = [ (item.key.compile_time_value(denv), item.value.compile_time_value(denv))
                  for item in self.keyword_args ]
        try:
            result = dict(result)
            for key, value in pairs:
                if key in result:
                    raise ValueError("duplicate keyword argument found: %s" % key)
                result[key] = value
        except Exception, e:
            self.compile_time_value_error(e)
        return result

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        return dict_type

    def analyse_types(self, env):
        arg = self.starstar_arg.analyse_types(env)
        arg = arg.coerce_to_pyobject(env)
        self.starstar_arg = arg.as_none_safe_node(
            # FIXME: CPython's error message starts with the runtime function name
            'argument after ** must be a mapping, not NoneType')
        self.keyword_args = [ item.analyse_types(env)
                              for item in self.keyword_args ]
        return self

    def may_be_none(self):
        return False

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.starstar_arg.generate_evaluation_code(code)
        if self.starstar_arg.type is not Builtin.dict_type:
            # CPython supports calling functions with non-dicts, so do we
            code.putln('if (likely(PyDict_Check(%s))) {' %
                       self.starstar_arg.py_result())
        if self.keyword_args:
            # will be modified below => copy the ** dict first
            code.putln(
                "%s = PyDict_Copy(%s); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        else:
            code.putln("%s = %s;" % (
                self.result(),
                self.starstar_arg.py_result()))
            code.put_incref(self.result(), py_object_type)
        if self.starstar_arg.type is not Builtin.dict_type:
            # non-dict mapping => convert via dict(mapping)
            code.putln('} else {')
            code.putln(
                "%s = PyObject_CallFunctionObjArgs("
                "(PyObject*)&PyDict_Type, %s, NULL); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.putln('}')
        self.starstar_arg.generate_disposal_code(code)
        self.starstar_arg.free_temps(code)

        if not self.keyword_args:
            return

        # add the explicit keyword arguments, raising on duplicates
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
        for item in self.keyword_args:
            item.generate_evaluation_code(code)
            code.putln("if (unlikely(PyDict_GetItem(%s, %s))) {" % (
                self.result(),
                item.key.py_result()))
            # FIXME: find out function name at runtime!
            code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                item.key.py_result(),
                code.error_goto(self.pos)))
            code.putln("}")
            code.put_error_if_neg(self.pos,
                "PyDict_SetItem(%s, %s, %s)" % (
                    self.result(),
                    item.key.py_result(),
                    item.value.py_result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        self.starstar_arg.annotate(code)
        for item in self.keyword_args:
            item.annotate(code)
class PyClassMetaclassNode(ExprNode):
    """Helper node holding the Python3 metaclass object for a class body.

    bases  ExprNode   Base class tuple (not owned by this node)
    mkw    ExprNode   Class keyword arguments (not owned by this node)
    """
    subexprs = []

    def analyse_types(self, env):
        self.type = py_object_type
        self.is_temp = True
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
        call_stmt = "%s = __Pyx_Py3MetaclassGet(%s, %s); %s" % (
            self.result(),
            self.bases.result(),
            self.mkw.result(),
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(call_stmt)
        code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class holds Python3 namespace object
    #
    # All of these are not owned by this node
    #  metaclass     ExprNode           Metaclass object
    #  bases         ExprNode           Base class tuple
    #  mkw           ExprNode           Class keyword arguments
    #  doc           ExprNode or None   Doc string (owned)

    subexprs = ['doc']

    def analyse_types(self, env):
        self.bases = self.bases.analyse_types(env)
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        #TODO(craig,haoyu) This should be moved to a better place
        self.set_qualified_name(env, self.name)
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        # calls __Pyx_Py3MetaclassPrepare, mirroring the Py3
        # type.__prepare__ class namespace protocol
        cname = code.intern_identifier(self.name)
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        if self.doc:
            doc_code = self.doc.result()
        else:
            doc_code = '(PyObject *) NULL'
        code.putln(
            "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
                self.result(),
                self.metaclass.result(),
                self.bases.result(),
                cname,
                qualname,
                self.mkw.result(),
                py_mod_name,
                doc_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
    # Initialize CyFunction.func_classobj.  Only emits code when
    # 'is_active' has been switched on by the class-body transform.
    is_temp = True
    type = py_object_type
    subexprs = []
    is_active = False

    def analyse_expressions(self, env):
        if self.is_active:
            env.use_utility_code(
                UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
        return self

    def generate_evaluation_code(self, code):
        if self.is_active:
            # list collects the functions that need the class cell filled in
            self.allocate_temp_result(code)
            code.putln(
                '%s = PyList_New(0); %s' % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.result())

    def generate_injection_code(self, code, classobj_cname):
        # called after the class object exists to patch it into the functions
        if self.is_active:
            code.putln('__Pyx_CyFunction_InitClassCell(%s, %s);' % (
                self.result(), classobj_cname))
class ClassCellNode(ExprNode):
    # Class Cell for noargs super()
    subexprs = []
    is_temp = True
    is_generator = False
    type = py_object_type

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        # fetch the class object from the CyFunction (or from the
        # generator closure when inside a generator)
        if not self.is_generator:
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                self.result(),
                Naming.self_cname))
        else:
            code.putln('%s = %s->classobj;' % (
                self.result(), Naming.generator_cname))
        code.putln(
            'if (!%s) { PyErr_SetString(PyExc_SystemError, '
            '"super(): empty __class__ cell"); %s }' % (
                self.result(),
                code.error_goto(self.pos)))
        code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs a bound method
    #  object from a class and a function.
    #
    #  function      ExprNode   Function object
    #  self_object   ExprNode   self object

    subexprs = ['function']

    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        self.type = py_object_type
        self.is_temp = 1
        return self

    # fixed grammar in the user-visible message (was "an bound method")
    gil_message = "Constructing a bound method"

    def generate_result_code(self, code):
        # PyMethod_New(func, self, class); the self object appears twice:
        # once as the instance and once to obtain its type via ob_type.
        code.putln(
            "%s = PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
                self.result(),
                self.function.py_result(),
                self.self_object.py_result(),
                self.self_object.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
    #  Helper class used in the implementation of Python
    #  class definitions. Constructs an unbound method
    #  object from a class and a function.
    #
    #  function      ExprNode   Function object

    type = py_object_type
    is_temp = 1

    subexprs = ['function']

    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        return self

    def may_be_none(self):
        return False

    gil_message = "Constructing an unbound method"

    def generate_result_code(self, code):
        # the enclosing class object is taken from the top of the
        # code generator's class stack
        class_cname = code.pyclass_stack[-1].classobj.result()
        code.putln(
            "%s = PyMethod_New(%s, 0, %s); %s" % (
                self.result(),
                self.function.py_result(),
                class_cname,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
    #  Helper class used in the implementation of Python
    #  functions.  Constructs a PyCFunction object
    #  from a PyMethodDef struct.
    #
    #  pymethdef_cname   string             PyMethodDef structure
    #  self_object       ExprNode or None
    #  binding           bool
    #  def_node          DefNode            the Python function node
    #  module_name       EncodedString      Name of defining module
    #  code_object       CodeObjectNode     the PyCodeObject creator node

    subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
                'annotations_dict']

    # per-instance state (class-level defaults)
    self_object = None
    code_object = None
    binding = False
    def_node = None
    defaults = None            # [(arg, entry)] for non-literal defaults
    defaults_struct = None     # Symtab.StructOrUnionScope holding them
    defaults_pyobjects = 0
    defaults_tuple = None      # TupleNode of positional default values
    defaults_kwdict = None     # DictNode of keyword-only default values
    annotations_dict = None    # DictNode of argument annotations

    type = py_object_type
    is_temp = 1

    specialized_cpdefs = None
    is_specialization = False
    @classmethod
    def from_defnode(cls, node, binding):
        # Alternate constructor: build the function object node for a DefNode.
        return cls(node.pos,
                   def_node=node,
                   pymethdef_cname=node.entry.pymethdef_cname,
                   binding=binding or node.specialized_cpdefs,
                   specialized_cpdefs=node.specialized_cpdefs,
                   code_object=CodeObjectNode(node))
    def analyse_types(self, env):
        # default-argument handling is only needed for binding functions
        if self.binding:
            self.analyse_default_args(env)
        #TODO(craig,haoyu) This should be moved to a better place
        self.set_qualified_name(env, self.def_node.name)
        return self
    def analyse_default_args(self, env):
        """
        Handle a function's non-literal default arguments and argument
        annotations.  Non-literal defaults are stored in a generated C
        struct; literal ones go into a defaults tuple/kwdict.
        """
        nonliteral_objects = []
        nonliteral_other = []
        default_args = []
        default_kwargs = []
        annotations = []
        for arg in self.def_node.args:
            if arg.default:
                if not arg.default.is_literal:
                    arg.is_dynamic = True
                    if arg.type.is_pyobject:
                        nonliteral_objects.append(arg)
                    else:
                        nonliteral_other.append(arg)
                else:
                    arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
                if arg.kw_only:
                    default_kwargs.append(arg)
                else:
                    default_args.append(arg)
            if arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
                if not arg.annotation.type.is_pyobject:
                    arg.annotation = arg.annotation.coerce_to_pyobject(env)
                annotations.append((arg.pos, arg.name, arg.annotation))
        if self.def_node.return_type_annotation:
            annotations.append((self.def_node.return_type_annotation.pos,
                                StringEncoding.EncodedString("return"),
                                self.def_node.return_type_annotation))

        if nonliteral_objects or nonliteral_other:
            # declare a module-level struct holding the dynamic defaults
            module_scope = env.global_scope()
            cname = module_scope.next_id(Naming.defaults_struct_prefix)
            scope = Symtab.StructOrUnionScope(cname)
            self.defaults = []
            for arg in nonliteral_objects:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=True)
                self.defaults.append((arg, entry))
            for arg in nonliteral_other:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=False)
                self.defaults.append((arg, entry))
            entry = module_scope.declare_struct_or_union(
                None, 'struct', scope, 1, None, cname=cname)
            self.defaults_struct = scope
            self.defaults_pyobjects = len(nonliteral_objects)
            for arg, entry in self.defaults:
                arg.default_value = '%s->%s' % (
                    Naming.dynamic_args_cname, entry.cname)
            self.def_node.defaults_struct = self.defaults_struct.name

        if default_args or default_kwargs:
            if self.defaults_struct is None:
                # all defaults are literals => plain tuple/dict constants
                if default_args:
                    defaults_tuple = TupleNode(self.pos, args=[
                        arg.default for arg in default_args])
                    self.defaults_tuple = defaults_tuple.analyse_types(env)
                if default_kwargs:
                    defaults_kwdict = DictNode(self.pos, key_value_pairs=[
                        DictItemNode(
                            arg.pos,
                            key=IdentifierStringNode(arg.pos, value=arg.name),
                            value=arg.default)
                        for arg in default_kwargs])
                    self.defaults_kwdict = defaults_kwdict.analyse_types(env)
            else:
                # dynamic defaults => generate a '__defaults__' getter
                # function that reads them from the defaults struct
                if default_args:
                    defaults_tuple = DefaultsTupleNode(
                        self.pos, default_args, self.defaults_struct)
                else:
                    defaults_tuple = NoneNode(self.pos)
                if default_kwargs:
                    defaults_kwdict = DefaultsKwDictNode(
                        self.pos, default_kwargs, self.defaults_struct)
                else:
                    defaults_kwdict = NoneNode(self.pos)

                defaults_getter = Nodes.DefNode(
                    self.pos, args=[], star_arg=None, starstar_arg=None,
                    body=Nodes.ReturnStatNode(
                        self.pos, return_type=py_object_type,
                        value=TupleNode(
                            self.pos, args=[defaults_tuple, defaults_kwdict])),
                    decorators=None,
                    name=StringEncoding.EncodedString("__defaults__"))
                defaults_getter.analyse_declarations(env)
                defaults_getter = defaults_getter.analyse_expressions(env)
                defaults_getter.body = defaults_getter.body.analyse_expressions(
                    defaults_getter.local_scope)
                defaults_getter.py_wrapper_required = False
                defaults_getter.pymethdef_required = False
                self.def_node.defaults_getter = defaults_getter
        if annotations:
            annotations_dict = DictNode(self.pos, key_value_pairs=[
                DictItemNode(
                    pos, key=IdentifierStringNode(pos, value=name),
                    value=value)
                for pos, name, value in annotations])
            self.annotations_dict = annotations_dict.analyse_types(env)
    def may_be_none(self):
        # A function object is always constructed, never None.
        return False

    # GIL error message used by nogil_check() in the base class.
    gil_message = "Constructing Python function"
def self_result_code(self):
if self.self_object is None:
self_result = "NULL"
else:
self_result = self.self_object.py_result()
return self_result
def generate_result_code(self, code):
if self.binding:
self.generate_cyfunction_code(code)
else:
self.generate_pycfunction_code(code)
    def generate_pycfunction_code(self, code):
        # Emit C code creating a plain (non-binding) PyCFunction from the
        # method definition struct, attached to the enclosing module.
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
                self.result(),
                self.pymethdef_cname,
                self.self_result_code(),
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    def generate_cyfunction_code(self, code):
        # Emit C code creating a CyFunction (or FusedFunction for fused
        # cpdefs/specialisations), then attach defaults, kwdefaults,
        # the __defaults__ getter and the annotations dict to it.
        if self.specialized_cpdefs:
            def_node = self.specialized_cpdefs[0]
        else:
            def_node = self.def_node

        if self.specialized_cpdefs or self.is_specialization:
            # fused functions need the FusedFunction subtype
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
            constructor = "__pyx_FusedFunction_NewEx"
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
            constructor = "__Pyx_CyFunction_NewEx"

        if self.code_object:
            code_object_result = self.code_object.py_result()
        else:
            code_object_result = 'NULL'

        # Build the CyFunction flags mask for static/class methods and
        # methods of extension types.
        flags = []
        if def_node.is_staticmethod:
            flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
        elif def_node.is_classmethod:
            flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
        if def_node.local_scope.parent_scope.is_c_class_scope:
            flags.append('__Pyx_CYFUNCTION_CCLASS')
        if flags:
            flags = ' | '.join(flags)
        else:
            flags = '0'

        code.putln(
            '%s = %s(&%s, %s, %s, %s, %s, %s); %s' % (
                self.result(),
                constructor,
                self.pymethdef_cname,
                flags,
                self.get_py_qualified_name(code),
                self.self_result_code(),
                self.get_py_mod_name(code),
                code_object_result,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())

        if def_node.requires_classobj:
            # Methods that use the classobj keep a reference in the class
            # cell list of the enclosing Python class.
            assert code.pyclass_stack, "pyclass_stack is empty"
            class_node = code.pyclass_stack[-1]
            code.put_incref(self.py_result(), py_object_type)
            code.putln(
                'PyList_Append(%s, %s);' % (
                    class_node.class_cell.result(),
                    self.result()))
            code.put_giveref(self.py_result())

        if self.defaults:
            # Allocate the per-function defaults struct and assign the
            # (non-literal) default values into it.
            code.putln(
                'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
                    self.result(), self.defaults_struct.name,
                    self.defaults_pyobjects, code.error_goto(self.pos)))
            defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
                self.defaults_struct.name, self.result())
            for arg, entry in self.defaults:
                arg.generate_assignment_code(code, target='%s->%s' % (
                    defaults, entry.cname))

        if self.defaults_tuple:
            code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
                self.result(), self.defaults_tuple.py_result()))
        if self.defaults_kwdict:
            code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
                self.result(), self.defaults_kwdict.py_result()))
        if def_node.defaults_getter:
            code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
                self.result(), def_node.defaults_getter.entry.pyfunc_cname))
        if self.annotations_dict:
            code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
                self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
    # Special PyCFunctionNode that depends on a closure class

    binding = True
    needs_self_code = True

    def self_result_code(self):
        # Bind the function to the current closure scope object (cast to
        # PyObject*), unless self-code generation has been disabled.
        if not self.needs_self_code:
            return "NULL"
        return "((PyObject*)%s)" % Naming.cur_scope_cname
class CodeObjectNode(ExprNode):
    # Create a PyCodeObject for a CyFunction instance.
    #
    # def_node   DefNode    the Python function node
    # varnames   TupleNode  a tuple with all local variable names

    subexprs = ['varnames']
    is_temp = False

    def __init__(self, def_node):
        ExprNode.__init__(self, def_node.pos, def_node=def_node)
        # Collect argument names (including * and ** args) followed by the
        # remaining local variables, in the order the code object expects.
        args = list(def_node.args)
        if def_node.star_arg:
            args.append(def_node.star_arg)
        if def_node.starstar_arg:
            args.append(def_node.starstar_arg)
        local_vars = [ arg for arg in def_node.local_scope.var_entries
                       if arg.name ]
        self.varnames = TupleNode(
            def_node.pos,
            args = [ IdentifierStringNode(arg.pos, value=arg.name)
                     for arg in args + local_vars ],
            is_temp = 0,
            is_literal = 1)

    def calculate_result_code(self):
        # set by generate_result_code() below
        return self.result_code

    def generate_result_code(self, code):
        # The code object is a module-level cached constant; emit its
        # construction into the cached-constants section.
        self.result_code = code.get_py_const(py_object_type, 'codeobj_', cleanup_level=2)

        code = code.get_cached_constants_writer()
        code.mark_pos(self.pos)
        func = self.def_node
        func_name = code.get_py_string_const(
            func.name, identifier=True, is_str=False, unicode_value=func.name)
        # FIXME: better way to get the module file path at module init time? Encoding to use?
        file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8'))
        file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)

        # NOTE: the argument order below must match __Pyx_PyCode_New().
        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, 0, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,
            len(func.args),            # argcount
            func.num_kwonly_args,      # kwonlyargcount (Py3 only)
            len(self.varnames.args),   # nlocals
            Naming.empty_bytes,        # code
            Naming.empty_tuple,        # consts
            Naming.empty_tuple,        # names (FIXME)
            self.varnames.result(),    # varnames
            Naming.empty_tuple,        # freevars (FIXME)
            Naming.empty_tuple,        # cellvars (FIXME)
            file_path_const,           # filename
            func_name,                 # name
            self.pos[1],               # firstlineno
            Naming.empty_bytes,        # lnotab
            code.error_goto_if_null(self.result_code, self.pos),
            ))
class DefaultLiteralArgNode(ExprNode):
    # CyFunction's literal argument default value
    #
    # Evaluate literal only once.

    subexprs = []
    is_literal = True
    is_temp = False

    def __init__(self, pos, arg):
        super(DefaultLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.type = arg.type
        self.evaluated = False

    def analyse_types(self, env):
        # already analysed via the wrapped argument
        return self

    def generate_result_code(self, code):
        pass

    def generate_evaluation_code(self, code):
        # guard ensures the literal is evaluated at most once
        if self.evaluated:
            return
        self.arg.generate_evaluation_code(code)
        self.evaluated = True

    def result(self):
        return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
    # CyFunction's non-literal argument default value:
    # reads the value out of the function's defaults struct.

    subexprs = []

    def __init__(self, pos, arg, defaults_struct):
        super(DefaultNonLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.defaults_struct = defaults_struct

    def analyse_types(self, env):
        self.is_temp = False
        self.type = self.arg.type
        return self

    def generate_result_code(self, code):
        # nothing to do: the value lives in the defaults struct
        pass

    def result(self):
        entry = self.defaults_struct.lookup(self.arg.name)
        return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
            self.defaults_struct.name, Naming.self_cname, entry.cname)
class DefaultsTupleNode(TupleNode):
    # CyFunction's __defaults__ tuple: literal defaults are used directly,
    # non-literal ones are read from the defaults struct.

    def __init__(self, pos, defaults, defaults_struct):
        args = [arg.default if arg.default.is_literal
                else DefaultNonLiteralArgNode(pos, arg, defaults_struct)
                for arg in defaults]
        super(DefaultsTupleNode, self).__init__(pos, args=args)
class DefaultsKwDictNode(DictNode):
    # CyFunction's __kwdefaults__ dict: maps keyword-only argument names
    # to their default value nodes.

    def __init__(self, pos, defaults, defaults_struct):
        items = []
        for arg in defaults:
            key = IdentifierStringNode(arg.pos, value=arg.name)
            if arg.default.is_literal:
                value = arg.default
            else:
                value = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            items.append(DictItemNode(value.pos, key=key, value=value))
        super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
    # Lambda expression node (only used as a function reference)
    #
    # args          [CArgDeclNode]         formal arguments
    # star_arg      PyArgDeclNode or None  * argument
    # starstar_arg  PyArgDeclNode or None  ** argument
    # lambda_name   string                 a module-globally unique lambda name
    # result_expr   ExprNode
    # def_node      DefNode                the underlying function 'def' node

    child_attrs = ['def_node']

    name = StringEncoding.EncodedString('<lambda>')

    def analyse_declarations(self, env):
        # NOTE(review): the order here looks deliberate — the pymethdef
        # cname is only available after the def_node's declarations were
        # analysed; do not reorder without checking.
        self.def_node.no_assignment_synthesis = True
        self.def_node.pymethdef_required = True
        self.def_node.analyse_declarations(env)
        self.def_node.is_cyfunction = True
        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
        env.add_lambda_def(self.def_node)

    def analyse_types(self, env):
        self.def_node = self.def_node.analyse_expressions(env)
        return super(LambdaNode, self).analyse_types(env)

    def generate_result_code(self, code):
        # Execute the underlying def before building the function object.
        self.def_node.generate_execution_code(code)
        super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
    # A generator expression, e.g.  (i for i in range(10))
    #
    # Result is a generator.
    #
    # loop      ForStatNode   the for-loop, containing a YieldExprNode
    # def_node  DefNode       the underlying generator 'def' node

    name = StringEncoding.EncodedString('genexpr')
    binding = False

    def analyse_declarations(self, env):
        super(GeneratorExpressionNode, self).analyse_declarations(env)
        # No pymethdef required: the generator function is called directly,
        # never exposed as a Python-level function object.
        self.def_node.pymethdef_required = False
        self.def_node.py_wrapper_required = False
        self.def_node.is_cyfunction = False
        # Force genexpr signature
        self.def_node.entry.signature = TypeSlots.pyfunction_noargs

    def generate_result_code(self, code):
        # Call the generator function directly to produce the generator.
        code.putln(
            '%s = %s(%s); %s' % (
                self.result(),
                self.def_node.entry.pyfunc_cname,
                self.self_result_code(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
    # Yield expression node
    #
    # arg         ExprNode   the value to return from the generator
    # label_name  string     name of the C label used for this yield
    # label_num   integer    yield label number
    # is_yield_from  boolean is a YieldFromExprNode to delegate to another generator

    subexprs = ['arg']
    type = py_object_type
    label_num = 0
    is_yield_from = False

    def analyse_types(self, env):
        # label_num == 0 means this node is not inside a generator body.
        if not self.label_num:
            error(self.pos, "'yield' not supported here")
        self.is_temp = 1
        if self.arg is not None:
            self.arg = self.arg.analyse_types(env)
            if not self.arg.type.is_pyobject:
                self.coerce_yield_argument(env)
        return self

    def coerce_yield_argument(self, env):
        self.arg = self.arg.coerce_to_pyobject(env)

    def generate_evaluation_code(self, code):
        # Store the yielded value (or None) in the generator's retval
        # slot, then emit the suspend/resume code.
        if self.arg:
            self.arg.generate_evaluation_code(code)
            self.arg.make_owned_reference(code)
            code.putln(
                "%s = %s;" % (
                    Naming.retval_cname,
                    self.arg.result_as(py_object_type)))
            self.arg.generate_post_assignment_code(code)
            self.arg.free_temps(code)
        else:
            code.put_init_to_py_none(Naming.retval_cname, py_object_type)
        self.generate_yield_code(code)

    def generate_yield_code(self, code):
        """
        Generate the code to return the argument in 'Naming.retval_cname'
        and to continue at the yield label.
        """
        self.label_name = code.new_label('resume_from_yield')
        code.use_label(self.label_name)
        # Save all temps that are live across the yield into the closure
        # struct before suspending; they are restored after the label.
        saved = []
        code.funcstate.closure_temps.reset()
        for cname, type, manage_ref in code.funcstate.temps_in_use():
            save_cname = code.funcstate.closure_temps.allocate_temp(type)
            saved.append((cname, save_cname, type))
            if type.is_pyobject:
                code.put_xgiveref(cname)
            code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))

        code.put_xgiveref(Naming.retval_cname)
        code.put_finish_refcount_context()
        code.putln("/* return from generator, yielding value */")
        # Record where to resume, then suspend by returning the value.
        code.putln("%s->resume_label = %d;" % (
            Naming.generator_cname, self.label_num))
        code.putln("return %s;" % Naming.retval_cname)

        code.put_label(self.label_name)
        # Restore the saved temps and clear their closure slots.
        for cname, save_cname, type in saved:
            code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
            if type.is_pyobject:
                code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
            if type.is_pyobject:
                code.put_xgotref(cname)

        if self.result_is_used:
            # The value passed into send() becomes the yield expression's
            # result; NULL signals an exception/termination.
            self.allocate_temp_result(code)
            code.putln('%s = %s; %s' %
                       (self.result(), Naming.sent_value_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            code.put_incref(self.result(), py_object_type)
        else:
            code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
class YieldFromExprNode(YieldExprNode):
    # "yield from GEN" expression
    is_yield_from = True

    def coerce_yield_argument(self, env):
        if not self.arg.type.is_string:
            # FIXME: support C arrays and C++ iterators?
            error(self.pos, "yielding from non-Python object not supported")
        self.arg = self.arg.coerce_to_pyobject(env)

    def generate_evaluation_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("YieldFrom", "Generator.c"))

        self.arg.generate_evaluation_code(code)
        code.putln("%s = __Pyx_Generator_Yield_From(%s, %s);" % (
            Naming.retval_cname,
            Naming.generator_cname,
            self.arg.result_as(py_object_type)))
        self.arg.generate_disposal_code(code)
        self.arg.free_temps(code)
        code.put_xgotref(Naming.retval_cname)

        # Non-NULL retval => the sub-generator yielded a value: suspend.
        code.putln("if (likely(%s)) {" % Naming.retval_cname)
        self.generate_yield_code(code)
        code.putln("} else {")
        # either error or sub-generator has normally terminated: return value => node result
        if self.result_is_used:
            # YieldExprNode has allocated the result temp for us
            code.putln("if (__Pyx_PyGen_FetchStopIterationValue(&%s) < 0) %s" % (
                self.result(),
                code.error_goto(self.pos)))
        else:
            # Result unused: just swallow a StopIteration, propagate
            # anything else as an error.
            code.putln("PyObject* exc_type = PyErr_Occurred();")
            code.putln("if (exc_type) {")
            code.putln("if (!PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) %s" %
                       code.error_goto(self.pos))
            code.putln("PyErr_Clear();")
            code.putln("}")
        code.putln("}")
class GlobalsExprNode(AtomicExprNode):
    # Implements the builtin globals() call via the __Pyx_Globals()
    # utility function; the result is always a dict.

    type = dict_type
    is_temp = 1
    gil_message = "Constructing globals dict"

    def may_be_none(self):
        return False

    def analyse_types(self, env):
        env.use_utility_code(Builtin.globals_utility_code)
        return self

    def generate_result_code(self, code):
        result = self.result()
        code.putln('%s = __Pyx_Globals(); %s' % (
            result,
            code.error_goto_if_null(result, self.pos)))
        code.put_gotref(result)
class LocalsDictItemNode(DictItemNode):
    # DictItemNode for locals() dicts; drops entries whose value
    # cannot be represented as a Python object (value set to None).

    def analyse_types(self, env):
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        if not self.value.type.can_coerce_to_pyobject(env):
            self.value = None
        else:
            self.value = self.value.coerce_to_pyobject(env)
        return self
class FuncLocalsExprNode(DictNode):
    # locals() inside a function: builds a dict of the function's named
    # local variables, excluding values that have no Python representation.

    def __init__(self, pos, env):
        items = [
            LocalsDictItemNode(
                pos, key=IdentifierStringNode(pos, value=entry.name),
                value=NameNode(pos, name=entry.name, allow_null=True))
            for entry in env.entries.values() if entry.name]
        DictNode.__init__(self, pos, key_value_pairs=items,
                          exclude_null_values=True)

    def analyse_types(self, env):
        node = super(FuncLocalsExprNode, self).analyse_types(env)
        # Drop the entries that LocalsDictItemNode marked unusable.
        node.key_value_pairs = [
            item for item in node.key_value_pairs if item.value is not None]
        return node
class PyClassLocalsExprNode(AtomicExprNode):
    # locals() inside a Python class body: simply aliases the class
    # dict that is being built, so no new object is created.

    def __init__(self, pos, pyclass_dict):
        AtomicExprNode.__init__(self, pos)
        self.pyclass_dict = pyclass_dict

    def analyse_types(self, env):
        self.is_temp = False
        self.type = self.pyclass_dict.type
        return self

    def may_be_none(self):
        return False

    def result(self):
        # reuse the class dict's result directly
        return self.pyclass_dict.result()

    def generate_result_code(self, code):
        pass
def LocalsExprNode(pos, scope_node, env):
    # Factory choosing the locals() implementation for the current scope:
    # module scope -> globals dict, class body -> class dict, else a
    # freshly built function-locals dict.
    if env.is_module_scope:
        return GlobalsExprNode(pos)
    elif env.is_py_class_scope:
        return PyClassLocalsExprNode(pos, scope_node.dict)
    else:
        return FuncLocalsExprNode(pos, env)
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
# Maps unary operator source text to the Python-level function used for
# compile-time (DEF/IF) constant evaluation.
compile_time_unary_operators = {
    'not': operator.not_,
    '~': operator.inv,
    '-': operator.neg,
    '+': operator.pos,
}
class UnopNode(ExprNode):
    #  operator     string
    #  operand      ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when the operand is not a pyobject.
    #      - Check operand type and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.

    subexprs = ['operand']
    infix = True

    def calculate_constant_result(self):
        func = compile_time_unary_operators[self.operator]
        self.constant_result = func(self.operand.constant_result)

    def compile_time_value(self, denv):
        func = compile_time_unary_operators.get(self.operator)
        if not func:
            error(self.pos,
                  "Unary '%s' not supported in compile-time expression"
                  % self.operator)
        operand = self.operand.compile_time_value(denv)
        try:
            return func(operand)
        except Exception as e:
            # 'except ... as e' (Py2.6+/Py3 compatible) instead of the
            # Python-2-only 'except Exception, e' form.
            self.compile_time_value_error(e)

    def infer_type(self, env):
        operand_type = self.operand.infer_type(env)
        if operand_type.is_cpp_class or operand_type.is_ptr:
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                return cpp_type
        return self.infer_unop_type(env, operand_type)

    def infer_unop_type(self, env, operand_type):
        if operand_type.is_pyobject:
            return py_object_type
        else:
            return operand_type

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        if self.is_py_operation():
            self.coerce_operand_to_pyobject(env)
            self.type = py_object_type
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
        return self

    def check_const(self):
        return self.operand.check_const()

    def is_py_operation(self):
        return self.operand.type.is_pyobject

    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()

    def is_cpp_operation(self):
        type = self.operand.type
        return type.is_cpp_class

    def coerce_operand_to_pyobject(self, env):
        self.operand = self.operand.coerce_to_pyobject(env)

    def generate_result_code(self, code):
        if self.operand.type.is_pyobject:
            self.generate_py_operation_code(code)

    def generate_py_operation_code(self, code):
        function = self.py_operation_function()
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                function,
                self.operand.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())

    def type_error(self):
        if not self.operand.type.is_error:
            error(self.pos, "Invalid operand type for '%s' (%s)" %
                  (self.operator, self.operand.type))
        self.type = PyrexTypes.error_type

    def analyse_cpp_operation(self, env):
        cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
        if cpp_type is None:
            # BUG FIX: the message previously interpolated the *builtin*
            # 'type' instead of the operand's type, producing
            # "... not defined for <type 'type'>".
            error(self.pos, "'%s' operator not defined for %s" % (
                self.operator, self.operand.type))
            self.type_error()
            return
        self.type = cpp_type
class NotNode(UnopNode):
    #  'not' operator
    #
    #  operand   ExprNode
    operator = '!'

    type = PyrexTypes.c_bint_type

    def calculate_constant_result(self):
        self.constant_result = not self.operand.constant_result

    def compile_time_value(self, denv):
        operand = self.operand.compile_time_value(denv)
        try:
            return not operand
        except Exception as e:
            # Py2.6+/Py3 compatible form (was the Py2-only 'except X, e').
            self.compile_time_value_error(e)

    def infer_unop_type(self, env, operand_type):
        return PyrexTypes.c_bint_type

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        operand_type = self.operand.type
        if operand_type.is_cpp_class:
            # C++ classes may overload operator!
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if not cpp_type:
                error(self.pos, "'!' operator not defined for %s" % operand_type)
                self.type = PyrexTypes.error_type
                # BUG FIX: analyse_types() must return a node; previously
                # this error path returned None, breaking callers that
                # rebind the node from the return value.
                return self
            self.type = cpp_type
        else:
            self.operand = self.operand.coerce_to_boolean(env)
        return self

    def calculate_result_code(self):
        return "(!%s)" % self.operand.result()

    def generate_result_code(self, code):
        # nothing to emit: '!' is generated inline via calculate_result_code()
        pass
class UnaryPlusNode(UnopNode):
    #  unary '+' operator

    operator = '+'

    def analyse_c_operation(self, env):
        self.type = PyrexTypes.widest_numeric_type(
            self.operand.type, PyrexTypes.c_int_type)

    def py_operation_function(self):
        return "PyNumber_Positive"

    def calculate_result_code(self):
        if not self.is_cpp_operation():
            # unary '+' is a no-op for plain C numeric types
            return self.operand.result()
        return "(+%s)" % self.operand.result()
class UnaryMinusNode(UnopNode):
    #  unary '-' operator

    operator = '-'

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_numeric:
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
        if self.type.is_complex:
            # complex negation is emitted as a function call, not infix '-'
            self.infix = False

    def py_operation_function(self):
        return "PyNumber_Negative"

    def calculate_result_code(self):
        if not self.infix:
            return "%s(%s)" % (self.operand.type.unary_op('-'),
                               self.operand.result())
        return "(-%s)" % self.operand.result()

    def get_constant_c_result_code(self):
        value = self.operand.get_constant_c_result_code()
        if value:
            return "(-%s)" % value
class TildeNode(UnopNode):
    #  unary '~' operator

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_int:
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()

    def py_operation_function(self):
        return "PyNumber_Invert"

    def calculate_result_code(self):
        return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
    # Base class for unary operators that exist only at the C level
    # ('*', '&', '++', '--'); these never dispatch to a Python operation.
    def is_py_operation(self):
        return False
class DereferenceNode(CUnopNode):
    #  unary '*' operator (pointer dereference)

    operator = '*'

    def infer_unop_type(self, env, operand_type):
        if not operand_type.is_ptr:
            return PyrexTypes.error_type
        return operand_type.base_type

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_ptr:
            self.type = operand_type.base_type
        else:
            self.type_error()

    def calculate_result_code(self):
        return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
    #  unary '++'/'--' operator (prefix or postfix)

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_numeric:
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_ptr:
            self.type = operand_type
        else:
            self.type_error()

    def calculate_result_code(self):
        if self.is_prefix:
            parts = (self.operator, self.operand.result())
        else:
            parts = (self.operand.result(), self.operator)
        return "(%s%s)" % parts
def inc_dec_constructor(is_prefix, operator):
    # Return a node factory with 'is_prefix' and 'operator' pre-bound,
    # suitable for use in parser dispatch tables.
    def make_node(pos, **kwds):
        return DecrementIncrementNode(
            pos, is_prefix=is_prefix, operator=operator, **kwds)
    return make_node
class AmpersandNode(CUnopNode):
    #  The C address-of operator.
    #
    #  operand  ExprNode
    operator = '&'

    def infer_unop_type(self, env, operand_type):
        return PyrexTypes.c_ptr_type(operand_type)

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        argtype = self.operand.type
        # C++ classes may overload operator&
        if argtype.is_cpp_class:
            cpp_type = argtype.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                self.type = cpp_type
                return self
        # '&' needs an addressable lvalue (functions and references are
        # implicitly addressable).
        if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
            if argtype.is_memoryviewslice:
                self.error("Cannot take address of memoryview slice")
            else:
                self.error("Taking address of non-lvalue")
            return self
        if argtype.is_pyobject:
            self.error("Cannot take address of Python variable")
            return self
        self.type = PyrexTypes.c_ptr_type(argtype)
        return self

    def check_const(self):
        return self.operand.check_const_addr()

    def error(self, mess):
        # Report and poison this node so later phases don't cascade.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result_code = "<error>"

    def calculate_result_code(self):
        return "(&%s)" % self.operand.result()

    def generate_result_code(self, code):
        pass
# Dispatch table used by unop_node() below: operator text -> node class.
unop_node_classes = {
    "+":  UnaryPlusNode,
    "-":  UnaryMinusNode,
    "~":  TildeNode,
}
def unop_node(pos, operator, operand):
    # Construct unop node of appropriate class for the given operator.
    if isinstance(operand, IntNode) and operator == '-':
        # Constant-fold negation of integer literals directly.
        return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
                       longness=operand.longness, unsigned=operand.unsigned)
    elif isinstance(operand, UnopNode) and operand.operator == operator and operator in '+-':
        # NOTE: the condition above was previously written as the obscure
        # chained comparison "operand.operator == operator in '+-'", which
        # evaluates to the same thing but reads misleadingly.
        # '++x'/'--x' is '+(+x)'/'-(-x)' in Python, i.e. a no-op.
        warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
    return unop_node_classes[operator](pos,
                                       operator = operator,
                                       operand = operand)
class TypecastNode(ExprNode):
    #  C type cast
    #
    #  operand      ExprNode
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #  typecheck    boolean
    #
    #  If used from a transform, one can if wanted specify the attribute
    #  "type" directly and leave base_type and declarator to None

    subexprs = ['operand']
    base_type = declarator = type = None

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        return self.type

    def analyse_types(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        if self.operand.has_constant_result():
            # Must be done after self.type is resolved.
            self.calculate_constant_result()
        if self.type.is_cfunction:
            error(self.pos,
                  "Cannot cast to a function type")
            self.type = PyrexTypes.error_type
        self.operand = self.operand.analyse_types(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        # Casting a temporary Python object to a non-Python type would
        # leave a dangling pointer once the temp is decref'ed.
        if from_py and not to_py and self.operand.is_ephemeral():
            if not self.type.is_numeric and not self.type.is_cpp_class:
                error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C value -> Python object
            if self.type is bytes_type and self.operand.type.is_int:
                # FIXME: the type cast node isn't needed in this case
                # and can be dropped once analyse_types() can return a
                # different node
                self.operand = CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                base_type = self.base_type.analyse(env)
                self.operand = self.operand.coerce_to(base_type, env)
            else:
                if self.operand.type.is_ptr:
                    if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
                        error(self.pos, "Python objects cannot be cast from pointers of primitive types")
                else:
                    # Should this be an error?
                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
                self.operand = self.operand.coerce_to_simple(env)
        elif from_py and not to_py:
            # Python object -> C value
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if not (self.type.base_type.is_void or self.type.base_type.is_struct):
                    error(self.pos, "Python objects cannot be cast to pointers of primitive types")
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
        elif from_py and to_py:
            # Python object -> Python object: only a type check / retyping
            if self.typecheck and self.type.is_pyobject:
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
            elif isinstance(self.operand, SliceIndexNode):
                # This cast can influence the created type of string slices.
                self.operand = self.operand.coerce_to(self.type, env)
        elif self.type.is_complex and self.operand.type.is_complex:
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
            #self.type = self.operand.type
        return self

    def is_simple(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_simple()

    def nonlocally_immutable(self):
        return self.is_temp or self.operand.nonlocally_immutable()

    def nogil_check(self, env):
        if self.type and self.type.is_pyobject and self.is_temp:
            self.gil_error()

    def check_const(self):
        return self.operand.check_const()

    def calculate_constant_result(self):
        self.constant_result = self.calculate_result_code(self.operand.constant_result)

    def calculate_result_code(self, operand_result = None):
        if operand_result is None:
            operand_result = self.operand.result()
        if self.type.is_complex:
            # Build the complex value from real/imaginary parts.
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
                imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
            else:
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = "0"
            return "%s(%s, %s)" % (
                self.type.from_parts,
                real_part,
                imag_part)
        else:
            return self.type.cast_code(operand_result)

    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)

    def result_as(self, type):
        if self.type.is_pyobject and not self.is_temp:
            #  Optimise away some unnecessary casting
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)

    def generate_result_code(self, code):
        if self.is_temp:
            code.putln(
                "%s = (PyObject *)%s;" % (
                    self.result(),
                    self.operand.result()))
            code.put_incref(self.result(), self.ctype())
# Error messages used by CythonArrayNode.analyse_types() below.
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
             "Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
    """
    Used when a pointer of base_type is cast to a memoryviewslice with that
    base type, i.e.

        <int[:M:1, :N]> p

    creates a fortran-contiguous cython.array.

    We leave the type set to object so coercions to object are more efficient
    and less work. Acquiring a memoryviewslice from this will be just as
    efficient. ExprNode.coerce_to() will do the additional typecheck on
    self.compile_time_type.

    This also handles <int[:, :]> my_c_array

    operand          ExprNode                 the thing we're casting
    base_type_node   MemoryViewSliceTypeNode  the cast expression node
    """

    subexprs = ['operand', 'shapes']

    shapes = None       # [ExprNode] per-dimension sizes, filled in analyse_types()
    is_temp = True
    mode = "c"          # contiguity of the resulting array: "c" or "fortran"
    array_dtype = None  # explicit element-type override, if any

    shape_type = PyrexTypes.c_py_ssize_t_type  # C type used for shape values
    def analyse_types(self, env):
        # Validate the cast target (dtype, axes) against the operand
        # (a pointer or C array) and collect the shape expressions.
        import MemoryView

        self.operand = self.operand.analyse_types(env)
        if self.array_dtype:
            array_dtype = self.array_dtype
        else:
            array_dtype = self.base_type_node.base_type_node.analyse(env)
        axes = self.base_type_node.axes

        MemoryView.validate_memslice_dtype(self.pos, array_dtype)

        # 'type' stays error_type until all checks below have passed.
        self.type = error_type
        self.shapes = []
        ndim = len(axes)

        # Base type of the pointer or C array we are converting
        base_type = self.operand.type

        if not self.operand.type.is_ptr and not self.operand.type.is_array:
            error(self.operand.pos, ERR_NOT_POINTER)
            return self

        # Dimension sizes of C array
        array_dimension_sizes = []
        if base_type.is_array:
            while base_type.is_array:
                array_dimension_sizes.append(base_type.size)
                base_type = base_type.base_type
        elif base_type.is_ptr:
            base_type = base_type.base_type
        else:
            error(self.pos, "unexpected base type %s found" % base_type)
            return self

        if not (base_type.same_as(array_dtype) or base_type.is_void):
            error(self.operand.pos, ERR_BASE_TYPE)
            return self
        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
            error(self.operand.pos,
                  "Expected %d dimensions, array has %d dimensions" %
                  (ndim, len(array_dimension_sizes)))
            return self

        # Verify the start, stop and step values
        # In case of a C array, use the size of C array in each dimension to
        # get an automatic cast
        for axis_no, axis in enumerate(axes):
            if not axis.start.is_none:
                error(axis.start.pos, ERR_START)
                return self

            if axis.stop.is_none:
                if array_dimension_sizes:
                    dimsize = array_dimension_sizes[axis_no]
                    axis.stop = IntNode(self.pos, value=dimsize,
                                        constant_result=dimsize,
                                        type=PyrexTypes.c_int_type)
                else:
                    error(axis.pos, ERR_NOT_STOP)
                    return self

            axis.stop = axis.stop.analyse_types(env)
            shape = axis.stop.coerce_to(self.shape_type, env)
            if not shape.is_literal:
                shape.coerce_to_temp(env)

            self.shapes.append(shape)

            first_or_last = axis_no in (0, ndim - 1)
            if not axis.step.is_none and first_or_last:
                # '1' in the first or last dimension denotes F or C contiguity
                axis.step = axis.step.analyse_types(env)
                if (not axis.step.type.is_int and axis.step.is_literal and not
                        axis.step.type.is_error):
                    error(axis.step.pos, "Expected an integer literal")
                    return self

                if axis.step.compile_time_value(env) != 1:
                    error(axis.step.pos, ERR_STEPS)
                    return self

                if axis_no == 0:
                    # step on the first axis => Fortran (column-major) order
                    self.mode = "fortran"

            elif not axis.step.is_none and not first_or_last:
                # step provided in some other dimension
                error(axis.step.pos, ERR_STEPS)
                return self

        if not self.operand.is_name:
            self.operand = self.operand.coerce_to_temp(env)

        axes = [('direct', 'follow')] * len(axes)
        if self.mode == "fortran":
            axes[0] = ('direct', 'contig')
        else:
            axes[-1] = ('direct', 'contig')

        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
        self.type = self.get_cython_array_type(env)
        MemoryView.use_cython_array_utility_code(env)
        env.use_utility_code(MemoryView.typeinfo_to_format_code)
        return self
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("temp allocated mulitple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
    def infer_type(self, env):
        # The node's type is always the cython.view.array extension type.
        return self.get_cython_array_type(env)

    def get_cython_array_type(self, env):
        # Look up the 'array' type from Cython's internal 'view' scope.
        return env.global_scope().context.cython_scope.viewscope.lookup("array").type
def generate_result_code(self, code):
    # Emit C code that builds a cython.view.array object wrapping the
    # operand pointer: compute the buffer format string and the shape
    # tuple, then call __pyx_array_new().
    import Buffer
    shapes = [self.shape_type.cast_code(shape.result())
              for shape in self.shapes]
    dtype = self.coercion_type.dtype
    # Two temporary Python objects: the shape tuple and the format bytes.
    shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
    format_temp = code.funcstate.allocate_temp(py_object_type, True)
    itemsize = "sizeof(%s)" % dtype.declaration_code("")
    type_info = Buffer.get_type_information_cname(code, dtype)
    if self.operand.type.is_ptr:
        # A NULL pointer cannot back a cython.array; fail early at runtime.
        code.putln("if (!%s) {" % self.operand.result())
        code.putln(    'PyErr_SetString(PyExc_ValueError,'
                        '"Cannot create cython.array from NULL pointer");')
        code.putln(code.error_goto(self.operand.pos))
        code.putln("}")
    code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
                                            (format_temp, type_info))
    # Build the shape tuple with one Py_ssize_t slot per dimension.
    buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
    code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
        shapes_temp, buildvalue_fmt, ", ".join(shapes)))
    err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
                                                   shapes_temp,
                                                   format_temp)
    code.putln(code.error_goto_if(err, self.pos))
    code.put_gotref(format_temp)
    code.put_gotref(shapes_temp)
    tup = (self.result(), shapes_temp, itemsize, format_temp,
           self.mode, self.operand.result())
    code.putln('%s = __pyx_array_new('
               '%s, %s, PyBytes_AS_STRING(%s), '
               '(char *) "%s", (char *) %s);' % tup)
    code.putln(code.error_goto_if_null(self.result(), self.pos))
    code.put_gotref(self.result())
    # Release the intermediate Python objects now that the array owns
    # (copies of) the information they carried.
    def dispose(temp):
        code.put_decref_clear(temp, py_object_type)
        code.funcstate.release_temp(temp)
    dispose(shapes_temp)
    dispose(format_temp)
@classmethod
def from_carray(cls, src_node, env):
    """
    Given a C array type, return a CythonArrayNode
    """
    pos = src_node.pos
    base_type = src_node.type
    none_node = NoneNode(pos)
    axes = []
    # One full slice (::) per C array dimension; peel off the array
    # levels until the element type remains.
    while base_type.is_array:
        axes.append(SliceNode(pos, start=none_node, stop=none_node,
                              step=none_node))
        base_type = base_type.base_type
    # Step '1' in the last dimension marks the memoryview as C-contiguous.
    axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
    memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
                                                 base_type_node=base_type)
    result = CythonArrayNode(pos, base_type_node=memslicenode,
                             operand=src_node, array_dtype=base_type)
    result = result.analyse_types(env)
    return result
class SizeofNode(ExprNode):
    #  Abstract base class for sizeof(x) expression nodes.
    #  The C-level result type of sizeof is always size_t.
    type = PyrexTypes.c_size_t_type
    def check_const(self):
        # sizeof() is always a compile-time constant expression.
        return True
    def generate_result_code(self, code):
        # Nothing to emit: the value is computed by the C compiler.
        pass
class SizeofTypeNode(SizeofNode):
    #  C sizeof function applied to a type
    #
    #  base_type   CBaseTypeNode
    #  declarator  CDeclaratorNode
    subexprs = []
    arg_type = None
    def analyse_types(self, env):
        # we may have incorrectly interpreted a dotted name as a type rather than an attribute
        # this could be better handled by more uniformly treating types as runtime-available objects
        # NOTE(review): the branch below is deliberately disabled ('if 0')
        # and kept for reference only.
        if 0 and self.base_type.module_path:
            path = self.base_type.module_path
            obj = env.lookup(path[0])
            if obj.as_module is None:
                operand = NameNode(pos=self.pos, name=path[0])
                for attr in path[1:]:
                    operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
                operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
                self.operand = operand
                # Morph into a sizeof(variable) node and re-analyse.
                self.__class__ = SizeofVarNode
                node = self.analyse_types(env)
                return node
        if self.arg_type is None:
            base_type = self.base_type.analyse(env)
            _, arg_type = self.declarator.analyse(base_type, env)
            self.arg_type = arg_type
        self.check_type()
        return self
    def check_type(self):
        # Reject argument types whose size the C compiler cannot know.
        arg_type = self.arg_type
        if arg_type.is_pyobject and not arg_type.is_extension_type:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
    def calculate_result_code(self):
        if self.arg_type.is_extension_type:
            # the size of the pointer is boring
            # we want the size of the actual struct
            arg_code = self.arg_type.declaration_code("", deref=1)
        else:
            arg_code = self.arg_type.declaration_code("")
        return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
    #  C sizeof function applied to a variable
    #
    #  operand   ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # We may actually be looking at a type rather than a variable...
        # If we are, traditional analysis would fail...
        operand_as_type = self.operand.analyse_as_type(env)
        if operand_as_type:
            self.arg_type = operand_as_type
            if self.arg_type.is_fused:
                self.arg_type = self.arg_type.specialize(env.fused_to_specific)
            # Morph into a sizeof(type) node now that we know it's a type.
            self.__class__ = SizeofTypeNode
            self.check_type()
        else:
            self.operand = self.operand.analyse_types(env)
        return self
    def calculate_result_code(self):
        return "(sizeof(%s))" % self.operand.result()
    def generate_result_code(self, code):
        # Nothing to emit: sizeof is evaluated by the C compiler.
        pass
class TypeofNode(ExprNode):
    #  Compile-time type of an expression, exposed as a Python string.
    #
    #  operand   ExprNode
    #  literal   StringNode   # internal; replaces 'operand' after analysis
    literal = None
    type = py_object_type
    # Only 'literal' survives type analysis; 'operand' is deliberately
    # dropped from the subexpression list afterwards.
    subexprs = ['literal']
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        type_name = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
        literal_node = StringNode(self.pos, value=type_name).analyse_types(env)
        self.literal = literal_node.coerce_to_pyobject(env)
        return self
    def may_be_none(self):
        # The result is always a string constant, never None.
        return False
    def generate_evaluation_code(self, code):
        self.literal.generate_evaluation_code(code)
    def calculate_result_code(self):
        return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
# Implementations of the binary operators usable in compile-time (DEF/IF)
# expressions, keyed by the operator's source spelling.
compile_time_binary_operators = {
    # comparisons / identity
    '<': operator.lt, '<=': operator.le,
    '==': operator.eq, '!=': operator.ne,
    '>=': operator.ge, '>': operator.gt,
    'is': operator.is_, 'is_not': operator.is_not,
    # arithmetic
    '+': operator.add, '-': operator.sub,
    '*': operator.mul, '**': operator.pow,
    '/': operator.truediv, '//': operator.floordiv,
    '%': operator.mod,
    # bitwise / shifts
    '&': operator.and_, '|': operator.or_, '^': operator.xor,
    '<<': operator.lshift, '>>': operator.rshift,
    # membership (no operator-module equivalent with this argument order)
    'in': lambda x, seq: x in seq,
    'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
    # Resolve the compile-time implementation of node's binary operator,
    # reporting an error for operators that DEF/IF expressions cannot use.
    func = compile_time_binary_operators.get(node.operator)
    if func is None:
        error(node.pos,
              "Binary '%s' not supported in compile-time expression"
              % node.operator)
    return func
class BinopNode(ExprNode):
    #  Abstract base class for binary operator nodes.
    #
    #  operator     string
    #  operand1     ExprNode
    #  operand2     ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when neither operand is a pyobject.
    #      - Check operand types and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.
    subexprs = ['operand1', 'operand2']
    inplace = False  # True for the augmented-assignment form (e.g. '+=')
    def calculate_constant_result(self):
        # Fold the operator over both operands' constant results.
        func = compile_time_binary_operators[self.operator]
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        # Evaluate the operation at compile time (DEF / IF constructs).
        func = get_compile_time_binop(self)
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
    def infer_type(self, env):
        return self.result_type(self.operand1.infer_type(env),
                                self.operand2.infer_type(env))
    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.analyse_operation(env)
        return self
    def analyse_operation(self, env):
        # Dispatch on the operand kinds: Python objects, C++ classes,
        # or plain C values.
        if self.is_py_operation():
            self.coerce_operands_to_pyobjects(env)
            self.type = self.result_type(self.operand1.type,
                                         self.operand2.type)
            assert self.type.is_pyobject
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
    def is_py_operation(self):
        return self.is_py_operation_types(self.operand1.type, self.operand2.type)
    def is_py_operation_types(self, type1, type2):
        return type1.is_pyobject or type2.is_pyobject
    def is_cpp_operation(self):
        return (self.operand1.type.is_cpp_class
                or self.operand2.type.is_cpp_class)
    def analyse_cpp_operation(self, env):
        # Resolve an overloaded C++ operator and coerce the operands to
        # the parameter types of the chosen overload.
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if not entry:
            self.type_error()
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # Member operator: operand1 acts as 'this', only operand2 is
            # an explicit argument.
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type
    def result_type(self, type1, type2):
        # Determine the Python-level result type of the operation, or
        # delegate to compute_c_result_type() for pure C operations.
        if self.is_py_operation_types(type1, type2):
            if type2.is_string:
                type2 = Builtin.bytes_type
            elif type2.is_pyunicode_ptr:
                type2 = Builtin.unicode_type
            if type1.is_string:
                type1 = Builtin.bytes_type
            elif type1.is_pyunicode_ptr:
                type1 = Builtin.unicode_type
            elif self.operator == '%' \
                     and type1 in (Builtin.str_type, Builtin.unicode_type):
                # note that b'%s' % b'abc' doesn't work in Py3
                return type1
            if type1.is_builtin_type:
                if type1 is type2:
                    if self.operator in '**%+|&^':
                        # FIXME: at least these operators should be safe - others?
                        return type1
                elif self.operator == '*':
                    if type1 in (Builtin.bytes_type, Builtin.str_type, Builtin.unicode_type):
                        return type1
                    # multiplication of containers/numbers with an
                    # integer value always (?) returns the same type
                    if type2.is_int:
                        return type1
            elif type2.is_builtin_type and type1.is_int and self.operator == '*':
                # multiplication of containers/numbers with an
                # integer value always (?) returns the same type
                return type2
            return py_object_type
        else:
            return self.compute_c_result_type(type1, type2)
    def nogil_check(self, env):
        # Python-object operations require the GIL.
        if self.is_py_operation():
            self.gil_error()
    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def generate_result_code(self, code):
        #print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
        if self.operand1.type.is_pyobject:
            function = self.py_operation_function()
            if self.operator == '**':
                # PyNumber_Power takes a third (modulo) argument.
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.operand1.py_result(),
                    self.operand2.py_result(),
                    extra_args,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.is_temp:
            code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
    def type_error(self):
        # Report an operand-type error unless one was already reported
        # for an operand (avoid cascading diagnostics).
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                (self.operator, self.operand1.type,
                 self.operand2.type))
        self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
    # Binary operator with strictly C semantics: a Python-object
    # operation is not permitted and degrades to an error type.
    def analyse_types(self, env):
        analysed = BinopNode.analyse_types(self, env)
        if analysed.is_py_operation():
            analysed.type = PyrexTypes.error_type
        return analysed
    def py_operation_function(self):
        # There is no Python fallback for pure C operators.
        return ""
    def calculate_result_code(self):
        left = self.operand1.result()
        right = self.operand2.result()
        return "(%s %s %s)" % (left, self.operator, right)
    def compute_c_result_type(self, type1, type2):
        # Only C++ classes (or pointers) can overload these operators.
        if type1.is_cpp_class or type1.is_ptr:
            return type1.find_cpp_operation_type(self.operator, type2)
        # FIXME: handle the reversed case?
        #if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
        #    cpp_type = type2.find_cpp_operation_type(self.operator, type1)
        # FIXME: do we need to handle other cases here?
        return None
def c_binop_constructor(operator):
    # Return a factory that builds CBinopNode instances bound to the
    # given operator string.
    def make_binop_node(pos, **operands):
        return CBinopNode(pos, operator=operator, **operands)
    return make_binop_node
class NumBinopNode(BinopNode):
    #  Binary operation taking numeric arguments.
    infix = True             # emit 'a OP b' rather than 'func(a, b)'
    overflow_check = False   # set when overflowcheck directive applies
    overflow_bit_node = None # node owning the shared overflow flag temp
    def analyse_c_operation(self, env):
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
            return
        if self.type.is_complex:
            # Complex arithmetic goes through helper functions.
            self.infix = False
        if (self.type.is_int
                and env.directives['overflowcheck']
                and self.operator in self.overflow_op_names):
            if (self.operator in ('+', '*')
                    and self.operand1.has_constant_result()
                    and not self.operand2.has_constant_result()):
                # Commutative op: put the constant on the right so the
                # const_rhs specialisation below can kick in.
                self.operand1, self.operand2 = self.operand2, self.operand1
            self.overflow_check = True
            self.overflow_fold = env.directives['overflowcheck.fold']
            self.func = self.type.overflow_check_binop(
                self.overflow_op_names[self.operator],
                env,
                const_rhs = self.operand2.has_constant_result())
            self.is_temp = True
        if not self.infix or (type1.is_numeric and type2.is_numeric):
            self.operand1 = self.operand1.coerce_to(self.type, env)
            self.operand2 = self.operand2.coerce_to(self.type, env)
    def compute_c_result_type(self, type1, type2):
        if self.c_types_okay(type1, type2):
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
            if widest_type is PyrexTypes.c_bint_type:
                if self.operator not in '|^&':
                    # False + False == 0 # not False!
                    widest_type = PyrexTypes.c_int_type
            else:
                widest_type = PyrexTypes.widest_numeric_type(
                    widest_type, PyrexTypes.c_int_type)
            return widest_type
        else:
            return None
    def may_be_none(self):
        type1 = self.operand1.type
        type2 = self.operand2.type
        if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
            # XXX: I can't think of any case where a binary operation
            # on builtin types evaluates to None - add a special case
            # here if there is one.
            return False
        return super(NumBinopNode, self).may_be_none()
    def get_constant_c_result_code(self):
        value1 = self.operand1.get_constant_c_result_code()
        value2 = self.operand2.get_constant_c_result_code()
        if value1 and value2:
            return "(%s %s %s)" % (value1, self.operator, value2)
        else:
            return None
    def c_types_okay(self, type1, type2):
        #print "NumBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_numeric  or type1.is_enum) \
            and (type2.is_numeric  or type2.is_enum)
    def generate_evaluation_code(self, code):
        if self.overflow_check:
            # Allocate and clear the C flag that the overflow-checking
            # helper sets on overflow.
            self.overflow_bit_node = self
            self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
            code.putln("%s = 0;" % self.overflow_bit)
        super(NumBinopNode, self).generate_evaluation_code(code)
        if self.overflow_check:
            code.putln("if (unlikely(%s)) {" % self.overflow_bit)
            code.putln('PyErr_Format(PyExc_OverflowError, "value too large");')
            code.putln(code.error_goto(self.pos))
            code.putln("}")
            code.funcstate.release_temp(self.overflow_bit)
    def calculate_result_code(self):
        if self.overflow_bit_node is not None:
            # Call the overflow-checking helper, passing the flag by
            # address so it can be set on overflow.
            return "%s(%s, %s, &%s)" % (
                self.func,
                self.operand1.result(),
                self.operand2.result(),
                self.overflow_bit_node.overflow_bit)
        elif self.infix:
            return "(%s %s %s)" % (
                self.operand1.result(),
                self.operator,
                self.operand2.result())
        else:
            func = self.type.binary_op(self.operator)
            if func is None:
                error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
            return "%s(%s, %s)" % (
                func,
                self.operand1.result(),
                self.operand2.result())
    def is_py_operation_types(self, type1, type2):
        return (type1.is_unicode_char or
                type2.is_unicode_char or
                BinopNode.is_py_operation_types(self, type1, type2))
    def py_operation_function(self):
        function_name = self.py_functions[self.operator]
        if self.inplace:
            # Use the in-place C-API variant for augmented assignments.
            function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
        return function_name
    # CPython C-API function implementing each operator on objects.
    py_functions = {
        "|":        "PyNumber_Or",
        "^":        "PyNumber_Xor",
        "&":        "PyNumber_And",
        "<<":       "PyNumber_Lshift",
        ">>":       "PyNumber_Rshift",
        "+":        "PyNumber_Add",
        "-":        "PyNumber_Subtract",
        "*":        "PyNumber_Multiply",
        "/":        "__Pyx_PyNumber_Divide",
        "//":       "PyNumber_FloorDivide",
        "%":        "PyNumber_Remainder",
        "**":       "PyNumber_Power"
    }
    # Operators for which integer overflow checking helpers exist.
    overflow_op_names = {
        "+": "add",
        "-": "sub",
        "*": "mul",
        "<<": "lshift",
    }
class IntBinopNode(NumBinopNode):
    # Binary operation restricted to integer (or enum) C operands.
    def c_types_okay(self, type1, type2):
        # Both operands must be C integers or enums.
        return all(t.is_int or t.is_enum for t in (type1, type2))
class AddNode(NumBinopNode):
    #  The '+' operator.
    def is_py_operation_types(self, type1, type2):
        # Concatenating two C strings (or two Py_UNICODE pointers) is
        # carried out as a Python-level operation.
        both_strings = type1.is_string and type2.is_string
        both_upointers = type1.is_pyunicode_ptr and type2.is_pyunicode_ptr
        if both_strings or both_upointers:
            return 1
        return NumBinopNode.is_py_operation_types(self, type1, type2)
    def compute_c_result_type(self, type1, type2):
        #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
        # Pointer/array plus integer is pointer arithmetic: the result
        # keeps the pointer type, whichever side it appears on.
        def ptr_like(t):
            return t.is_ptr or t.is_array
        def int_like(t):
            return t.is_int or t.is_enum
        if ptr_like(type1) and int_like(type2):
            return type1
        if ptr_like(type2) and int_like(type1):
            return type2
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class SubNode(NumBinopNode):
    #  The '-' operator.
    def compute_c_result_type(self, type1, type2):
        if type1.is_ptr or type1.is_array:
            # pointer - integer keeps the pointer type;
            # pointer - pointer yields a C int (pointer difference).
            if type2.is_int or type2.is_enum:
                return type1
            if type2.is_ptr or type2.is_array:
                return PyrexTypes.c_int_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class MulNode(NumBinopNode):
    #  The '*' operator.
    def is_py_operation_types(self, type1, type2):
        # Repetition of a C string by an integer (in either order) must
        # be done as a Python operation.
        string_by_int = type1.is_string and type2.is_int
        int_by_string = type2.is_string and type1.is_int
        if string_by_int or int_by_string:
            return 1
        return NumBinopNode.is_py_operation_types(self, type1, type2)
class DivNode(NumBinopNode):
    #  '/' or '//' operator.
    cdivision = None           # use raw C division semantics?
    truedivision = None        # == "unknown" if operator == '/'
    ctruedivision = False      # C-level true division (float result)
    cdivision_warnings = False
    zerodivision_check = None  # emit a runtime zero-divisor check?
    def find_compile_time_binary_operator(self, op1, op2):
        func = compile_time_binary_operators[self.operator]
        if self.operator == '/' and self.truedivision is None:
            # => true div for floats, floor div for integers
            if isinstance(op1, (int,long)) and isinstance(op2, (int,long)):
                func = compile_time_binary_operators['//']
        return func
    def calculate_constant_result(self):
        op1 = self.operand1.constant_result
        op2 = self.operand2.constant_result
        func = self.find_compile_time_binary_operator(op1, op2)
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            func = self.find_compile_time_binary_operator(
                operand1, operand2)
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_operation(self, env):
        if self.cdivision or env.directives['cdivision']:
            self.ctruedivision = False
        else:
            self.ctruedivision = self.truedivision
        NumBinopNode.analyse_operation(self, env)
        if self.is_cpp_operation():
            # C++ overloaded division has its own semantics.
            self.cdivision = True
        if not self.type.is_pyobject:
            # Only needed when the divisor is not a known non-zero constant.
            self.zerodivision_check = (
                self.cdivision is None and not env.directives['cdivision']
                and (not self.operand2.has_constant_result() or
                     self.operand2.constant_result == 0))
            if self.zerodivision_check or env.directives['cdivision_warnings']:
                # Need to check ahead of time to warn or raise zero division error
                self.operand1 = self.operand1.coerce_to_simple(env)
                self.operand2 = self.operand2.coerce_to_simple(env)
    def compute_c_result_type(self, type1, type2):
        if self.operator == '/' and self.ctruedivision:
            if not type1.is_float and not type2.is_float:
                # int / int under true division widens to (at least) double.
                widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
                widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
                return widest_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float division"
    def generate_evaluation_code(self, code):
        if not self.type.is_pyobject and not self.type.is_complex:
            if self.cdivision is None:
                # Decide late, based on the effective directives.
                self.cdivision = (code.globalstate.directives['cdivision']
                                    or not self.type.signed
                                    or self.type.is_float)
            if not self.cdivision:
                # Python-style division helper for signed integers.
                code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def generate_div_warning_code(self, code):
        # Emit runtime zero-division / overflow checks and the optional
        # cdivision warning, as configured in analyse_operation().
        if not self.type.is_pyobject:
            if self.zerodivision_check:
                if not self.infix:
                    zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
                else:
                    zero_test = "%s == 0" % self.operand2.result()
                code.putln("if (unlikely(%s)) {" % zero_test)
                code.put_ensure_gil()
                code.putln('PyErr_Format(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
                code.put_release_ensured_gil()
                code.putln(code.error_goto(self.pos))
                code.putln("}")
                if self.type.is_int and self.type.signed and self.operator != '%':
                    # INT_MIN / -1 overflows; check for it explicitly.
                    code.globalstate.use_utility_code(division_overflow_test_code)
                    code.putln("else if (sizeof(%s) == sizeof(long) && unlikely(%s == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
                                    self.type.declaration_code(''),
                                    self.operand2.result(),
                                    self.operand1.result()))
                    code.put_ensure_gil()
                    code.putln('PyErr_Format(PyExc_OverflowError, "value too large to perform division");')
                    code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
            if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
                # Warn when C and Python semantics would differ
                # (operands of opposite sign).
                code.globalstate.use_utility_code(cdivision_warning_utility_code)
                code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
                                self.operand1.result(),
                                self.operand2.result()))
                code.put_ensure_gil()
                code.putln(code.set_error_info(self.pos, used=True))
                code.putln("if (__Pyx_cdivision_warning(%(FILENAME)s, "
                                                       "%(LINENO)s)) {" % {
                    'FILENAME': Naming.filename_cname,
                    'LINENO':  Naming.lineno_cname,
                    })
                code.put_release_ensured_gil()
                code.put_goto(code.error_label)
                code.putln("}")
                code.put_release_ensured_gil()
                code.putln("}")
    def calculate_result_code(self):
        if self.type.is_complex:
            return NumBinopNode.calculate_result_code(self)
        elif self.type.is_float and self.operator == '//':
            return "floor(%s / %s)" % (
                self.operand1.result(),
                self.operand2.result())
        elif self.truedivision or self.cdivision:
            op1 = self.operand1.result()
            op2 = self.operand2.result()
            if self.truedivision:
                # Cast operands so the C compiler performs float division.
                if self.type != self.operand1.type:
                    op1 = self.type.cast_code(op1)
                if self.type != self.operand2.type:
                    op2 = self.type.cast_code(op2)
            return "(%s / %s)" % (op1, op2)
        else:
            return "__Pyx_div_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())
class ModNode(DivNode):
    #  '%' operator.
    def is_py_operation_types(self, type1, type2):
        # '%' on C strings is string formatting and runs as a Python
        # operation.
        return (type1.is_string
                or type2.is_string
                or NumBinopNode.is_py_operation_types(self, type1, type2))
    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float divmod()"
    def analyse_operation(self, env):
        DivNode.analyse_operation(self, env)
        if not self.type.is_pyobject:
            if self.cdivision is None:
                self.cdivision = env.directives['cdivision'] or not self.type.signed
            if not self.cdivision and not self.type.is_int and not self.type.is_float:
                error(self.pos, "mod operator not supported for type '%s'" % self.type)
    def generate_evaluation_code(self, code):
        if not self.type.is_pyobject and not self.cdivision:
            # Python-style modulo needs a helper function.
            if self.type.is_int:
                code.globalstate.use_utility_code(
                    mod_int_utility_code.specialize(self.type))
            else:  # float
                code.globalstate.use_utility_code(
                    mod_float_utility_code.specialize(
                        self.type, math_h_modifier=self.type.math_h_modifier))
        # note: skipping over DivNode here
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def calculate_result_code(self):
        if self.cdivision:
            if self.type.is_float:
                # C '%' is undefined for floats; use fmod().
                return "fmod%s(%s, %s)" % (
                    self.type.math_h_modifier,
                    self.operand1.result(),
                    self.operand2.result())
            else:
                return "(%s %% %s)" % (
                    self.operand1.result(),
                    self.operand2.result())
        else:
            return "__Pyx_mod_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())
class PowNode(NumBinopNode):
    #  '**' operator.
    def analyse_c_operation(self, env):
        # Select the C helper/function that implements power for the
        # operand type (complex, float or integer).
        NumBinopNode.analyse_c_operation(self, env)
        if self.type.is_complex:
            if self.type.real_type.is_float:
                self.operand1 = self.operand1.coerce_to(self.type, env)
                self.operand2 = self.operand2.coerce_to(self.type, env)
                self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
            else:
                error(self.pos, "complex int powers not supported")
                self.pow_func = "<error>"
        elif self.type.is_float:
            self.pow_func = "pow" + self.type.math_h_modifier
        elif self.type.is_int:
            self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_')
            env.use_utility_code(
                int_pow_utility_code.specialize(
                    func_name=self.pow_func,
                    type=self.type.declaration_code(''),
                    signed=self.type.signed and 1 or 0))
        elif not self.type.is_error:
            error(self.pos, "got unexpected types for C power operator: %s, %s" %
                            (self.operand1.type, self.operand2.type))
    def calculate_result_code(self):
        # Work around MSVC overloading ambiguity.
        def typecast(operand):
            if self.type == operand.type:
                return operand.result()
            else:
                return self.type.cast_code(operand.result())
        return "%s(%s, %s)" % (
            self.pow_func,
            typecast(self.operand1),
            typecast(self.operand2))
# Note: This class is temporarily "shut down" into an ineffective temp
# allocation mode.
#
# More sophisticated temp reuse was going on before, one could have a
# look at adding this again after /all/ classes are converted to the
# new temp scheme. (The temp juggling cannot work otherwise).
class BoolBinopNode(ExprNode):
    #  Short-circuiting boolean operation ('and' / 'or').
    #
    #  operator     string
    #  operand1     ExprNode
    #  operand2     ExprNode
    subexprs = ['operand1', 'operand2']
    def infer_type(self, env):
        type1 = self.operand1.infer_type(env)
        type2 = self.operand2.infer_type(env)
        return PyrexTypes.independent_spanning_type(type1, type2)
    def may_be_none(self):
        if self.operator == 'or':
            # 'a or b' can only be None if b can be None.
            return self.operand2.may_be_none()
        else:
            return self.operand1.may_be_none() or self.operand2.may_be_none()
    def calculate_constant_result(self):
        if self.operator == 'and':
            self.constant_result = \
                self.operand1.constant_result and \
                self.operand2.constant_result
        else:
            self.constant_result = \
                self.operand1.constant_result or \
                self.operand2.constant_result
    def compile_time_value(self, denv):
        if self.operator == 'and':
            return self.operand1.compile_time_value(denv) \
                and self.operand2.compile_time_value(denv)
        else:
            return self.operand1.compile_time_value(denv) \
                or self.operand2.compile_time_value(denv)
    def coerce_to_boolean(self, env):
        # Coerce both operands instead of the result to preserve
        # short-circuit evaluation.
        return BoolBinopNode(
            self.pos,
            operator = self.operator,
            operand1 = self.operand1.coerce_to_boolean(env),
            operand2 = self.operand2.coerce_to_boolean(env),
            type = PyrexTypes.c_bint_type,
            is_temp = self.is_temp)
    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
        self.operand1 = self.operand1.coerce_to(self.type, env)
        self.operand2 = self.operand2.coerce_to(self.type, env)
        # For what we're about to do, it's vital that
        # both operands be temp nodes.
        self.operand1 = self.operand1.coerce_to_simple(env)
        self.operand2 = self.operand2.coerce_to_simple(env)
        self.is_temp = 1
        return self
    gil_message = "Truth-testing Python object"
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def generate_evaluation_code(self, code):
        # Emit: evaluate operand1; if it decides the result, use it,
        # otherwise evaluate and use operand2 (short-circuit).
        code.mark_pos(self.pos)
        self.operand1.generate_evaluation_code(code)
        test_result, uses_temp = self.generate_operand1_test(code)
        if self.operator == 'and':
            sense = ""
        else:
            sense = "!"
        code.putln(
            "if (%s%s) {" % (
                sense,
                test_result))
        if uses_temp:
            code.funcstate.release_temp(test_result)
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_evaluation_code(code)
        self.allocate_temp_result(code)
        self.operand2.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand2.result()))
        self.operand2.generate_post_assignment_code(code)
        self.operand2.free_temps(code)
        code.putln("} else {")
        self.operand1.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand1.result()))
        self.operand1.generate_post_assignment_code(code)
        self.operand1.free_temps(code)
        code.putln("}")
    def generate_operand1_test(self, code):
        # Generate code to test the truth of the first operand.
        if self.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(PyrexTypes.c_bint_type,
                                                       manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.operand1.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.operand1.result()
        return (test_result, self.type.is_pyobject)
class CondExprNode(ExprNode):
    #  Short-circuiting conditional expression ('x if test else y').
    #
    #  test        ExprNode
    #  true_val    ExprNode
    #  false_val   ExprNode
    true_val = None
    false_val = None
    subexprs = ['test', 'true_val', 'false_val']
    def type_dependencies(self, env):
        return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
    def infer_type(self, env):
        return PyrexTypes.independent_spanning_type(self.true_val.infer_type(env),
                                                    self.false_val.infer_type(env))
    def calculate_constant_result(self):
        if self.test.constant_result:
            self.constant_result = self.true_val.constant_result
        else:
            self.constant_result = self.false_val.constant_result
    def analyse_types(self, env):
        self.test = self.test.analyse_types(env).coerce_to_boolean(env)
        self.true_val = self.true_val.analyse_types(env)
        self.false_val = self.false_val.analyse_types(env)
        # Result type spans both branch types.
        self.type = PyrexTypes.independent_spanning_type(self.true_val.type, self.false_val.type)
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
        if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
            self.true_val = self.true_val.coerce_to(self.type, env)
            self.false_val = self.false_val.coerce_to(self.type, env)
        self.is_temp = 1
        if self.type == PyrexTypes.error_type:
            self.type_error()
        return self
    def type_error(self):
        # Avoid cascading errors when a branch already failed.
        if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
                (self.true_val.type, self.false_val.type))
        self.type = PyrexTypes.error_type
    def check_const(self):
        return (self.test.check_const()
            and self.true_val.check_const()
            and self.false_val.check_const())
    def generate_evaluation_code(self, code):
        # Because subexprs may not be evaluated we can use a more optimal
        # subexpr allocation strategy than the default, so override evaluation_code.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.test.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.test.result() )
        self.eval_and_get(code, self.true_val)
        code.putln("} else {")
        self.eval_and_get(code, self.false_val)
        code.putln("}")
        self.test.generate_disposal_code(code)
        self.test.free_temps(code)
    def eval_and_get(self, code, expr):
        # Evaluate one branch and assign its (owned) result to our temp.
        expr.generate_evaluation_code(code)
        expr.make_owned_reference(code)
        code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
        expr.generate_post_assignment_code(code)
        expr.free_temps(code)
# Mapping from Cython comparison operators to CPython's rich-comparison
# opcode names.
richcmp_constants = {
    "<":  "Py_LT",
    "<=": "Py_LE",
    "==": "Py_EQ",
    "!=": "Py_NE",
    "<>": "Py_NE",   # legacy Python 2 spelling of '!='
    ">":  "Py_GT",
    ">=": "Py_GE",
    # the following are faked by special compare functions
    "in":     "Py_EQ",
    "not_in": "Py_NE",
}
class CmpNode(object):
# Mixin class containing code common to PrimaryCmpNodes
# and CascadedCmpNodes.
special_bool_cmp_function = None
special_bool_cmp_utility_code = None
def infer_type(self, env):
    # TODO: Actually implement this (after merging with -unstable).
    # Conservative placeholder: assume a generic Python object result.
    return py_object_type
def calculate_cascaded_constant_result(self, operand1_result):
    # Constant-fold this comparison and, recursively, any cascaded
    # comparisons chained after it (a < b < c ...).
    func = compile_time_binary_operators[self.operator]
    operand2_result = self.operand2.constant_result
    result = func(operand1_result, operand2_result)
    if self.cascade:
        self.cascade.calculate_cascaded_constant_result(operand2_result)
        if self.cascade.constant_result:
            self.constant_result = result and self.cascade.constant_result
        # NOTE(review): when the cascaded comparison folds to a falsy
        # value, constant_result is left unset here — presumably to stay
        # conservative; confirm against how callers treat the default.
    else:
        self.constant_result = result
def cascaded_compile_time_value(self, operand1, denv):
    # Evaluate this comparison (and any cascade) at compile time,
    # short-circuiting like Python's chained comparisons.
    func = get_compile_time_binop(self)
    operand2 = self.operand2.compile_time_value(denv)
    try:
        result = func(operand1, operand2)
    except Exception, e:
        self.compile_time_value_error(e)
        result = None
    if result:
        cascade = self.cascade
        if cascade:
            # FIXME: I bet this must call cascaded_compile_time_value()
            result = result and cascade.cascaded_compile_time_value(operand2, denv)
    return result
def is_cpp_comparison(self):
    # A comparison is a C++ operation when either operand is a C++ class.
    return any(op.type.is_cpp_class for op in (self.operand1, self.operand2))
def find_common_int_type(self, env, op, operand1, operand2):
# type1 != type2 and at least one of the types is not a C int
type1 = operand1.type
type2 = operand2.type
type1_can_be_int = False
type2_can_be_int = False
if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
type1_can_be_int = True
if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
type2_can_be_int = True
if type1.is_int:
if type2_can_be_int:
return type1
elif type2.is_int:
if type1_can_be_int:
return type2
elif type1_can_be_int:
if type2_can_be_int:
if Builtin.unicode_type in (type1, type2):
return PyrexTypes.c_py_ucs4_type
else:
return PyrexTypes.c_uchar_type
return None
def find_common_type(self, env, op, operand1, common_type=None):
operand2 = self.operand2
type1 = operand1.type
type2 = operand2.type
new_common_type = None
# catch general errors
if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
new_common_type = error_type
# try to use numeric comparisons where possible
elif type1.is_complex or type2.is_complex:
if op not in ('==', '!=') \
and (type1.is_complex or type1.is_numeric) \
and (type2.is_complex or type2.is_numeric):
error(self.pos, "complex types are unordered")
new_common_type = error_type
elif type1.is_pyobject:
new_common_type = type1
elif type2.is_pyobject:
new_common_type = type2
else:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif type1.is_numeric and type2.is_numeric:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif common_type is None or not common_type.is_pyobject:
new_common_type = self.find_common_int_type(env, op, operand1, operand2)
if new_common_type is None:
# fall back to generic type compatibility tests
if type1 == type2:
new_common_type = type1
elif type1.is_pyobject or type2.is_pyobject:
if type2.is_numeric or type2.is_string:
if operand2.check_for_coercion_error(type1, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif type1.is_numeric or type1.is_string:
if operand1.check_for_coercion_error(type2, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
new_common_type = py_object_type
else:
# one Python type and one non-Python type, not assignable
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
elif type1.assignable_from(type2):
new_common_type = type1
elif type2.assignable_from(type1):
new_common_type = type2
else:
# C types that we couldn't handle up to here are an error
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
if new_common_type.is_string and (isinstance(operand1, BytesNode) or
isinstance(operand2, BytesNode)):
# special case when comparing char* to bytes literal: must
# compare string values!
new_common_type = bytes_type
# recursively merge types
if common_type is None or new_common_type.is_error:
common_type = new_common_type
else:
# we could do a lot better by splitting the comparison
# into a non-Python part and a Python part, but this is
# safer for now
common_type = PyrexTypes.spanning_type(common_type, new_common_type)
if self.cascade:
common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
return common_type
def invalid_types_error(self, operand1, op, operand2):
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(op, operand1.type, operand2.type))
def is_python_comparison(self):
return (not self.is_ptr_contains()
and not self.is_c_string_contains()
and (self.has_python_operands()
or (self.cascade and self.cascade.is_python_comparison())
or self.operator in ('in', 'not_in')))
def coerce_operands_to(self, dst_type, env):
operand2 = self.operand2
if operand2.type != dst_type:
self.operand2 = operand2.coerce_to(dst_type, env)
if self.cascade:
self.cascade.coerce_operands_to(dst_type, env)
def is_python_result(self):
return ((self.has_python_operands() and
self.special_bool_cmp_function is None and
self.operator not in ('is', 'is_not', 'in', 'not_in') and
not self.is_c_string_contains() and
not self.is_ptr_contains())
or (self.cascade and self.cascade.is_python_result()))
def is_c_string_contains(self):
return self.operator in ('in', 'not_in') and \
((self.operand1.type.is_int
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type.is_unicode_char
and self.operand2.type is unicode_type))
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
return (container_type.is_ptr or container_type.is_array) \
and not container_type.is_string
def find_special_bool_compare_function(self, env, operand1):
# note: currently operand1 must get coerced to a Python object if we succeed here!
if self.operator in ('==', '!='):
type1, type2 = operand1.type, self.operand2.type
if type1.is_builtin_type and type2.is_builtin_type:
if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
return True
elif type1 is Builtin.str_type or type2 is Builtin.str_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyString_Equals"
return True
elif self.operator in ('in', 'not_in'):
if self.operand2.type is Builtin.dict_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_Contains"
return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Contains"
return True
else:
if not self.operand2.type.is_pyobject:
self.operand2 = self.operand2.coerce_to_pyobject(env)
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PySequence_Contains"
return True
return False
def generate_operation_code(self, code, result_code,
operand1, op , operand2):
if self.type.is_pyobject:
error_clause = code.error_goto_if_null
got_ref = "__Pyx_XGOTREF(%s); " % result_code
if self.special_bool_cmp_function:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
coerce_result = "__Pyx_PyBoolOrNull_FromLong"
else:
coerce_result = "__Pyx_PyBool_FromLong"
else:
error_clause = code.error_goto_if_neg
got_ref = ""
coerce_result = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
result1 = operand1.py_result()
else:
result1 = operand1.result()
if operand2.type.is_pyobject:
result2 = operand2.py_result()
else:
result2 = operand2.result()
if self.special_bool_cmp_utility_code:
code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
code.putln(
"%s = %s(%s(%s, %s, %s)); %s%s" % (
result_code,
coerce_result,
self.special_bool_cmp_function,
result1, result2, richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
assert op not in ('in', 'not_in'), op
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
result_code,
operand1.py_result(),
operand2.py_result(),
richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_complex:
code.putln("%s = %s(%s%s(%s, %s));" % (
result_code,
coerce_result,
op == "!=" and "!" or "",
operand1.type.unary_op('eq'),
operand1.result(),
operand2.result()))
else:
type1 = operand1.type
type2 = operand2.type
if (type1.is_extension_type or type2.is_extension_type) \
and not type1.same_as(type2):
common_type = py_object_type
elif type1.is_numeric:
common_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
common_type = type1
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
code.putln("%s = %s(%s %s %s);" % (
result_code,
coerce_result,
code1,
self.c_operator(op),
code2))
def c_operator(self, op):
if op == 'is':
return "=="
elif op == 'is_not':
return "!="
else:
return op
class PrimaryCmpNode(ExprNode, CmpNode):
    # Non-cascaded comparison or first comparison of
    # a cascaded sequence.
    #
    # operator string
    # operand1 ExprNode
    # operand2 ExprNode
    # cascade CascadedCmpNode

    # We don't use the subexprs mechanism, because
    # things here are too complicated for it to handle.
    # Instead, we override all the framework methods
    # which use it.

    child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']

    cascade = None
    coerced_operand2 = None
    is_memslice_nonecheck = False

    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type

    def type_dependencies(self, env):
        return ()

    def calculate_constant_result(self):
        # delegate to the CmpNode mixin, seeding it with operand1's value
        self.calculate_cascaded_constant_result(self.operand1.constant_result)

    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        return self.cascaded_compile_time_value(operand1, denv)

    def analyse_types(self, env):
        # Analyse both operands, pick the comparison strategy (C++ operator,
        # memoryview None-check, C string containment, special bool helper,
        # or generic common-type comparison), then coerce operands and set
        # the result type for the whole (possibly cascaded) chain.
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        if self.is_cpp_comparison():
            self.analyse_cpp_comparison(env)
            if self.cascade:
                error(self.pos, "Cascading comparison not yet supported for cpp types.")
            return self
        if self.analyse_memoryviewslice_comparison(env):
            return self
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        if self.operator in ('in', 'not_in'):
            if self.is_c_string_contains():
                self.is_pycmp = False
                common_type = None
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
                    return self
                if self.operand2.type is unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
                else:
                    if self.operand1.type is PyrexTypes.c_uchar_type:
                        self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
                    if self.operand2.type is not bytes_type:
                        self.operand2 = self.operand2.coerce_to(bytes_type, env)
                    env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
                self.operand2 = self.operand2.as_none_safe_node(
                    "argument of type 'NoneType' is not iterable")
            elif self.is_ptr_contains():
                if self.cascade:
                    error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
                self.type = PyrexTypes.c_bint_type
                # Will be transformed by IterationTransform
                return self
            elif self.find_special_bool_compare_function(env, self.operand1):
                if not self.operand1.type.is_pyobject:
                    self.operand1 = self.operand1.coerce_to_pyobject(env)
                common_type = None # if coercion needed, the method call above has already done it
                self.is_pycmp = False # result is bint
            else:
                common_type = py_object_type
                self.is_pycmp = True
        elif self.find_special_bool_compare_function(env, self.operand1):
            if not self.operand1.type.is_pyobject:
                self.operand1 = self.operand1.coerce_to_pyobject(env)
            common_type = None # if coercion needed, the method call above has already done it
            self.is_pycmp = False # result is bint
        else:
            common_type = self.find_common_type(env, self.operator, self.operand1)
            self.is_pycmp = common_type.is_pyobject
        if common_type is not None and not common_type.is_error:
            if self.operand1.type != common_type:
                self.operand1 = self.operand1.coerce_to(common_type, env)
            self.coerce_operands_to(common_type, env)
        if self.cascade:
            # the shared middle operand must be a simple (re-usable) value
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
            operand2 = self.cascade.optimise_comparison(self.operand2, env)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        if self.is_python_result():
            self.type = PyrexTypes.py_object_type
        else:
            self.type = PyrexTypes.c_bint_type
        # the whole cascade shares one result slot, hence one type
        cdr = self.cascade
        while cdr:
            cdr.type = self.type
            cdr = cdr.cascade
        if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
            # 1) owned reference, 2) reused value, 3) potential function error return value
            self.is_temp = 1
        return self

    def analyse_cpp_comparison(self, env):
        # Resolve an overloaded C++ comparison operator and coerce the
        # operands to its parameter types.
        type1 = self.operand1.type
        type2 = self.operand2.type
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if entry is None:
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, type1, type2))
            self.type = PyrexTypes.error_type
            self.result_code = "<error>"
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # member operator: only the right operand is an argument
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type

    def analyse_memoryviewslice_comparison(self, env):
        # 'memview is None' and friends compare the underlying memview
        # object pointer; returns True if this comparison applies.
        have_none = self.operand1.is_none or self.operand2.is_none
        have_slice = (self.operand1.type.is_memoryviewslice or
                      self.operand2.type.is_memoryviewslice)
        ops = ('==', '!=', 'is', 'is_not')
        if have_slice and have_none and self.operator in ops:
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            self.is_memslice_nonecheck = True
            return True
        return False

    def has_python_operands(self):
        return (self.operand1.type.is_pyobject
            or self.operand2.type.is_pyobject)

    def check_const(self):
        if self.cascade:
            self.not_const()
            return False
        else:
            return self.operand1.check_const() and self.operand2.check_const()

    def calculate_result_code(self):
        # Only used for non-temp results, i.e. plain C-level comparisons.
        if self.operand1.type.is_complex:
            if self.operator == "!=":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                self.operand1.type.binary_op('=='),
                self.operand1.result(),
                self.operand2.result())
        elif self.is_c_string_contains():
            if self.operand2.type is unicode_type:
                method = "__Pyx_UnicodeContainsUCS4"
            else:
                method = "__Pyx_BytesContains"
            if self.operator == "not_in":
                negation = "!"
            else:
                negation = ""
            # note: helper takes (container, value)
            return "(%s%s(%s, %s))" % (
                negation,
                method,
                self.operand2.result(),
                self.operand1.result())
        else:
            result1 = self.operand1.result()
            result2 = self.operand2.result()
            if self.is_memslice_nonecheck:
                # compare the slice's memview object pointer against None
                if self.operand1.type.is_memoryviewslice:
                    result1 = "((PyObject *) %s.memview)" % result1
                else:
                    result2 = "((PyObject *) %s.memview)" % result2
            return "(%s %s %s)" % (
                result1,
                self.c_operator(self.operator),
                result2)

    def generate_evaluation_code(self, code):
        self.operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
            self.generate_operation_code(code, self.result(),
                self.operand1, self.operator, self.operand2)
            if self.cascade:
                self.cascade.generate_evaluation_code(
                    code, self.result(), self.coerced_operand2 or self.operand2,
                    needs_evaluation=self.coerced_operand2 is not None)
            # operands are only disposed of here in the temp case; the
            # non-temp case borrows them via calculate_result_code()
            self.operand1.generate_disposal_code(code)
            self.operand1.free_temps(code)
            self.operand2.generate_disposal_code(code)
            self.operand2.free_temps(code)

    def generate_subexpr_disposal_code(self, code):
        #  If this is called, it is a non-cascaded cmp,
        #  so only need to dispose of the two main operands.
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_disposal_code(code)

    def free_subexpr_temps(self, code):
        #  If this is called, it is a non-cascaded cmp,
        #  so only need to dispose of the two main operands.
        self.operand1.free_temps(code)
        self.operand2.free_temps(code)

    def annotate(self, code):
        self.operand1.annotate(code)
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
    #  A CascadedCmpNode is not a complete expression node. It
    #  hangs off the side of another comparison node, shares
    #  its left operand with that node, and shares its result
    #  with the PrimaryCmpNode at the head of the chain.
    #
    #  operator      string
    #  operand2      ExprNode
    #  cascade       CascadedCmpNode

    child_attrs = ['operand2', 'coerced_operand2', 'cascade']

    cascade = None
    coerced_operand2 = None
    constant_result = constant_value_not_set # FIXME: where to calculate this?

    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type

    def type_dependencies(self, env):
        return ()

    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant

    def analyse_types(self, env):
        # note: the shared left operand is analysed by the PrimaryCmpNode
        self.operand2 = self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        return self

    def has_python_operands(self):
        return self.operand2.type.is_pyobject

    def optimise_comparison(self, operand1, env):
        # Try to substitute a special bool-compare helper for this link in
        # the chain; may coerce and return a replacement for operand1.
        if self.find_special_bool_compare_function(env, operand1):
            if not operand1.type.is_pyobject:
                operand1 = operand1.coerce_to_pyobject(env)
        if self.cascade:
            operand2 = self.cascade.optimise_comparison(self.operand2, env)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        return operand1

    def coerce_operands_to_pyobjects(self, env):
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
            self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)

    def coerce_cascaded_operands_to_temp(self, env):
        if self.cascade:
            #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)

    def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
        # Short-circuit: only evaluate this link if the chain result so far
        # is true.  The shared result slot is reused, so a true Python
        # result must be decref'd before being overwritten.
        if self.type.is_pyobject:
            code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
            code.put_decref(result, self.type)
        else:
            code.putln("if (%s) {" % result)
        if needs_evaluation:
            # operand1 is a coerced copy that was not evaluated upstream
            operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        self.generate_operation_code(code, result,
            operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.generate_evaluation_code(
                code, result, self.coerced_operand2 or self.operand2,
                needs_evaluation=self.coerced_operand2 is not None)
        if needs_evaluation:
            operand1.generate_disposal_code(code)
            operand1.free_temps(code)
        # Cascaded cmp result is always temp
        self.operand2.generate_disposal_code(code)
        self.operand2.free_temps(code)
        code.putln("}")

    def annotate(self, code):
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
# Dispatch table: binary operator token -> ExprNode subclass implementing it.
binop_node_classes = {
    # boolean short-circuit operators
    "or": BoolBinopNode,
    "and": BoolBinopNode,
    # bitwise and shift operators
    "|": IntBinopNode,
    "^": IntBinopNode,
    "&": IntBinopNode,
    "<<": IntBinopNode,
    ">>": IntBinopNode,
    # arithmetic operators
    "+": AddNode,
    "-": SubNode,
    "*": MulNode,
    "/": DivNode,
    "//": DivNode,
    "%": ModNode,
    "**": PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False):
    # Construct a binary operator node of the class appropriate
    # for the given operator token.
    node_class = binop_node_classes[operator]
    return node_class(
        pos,
        operator=operator,
        operand1=operand1,
        operand2=operand2,
        inplace=inplace)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
    #  Abstract base class for coercion nodes.
    #
    #  arg       ExprNode       node being coerced

    subexprs = ['arg']
    constant_result = not_a_constant

    def __init__(self, arg):
        super(CoercionNode, self).__init__(arg.pos)
        self.arg = arg
        if debug_coercion:
            print("%s Coercing %s" % (self, self.arg))

    def calculate_constant_result(self):
        # constant folding can break type coercion, so this is disabled
        pass

    def annotate(self, code):
        self.arg.annotate(code)
        if self.arg.type == self.type:
            # nothing was actually coerced, nothing to annotate
            return
        source_file, line, col = self.pos
        annotation = AnnotationItem(
            style='coerce', tag='coerce',
            text='[%s] to [%s]' % (self.arg.type, self.type))
        code.annotate((source_file, line, col - 1), annotation)
class CoerceToMemViewSliceNode(CoercionNode):
    """
    Coerce an object to a memoryview slice.  This holds a new reference in
    a managed temp.
    """
    def __init__(self, arg, dst_type, env):
        # only meaningful for object -> memoryview-slice conversions
        assert dst_type.is_memoryviewslice
        assert not arg.type.is_memoryviewslice
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.env = env
        self.arg = arg
        self.is_temp = 1
        self.use_managed_ref = True

    def generate_result_code(self, code):
        # make sure the from-Python conversion helper exists, call it,
        # then test its error condition
        self.type.create_from_py_utility_code(self.env)
        conversion = "%s = %s(%s);" % (
            self.result(), self.type.from_py_function, self.arg.py_result())
        code.putln(conversion)
        failed = self.type.error_condition(self.result())
        code.putln(code.error_goto_if(failed, self.pos))
class CastNode(CoercionNode):
    #  Wrap a node in a C type cast.
    #  The cast is purely syntactic: the argument's result is re-emitted
    #  as the new type, no runtime conversion takes place.

    def __init__(self, arg, new_type):
        CoercionNode.__init__(self, arg)
        self.type = new_type

    def may_be_none(self):
        return self.arg.may_be_none()

    def calculate_result_code(self):
        # emit the argument's result wrapped in a cast to self.type
        return self.arg.result_as(self.type)

    def generate_result_code(self, code):
        self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
    #  This node is used to check that a generic Python
    #  object is an instance of a particular extension type.
    #  This node borrows the result of its argument node.

    # when testing against a builtin type, reject subclasses by default
    exact_builtin_type = True

    def __init__(self, arg, dst_type, env, notnone=False):
        #  The arg is know to be a Python object, and
        #  the dst_type is known to be an extension type.
        assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.result_ctype = arg.ctype()
        self.notnone = notnone

    nogil_check = Node.gil_error
    gil_message = "Python type test"

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        if self.notnone:
            return False
        return self.arg.may_be_none()

    # the following all delegate to the borrowed argument node
    def is_simple(self):
        return self.arg.is_simple()

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def is_ephemeral(self):
        return self.arg.is_ephemeral()

    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()

    def calculate_constant_result(self):
        # FIXME
        pass

    def calculate_result_code(self):
        return self.arg.result()

    def generate_result_code(self, code):
        # Emit the runtime isinstance check; the value itself is borrowed
        # from the argument, so only the test is generated here.
        if self.type.typeobj_is_available():
            if self.type.is_builtin_type:
                type_test = self.type.type_test_code(
                    self.arg.py_result(),
                    self.notnone, exact=self.exact_builtin_type)
            else:
                type_test = self.type.type_test_code(
                    self.arg.py_result(), self.notnone)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
            code.putln("if (!(%s)) %s" % (
                type_test, code.error_goto(self.pos)))
        else:
            error(self.pos, "Cannot test type of extern C class "
                "without type object name specification")

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
    # This node is used to check that a Python object is not None and
    # raises an appropriate exception (as specified by the creating
    # transform).

    is_nonecheck = True

    def __init__(self, arg, exception_type_cname, exception_message,
                 exception_format_args):
        CoercionNode.__init__(self, arg)
        self.type = arg.type
        self.result_ctype = arg.ctype()
        self.exception_type_cname = exception_type_cname
        self.exception_message = exception_message
        self.exception_format_args = tuple(exception_format_args or ())

    nogil_check = None # this node only guards an operation that would fail already

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        # that's exactly the point of this node
        return False

    # value and result are borrowed from the argument node
    def is_simple(self):
        return self.arg.is_simple()

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()

    def calculate_result_code(self):
        return self.arg.result()

    def condition(self):
        # C expression for the object pointer that gets compared to Py_None
        if self.type.is_pyobject:
            return self.arg.py_result()
        elif self.type.is_memoryviewslice:
            return "((PyObject *) %s.memview)" % self.arg.result()
        else:
            raise Exception("unsupported type")

    def put_nonecheck(self, code):
        # Emit the None test and the exception-raising branch.  In nogil
        # sections the GIL must be acquired around the PyErr_* calls.
        code.putln(
            "if (unlikely(%s == Py_None)) {" % self.condition())
        if self.in_nogil_context:
            code.put_ensure_gil()
        escape = StringEncoding.escape_byte_string
        if self.exception_format_args:
            code.putln('PyErr_Format(%s, "%s", %s);' % (
                self.exception_type_cname,
                StringEncoding.escape_byte_string(
                    self.exception_message.encode('UTF-8')),
                ', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
                            for arg in self.exception_format_args ])))
        else:
            code.putln('PyErr_SetString(%s, "%s");' % (
                self.exception_type_cname,
                escape(self.exception_message.encode('UTF-8'))))
        if self.in_nogil_context:
            code.put_release_ensured_gil()
        code.putln(code.error_goto(self.pos))
        code.putln("}")

    def generate_result_code(self, code):
        self.put_nonecheck(code)

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
    #  This node is used to convert a C data type
    #  to a Python object.

    type = py_object_type
    is_temp = 1

    def __init__(self, arg, env, type=py_object_type):
        if not arg.type.create_to_py_utility_code(env):
            error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
        elif arg.type.is_complex:
            # special case: complex coercion is so complex that it
            # uses a macro ("__pyx_PyComplex_FromComplex()"), for
            # which the argument must be simple
            arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        if type is py_object_type:
            # be specific about some known types
            if arg.type.is_string or arg.type.is_cpp_string:
                self.type = default_str_type(env)
            elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
                self.type = unicode_type
            elif arg.type.is_complex:
                self.type = Builtin.complex_type
        elif arg.type.is_string or arg.type.is_cpp_string:
            # converting a C string into a specific Python string type
            # requires a known encoding (unless the target is bytes)
            if type is not bytes_type and not env.directives['c_string_encoding']:
                error(arg.pos,
                      "default encoding required for conversion from '%s' to '%s'" %
                      (arg.type, type))
            self.type = type
        else:
            # FIXME: check that the target type and the resulting type are compatible
            pass
        if arg.type.is_memoryviewslice:
            # Register utility codes at this point
            arg.type.get_to_py_function(env, arg)
        self.env = env

    gil_message = "Converting to Python object"

    def may_be_none(self):
        # FIXME: is this always safe?
        return False

    def coerce_to_boolean(self, env):
        arg_type = self.arg.type
        if (arg_type == PyrexTypes.c_bint_type or
            (arg_type.is_pyobject and arg_type.name == 'bool')):
            # already boolean at the C level: skip the Python round-trip
            return self.arg.coerce_to_temp(env)
        else:
            return CoerceToBooleanNode(self, env)

    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.arg.type.is_int:
            return self.arg
        else:
            return self.arg.coerce_to(PyrexTypes.c_long_type, env)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def generate_result_code(self, code):
        arg_type = self.arg.type
        if arg_type.is_memoryviewslice:
            funccall = arg_type.get_to_py_function(self.env, self.arg)
        else:
            func = arg_type.to_py_function
            if ((arg_type.is_string or arg_type.is_cpp_string)
                    and self.type in (bytes_type, str_type, unicode_type)):
                # pick the type-specific variant of the conversion helper
                func = func.replace("Object", self.type.name.title())
            funccall = "%s(%s)" % (func, self.arg.result())
        code.putln('%s = %s; %s' % (
            self.result(),
            funccall,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
    #  This node is used to convert a C int type to a Python bytes
    #  object.

    is_temp = 1

    def __init__(self, arg, env):
        arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        self.type = Builtin.bytes_type

    def generate_result_code(self, code):
        arg = self.arg
        arg_result = arg.result()
        # Plain char types always fit into one byte; wider integer types
        # need an explicit 0..255 range check first.
        narrow_char_types = (PyrexTypes.c_char_type,
                             PyrexTypes.c_uchar_type,
                             PyrexTypes.c_schar_type)
        if arg.type not in narrow_char_types:
            if arg.type.signed:
                code.putln("if ((%s < 0) || (%s > 255)) {" % (
                    arg_result, arg_result))
            else:
                code.putln("if (%s > 255) {" % arg_result)
            code.putln('PyErr_Format(PyExc_OverflowError, '
                       '"value too large to pack into a byte"); %s' % (
                           code.error_goto(self.pos)))
            code.putln('}')
        temp = None
        if arg.type is not PyrexTypes.c_char_type:
            # PyBytes_FromStringAndSize() needs a char*, so copy the value
            # into a plain char temp first
            temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
            code.putln("%s = (char)%s;" % (temp, arg_result))
            arg_result = temp
        code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
            self.result(),
            arg_result,
            code.error_goto_if_null(self.result(), self.pos)))
        if temp is not None:
            code.funcstate.release_temp(temp)
        code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
    #  This node is used to convert a Python object
    #  to a C data type.

    def __init__(self, result_type, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = result_type
        self.is_temp = 1
        if not result_type.create_from_py_utility_code(env):
            error(arg.pos,
                  "Cannot convert Python object to '%s'" % result_type)
        if not (self.type.is_string or self.type.is_pyunicode_ptr):
            return
        # A C string pointer borrows the Python object's internal buffer,
        # so flag sources whose buffer may go away or change under us.
        if self.arg.is_ephemeral():
            error(arg.pos,
                  "Obtaining '%s' from temporary Python value" % result_type)
        elif self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
            warning(arg.pos,
                    "Obtaining '%s' from externally modifiable global Python value" % result_type,
                    level=1)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def generate_result_code(self, code):
        rhs = "%s(%s)" % (self.type.from_py_function, self.arg.py_result())
        if self.type.is_enum:
            # the conversion helper yields a long which must be cast back
            rhs = typecast(self.type, c_long_type, rhs)
        error_check = code.error_goto_if(
            self.type.error_condition(self.result()), self.pos)
        code.putln('%s = %s; %s' % (self.result(), rhs, error_check))
        if self.type.is_pyobject:
            code.put_gotref(self.py_result())

    def nogil_check(self, env):
        error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
    #  This node is used when a result needs to be used
    #  in a boolean context.

    type = PyrexTypes.c_bint_type

    # Builtin containers whose truth value can be read directly from their
    # C-level size macro instead of calling __Pyx_PyObject_IsTrue().
    _special_builtins = {
        Builtin.list_type : 'PyList_GET_SIZE',
        Builtin.tuple_type : 'PyTuple_GET_SIZE',
        Builtin.bytes_type : 'PyBytes_GET_SIZE',
        Builtin.unicode_type : 'PyUnicode_GET_SIZE',
    }

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        if arg.type.is_pyobject:
            # Python objects need a runtime truth test, hence a temp
            self.is_temp = 1

    def nogil_check(self, env):
        # only the generic __Pyx_PyObject_IsTrue() path needs the GIL
        if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
            self.gil_error()

    gil_message = "Truth-testing Python object"

    def check_const(self):
        if self.is_temp:
            self.not_const()
            return False
        return self.arg.check_const()

    def calculate_result_code(self):
        # non-temp (C-level) case: plain non-zero test
        return "(%s != 0)" % self.arg.result()

    def generate_result_code(self, code):
        if not self.is_temp:
            return
        test_func = self._special_builtins.get(self.arg.type)
        if test_func is not None:
            # fast path: size macro, with an explicit None guard
            code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
                       self.result(),
                       self.arg.py_result(),
                       test_func,
                       self.arg.py_result()))
        else:
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    self.result(),
                    self.arg.py_result(),
                    code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
    # Convert a value to a C complex value of the target type using the
    # type's from-parts constructor macro; no runtime code is generated.

    def __init__(self, arg, dst_type, env):
        if arg.type.is_complex:
            # the real/imag accessor macros need a simple argument
            arg = arg.coerce_to_simple(env)
        self.type = dst_type
        CoercionNode.__init__(self, arg)
        dst_type.create_declaration_utility_code(env)

    def calculate_result_code(self):
        arg_code = self.arg.result()
        if self.arg.type.is_complex:
            real_part = "__Pyx_CREAL(%s)" % arg_code
            imag_part = "__Pyx_CIMAG(%s)" % arg_code
        else:
            # real-valued argument: imaginary part is zero
            real_part, imag_part = arg_code, "0"
        return "%s(%s, %s)" % (self.type.from_parts, real_part, imag_part)

    def generate_result_code(self, code):
        # the conversion happens entirely in calculate_result_code()
        pass
class CoerceToTempNode(CoercionNode):
    #  This node is used to force the result of another node
    #  to be stored in a temporary. It is only used if the
    #  argument node's result is not already in a temporary.

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = self.arg.type.as_argument_type()
        self.constant_result = self.arg.constant_result
        self.is_temp = 1
        if self.type.is_pyobject:
            self.result_ctype = py_object_type

    gil_message = "Creating temporary Python reference"

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def coerce_to_boolean(self, env):
        # push the boolean coercion down into the argument; if that makes
        # it simple, the temp is no longer needed at all
        self.arg = self.arg.coerce_to_boolean(env)
        if self.arg.is_simple():
            return self.arg
        self.type = self.arg.type
        self.result_ctype = self.type
        return self

    def generate_result_code(self, code):
        #self.arg.generate_evaluation_code(code) # Already done
        # by generic generate_subexpr_evaluation_code!
        code.putln("%s = %s;" % (
            self.result(), self.arg.result_as(self.ctype())))
        if self.use_managed_ref:
            # the temp owns its own reference to the value
            if self.type.is_pyobject:
                code.put_incref(self.result(), self.ctype())
            elif self.type.is_memoryviewslice:
                code.put_incref_memoryviewslice(self.result(),
                                                not self.in_nogil_context)
class ProxyNode(CoercionNode):
    """
    A node that should not be replaced by transforms or other means,
    and hence can be useful to wrap the argument to a clone node

    MyNode -> ProxyNode -> ArgNode
                CloneNode -^
    """
    # Fix: generate_result_code() was defined twice in this class with
    # identical bodies; the shadowed duplicate has been removed.

    nogil_check = None

    def __init__(self, arg):
        super(ProxyNode, self).__init__(arg)
        self.constant_result = arg.constant_result
        self._proxy_type()

    def analyse_expressions(self, env):
        self.arg = self.arg.analyse_expressions(env)
        self._proxy_type()
        return self

    def _proxy_type(self):
        # Mirror the wrapped node's typing info once it is available.
        if hasattr(self.arg, 'type'):
            self.type = self.arg.type
            self.result_ctype = self.arg.result_ctype
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry

    def generate_result_code(self, code):
        self.arg.generate_result_code(code)

    def result(self):
        return self.arg.result()

    def is_simple(self):
        return self.arg.is_simple()

    def may_be_none(self):
        return self.arg.may_be_none()

    def generate_evaluation_code(self, code):
        self.arg.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.arg.generate_disposal_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CloneNode(CoercionNode):
    # Borrows the (temporary) result of another node so that it can be
    # used several times.  The original owner of the argument node
    # remains responsible for evaluation and disposal, which is why all
    # of the code-generation hooks below are no-ops.

    subexprs = []  # the arg is deliberately not treated as a subexpr
    nogil_check = None

    def __init__(self, arg):
        CoercionNode.__init__(self, arg)
        self.constant_result = arg.constant_result
        if hasattr(arg, 'type'):
            self.type = arg.type
            self.result_ctype = arg.result_ctype
        if hasattr(arg, 'entry'):
            self.entry = arg.entry

    def result(self):
        return self.arg.result()

    def may_be_none(self):
        return self.arg.may_be_none()

    def type_dependencies(self, env):
        return self.arg.type_dependencies(env)

    def infer_type(self, env):
        return self.arg.infer_type(env)

    def analyse_types(self, env):
        # The arg was analysed by its owner; just mirror its typing.
        self.type = self.arg.type
        self.result_ctype = self.arg.result_ctype
        self.is_temp = 1
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry
        return self

    def is_simple(self):
        return True  # result is always in a temp (or a name)

    def generate_evaluation_code(self, code):
        pass

    def generate_result_code(self, code):
        pass

    def generate_disposal_code(self, code):
        pass

    def free_temps(self, code):
        pass
class CMethodSelfCloneNode(CloneNode):
    # Special CloneNode for the self argument of builtin C methods
    # that accepts subtypes of the builtin type.  This is safe only
    # for 'final' subtypes, as subtypes of the declared type may
    # override the C method.

    def coerce_to(self, dst_type, env):
        # No coercion needed when the target is a builtin base type of
        # our own type; otherwise defer to the regular CloneNode path.
        if not (dst_type.is_builtin_type and self.type.subtype_of(dst_type)):
            return CloneNode.coerce_to(self, dst_type, env)
        return self
class ModuleRefNode(ExprNode):
    # Evaluates to the current module object.  Never None, never a
    # temporary: the result is simply the module cname.

    type = py_object_type
    is_temp = False
    subexprs = []

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        return False

    def calculate_result_code(self):
        return Naming.module_cname

    def generate_result_code(self, code):
        # Pure C name reference; nothing to emit.
        pass
class DocstringRefNode(ExprNode):
    # Extracts the docstring of the body element
    subexprs = ['body']
    type = py_object_type
    is_temp = True

    def __init__(self, pos, body):
        ExprNode.__init__(self, pos)
        assert body.type.is_pyobject
        self.body = body

    def analyse_types(self, env):
        # Result type is statically py_object_type; nothing to infer.
        return self

    def generate_result_code(self, code):
        # Emit: result = getattr(body, "__doc__"), with NULL check and
        # reference-ownership bookkeeping (put_gotref).
        code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
            self.result(), self.body.result(),
            code.intern_identifier(StringEncoding.EncodedString("__doc__")),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code= UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
#------------------------------------------------------------------------------------
int_pow_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) {
%(type)s t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
#if %(signed)s
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1); /* 1 or b */
b *= b;
e >>= 1;
}
return t;
}
""")
# ------------------------------ Division ------------------------------------
div_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s q = a / b;
%(type)s r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
""")
mod_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = a %% b;
r += ((r != 0) & ((r ^ b) < 0)) * b;
return r;
}
""")
mod_float_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = fmod%(math_h_modifier)s(a, b);
r += ((r != 0) & ((r < 0) ^ (b < 0))) * b;
return r;
}
""")
cdivision_warning_utility_code = UtilityCode(
proto="""
static int __Pyx_cdivision_warning(const char *, int); /* proto */
""",
impl="""
static int __Pyx_cdivision_warning(const char *filename, int lineno) {
#if CYTHON_COMPILING_IN_PYPY
filename++; // avoid compiler warnings
lineno++;
return PyErr_Warn(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ");
#else
return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
filename,
lineno,
__Pyx_MODULE_NAME,
NULL);
#endif
}
""")
# from intobject.c
division_overflow_test_code = UtilityCode(
proto="""
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
""")
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/Cython/Compiler/ExprNodes.py | Python | gpl-2.0 | 420,289 | [
"VisIt"
] | ef8e378f69a16b2f09789fdf625f85b3a12eb41659c1e485c1572349f2dc1e34 |
"""
mlab: a simple scripting interface to Mayavi2 for 3D plotting.
Can be used inside Mayavi2 itself, in "ipython -wthread", or in any
application with the WxWidget mainloop running.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2007-2010, Enthought, Inc.
# License: BSD Style.
# Try forcing the use of wx 2.8 before any other import.
# Try forcing the use of wx 2.8 before any other import, but only if wx
# has not been imported yet and the ETS toolkit is (or may be) wx.
import sys
if not 'wx' in sys.modules:
    try:
        from traits.etsconfig.api import ETSConfig
        if ETSConfig.toolkit in ('wx', ''):
            import wxversion
            wxversion.ensureMinimal('2.8')
    except ImportError:
        # wxversion (or traits) is not installed; fall back to whatever
        # wx is importable.  (Was a bare string used as a no-op.)
        pass
# Mayavi imports
from mayavi.tools.camera import view, roll, yaw, pitch, move
from mayavi.tools.figure import figure, clf, gcf, savefig, \
draw, sync_camera, close, screenshot
from mayavi.tools.engine_manager import get_engine, show_pipeline, \
options, set_engine
from mayavi.tools.show import show
from mayavi.tools.animator import animate
def show_engine():
    """ Deprecated alias for show_pipeline, kept for backward
        compatibility.
    """
    import warnings
    # Fix: the two string literals previously concatenated to
    # "please useshow_pipeline" (missing space).
    warnings.warn('The show_engine function is deprecated, please use '
                  'show_pipeline', stacklevel=2)
    return show_pipeline()
from tools.helper_functions import contour3d, test_contour3d, \
quiver3d, test_quiver3d, test_quiver3d_2d_data, \
points3d, test_points3d, test_molecule, \
flow, test_flow, \
imshow, test_imshow, \
surf, test_surf, mesh, test_mesh, test_simple_surf, \
test_mesh_sphere, test_fancy_mesh,\
contour_surf, test_contour_surf, \
plot3d, test_plot3d, \
test_plot3d_anim, test_points3d_anim, test_contour3d_anim,\
test_simple_surf_anim, test_flow_anim, test_mesh_sphere_anim, \
triangular_mesh, test_triangular_mesh, barchart, \
test_barchart, test_mesh_mask_custom_colors
from tools.decorations import colorbar, scalarbar, vectorbar, \
outline, axes, xlabel, ylabel, zlabel, text, title, \
orientation_axes, text3d
import tools.pipeline as pipeline
from tools.tools import start_recording, stop_recording
if __name__ == "__main__":
    # Demo scene: a knotted tube colored by sin(mu), a small cloud of
    # colored points, plus colorbar, axes, outline, and title.
    import numpy
    n_mer, n_long = 6, 11
    pi = numpy.pi
    dphi = pi/1000.0
    phi = numpy.arange(0.0, 2*pi + 0.5*dphi, dphi, 'd')
    mu = phi*n_mer
    x = numpy.cos(mu)*(1+numpy.cos(n_long*mu/n_mer)*0.5)
    y = numpy.sin(mu)*(1+numpy.cos(n_long*mu/n_mer)*0.5)
    z = numpy.sin(n_long*mu/n_mer)*0.5
    pl = plot3d(x, y, z, numpy.sin(mu), tube_radius=0.05, colormap='Spectral')
    colorbar(orientation='vertical')
    # Second dataset: a Lissajous-like curve rendered as glyphs.
    t = numpy.linspace(0, 4*numpy.pi, 100)
    cos = numpy.cos
    sin = numpy.sin
    x = sin(2*t)
    y = cos(t)
    z = sin(2*t)
    s = sin(t)
    pts = points3d(x, y, z, s, colormap="YlGnBu", scale_factor=0.1,
                extent=(-0.3,0.3, -0.3, 0.3, -0.2,0.2))
    axes(xlabel='X', ylabel='Y', zlabel='Z')
    outline(pl)
    title('Mayavi rocks', height=0.85)
| liulion/mayavi | mayavi/mlab.py | Python | bsd-3-clause | 3,018 | [
"Mayavi"
] | 762bd5a1bc96f9dd56a51e2996d0cacc198be964e2be9efb6f359f754b21c79c |
import re
import copy
from numpy import array,floor,sqrt,zeros,prod,dot,ones,empty,min,max
from numpy import pi,sin,cos,arccos as acos,arctan2 as atan2
from numpy.linalg import inv,det
from extended_numpy import simplestats,ndgrid,ogrid,arange,simstats
from hdfreader import HDFgroup
from qaobject import QAobject
#simple constants
o2pi = 1./(2.*pi)  # 1/(2*pi): maps angles in radians onto [0,1) grid units
#simple functions
def is_integer(i):
    """Return True when i is within 1e-6 *above* an integer value.

    Note the asymmetry inherited from the floor() test: 5.0000005 passes
    while 4.9999995 does not.
    """
    frac = floor(i) - i
    return abs(frac) < 1e-6
#end def is_integer
class SpaceGridInitializer(QAobject):
    """Collects the inputs required to construct a SpaceGrid.

    Attributes left at None are treated as missing by check_complete().
    """
    def __init__(self):
        self.coord = None # string
        return
    #end def __init__

    def check_complete(self,exit_on_fail=True):
        """Return True if every attribute has been set (is not None).

        Each missing attribute is reported; when exit_on_fail is True
        the final error() call aborts execution.
        """
        succeeded = True
        for k,v in self._iteritems():
            if v is None:  # fixed: identity test with None, not '=='
                succeeded=False
                if exit_on_fail:
                    self.error(' SpaceGridInitializer.'+k+' must be provided',exit=False)
                #end if
            #end if
        #end for
        if not succeeded and exit_on_fail:
            self.error(' SpaceGridInitializer is incomplete')
        #end if
        return succeeded
    #end def check_complete
#end class SpaceGridInitializer
class SpaceGridBase(QAobject):
    """Base class for QMCPACK energy-density space grids.

    Loads per-domain accumulations (number density D, kinetic T,
    potential V) from HDF output, derives E = T+V and the local
    pressure measure P = (2T+V)/3, converts everything to true
    densities, and offers integration over domains.
    """
    # Map coordinate-system names to integer codes and back; the exec
    # also creates class constants (cartesian=0, cylindrical=1, ...).
    cnames=['cartesian','cylindrical','spherical','voronoi']
    coord_s2n = dict()
    coord_n2s = dict()
    i=0
    for name in cnames:
        exec name+'='+str(i)
        coord_s2n[name]=i
        coord_n2s[i]=name
        i+=1
    #end for
    # Axis-label codes as stored in the HDF file.
    xlabel = 0
    ylabel = 1
    zlabel = 2
    rlabel = 3
    plabel = 4
    tlabel = 5
    axlabel_s2n = {'x':xlabel,'y':ylabel,'z':zlabel,'r':rlabel,'phi':plabel,'theta':tlabel}
    axlabel_n2s = {xlabel:'x',ylabel:'y',zlabel:'z',rlabel:'r',plabel:'phi',tlabel:'theta'}
    axindex = {'x':0,'y':1,'z':2,'r':0,'phi':1,'theta':2}
    # Statistical quantities tracked per domain.
    quantities=['D','T','V','E','P']

    def __init__(self,initobj,options):
        # options: optional QAobject with points / exit_on_fail /
        # nblocks_exclude; defaults are filled in when absent.
        if options==None:
            options = QAobject()
            options.wasNone = True
            options.points = None
            options.exit_on_fail = True
            options.nblocks_exclude = 0
        else:
            if 'points' not in options:
                options.points = None
            if 'exit_on_fail' not in options:
                options.exit_on_fail = True
            if 'nblocks_exclude' not in options:
                options.nblocks_exclude = 0
            #end if
        self.points = options.points
        self.init_exit_fail = options.exit_on_fail
        self.nblocks_exclude = options.nblocks_exclude
        self.keep_data = True
        # these helper attributes are deleted again at the end of init
        delvars = ['init_exit_fail','keep_data']
        self.coord = None # string
        self.coordinate = None
        self.ndomains = None
        self.domain_volumes = None
        self.domain_centers = None
        self.nvalues_per_domain = -1
        self.nblocks = -1
        self.D = QAobject() #Number Density
        self.T = QAobject() #Kinetic Energy Density
        self.V = QAobject() #Potential Energy Density
        self.E = QAobject() #Energy Density, T+V
        self.P = QAobject() #Local Pressure, (Volume)*P=(2*T+V)/3
        self.init_special()
        if initobj==None:
            return
        #end if
        self.DIM=3
        # Dispatch on the type of the initialization object.
        iname = initobj.__class__.__name__
        self.iname=iname
        if iname==self.__class__.__name__+'Initializer':
            self.init_from_initializer(initobj)
        elif iname==self.__class__.__name__:
            self.init_from_spacegrid(initobj)
        elif iname=='HDFgroup':
            self.init_from_hdfgroup(initobj)
        elif iname=='XMLelement':
            self.init_from_xmlelement(initobj)
        else:
            self.error('Spacegrid cannot be initialized from '+iname)
        #end if
        delvars.append('iname')
        self.check_complete()
        for dv in delvars:
            del self[dv]
        #end for
        self._reset_dynamic_methods()
        self._register_dynamic_methods()
        return
    #end def __init__

    def copy(self,other):
        # Overridden by subclasses.
        None
    #end def copy

    def init_special(self):
        # Hook for subclass-specific member declarations.
        None
    #end def init_special

    def init_from_initializer(self,init):
        # Overridden by subclasses.
        None
    #end def init_from_initializer

    def init_from_spacegrid(self,init):
        # Overridden by subclasses.
        None
    #end def init_from_spacegrid

    def init_from_hdfgroup(self,init):
        """Load grid metadata and per-block values from an HDF group."""
        #copy all datasets from hdf group
        value_pattern = re.compile('value')
        gmap_pattern = re.compile(r'gmap\d*')
        for k,v in init._iteritems():
            exclude = k[0]=='_' or gmap_pattern.match(k) or value_pattern.match(k)
            if not exclude:
                self.__dict__[k]=v
            #end if
        #end for
        #convert 1x and 1x1 numpy arrays to just numbers
        #convert Nx1 and 1xN numpy arrays to Nx arrays
        array_type = type(array([]))
        exclude = set(['value','value_squared'])
        for k,v in self._iteritems():
            if k[0]!='_' and type(v)==array_type and k not in exclude:
                sh=v.shape
                ndim = len(sh)
                if ndim==1 and sh[0]==1:
                    self.__dict__[k]=v[0]
                elif ndim==2:
                    if sh[0]==1 and sh[1]==1:
                        self.__dict__[k]=v[0,0]
                    elif sh[0]==1 or sh[1]==1:
                        self.__dict__[k]=v.reshape((sh[0]*sh[1],))
                    #end if
                #end if
            #end if
        #end for
        #set coord string
        self.coord = SpaceGridBase.coord_n2s[self.coordinate]
        #determine if chempot grid
        chempot = 'min_part' in init
        self.chempot = chempot
        if chempot:
            npvalues = self.max_part-self.min_part+1
            self.npvalues = npvalues
        #end if
        #process the data in hdf value,value_squared
        nbe = self.nblocks_exclude
        nquant = self.nvalues_per_domain
        ndomains = self.ndomains
        nblocks,ntmp = init.value.shape
        self.nblocks = nblocks
        # reshape flat per-block rows into (quantity, [particle,]
        # domain, block) and drop the first nbe equilibration blocks
        if not chempot:
            value = init.value.reshape(nblocks,ndomains,nquant).transpose(2,1,0)
        else:
            value = init.value.reshape(nblocks,ndomains,npvalues,nquant).transpose(3,2,1,0)
        #end if
        value = value[...,nbe:]
        #(mean,error)=simplestats(value)
        (mean,var,error,kappa)=simstats(value)
        quants = ['D','T','V']
        for i in range(len(quants)):
            q=quants[i]
            self[q].mean = mean[i,...]
            self[q].error = error[i,...]
            exec 'i'+q+'='+str(i)   # defines iD, iT, iV index constants
        #end for
        # derived quantities: total energy and local pressure measure
        E = value[iT,...]+value[iV,...]
        # (mean,error)=simplestats(E)
        (mean,var,error,kappa)=simstats(E)
        self.E.mean = mean
        self.E.error = error
        P = 2./3.*value[iT,...]+1./3.*value[iV,...]
        #(mean,error)=simplestats(P)
        (mean,var,error,kappa)=simstats(P)
        self.P.mean = mean
        self.P.error = error
        #convert all quantities into true densities
        ovol = 1./self.domain_volumes
        sqovol = sqrt(ovol)
        for q in SpaceGridBase.quantities:
            self[q].mean *= ovol
            self[q].error *= sqovol
        #end for
        #keep original data, if requested
        if self.keep_data:
            self.data = QAobject()
            for i in range(len(quants)):
                q=quants[i]
                self.data[q] = value[i,...]
            #end for
            self.data.E = E
            self.data.P = P
        #end if
        #print 'sg'
        #import code
        #code.interact(local=locals())
        return
    #end def init_from_hdfgroup

    def init_from_xmlelement(self,init):
        # Overridden by subclasses.
        None
    #end def init_from_xmlelement

    def check_complete(self,exit_on_fail=True):
        """Verify that no public attribute is still None."""
        succeeded = True
        for k,v in self._iteritems():
            # NOTE(review): 'v==None' performs an elementwise compare
            # for numpy-array attributes; 'is None' would be safer here
            # — confirm before changing.
            if k[0]!='_' and v==None:
                succeeded=False
                if exit_on_fail:
                    self.error('SpaceGridBase.'+k+' must be provided',exit=False)
                #end if
            #end if
        #end if
        if not succeeded:
            self.error('SpaceGrid attempted initialization from '+self.iname,exit=False)
            self.error('SpaceGrid is incomplete',exit=False)
            if exit_on_fail:
                exit()
            #end if
        #end if
        return succeeded
    #end def check_complete

    def _reset_dynamic_methods(self):
        # Hook for subclasses that bind coordinate-specific methods.
        None
    #end def _reset_dynamic_methods

    def _unset_dynamic_methods(self):
        # Hook for subclasses that bind coordinate-specific methods.
        None
    #end def _unset_dynamic_methods

    def add_all_attributes(self,o):
        """Deep-copy every public attribute of o onto self."""
        for k,v in o.__dict__.iteritems():
            if not k.startswith('_'):
                vc = copy.deepcopy(v)
                self._add_attribute(k,vc)
            #end if
        #end for
        return
    #end def add_all_attributes

    def reorder_atomic_data(self,imap):
        # Hook for subclasses (e.g. Voronoi grids keyed by atom).
        None
    #end def reorder_atomic_data

    def integrate(self,quantity,domain=None):
        """Integrate a density over all domains (or a domain subset).

        Returns (mean, error); errors are combined in quadrature.
        """
        if quantity not in SpaceGridBase.quantities:
            msg = 'requested integration of quantity '+quantity+'\n'
            msg +=' '+quantity+' is not a valid SpaceGrid quantity\n'
            msg +=' valid quantities are:\n'
            msg +=' '+str(SpaceGridBase.quantities)
            self.error(msg)
        #end if
        dv = self.domain_volumes
        if domain==None:
            mean = (self[quantity].mean*dv).sum()
            error = sqrt((self[quantity].error**2*dv).sum())
        else:
            mean = (self[quantity].mean[domain]*dv[domain]).sum()
            error = sqrt((self[quantity].error[domain]**2*dv[domain]).sum())
        #end if
        return mean,error
    #end def integrate

    def integrate_data(self,quantity,*domains,**kwargs):
        """Integrate per-block data over domain subsets.

        With no domains: returns (mean, error) over all domains.  With
        several domains: returns per-domain result objects (or parallel
        mean/error lists when return_list is set).
        """
        return_list = False
        if 'domains' in kwargs:
            domains = kwargs['domains']
            return_list = True
        #end if
        if 'return_list' in kwargs:
            return_list = kwargs['return_list']
        #end if
        if quantity not in SpaceGridBase.quantities:
            msg = 'requested integration of quantity '+quantity+'\n'
            msg +=' '+quantity+' is not a valid SpaceGrid quantity\n'
            msg +=' valid quantities are:\n'
            msg +=' '+str(SpaceGridBase.quantities)
            self.error(msg)
        #end if
        q = self.data[quantity]
        results = list()
        nblocks = q.shape[-1]
        qi = zeros((nblocks,))
        if len(domains)==0:
            for b in xrange(nblocks):
                qi[b] = q[...,b].sum()
            #end for
            (mean,var,error,kappa)=simstats(qi)
        else:
            for domain in domains:
                for b in xrange(nblocks):
                    qb = q[...,b]
                    qi[b] = qb[domain].sum()
                #end for
                (mean,var,error,kappa)=simstats(qi)
                res = QAobject()
                res.mean = mean
                res.error = error
                res.data = qi.copy()
                results.append(res)
            #end for
        #end for
        if len(domains)<2:
            return mean,error
        else:
            if not return_list:
                return tuple(results)
            else:
                means = list()
                errors = list()
                for res in results:
                    means.append(res.mean)
                    errors.append(res.error)
                #end for
                return means,errors
            #end if
        #end if
    #end def integrate_data
#end class SpaceGridBase
class RectilinearGridInitializer(SpaceGridInitializer):
    # Adds the geometric inputs (origin, axes, per-axis labels and grid
    # specification strings) needed by a rectilinear grid.
    def __init__(self):
        SpaceGridInitializer.__init__(self)
        self.origin = None # 3x1 array
        self.axes = None # 3x3 array
        self.axlabel = None # 3x1 string list
        self.axgrid = None # 3x1 string list
    #end def __init__
#end class RectilinearGridInitializer
class RectilinearGrid(SpaceGridBase):
    def __init__(self,initobj=None,options=None):
        # All construction work is done by the base class, which
        # dispatches on the type of initobj.
        SpaceGridBase.__init__(self,initobj,options)
        return
    #end def __init__
    def init_special(self):
        # Declare rectilinear-specific members; all are filled in by
        # initialize() or one of the init_from_* loaders.
        self.origin = None # 3x1 array
        self.axes = None # 3x3 array
        self.axlabel = None # 3x1 string list
        self.axinv = None
        self.volume = None
        self.dimensions = None
        self.gmap = None
        self.umin = None
        self.umax = None
        self.odu = None
        self.dm = None
        self.domain_uwidths = None
        return
    #end def init_special
    def copy(self):
        # Copy-construct a fresh grid from this one.
        return RectilinearGrid(self)
    #end def copy
def _reset_dynamic_methods(self):
p2d=[self.points2domains_cartesian, \
self.points2domains_cylindrical, \
self.points2domains_spherical]
self.points2domains = p2d[self.coordinate]
p2u=[self.point2unit_cartesian, \
self.point2unit_cylindrical, \
self.point2unit_spherical]
self.point2unit = p2u[self.coordinate]
return
#end def _reset_dynamic_methods
def _unset_dynamic_methods(self):
self.points2domains = None
self.point2unit = None
return
#end def _unset_dynamic_methods
def init_from_initializer(self,init):
init.check_complete()
for k,v in init._iteritems():
if k[0]!='_':
self.__dict__[k]=v
#end if
#end for
self.initialize()
return
#end def init_from_initializer
    def init_from_spacegrid(self,init):
        # Copy-construct from another grid: statistics and arrays are
        # copied; the HDF handle and bound methods/points are shared or
        # skipped.
        for q in SpaceGridBase.quantities:
            self[q].mean = init[q].mean.copy()
            self[q].error = init[q].error.copy()
        #end for
        array_type = type(array([1]))
        exclude = set(['point2unit','points2domains','points'])
        for k,v in init._iteritems():
            if k[0]!='_':
                vtype = type(v)
                if k in SpaceGridBase.quantities:
                    self[k].mean = v.mean.copy()
                    self[k].error = v.error.copy()
                elif vtype==array_type:
                    self[k] = v.copy()
                elif vtype==HDFgroup:
                    self[k] = v
                elif k in exclude:
                    None
                else:
                    # copy scalars/strings by round-tripping the type
                    self[k] = vtype(v)
                #end if
            #end if
        #end for
        self.points = init.points
        return
    #end def init_from_spacegrid
    def init_from_hdfgroup(self,init):
        # Generic load, then flatten the per-axis grid maps and convert
        # the stored numeric axis-type codes back into label strings.
        SpaceGridBase.init_from_hdfgroup(self,init)
        self.gmap=[init.gmap1,init.gmap2,init.gmap3]
        #set axlabel strings
        self.axlabel=list()
        for d in range(self.DIM):
            label = SpaceGridBase.axlabel_n2s[self.axtypes[d]]
            self.axlabel.append(label)
        #end for
        del self.axtypes
        for i in range(len(self.gmap)):
            self.gmap[i]=self.gmap[i].reshape((len(self.gmap[i]),))
        #end for
        return
    #end def init_from_hdfgroup
    def init_from_xmlelement(self,init):
        # Build origin/axes/labels from a spacegrid XML element; points
        # referenced by name (p1/p2) are resolved via self.points.
        DIM=self.DIM
        self.axlabel=list()
        self.axgrid =list()
        #coord
        self.coord = init.coord
        #origin
        p1 = self.points[init.origin.p1]
        if 'p2' in init.origin:
            p2 = self.points[init.origin.p2]
        else:
            p2 = self.points['zero']
        #end if
        if 'fraction' in init.origin:
            frac = eval(init.origin.fraction)
        else:
            frac = 0.0
        self.origin = p1 + frac*(p2-p1)
        #axes
        self.axes = zeros((DIM,DIM))
        for d in range(DIM):
            # picks up init.axis1 / axis2 / axis3
            exec 'axis=init.axis'+str(d+1)
            p1 = self.points[axis.p1]
            if 'p2' in axis:
                p2 = self.points[axis.p2]
            else:
                p2 = self.points['zero']
            #end if
            if 'scale' in axis:
                scale = eval(axis.scale)
            else:
                scale = 1.0
            #end if
            for dd in range(DIM):
                self.axes[dd,d] = scale*(p1[dd]-p2[dd])
            #end for
            self.axlabel.append(axis.label)
            self.axgrid.append(axis.grid)
        #end for
        self.initialize()
        return
    #end def init_from_xmlelement
    def initialize(self): #like qmcpack SpaceGridBase.initialize
        """Parse the per-axis grid strings and build the full geometry.

        Mirrors QMCPACK's SpaceGrid::initialize: tokenizes each axis
        grid specification (e.g. "-1 (.1) 1"), derives the domain
        counts, u->domain maps (gmap), domain volumes/centers/widths,
        and stores everything on self.  Returns False (and reports
        errors) on an invalid specification.
        """
        write=False
        succeeded=True
        ndomains=-1
        DIM = self.DIM
        coord = self.coord
        origin = self.origin
        axes = self.axes
        axlabel = self.axlabel
        axgrid = self.axgrid
        del self.axgrid
        ax_cartesian = ["x" , "y" , "z" ]
        ax_cylindrical = ["r" , "phi" , "z" ]
        ax_spherical = ["r" , "phi" , "theta"]
        cmap = dict()
        if(coord=="cartesian"):
            for d in range(DIM):
                cmap[ax_cartesian[d]]=d
                axlabel[d]=ax_cartesian[d]
            #end
        elif(coord=="cylindrical"):
            for d in range(DIM):
                cmap[ax_cylindrical[d]]=d
                axlabel[d]=ax_cylindrical[d]
            #end
        elif(coord=="spherical"):
            for d in range(DIM):
                cmap[ax_spherical[d]]=d
                axlabel[d]=ax_spherical[d]
            #end
        else:
            self.error(" Coordinate supplied to spacegrid must be cartesian, cylindrical, or spherical\n You provided "+coord,exit=False)
            succeeded=False
        #end
        self.coordinate = SpaceGridBase.coord_s2n[self.coord]
        coordinate = self.coordinate
        #loop over spacegrid xml elements
        naxes =DIM
        # variables for loop
        utol = 1e-5
        dimensions=zeros((DIM,),dtype=int)
        umin=zeros((DIM,))
        umax=zeros((DIM,))
        odu=zeros((DIM,))
        ndu_per_interval=[None,None,None]
        gmap=[None,None,None]
        for dd in range(DIM):
            iaxis = cmap[axlabel[dd]]
            grid = axgrid[dd]
            #read in the grid contents
            # remove spaces inside of parentheses
            inparen=False
            gtmp=''
            for gc in grid:
                if(gc=='('):
                    inparen=True
                    gtmp+=' '
                #end
                if(not(inparen and gc==' ')):
                    gtmp+=gc
                if(gc==')'):
                    inparen=False
                    gtmp+=' '
                #end
            #end
            grid=gtmp
            # break into tokens
            tokens = grid.split()
            if(write):
                print " grid = ",grid
                print " tokens = ",tokens
            #end
            # count the number of intervals
            nintervals=0
            for t in tokens:
                if t[0]!='(':
                    nintervals+=1
                #end
            #end
            nintervals-=1
            if(write):
                print " nintervals = ",nintervals
            # allocate temporary interval variables
            ndom_int = zeros((nintervals,),dtype=int)
            du_int = zeros((nintervals,))
            ndu_int = zeros((nintervals,),dtype=int)
            # determine number of domains in each interval and the width of each domain
            u1=1.0*eval(tokens[0])
            umin[iaxis]=u1
            if(abs(u1)>1.0000001):
                self.error(" interval endpoints cannot be greater than 1\n endpoint provided: "+str(u1),exit=False)
                succeeded=False
            #end
            is_int=False
            has_paren_val=False
            interval=-1
            for i in range(1,len(tokens)):
                if not tokens[i].startswith('('):
                    # endpoint token: closes the current interval
                    u2=1.0*eval(tokens[i])
                    umax[iaxis]=u2
                    if(not has_paren_val):
                        du_i=u2-u1
                    #end
                    has_paren_val=False
                    interval+=1
                    if(write):
                        print " parsing interval ",interval," of ",nintervals
                        print " u1,u2 = ",u1,",",u2
                    #end
                    if(u2<u1):
                        self.error(" interval ("+str(u1)+","+str(u2)+") is negative",exit=False)
                        succeeded=False
                    #end
                    if(abs(u2)>1.0000001):
                        self.error(" interval endpoints cannot be greater than 1\n endpoint provided: "+str(u2),exit=False)
                        succeeded=False
                    #end
                    if(is_int):
                        du_int[interval]=(u2-u1)/ndom_i
                        ndom_int[interval]=ndom_i
                    else:
                        du_int[interval]=du_i
                        ndom_int[interval]=floor((u2-u1)/du_i+.5)
                        if(abs(u2-u1-du_i*ndom_int[interval])>utol):
                            self.error(" interval ("+str(u1)+","+str(u2)+") not divisible by du="+str(du_i),exit=False)
                            succeeded=False
                        #end
                    #end
                    u1=u2
                else:
                    # "(n)" = integer domain count, "(w.x)" = domain width
                    has_paren_val=True
                    paren_val=tokens[i][1:len(tokens[i])-1]
                    if(write):
                        print " interval spacer = ",paren_val
                    #end if
                    is_int=tokens[i].find(".")==-1
                    if(is_int):
                        ndom_i = eval(paren_val)
                        du_i = -1.0
                    else:
                        ndom_i = 0
                        du_i = eval(paren_val)
                    #end
                #end
            #end
            # find the smallest domain width
            du_min=min(du_int)
            odu[iaxis]=1.0/du_min
            # make sure it divides into all other domain widths
            for i in range(len(du_int)):
                ndu_int[i]=floor(du_int[i]/du_min+.5)
                if(abs(du_int[i]-ndu_int[i]*du_min)>utol):
                    self.error("interval {0} of axis {1} is not divisible by smallest subinterval {2}".format(i+1,iaxis+1,du_min),exit=False)
                    succeeded=False
                #end
            #end
            if(write):
                print " interval breakdown"
                print " interval,ndomains,nsubdomains_per_domain"
                for i in range(len(ndom_int)):
                    print " ",i,",",ndom_int[i],",",ndu_int[i]
                #end
            #end
            # set up the interval map such that gmap[u/du]==domain index
            gmap[iaxis] = zeros((floor((umax[iaxis]-umin[iaxis])*odu[iaxis]+.5),),dtype=int)
            n=0
            nd=-1
            if(write):
                print " i,j,k ax,n,nd "
            #end if
            for i in range(len(ndom_int)):
                for j in range(ndom_int[i]):
                    nd+=1
                    for k in range(ndu_int[i]):
                        gmap[iaxis][n]=nd
                        if(write):
                            print " ",i,",",j,",",k," ",iaxis,",",n,",",nd
                        #end
                        n+=1
                    #end
                #end
            #end
            dimensions[iaxis]=nd+1
            #end read in the grid contents
            #save interval width information
            ndom_tot=sum(ndom_int)
            ndu_per_interval[iaxis] = zeros((ndom_tot,),dtype=int)
            idom=0
            for i in range(len(ndom_int)):
                for ii in range(ndom_int[i]):
                    ndu_per_interval[iaxis][idom] = ndu_int[i]
                    idom+=1
                #end
            #end
        #end
        axinv = inv(axes)
        #check that all axis grid values fall in the allowed intervals
        cartmap = dict()
        for d in range(DIM):
            cartmap[ax_cartesian[d]]=d
        #end for
        for d in range(DIM):
            if axlabel[d] in cartmap:
                if(umin[d]<-1.0 or umax[d]>1.0):
                    self.error(" grid values for {0} must fall in [-1,1]\n".format(axlabel[d])+" interval provided: [{0},{1}]".format(umin[d],umax[d]),exit=False)
                    succeeded=False
                #end if
            elif(axlabel[d]=="phi"):
                if(abs(umin[d])+abs(umax[d])>1.0):
                    self.error(" phi interval cannot be longer than 1\n interval length provided: {0}".format(abs(umin[d])+abs(umax[d])),exit=False)
                    succeeded=False
                #end if
            else:
                if(umin[d]<0.0 or umax[d]>1.0):
                    self.error(" grid values for {0} must fall in [0,1]\n".format(axlabel[d])+" interval provided: [{0},{1}]".format(umin[d],umax[d]),exit=False)
                    succeeded=False
                #end if
            #end if
        #end for
        #set grid dimensions
        # C/Python style indexing
        dm=array([0,0,0],dtype=int)
        dm[0] = dimensions[1]*dimensions[2]
        dm[1] = dimensions[2]
        dm[2] = 1
        ndomains=prod(dimensions)
        volume = abs(det(axes))*8.0#axes span only one octant
        #compute domain volumes, centers, and widths
        domain_volumes = zeros((ndomains,))
        domain_centers = zeros((ndomains,DIM))
        domain_uwidths = zeros((ndomains,DIM))
        interval_centers = [None,None,None]
        interval_widths = [None,None,None]
        for d in range(DIM):
            nintervals = len(ndu_per_interval[d])
            interval_centers[d] = zeros((nintervals))
            interval_widths[d] = zeros((nintervals))
            interval_widths[d][0]=ndu_per_interval[d][0]/odu[d]
            interval_centers[d][0]=interval_widths[d][0]/2.0+umin[d]
            for i in range(1,nintervals):
                interval_widths[d][i] = ndu_per_interval[d][i]/odu[d]
                interval_centers[d][i] = interval_centers[d][i-1] \
                    +.5*(interval_widths[d][i]+interval_widths[d][i-1])
            #end for
        #end for
        du,uc,ubc,rc = zeros((DIM,)),zeros((DIM,)),zeros((DIM,)),zeros((DIM,))
        vol = -1e99
        vol_tot=0.0
        vscale = abs(det(axes))
        for i in range(dimensions[0]):
            for j in range(dimensions[1]):
                for k in range(dimensions[2]):
                    idomain = dm[0]*i + dm[1]*j + dm[2]*k
                    du[0] = interval_widths[0][i]
                    du[1] = interval_widths[1][j]
                    du[2] = interval_widths[2][k]
                    uc[0] = interval_centers[0][i]
                    uc[1] = interval_centers[1][j]
                    uc[2] = interval_centers[2][k]
                    # per-coordinate-system domain volume and the
                    # cartesian position of the domain center
                    if(coordinate==SpaceGridBase.cartesian):
                        vol=du[0]*du[1]*du[2]
                        ubc=uc
                    elif(coordinate==SpaceGridBase.cylindrical):
                        uc[1]=2.0*pi*uc[1]-pi
                        du[1]=2.0*pi*du[1]
                        vol=uc[0]*du[0]*du[1]*du[2]
                        ubc[0]=uc[0]*cos(uc[1])
                        ubc[1]=uc[0]*sin(uc[1])
                        ubc[2]=uc[2]
                    elif(coordinate==SpaceGridBase.spherical):
                        uc[1]=2.0*pi*uc[1]-pi
                        du[1]=2.0*pi*du[1]
                        uc[2]= pi*uc[2]
                        du[2]= pi*du[2]
                        vol=(uc[0]*uc[0]+du[0]*du[0]/12.0)*du[0] \
                            *du[1] \
                            *2.0*sin(uc[2])*sin(.5*du[2])
                        ubc[0]=uc[0]*sin(uc[2])*cos(uc[1])
                        ubc[1]=uc[0]*sin(uc[2])*sin(uc[1])
                        ubc[2]=uc[0]*cos(uc[2])
                    #end if
                    vol*=vscale
                    vol_tot+=vol
                    rc = dot(axes,ubc) + origin
                    domain_volumes[idomain] = vol
                    for d in range(DIM):
                        domain_uwidths[idomain,d] = du[d]
                        domain_centers[idomain,d] = rc[d]
                    #end for
                #end for
            #end for
        #end for
        #find the actual volume of the grid
        du = umax-umin
        uc = .5*(umax+umin)
        if coordinate==SpaceGridBase.cartesian:
            vol=du[0]*du[1]*du[2]
        elif coordinate==SpaceGridBase.cylindrical:
            uc[1]=2.0*pi*uc[1]-pi
            du[1]=2.0*pi*du[1]
            vol=uc[0]*du[0]*du[1]*du[2]
        elif coordinate==SpaceGridBase.spherical:
            uc[1]=2.0*pi*uc[1]-pi
            du[1]=2.0*pi*du[1]
            uc[2]= pi*uc[2]
            du[2]= pi*du[2]
            vol=(uc[0]*uc[0]+du[0]*du[0]/12.0)*du[0]*du[1]*2.0*sin(uc[2])*sin(.5*du[2])
        #end if
        volume = vol*abs(det(axes))
        for q in SpaceGridBase.quantities:
            self[q].mean = zeros((ndomains,))
            self[q].error = zeros((ndomains,))
        #end for
        #save the results
        self.axinv = axinv
        self.volume = volume
        self.gmap = gmap
        self.umin = umin
        self.umax = umax
        self.odu = odu
        self.dm = dm
        self.dimensions = dimensions
        self.ndomains = ndomains
        self.domain_volumes = domain_volumes
        self.domain_centers = domain_centers
        self.domain_uwidths = domain_uwidths
        #succeeded = succeeded and check_grid()
        if(self.init_exit_fail and not succeeded):
            self.error(" in def initialize")
        #end
        return succeeded
    #end def initialize
def point2unit_cartesian(point):
u = dot(self.axinv,(point-self.origin))
return u
#end def point2unit_cartesian
def point2unit_cylindrical(point):
ub = dot(self.axinv,(point-self.origin))
u=zeros((self.DIM,))
u[0] = sqrt(ub[0]*ub[0]+ub[1]*ub[1])
u[1] = atan2(ub[1],ub[0])*o2pi+.5
u[2] = ub[2]
return u
#end def point2unit_cylindrical
def point2unit_spherical(point):
ub = dot(self.axinv,(point-self.origin))
u=zeros((self.DIM,))
u[0] = sqrt(ub[0]*ub[0]+ub[1]*ub[1]+ub[2]*ub[2])
u[1] = atan2(ub[1],ub[0])*o2pi+.5
u[2] = acos(ub[2]/u[0])*o2pi*2.0
return u
#end def point2unit_spherical
def points2domains_cartesian(self,points,domains,points_outside):
u = zeros((self.DIM,))
iu = zeros((self.DIM,),dtype=int)
ndomains=-1
npoints,ndim = points.shape
for p in xrange(npoints):
u = dot(self.axinv,(points[p]-self.origin))
if (u>self.umin).all() and (u<self.umax).all():
points_outside[p]=False
iu=floor( (u-self.umin)*self.odu )
iu[0] = self.gmap[0][iu[0]]
iu[1] = self.gmap[1][iu[1]]
iu[2] = self.gmap[2][iu[2]]
ndomains+=1
domains[ndomains,0] = p
domains[ndomains,1] = dot(self.dm,iu)
#end
#end
ndomains+=1
return ndomains
#end def points2domains_cartesian
def points2domains_cylindrical(self,points,domains,points_outside):
u = zeros((self.DIM,))
iu = zeros((self.DIM,),dtype=int)
ndomains=-1
npoints,ndim = points.shape
for p in xrange(npoints):
ub = dot(self.axinv,(points[p]-self.origin))
u[0] = sqrt(ub[0]*ub[0]+ub[1]*ub[1])
u[1] = atan2(ub[1],ub[0])*o2pi+.5
u[2] = ub[2]
if (u>self.umin).all() and (u<self.umax).all():
points_outside[p]=False
iu=floor( (u-self.umin)*self.odu )
iu[0] = self.gmap[0][iu[0]]
iu[1] = self.gmap[1][iu[1]]
iu[2] = self.gmap[2][iu[2]]
ndomains+=1
domains[ndomains,0] = p
domains[ndomains,1] = dot(self.dm,iu)
#end
#end
ndomains+=1
return ndomains
#end def points2domains_cylindrical
def points2domains_spherical(self,points,domains,points_outside):
u = zeros((self.DIM,))
iu = zeros((self.DIM,),dtype=int)
ndomains=-1
npoints,ndim = points.shape
for p in xrange(npoints):
ub = dot(self.axinv,(points[p]-self.origin))
u[0] = sqrt(ub[0]*ub[0]+ub[1]*ub[1]+ub[2]*ub[2])
u[1] = atan2(ub[1],ub[0])*o2pi+.5
u[2] = acos(ub[2]/u[0])*o2pi*2.0
if (u>self.umin).all() and (u<self.umax).all():
points_outside[p]=False
iu=floor( (u-self.umin)*self.odu )
iu[0] = self.gmap[0][iu[0]]
iu[1] = self.gmap[1][iu[1]]
iu[2] = self.gmap[2][iu[2]]
ndomains+=1
domains[ndomains,0] = p
domains[ndomains,1] = dot(self.dm,iu)
#end
#end
ndomains+=1
return ndomains
#end def points2domains_spherical
def shift_origin(self,shift):
self.origin += shift
for i in range(self.domain_centers.shape[0]):
self.domain_centers[i,:] += shift
#end for
return
#end def shift_origin
def set_origin(self,origin):
self.shift_origin(origin-self.origin)
return
#end def set_origin
def interpolate_across(self,quantities,spacegrids,outside,integration=False,warn=False):
#if the grid is to be used for integration confirm that domains
# of this spacegrid subdivide source spacegrid domains
if integration:
#setup checking variables
am_cartesian = self.coordinate==Spacegrid.cartesian
am_cylindrical = self.coordinate==Spacegrid.cylindrical
am_spherical = self.coordinate==Spacegrid.spherical
fine_interval_centers = [None,None,None]
fine_interval_domains = [None,None,None]
for d in range(self.DIM):
ndu = round( (self.umax[d]-self.umin[d])*self.odu[d] )
if len(self.gmap[d])!=ndu:
self.error('ndu is different than len(gmap)')
#end if
du = 1./self.odu[d]
fine_interval_centers[d] = self.umin + .5*du + du*array(range(ndu))
find_interval_domains[d] = zeros((ndu,))
#end for
#checks are done on each source spacegrid to determine interpolation compatibility
for s in spacegrids:
# all the spacegrids must have coordinate system to satisfy this
if s.coordinate!=self.coordinate:
if warn:
self.warn('SpaceGrids must have same coordinate for interpolation')
#end if
return False
#end if
# each spacegrids' axes must be int mult of this spacegrid's axes
# (this ensures that isosurface shapes conform)
tile = dot(self.axinv,s.axes)
for d in range(self.DIM):
if not is_integer(tile[d,d]):
if warn:
self.warn("source axes must be multiples of interpolant's axes")
#end if
return False
#end if
#end for
# origin must be at r=0 for cylindrical or spherical
uo = self.point2unit(s.origin)
if am_cylindrical or am_spherical:
if uo[0]>1e-6:
if warn:
self.warn('source origin must lie at interpolant r=0')
#end if
return False
#end if
#end if
# fine meshes must align
# origin must be an integer multiple of smallest dom width
if am_cylindrical:
mdims=[2]
elif am_cartesian:
mdims=[0,1,2]
else:
mdims=[]
#end if
for d in mdims:
if not is_integer(uo[d]*self.odu[d]):
if warn:
self.warn('source origin does not lie on interpolant fine mesh')
#end if
return False
#end if
#end for
# smallest dom width must be multiple of this smallest dom width
for d in range(self.DIM):
if not is_integer(self.odu[d]/s.odu[d]):
if warn:
self.warn('smallest source domain width must be a multiple of interpolants smallest domain width')
#end if
return False
#end if
#end for
# each interval along each direction for interpolant must map to only one source interval
# construct points at each fine interval center of interpolant, run them through source gmap to get interval indices
for d in range(self.DIM):
fine_interval_domains[d][:]=-2
gmlen = len(s.gmap[d])
for i in range(len(fine_interval_centers[d])):
uc = fine_interval_centers[d][i]
ind = floor((uc-s.umin[d])*s.odu[d])
if ind < gmlen:
idom=s.gmap[d][ind]
else:
idom=-1
#end if
fine_interval_domains[d][i]=idom
#end for
cind = self.gmap[d][0]
istart = 0
iend = 0
for i in range(len(self.gmap[d])):
if self.gmap[d][i]==cind:
iend+=1
else:
source_ind = fine_interval_domains[istart]
for j in range(istart+1,iend):
if fine_interval_domains[j]!=source_ind:
if warn:
self.warn('an interpolant domain must not fall on multiple source domains')
#end if
return False
#end if
#end for
istart=iend
#end if
#end for
#end for
#end for
#end if
#get the list of domains points from this grid fall in
# and interpolate requested quantities on them
domain_centers = self.domain_centers
domind = zeros((self.ndomains,2),dtype=int)
domout = ones((self.ndomains,) ,dtype=int)
for s in spacegrids:
domind[:,:] = -1
ndomin = s.points2domains(domain_centers,domind,domout)
for q in quantities:
self[q].mean[domind[0:ndomin,0]] = s[q].mean[domind[0:ndomin,1]].copy()
self[q].error[domind[0:ndomin,0]] = s[q].error[domind[0:ndomin,1]].copy()
#end for
#end for
for d in xrange(self.ndomains):
if domout[d]:
for q in quantities:
self[q].mean[d] = outside[q].mean
self[q].error[d] = outside[q].error
#end for
#end if
#end for
return True
#end def interpolate_across
def interpolate(self,points,quantities=None):
if quantities==None:
quantities=SpaceGridBase.quantities
#end if
npoints,ndim = points.shape
ind = empty((npoints,2),dtype=int)
out = ones((npoints,) ,dtype=int)
nin = self.points2domains(points,ind,out)
result = QAobject()
for q in quantities:
result._add_attribute(q,QAobject())
result[q].mean = zeros((npoints,))
result[q].error = zeros((npoints,))
result[q].mean[ind[0:nin,0]] = self[q].mean[ind[0:nin,1]].copy()
result[q].error[ind[0:nin,0]] = self[q].error[ind[0:nin,1]].copy()
#end for
return result
#end def interpolate
def isosurface(self,quantity,contours=5,origin=None):
if quantity not in SpaceGridBase.quantities:
self.error()
#end if
dimensions = self.dimensions
if origin==None:
points = self.domain_centers
else:
npoints,ndim = self.domain_centers.shape
points = empty((npoints,ndim))
for i in range(npoints):
points[i,:] = origin + self.domain_centers[i,:]
#end for
#end if
scalars = self[quantity].mean
name = quantity
self.plotter.isosurface(points,scalars,contours,dimensions,name)
return
#end def isosurface
def surface_slice(self,quantity,x,y,z,options=None):
if quantity not in SpaceGridBase.quantities:
self.error()
#end if
points = empty( (x.size,self.DIM) )
points[:,0] = x.ravel()
points[:,1] = y.ravel()
points[:,2] = z.ravel()
val = self.interpolate(points,[quantity])
scalars = val[quantity].mean
scalars.shape = x.shape
self.plotter.surface_slice(x,y,z,scalars,options)
return
#end def surface_slice
def plot_axes(self,color=None,radius=.025,origin=None):
if color is None:
color = (0.,0,0)
#end if
if origin is None:
origin = array([0.,0,0])
#end if
colors=array([[1.,0,0],[0,1.,0],[0,0,1.]])
for d in range(self.DIM):
a=self.axes[:,d]+origin
ax=array([-a[0],a[0]])
ay=array([-a[1],a[1]])
az=array([-a[2],a[2]])
self.plotter.plot3d(ax,ay,az,tube_radius=radius,color=tuple(colors[:,d]))
#end for
return
#end def plot_axes
def plot_box(self,color=None,radius=.025,origin=None):
if color is None:
color = (0.,0,0)
#end if
if origin is None:
origin = array([0.,0,0])
#end if
p = self.points
p1=p.cmmm+origin
p2=p.cmpm+origin
p3=p.cpmm+origin
p4=p.cppm+origin
p5=p.cmmp+origin
p6=p.cmpp+origin
p7=p.cpmp+origin
p8=p.cppp+origin
bline = array([p1,p2,p4,p3,p1,p5,p6,p8,p7,p5,p7,p3,p4,p8,p6,p2])
self.plotter.plot3d(bline[:,0],bline[:,1],bline[:,2],color=color)
return
#end def plot_box
#end class RectilinearGrid
class VoronoiGridInitializer(SpaceGridInitializer):
    '''Initializer for VoronoiGrid; adds no state beyond SpaceGridInitializer.'''
    def __init__(self):
        SpaceGridInitializer.__init__(self)
    #end def __init__
#end class VoronoiGridInitializer
class VoronoiGrid(SpaceGridBase):
    '''Space grid whose domains are Voronoi cells (per-atom regions,
    judging by reorder_atomic_data — confirm against SpaceGridBase).'''
    def __init__(self,initobj=None,options=None):
        SpaceGridBase.__init__(self,initobj,options)
        return
    #end def __init__
    def copy(self,other):
        # delegate copying to the constructor's init-from-object path
        return VoronoiGrid(other)
    #end def copy
    def reorder_atomic_data(self,imap):
        '''Permute per-domain quantity arrays by the index map "imap".'''
        for q in self.quantities:
            qv = self[q]
            # imap permutes the last axis of mean/error
            qv.mean = qv.mean[...,imap]
            qv.error = qv.error[...,imap]
        #end for
        # membership test relies on SpaceGridBase's mapping protocol
        if 'data' in self:
            data = self.data
            for q in self.quantities:
                # raw data carries one extra trailing axis after the
                # permuted one (presumably samples/blocks — verify)
                data[q] = data[q][...,imap,:]
            #end for
        #end if
    #end def reorder_atomic_data
#end class VoronoiGrid
def SpaceGrid(init,opts=None):
    '''Factory: construct the grid subclass matching init's coordinate.

    NOTE(review): "coordinate" is only assigned when init is an HDFgroup;
    any other init type hits a NameError at the coord lookup below —
    confirm whether other init types were meant to be supported.
    '''
    SpaceGrid.count+=1
    iname = init.__class__.__name__
    if iname=='HDFgroup':
        coordinate = init.coordinate[0]
    #end if
    coord = SpaceGrid.coord_n2s[coordinate]
    if coord in SpaceGrid.rect:
        return RectilinearGrid(init,opts)
    elif coord=='voronoi':
        return VoronoiGrid(init,opts)
    else:
        print 'SpaceGrid '+coord+' has not been implemented, exiting...'
        exit()
    #end if
#end def SpaceGrid
# Function attributes acting as module-level registry state for SpaceGrid():
#   count     -- number of SpaceGrid instances created so far
#   coord_n2s -- coordinate id -> name map (shared with SpaceGridBase)
#   rect      -- coordinate names handled by RectilinearGrid
SpaceGrid.count = 0
SpaceGrid.coord_n2s = SpaceGridBase.coord_n2s
SpaceGrid.rect = set(['cartesian','cylindrical','spherical'])
| habanero-rice/hcpp | test/performance-regression/full-apps/qmcpack/nexus/library/spacegrid.py | Python | bsd-3-clause | 45,391 | [
"QMCPACK"
] | 0696880867ab09ba75ea54697525e9161fd2bca003d1f3e590cedff7bad0014c |
#! /usr/bin/env python3
#
# SCANIT - Control A spectrometer and collect data
#
# LICENSE:
# This work is licensed under the Creative Commons Zero License
# Creative Commons CC0.
# To view a copy of this license, visit
# http://directory.fsf.org/wiki/License:CC0
# or send a letter to:
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#
# Author: James Luscher, jluscher@gmail.com
#
import sys, string, time
import serial
#
from pathlib import Path
#
from tkinter import *
from tkinter import font
from tkinter import filedialog
from tkinter.ttk import Progressbar
# from tkinter import ttk
# from tkinter.scrolledtext import *
import tkinter.messagebox as mBox
# import tkinter.simpledialog as simpledialog
import matplotlib
from matplotlib.widgets import Cursor
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy
from numpy import searchsorted
siTitle = 'SCANIT for RetroSPEX [v032]' # Program name and version
TANBG = '#F8E2CD' # Background color
WARNC = '#FFBBFF' # Warning color (pinkish)
ACTIVB = '#F07748' # activebackground color for buttons
jjltest = True # print messages, testing
comtest = False # print communication diagnostic details (much!)
## code based on example from: http://robotic-controls.com/learn/python-guis/tkinter-serial
# modified for Python3
#
# Serial() argument added: rtscts=1
#
## NOTE: PATCH @ http://sourceforge.net/p/pyserial/patches/37/
# /usr/local/lib/python3.4/dist-packages/serial/serialposix.py
# at (about) line # 480:
# except select.error as e:
# # ignore EAGAIN errors. all other errors are shown
# * # see also http://www.python.org/dev/peps/pep-3151/#select
# * # patch: James Luscher (re:
# * # http://sourceforge.net/p/pyserial/patches/37/ )
# * #if e[0] != errno.EAGAIN:
# * if e.errno != errno.EAGAIN:
# raise SerialException('read failed: %s' % (e,))
#
# communication commands
COMmands = '''
Command => Response Command sent => Response received
----------------------- LOW LEVEL -- FPGA --------------
? => <Help text> Help (display SPEX commands)
c => <header> Clear Screen
i => i Warm Initialize
f => f Reset FPGA
r AA => r AA DD Read DD from address AA (hex)
w AA DD => w AA DD Write data DD to address AA (or V/V??)
s => s AA DD Show AdrREG and DataReg (AA DD ??)
p => p FF Report PMT Control Register setting
b => B n Report Button State, 0/1 (Off/On)
v n => v n Verbose n=0/1 (Off/On)
----------------------- HIGH LEVEL -- SPECTROMETER -----
L n => L n Set LED n=0/1 (Off/On)
D n FFFF => D n FFFF Load DAC #n with FFFF (hex)
A n => A n FFFF Report High Voltage on #n
E n => E n Enable PMT counter #n (0~7), Clears count
T 7FFFFFFF => T 7FFFFFFF Set Integration time, milliseconds
> => Wait For Bang Start Measurement ('!' signals Done)
P n => P n FFFEFFFEFFFF Dump PMT counter #n (0~2)
X s7FFFFFFF => X s7FFFFFFF Move eXcitation, s=+/- (direction), 7FFFFFFF (steps)
M s7FFFFFFF => M s7FFFFFFF Move eMission, s=+/- (direction), 7FFFFFFF (steps)
----------------------- CONTROLLER INITIATED ALERTS ----
=> ! FF Limit reached [bits?] Motion (done?) time (done?)
=> # n Button activity (reports state? 0/1 (Off/On))
'''
COMchr0 = list('?cifrwspbvLDAET>PXM')
RSPalert = ['!','#']
RSPnorm = ['?','r','s','p','B','A','P']
#make our own buffers
serialPort = None # we always start before any port is found
portName = 'OFFLINE' # ... and any connection established
serOutReady = False # RetroSPEX has been Initialized
#
serInBuffer = "" # 'string' type (character input storage)
serOutBuffer = "".encode() # 'byte' type
serInLines = [] # list of complete input lines
#=====================================================================
## SCANIT Window (GUI window for Spectrometer Control & Data Capture)
#
siWinW = 1260 # width
siWinH = 760 # height
#
siWin = Tk()
siWin.title(siTitle)
siWin['bg'] = TANBG # background color
if jjltest:
siWin.geometry('+670+50') # shift to right for testing
transGeom = '+780+250' # ... for 'transient' screens
else:
siWin.geometry('{}x{}+0+0'.format(siWinW,siWinH))
transGeom = '+110+200' # ... for 'transient' screens
#siWin.geometry('{}x{}+80+50'.format(siWinW,siWinH))
#siWin.geometry('+50+50') # window in upper-left of screen
#
monoFont10 = font.Font(family='Ubuntu Mono', size=10)
monoFont12 = font.Font(family='Ubuntu Mono', size=12)
monoFont14 = font.Font(family='Ubuntu Mono', size=14)
monoFont16 = font.Font(family='Ubuntu Mono', size=16)
monoFont24 = font.Font(family='Ubuntu Mono', size=24)
#=====================================================================
## Global variables (for Spectrometer Control & Data Capture)
#
#==============
# settings: configuration data (from 'settings.txt')
#
#
#
#
# Transient (settable but not saved/restored)
offLine = True # No Spectrometer connection made (serial - USB)
#
# User Default Settings to be used for Measurement
# (settable and saved/restored)
varEXinc = StringVar()           # Setting EX Inc Wavelength (nm)
varEMinc = StringVar()           # Setting EM Inc Wavelength (nm)
varTMinc = StringVar()           # Setting TM Inc time (s)
varEXslit = StringVar()          # Slit size EX (nm)
# fixed: this line previously re-assigned varEXslit, so the EM slit
# setting (per its own comment) never got its own variable
varEMslit = StringVar()          # Slit size EM (nm)
varEMhv = StringVar()            # EM PMT high voltage (v)
varREFhv = StringVar()           # REF PMT high voltage (v)
varREFdiode = StringVar()        # REF DIODE Gain setting [0,1,2,3]
#
#==============
# scan data acquired
#
scanDataX = [] # X value sample was taken at (wavelength / time)
scanDataY = [] # Y value of sample - PMT counts
#
ax = None # forward referenc for Plot Object (setPlotTitle())
#
#==============
# background: input data from previous scan (for reference)
#
inputFileHdr = [] # Header section from fileLoad
inputFileData = [] # Data section from fileload
#
backgroundDataX = [] # X value sample was taken at (wavelength / time)
backgroundDataY = [] # Y value of sample - PMT counts
#
#==============
# dayfile: data about the experiments being done today
#
dayFileData = [] # Data section from fileload / or for writing
#
varDayDate = StringVar() # Date this data was entered
varDayMeaning1 = StringVar() # Meaning of Experiment
varDayMeaning2 = StringVar() # Meaning of Experiment
varDayMeaning3 = StringVar() # Meaning of Experiment
varDayEXslit = StringVar() # Excitation slit wavelength nm
varDayEMslit = StringVar() # Emission slit Wavelength nm
varDayBulb = StringVar() # Bulb Intensity
varDayNotebook = StringVar() # Notebook Page
varDayOther1 = StringVar() # Other comments
varDayOther2 = StringVar() # Other comments
varDayOther3 = StringVar() # Other comments
#
#==============
# type of scan
EXscan = 0
EMscan = 1
TMscan = 2
varScanMode = IntVar() # Determines type of scan taken
varRTDsignal = StringVar() # Real Time Data
varRTDreference = StringVar() # Real Time Data
#
# settings used for scanned data waveforms
#
varEXwaveStart = StringVar() # Excitation Start Wavelength nm
varEXwaveEnd = StringVar() # Excitation End Wavelength nm
varEXwaveInc = StringVar() # Excitation Inc Wavelength nm
#
varEMwaveStart = StringVar() # Emission Start Wavelength nm
varEMwaveEnd = StringVar() # Emission End Wavelength nm
varEMwaveInc = StringVar() # Emission Inc Wavelength nm
#
varTMwavePause = StringVar() # Pause (s)
varTMwaveEnd = StringVar() # End (s)
#
varEXslit = StringVar() # Inc time (s)
varEMslit = StringVar() # Inc time (s)
#
varSpecimenDetails = StringVar() # Description of sample
#
varEXstepsNm = StringVar() # EX StepMotor steps per (nm)
varEMstepsNm = StringVar() # EM StepMotor steps per (nm)
#
varEXposition = StringVar() # EX monochrometer position (nm)
varEMposition = StringVar() # EM monochrometer position (nm)
#
varPCTdone = IntVar() # % completion of scan
varPCTdone.set(45) # testing: software completion % ;-)
#
MINnm = 200 # Minimum nanoMeters for monochrometer position
MAXnm = 1000 # Maximum nanoMeters for monochrometer position
#
#
# system and communication constants:
#
thisSys = sys.platform # 'linux','win32,'cygwin','darwin'
firmwareVer = '' # RetroSPEX_Rev-8 (for example)
print('This System: {}'.format(thisSys))
#
if thisSys == 'linux':
    portList = ['/dev/ttyACM0','/dev/ttyACM1', '/dev/ttyACM2', 'OFFLINE']
elif thisSys == 'win32':
    # build the list first, then extend it: list.append() returns None,
    # so the previous chained call left portList set to None
    portList = ['COM' + str(x) for x in range(99, 0, -1)]
    portList.append('OFFLINE')
    # ports = ['COM99', 'COM98', ... 'COM2', 'COM1', 'OFFLINE']
else:
    # fixed: messagebox is imported as mBox, and "showifo" was a typo;
    # also format the message before showing it (tuples have no .format)
    mBox.showinfo('ERROR', 'Operating System not recognized: {}'.format(thisSys))
#
EOL = '\n'
OKser = 1 # serial character sent OK. (number sent > 0 Ubuntu)
if thisSys == 'linux':
    #monoFont = font.Font(family='Ubuntu Mono', size=10)
    monoFont = font.Font(family='Ubuntu Mono', size=16)
elif thisSys == 'win32':
    monoFont = font.Font(family='Miriam Fixed', size=10)
    EOL = '\r\n'
    OKser = 0 # serial character sent OK (no error).
else:
    mBox.showinfo('ERROR', 'Operating System not recognized: {}'.format(thisSys))
#
#
def setScanMode_FR(mode): # Forward Reference for setting Scan Mode
    # Placeholder: rebound to the real implementation later in the file;
    # until then it only logs (when jjltest) and does nothing.
    if jjltest:
        print('CALLED: setScanMode_FR(mode) => pass')
    pass
def setScanMode(mode):
    # Stable entry point: always delegates through the forward-reference
    # hook so early callers pick up the real implementation once bound.
    if jjltest:
        print('CALLED: setScanMode(mode) => setScanMode_FR(mode)')
    setScanMode_FR(mode)
def updatePlot_FR(): # Forward Reference FUNCTION NAME ... for updating Plot
    # Placeholder: rebound to the real plot-update routine later.
    pass
#
def updatePlot(event=None): # Call the function defined later on...
    # "event" is accepted so this can be bound directly as a Tk event handler.
    updatePlot_FR() # ... maps old references to the new routine
#===================
## Utility functions
#
# Set and Read High Voltage Power Supply
#
# D 1 FFFF -> 1000 v (neg) Emission PMT
# 0 ~ 65535 -> 1000 v : 65.535 / volt
#
# HV 1:
# SET: [ 900 to E666] :: E666/FFFF -> 0.90000 (58982/65535)*1000 = 900.00
# READ: [BCD4 to 900] :: BCD4/D1B6 -> 0.90042 (48340/53686)*1000 = 900.42
#
# 2.048/2.5 = 0.8192 ** ratio of DAC/ADC reference voltages
# 65.535 * 0.8192 = 53.686 ** ADC conversion divisor (53686) / D1B6
#
#
# HV 1:
# VOLTStoHEX = hex(int(volts * 65.535))[2:]
# 900.0 * 65.535 => hex( int( 58982 ))[2:] = 'e666'
# HEXtoVOLTS = int(setHV1str,16) /( 65.535 * 0.8192 )
# (BCD4) 48340 / 53.686 => 900.42
#
#----
def VOLTStoHEX(volts):
    ''' DAC: 1000.0 volts full scale (FFFF).
    (for setting DAC output)
    VOLTStoHEX(1000.0) => 'FFFF'
    VOLTStoHEX( 900.0) => 'E665' '''
    counts = int(volts * 65.535)   # DAC counts: 65.535 counts per volt
    return hex(counts)[2:].upper()
#
#
#----
def HEXtoVOLTS(ADChexStr):
    '''ADC: 1000.0 volts full scale (D1B6).
    (for scaling ADC input)
    HEXtoVOLTS('D1B6') => 1000
    HEXtoVOLTS('BCD4') => 900 '''
    counts = int(ADChexStr, 16)
    # 53.686 counts per volt (65.535 DAC counts/V * 0.8192 reference ratio);
    # +0.5 rounds to the nearest volt
    return int(counts / 53.686 + 0.5)
#
#
#----
def updateTitle():
    '''Display com port, spectrometer firmware and System information
    on window title bar.'''
    # reads module globals thisSys/portName/firmwareVer set during startup
    msgSys = ', System: {}'.format(thisSys)
    msgPort = ', Port: {}'.format(portName)
    msgSPEX = ', Firmware: {}'.format(firmwareVer)
    siWin.wm_title( siTitle + msgSys + msgPort + msgSPEX)
    return
def digitsOnly(text):
    '''Keep only the digit characters of "text", without leading zeros.

    An input holding no digits yields '0'.
    '''
    kept = ''.join(c for c in text if c in string.digits)
    if not kept.strip():
        kept = '0'
    return str( int(kept) ) # no leading zeros
def floatOnly(text):
    '''Sanitize "text" into "<int>.<frac>" keeping only digits.

    Digits before the first '.' form the integer part (leading zeros
    trimmed, at least one digit kept); digits after it form the
    fraction, truncated to millisecond (3-digit) resolution.  All other
    characters are discarded.  The '.' is always present in the result,
    even when the input had none.
    '''
    whole, _, frac = text.partition('.')
    int_part = ''.join(c for c in whole if c in string.digits)
    frac_part = ''.join(c for c in frac if c in string.digits)
    # trim leading zeros but keep at least one digit
    int_part = int_part.lstrip('0') or '0'
    # resolution limited to mS
    return int_part + '.' + frac_part[:3]
def getVarInt(v):
    '''get StringVar's value as int() (0 when blank).'''
    text = v.get()
    if not text.strip():
        return 0
    return int(text)
def getVarFloat(v):
    '''get StringVar's float value (0.0 when blank).'''
    text = v.get()
    if not text.strip():
        return 0.0
    return float(text)
def setFocus(obj):
    '''Give keyboard focus to the supplied widget.'''
    obj.focus_set()
def toggleBtnVar(var, btn, iconOff, iconOn):
    '''Flip a boolean flag, show the matching icon on "btn",
    and return the new flag value.'''
    var = not var
    btn['image'] = iconOn if var else iconOff
    return var
def getDateToday():
    '''Return today's local date as "YYYY-MM-DD".'''
    year, month, day = time.localtime()[:3]
    return '{}-{:02d}-{:02d}'.format(year, month, day)
def timeNow():
    '''Return the local timestamp as "YYYY-MM-DD;HH:MM".'''
    year, month, day, hour, minute = time.localtime()[:5]
    return '{}-{:02d}-{:02d};{:02d}:{:02d}'.format(year, month, day, hour, minute)
#
def setPlotTitle():
    '''Plot Title is 2 lines:
    Top; filename where scan data was saved (post acquisition).
    Second; filename where Reference data was loaded from. (background).
    NOTE(review): currently a stub — no title is actually applied.'''
    return
def readSerial():
    '''Poll the serial port and assemble complete lines into serInLines.

    Reschedules itself every 20 ms via Tk's event loop (siWin.after).
    Also performs the one-time handshake: the first received line
    starting with "RetroSPEX" marks the controller ready (serOutReady)
    and a '*' byte is sent back as acknowledgement.
    '''
    global serInBuffer, serOutReady, serInLines, serOutBuffer, firmwareVer
    full = True
    while full:
        #    try:
        #        c = serialPort.read() # attempt to read a 'byte' from Serial
        #        if c != b'':
        #            print('readSerial(), RECV: {}'.format(c))
        #        else:
        #            full = False
        #    except:
        #        siWin.after(20, readSerial) # check serial again soon
        #        return
        c = serialPort.read() # attempt to read a 'byte' from Serial
        #print('RECV: {}'.format(c))
        c = c.decode('utf-8') # CONVERT 'bytes' to 'string' type
        #was anything read and decoded?
        if len(c) == 0:
            if comtest:
                print('^',end='')
            # nothing pending: stop polling until the next 20 ms tick
            full = False
            continue
        #    else: # print each received character
        #        print('RECV: {}'.format(c))
        # check if character is a delimeter
        if c == '\r':
            continue # don't want returns. ignore it
        # synch up first time
        if serOutReady == False and c == '\n':
            #
            # test for RetroSPEX initialized
            #
            if serInBuffer.startswith('RetroSPEX'):
                if jjltest:
                    print('\nRetroSPEX DETECTED')
                firmwareVer = serInBuffer[:]
                updateTitle() # Title includes RetroSPEX Rev-#
                serInBuffer = ''
                serInLines = []
                serOutReady = True # RetroSPEX is ready !
                serOutBuffer = b'*' # send a response byte
                xmitSerial() # acknowledge synch to RetroSPEX
            # discard any line received before the handshake completed
            continue
        if c == '\n':
            if serInBuffer.startswith('Retro'):
                firmwareVer = serInBuffer[:] # RetroSPEX firmware version
                updateTitle()
            else:
                # the buffer contains an entire line - less the 'newline'
                # proccess the line now.
                if jjltest:
                    print('LINE: {}'.format(serInBuffer))
                serInLines.append(serInBuffer) # add to list of input lines
                # NOTE(review): buffer is only cleared on this branch;
                # after a 'Retro...' firmware line it is left intact — verify
                serInBuffer = '' # empty the buffer
        else:
            serInBuffer += c # add to the buffer
    siWin.after(20, readSerial) # check serial again soon
    return
def xmitSerial():
    '''Drain serOutBuffer one byte at a time over the serial port.

    NOTE(review): siWin.after(2, xmitSerial) inside the loop schedules
    an extra callback per byte sent, in addition to the trailing
    siWin.after(20, ...) — pending callbacks multiply with every call;
    confirm whether the in-loop reschedule is intentional.
    '''
    global serOutBuffer,serialPort,serOutReady
    if comtest:
        print('.',end='')
    while serOutReady and len(serOutBuffer) > 0: # Anything to send out?
        # send byte
        c = serOutBuffer[0:1]
        nSent = serialPort.write( c )
        #    print('nSent: {}'.format(nSent))
        if nSent != OKser: # show transmit error
            if comtest:
                patrn = 'xmitSerial({}),ERROR: status => {}, retry.'
                print(patrn.format(c,repr(nSent)))
        else: # GOOD send
            if comtest:
                patrn = 'xmitSerial({}), SENT: status => {}.'
                print(patrn.format(c,repr(nSent)))
            # remove sent character from the buffer
            serOutBuffer = serOutBuffer[1:]
        siWin.after(2, xmitSerial) # check serial out until empty
    siWin.after(20, xmitSerial) # check serial out until empty
    return
def writeSerial(text):
    '''Append "text" to the transmit queue and kick the transmitter.'''
    global serOutBuffer
    #
    # Log Communications
    #    textBox.insert(END, text, 'user_cmd')
    #    textBox.see(END)
    #
    # convert 'string' characters to 'bytes' for output
    serOutBuffer = serOutBuffer + text.encode() # add to transmit buffer
    xmitSerial()
    return
timeCmd = 0
def sendCommand(text,limit):
    '''Send command 'text' to SPEX.
    Wait 'limit' seconds for echo.
    NOTE(review): checkSerialIn() sleeps on the main thread, so the GUI
    is unresponsive for up to "limit" seconds.'''
    global timeCmd
    print('sendCommand("{}")'.format(text))
    timeCmd = int(time.time()) # note the time
    writeSerial(text+EOL)
    checkSerialIn(text,limit) # wait 'limit' seconds for echo
    return
#
def writePositions():
    '''Persist monochrometer positions to "positions.txt".

    Format: a single line "EX: nnnn EM: mmmm", read back by
    readPositions().
    '''
    #
    global varEXposition,varEMposition
    #
    data = 'EX: ' + varEXposition.get() + ' EM: ' + varEMposition.get() + '\n'
    # context manager (fix): guarantees the file is closed even if the
    # write raises; the previous open/write/close leaked on error
    with open('positions.txt','w') as fo:
        fo.write(data)
    return
def notImplemented(msg):
    '''Pop a standard warning dialog for not-yet-implemented features.'''
    mBox.showwarning('NOTICE: Not Implemented',msg)
    return
def readPositions():
    '''Recover monochrometer positions from "positions.txt" file.

    On any failure (missing or malformed file) both positions reset to
    '0' and a fresh positions file is written.
    '''
    #
    global varEXposition,varEMposition
    #
    try: # one line file: "EX: nnnn EM: mmmm"
        # context manager (fix): the file handle was previously never closed
        with open('positions.txt') as f:
            lines = f.read().splitlines()
        for s in lines:
            t = s.split()
            if len(t) == 4 and t[0] == 'EX:' and t[2] == 'EM:':
                varEXposition.set(t[1])
                varEMposition.set(t[3])
    except Exception: # narrowed from bare "except": a bare except would
                      # also swallow KeyboardInterrupt/SystemExit
        varEXposition.set('0')
        varEMposition.set('0')
        writePositions()
    return
def dataFileREAD():
    '''Read Data file, seperate into lists of header and data.

    Prompts the user for a file, then fills the module-level lists:
    inputFileHdr  -- stripped lines before the "..." marker
    inputFileData -- stripped lines after it, up to the "___" marker
    '''
    #
    global inputFileHdr # Header section from fileLoad
    global inputFileData # Data section from fileload
    #
    inputFileHdr = []
    inputFileData = []
    #
    dataFileTypes = [("Data ", ("*.txt","*.TXT")), ]
    dataDir = '~/SCANS'
    fInp = filedialog.askopenfilename(filetypes = dataFileTypes
                                     ,initialdir=dataDir)
    #
    # context manager (fix): the file handle was previously never closed
    with open(fInp) as f:
        lines = f.read().splitlines()
    #
    header = True # looking for header lines first
    #
    for line in lines: # examine each line in list
        if header:
            if line.startswith('...'): # end of Header line mark
                header = False
            else:
                inputFileHdr.append(line.strip()) # save Header lines
        else:
            if line.startswith('___'): # end of Data line mark
                break
            else:
                inputFileData.append(line.strip()) # save data lines
    return
#
#
def checkSerialIn(response,limit):
    '''Verify the expected serial response happened within limit Sec.

    Polls once per second for up to "limit" seconds; only the first
    queued input line is examined.  Returns True when it matches
    "response", False on a mismatch or timeout.
    NOTE(review): time.sleep() here blocks the Tk mainloop.'''
    global timeCmd
    for t in range(limit):
        print('checkSerialIn("{}")'.format(int(time.time())-timeCmd))
        time.sleep(1) # one second
        if len(serInLines) == 0:
            continue
        else:
            print('checkSerialIn({},{})'.format(response,limit))
            line = serInLines[0].strip() # get next line
            if line == response:
                serInLines.pop(0) # remove it
                print('checkSerialIn(-): OK; got({})'.format(response))
                return True
            else:
                # mismatched line is deliberately left queued
                print('checkSerialIn(-): BAD response: ({})'.format(line))
                return False
    print('checkSerialIn(-): timeout.')
    return False # timeout
## Serial - ensure non-blocking
# -- NOTE: rtscts=1 hardware handshake for 'ready' with data
#
# establish serial module
# baudrate for RetroSPEX is 115200 (ArduinoDue max)
#
# look for serial port
def portTry(name):
    '''Test 'name' to see if it is an available serial port.

    Opens the port (115200 baud, non-blocking, RTS/CTS handshake) into
    the module-global serialPort.  Returns True when the port opened,
    False otherwise.
    '''
    global serialPort
    try:
        serialPort = serial.Serial(port=name
                                  ,baudrate=115200
                                  ,timeout=0
                                  ,rtscts=1
                                  ,writeTimeout=0)
        #serialPort.open() just opened !
        serialStatus = serialPort.isOpen()
        print('portTry({}): serialPort.isOpen = {}'.format(name,serialStatus))
        return serialStatus
    except Exception: # narrowed from bare "except": a bare except would
                      # also swallow KeyboardInterrupt/SystemExit
        print('portTry(name): NOT portName: {}'.format(name))
        return False
def portScan():
    '''Search for serial USB port for Spectrometer, or "OFFLINE".

    Tries each candidate in portList; on the first successful open,
    starts the readSerial polling loop and returns.  The 'OFFLINE'
    sentinel (last list entry) triggers the offline branch.
    NOTE(review): the statements after the for-loop are unreachable —
    every iteration path that ends the scan returns first.'''
    global serialPort, portList, portName, serInLines
    #
    for portName in portList :
        if portName != 'OFFLINE':
            if portTry(portName): # a serialPort found
                readSerial() # start monitoring serial input
                if comtest:
                    print('FOUND serialPort={}'.format(serialPort))
                    print('FOUND portName: {}'.format(portName))
                updateTitle() # new "Port" name
                return
        #        # send LED off
        #        sendCommand('L 0',2)
        #        # # check response: echo in 2 sec
        #        # retro = checkSerialIn('L 0',20)
        #        # # send LED on
        #        sendCommand('L 1',2)
        #        # # check response: echo in 200 ms
        #        # retro = checkSerialIn('L 1',20)
        #        #
        else: # end of list 'OFFLINE' reached, no port found
            print('Serial Port not found... Operating OFFLINE')
            # show ports tried
            if comtest:
                for name in portList[0:-1]:
                    print('tried: {}'.format(name))
            #
            # operating OFFLINE
            updateTitle() # new "Port" or "OFFLINE"
            return
    title = siTitle + ', Port: {} on System: {}'
    siWin.wm_title( title.format(portName, thisSys))
    return
#
## 'sanityCheck' functions
#
# COMPARISONS:
# Within [min <= var <= max]
# Order [var1 < var2]
# Min [min <= var]
#
# Button lookup dictionary - defined as buttons are created below
btnLookup = {} # entries of form: 'EXS':<EX-Start_button>
# 'EXE':<EX-END_button>
# test variable min max EntryType
chkEntVal =[ [['Within', varEXwaveStart, MINnm, MAXnm] , 'EXS' ]
, [['Within', varEXwaveEnd, MINnm, MAXnm] , 'EXE' ]
, [['Order' , varEXwaveStart, varEXwaveEnd] , 'EXE' ]
, [['Min' , varEXinc, 1] , 'EXI' ]
, [['Within', varEMwaveStart, MINnm, MAXnm] , 'EMS' ]
, [['Within', varEMwaveEnd, MINnm, MAXnm] , 'EME' ]
, [['Order' , varEMwaveStart, varEMwaveEnd] , 'EME' ]
, [['Min' , varEMinc, 1] , 'EMI' ]
, [['Min' , varTMwaveEnd, 0.100] , 'TME' ]
, [['Order' , varTMwavePause, varTMwaveEnd] , 'TME' ]
, [['Min' , varTMinc, 0.001] , 'TMI' ]
]
#
def scanSanityCheck(warn = False):
    '''Check that measurement parameters have "sane" values.
    If not color Entry field WARNC color.
    If "warn" argument is True also generate popup message.
    NOTE(review): the popup for warn=True is not implemented yet.

    Driven by the module-level chkEntVal table; each entry pairs a test
    spec ('Min'/'Within'/'Order' plus variables and limits) with the
    key of the Entry widget to color.  Returns True when all enabled
    fields pass.'''
    #
    isSane = True # start assuming that no errors were found ;-)
    #
    for e in chkEntVal:
        test,entryType = e # get test list and Entry-Type
        #
        # are any of these Entry objects 'DISABLED'?
        # - don't check values for disabled Entry fields
        if btnLookup[entryType]['state'] == DISABLED:
            continue # try next test
        #
        if test[0] == 'Min': # is entry at least equal to the minimum
            #print('sanity()"Min":{}; {}'.format(test,entryType))
            #if jjltest:
                #print('scanSanityCheck()...: test[0]={}'.format(test[0]))
                #print('.........entryType={}'.format(entryType))
            # entry keys starting with 'T' hold time values, parsed as float
            if entryType[0] == 'T': # float value for Time settings
                #print('sanity()"entryType": {}'.format(entryType))
                var = getVarFloat(test[1])
                #print('sanity() var: {}'.format(var))
            else:
                var = getVarInt(test[1])
            #if entryType == 'TMI':
            #    print('TMI:.........var={} < min={}'.format(var,test[2]))
            if var < test[2]: # BELOW minimum = Error
                isSane = False
                bgColor = WARNC
            else:
                bgColor = 'white'
        elif test[0] == 'Within': # entry not OUTSIDE limits
            #if jjltest:
                #print('scanSanityCheck()...: test[0]={}'.format(test[0]))
                #print('.........entryType={}'.format(entryType))
            var = getVarInt(test[1])
            #print('.........var={} < min={}'.format(var,test[2]))
            limLow = test[2]
            limHi = test[3]
            #print('.........limLow={} < limHi={}'.format(limLow,limHi))
            if var < limLow or var > limHi: # outside range
                isSane = False
                bgColor = WARNC
            else:
                bgColor = 'white'
        elif test[0] == 'Order': # entry 1 less-than entry 2
            #if jjltest:
                #print('scanSanityCheck()...: test[0]={}'.format(test[0]))
                #print('.........entryType={}'.format(entryType))
            if entryType[0] == 'T': # float value
                print('scanSanityCheck() #318... test[1]={}, '
                      'test[2]={}'.format(test[1],test[2]))
                var1 = getVarFloat(test[1])
                var2 = getVarFloat(test[2])
                print('scanSanityCheck() #322... var1={}, var2={}'.format(
                      var1,var2))
            else:
                var1 = getVarInt(test[1])
                var2 = getVarInt(test[2])
            #print('.........var1={} < var2={}'.format(var1,var2))
            if var1 >= var2: # improper order
                isSane = False
                bgColor = WARNC
            else:
                bgColor = 'white'
        #
        # set the selected color for the Entry object
        # (note: the last matching test for a key wins the color)
        btnObj = btnLookup[entryType]
        btnObj['bg'] = bgColor # set button color
    return isSane
#
## 'legacy' data file input functions
def dataFileMGET():
    '''Read Data file, separate into header and data.
    Parse header into measurement parameters.
    Set the parameters for taking another scan.

    Only the legacy "classic format" Emission-scan header is handled;
    any other scan type terminates the program (see sys.exit below).
    '''
    global inputFileHdr # Header section from fileLoad
    #
    dataFileREAD() # read in data file, prepare header list
    #
    # Parse Header information - "classic format"
    # Emission only - for now
    scanIs = None
    for line in inputFileHdr:
        if line.startswith('Emission Scan'):
            scanIs = EMscan
            break
    #
    if scanIs == EMscan: # restore measurement variables
        #
        setScanMode(EMscan)
        #
        # StringVars restored from the header:
        # varEMwaveStart = StringVar() # Emission Start Wavelength nm
        # varEMwaveEnd = StringVar() # Emission End Wavelength nm
        # varEMwaveInc = StringVar() # Emission Inc Wavelength nm
        # varTMinc = StringVar() # Time Inc time S
        # varEXwaveStart = StringVar() # Excitation Start Wavelength nm
        #
        for line in inputFileHdr:
            if line.startswith('Start '): # Start , End
                s,e = line.split(',')
                s = s.split(' ')[1] # "Start 5.000000e+002"
                n = int( float( s)) # scientific notation -> whole nm
                varEMwaveStart.set( str(n))
                #
                e = e.strip()
                e = e.split(' ')[1] # "End 7.000000e+002"
                n = int( float( e))
                varEMwaveEnd.set( str(n))
                continue
            if line.startswith('Increment '):
                c,t = line.split(',')
                c = c.split(' ')[1] # "Increment 1.000000e+000"
                n = int( float( c))
                varEMwaveInc.set( str(n))
                #
                t = t.strip()
                t = t.split(' ')[2] # "Integration Time 1.000000e-001"
                n = float( t) # integration time stays fractional (seconds)
                varTMinc.set( str(n))
                continue
            # NOTE: the 'Slits:' test must come before the plain
            # 'Excit Mono' test - both share the same prefix.
            if line.startswith('Excit Mono Slits:'):
                continue
            if line.startswith('Excit Mono'):
                x = line.split(' ')[2] # "Excit Mono 4.880000e+002"
                n = int( float( x))
                varEXwaveStart.set( str(n))
    else:
        # if scanIs != EMscan: # Error
        # NOTE(review): this exits the whole application for any
        # non-Emission data file - confirm that is intended.
        if jjltest:
            print("Can't handle non-Emission Scan yet.")
        sys.exit(0)
    scanSanityCheck() # tint any restored values that are out of range
    return
def dataFileLOAD():
    '''Read Data file, separate into header and data.
    Parse header into measurement parameters.
    Parse data into x,y values for plotting.
    '''
    global inputFileData   # Data section from fileload
    global backgroundDataX # X value sample was taken at (wavelength / time)
    global backgroundDataY # Y value of sample - PMT counts
    #
    # Read the data file and restore measurement parameters; this also
    # fills inputFileData with the tab-separated data lines.
    dataFileMGET()
    #
    # Rebuild the background curve from the "position<TAB>value" records.
    xValues = []
    yValues = []
    for record in inputFileData:
        position, count = record.split('\t')
        xValues.append(int(float(position)))
        yValues.append(float(count))
    backgroundDataX = xValues
    backgroundDataY = yValues
    updatePlot()
#
## 'dayfile.txt' - functions for recording Experimental Plan
#
#
# 'dayfile.txt' format:
#
# DATE: 2015-01-29
# Meaning of Experiment:
# #m#... (text: additional lines of meaning)
# Slit Widths EX: 2 (integer in nm)
# Slit Widths EM: 2 (integer in nm)
# Bulb Intensity: ?? (integer in ??)
# Notebook page: ?? (text)
# Other comments:
# #c#... (text: additional lines of comments)
#
# dayFileData = [] # Data section from fileload
# #
# varDayDate = StringVar() # Date this data was entered
# varDayMeaning1 = StringVar() # Meaning of Experiment
# varDayMeaning2 = StringVar() # Meaning of Experiment
# varDayMeaning3 = StringVar() # Meaning of Experiment
# varEXslit = StringVar() # Excitation slit size nm
# varEMslit = StringVar() # Emission slit size nm
# varDayBulb = StringVar() # Measured Bulb Intensity
# varDayNotebook = StringVar() # Notebook Page for Experiment Data
# varDayOther1 = StringVar() # Other comments
# varDayOther2 = StringVar() # Other comments
# varDayOther3 = StringVar() # Other comments
#
def makeDayFile():
    '''Create new GUI screen for entering Experimental Data.
    This data is constant for each day and recorded with data scans.

    Shows a modal Toplevel dialog (transient to siWin); when the user
    presses DONE the collected values are written to 'dayfile.txt'.'''
    #
    if jjltest:
        print('makeDayFile()')
    #
    varDayDate.set( getDateToday() )
    #
    froot = Toplevel()
    froot.title('Edit Experiment Information for {}'.format(varDayDate.get()))
    froot.geometry(transGeom)
    #siWin.withdraw()
    #
    # ========
    #
    #-------
    frootFrame = Frame(froot, bg = TANBG)
    frootFrame.grid()
    #-------
    dayTopFrame = LabelFrame(frootFrame, bg = TANBG, borderwidth=4
                             ,text=' Meaning of Experiment: '
                             , font=monoFont14)
    dayTopFrame.grid(row = 0, padx=4, pady=4, sticky=NSEW)
    #
    #
    # Three free-text "meaning" lines; <Return> advances focus down the
    # chain of Entry widgets (see the .bind calls after each widget).
    #-------
    varDayMeaning1.set('')
    dayMeanEnt1 = Entry(dayTopFrame, textvariable=varDayMeaning1
                        ,border=2, relief=SUNKEN, width=60
                        ,font=monoFont14 )
    dayMeanEnt1.grid(row=1, padx=4, pady=0, sticky=EW)
    dayMeanEnt1.focus_set()
    #-------
    varDayMeaning2.set('')
    dayMeanEnt2 = Entry(dayTopFrame, textvariable=varDayMeaning2
                        ,border=2, relief=SUNKEN, width=60
                        ,font=monoFont14 )
    dayMeanEnt2.grid(row=2, padx=4, pady=0, sticky=EW)
    dayMeanEnt1.bind("<Return>", lambda e: setFocus(dayMeanEnt2))
    #-------
    varDayMeaning3.set('')
    dayMeanEnt3 = Entry(dayTopFrame, textvariable=varDayMeaning3
                        ,border=2, relief=SUNKEN, width=60
                        ,font=monoFont14 )
    dayMeanEnt3.grid(row=3, padx=4, pady=0, sticky=EW)
    dayMeanEnt2.bind("<Return>", lambda e: setFocus(dayMeanEnt3))
    #
    # ========
    #
    #-------
    dayMidFrame = Frame(frootFrame, bg = TANBG, borderwidth=0)
    dayMidFrame.grid(row = 1, sticky=NSEW)
    #
    # Slit Width EX:
    #-------
    daySlitExLab = Label(dayMidFrame, text='Slit Width EX:'
                         , font=monoFont14, bg = TANBG )
    daySlitExLab.grid(row=0, sticky=W)
    #-------
    daySlitExEnt = Entry(dayMidFrame, textvariable=varEXslit
                         ,border=2, relief=SUNKEN, width=20
                         ,font=monoFont14 )
    daySlitExEnt.grid(row=0, column=1, padx=4, pady=4, sticky=E)
    dayMeanEnt3.bind("<Return>", lambda e: setFocus(daySlitExEnt))
    #
    # Slit Width EM:
    #-------
    daySlitEmLab = Label(dayMidFrame, text='Slit Width EM:'
                         , font=monoFont14, bg = TANBG )
    daySlitEmLab.grid(row=1, sticky=W)
    #-------
    daySlitEmEnt = Entry(dayMidFrame, textvariable=varEMslit
                         ,border=2, relief=SUNKEN, width=20
                         ,font=monoFont14 )
    daySlitEmEnt.grid(row=1, column=1, padx=4, pady=4, sticky=E)
    daySlitExEnt.bind("<Return>", lambda e: setFocus(daySlitEmEnt))
    #
    # Bulb Intensity:
    #-------
    dayBulbIntLab = Label(dayMidFrame, text='Bulb Intensity:'
                          , font=monoFont14, bg = TANBG )
    dayBulbIntLab.grid(row=2, sticky=W)
    #-------
    dayBulbIntEnt = Entry(dayMidFrame, textvariable=varDayBulb
                          ,border=2, relief=SUNKEN, width=20
                          ,font=monoFont14 )
    dayBulbIntEnt.grid(row=2, column=1, padx=4, pady=4, sticky=E)
    daySlitEmEnt.bind("<Return>", lambda e: setFocus(dayBulbIntEnt))
    #
    # Notebook Page:
    #-------
    dayNbPageLab = Label(dayMidFrame, text='Notebook Page:'
                         , font=monoFont14, bg = TANBG )
    dayNbPageLab.grid(row=3, sticky=W)
    #-------
    dayNbPageEnt = Entry(dayMidFrame, textvariable=varDayNotebook
                         ,border=2, relief=SUNKEN, width=20
                         ,font=monoFont14 )
    dayNbPageEnt.grid(row=3, column=1, padx=4, pady=4, sticky=E)
    dayBulbIntEnt.bind("<Return>", lambda e: setFocus(dayNbPageEnt))
    #
    # Other Comments:
    #-------
    dayBotFrame = LabelFrame(frootFrame, bg = TANBG, borderwidth=4
                             ,text=' Other comments: ', font=monoFont14)
    dayBotFrame.grid(row = 2, padx=4, pady=4, sticky=NSEW)
    #-------
    dayOtherEnt1 = Entry(dayBotFrame, textvariable=varDayOther1
                         ,border=2, relief=SUNKEN, width=60
                         ,font=monoFont14 )
    dayOtherEnt1.grid(padx=4, pady=0, sticky=EW)
    dayNbPageEnt.bind("<Return>", lambda e: setFocus(dayOtherEnt1))
    #-------
    dayOtherEnt2 = Entry(dayBotFrame, textvariable=varDayOther2
                         ,border=2, relief=SUNKEN, width=60
                         ,font=monoFont14 )
    # NOTE(review): padx=5 here and padx=6 below differ from padx=4
    # used everywhere else - looks accidental; confirm before changing.
    dayOtherEnt2.grid(padx=5, pady=0, sticky=EW)
    dayOtherEnt1.bind("<Return>", lambda e: setFocus(dayOtherEnt2))
    #-------
    dayOtherEnt3 = Entry(dayBotFrame, textvariable=varDayOther3
                         ,border=2, relief=SUNKEN, width=60
                         ,font=monoFont14 )
    dayOtherEnt3.grid(padx=6, pady=0, sticky=EW)
    dayOtherEnt2.bind("<Return>", lambda e: setFocus(dayOtherEnt3))
    #
    # ========
    #
    def makeDayFileDone(root=froot):
        # Close the dialog; the event object passed by .bind is ignored.
        #siWin.deiconify()
        print('makeDayFileDone(root=froot): [A]')
        froot.destroy()
        print('makeDayFileDone(root=froot): [b]')
        return
    #
    #-------
    dayButFrame = Frame(frootFrame, bg = TANBG, borderwidth=4)
    dayButFrame.grid(row = 3, padx=2, pady=2, sticky=NSEW)
    #-------
    dayButBut = Button(dayButFrame, bg = TANBG, borderwidth=4
                       ,text = 'DONE', command = makeDayFileDone
                       ,activebackground=ACTIVB, font=monoFont16)
    dayButBut.grid()
    dayOtherEnt3.bind("<Return>", lambda e: setFocus(dayButBut))
    dayButBut.bind("<Return>", makeDayFileDone)
    #
    # Run modally: block here until the dialog is destroyed.
    froot.transient(siWin)
    froot.grab_set()
    siWin.wait_window(froot)
    #
    # ======== NOW write out the data that was entered
    #
    # NOTE(review): dayFileData below is a LOCAL list - it shadows the
    # module-level dayFileData used by checkDayFile(); confirm whether
    # the global should also be refreshed here.
    dayFileData = [ 'DATE: ' + getDateToday()
                  , 'Meaning of Experiment: '
                  ]
    dayFileData.append( '# ' + varDayMeaning1.get() )
    dayFileData.append( '# ' + varDayMeaning2.get() )
    dayFileData.append( '# ' + varDayMeaning3.get() )
    dayFileData.extend(
        [ 'Slit Widths EX: ' + varEXslit.get()
        , 'Slit Widths EM: ' + varEMslit.get()
        , 'Bulb Intensity: ' + varDayBulb.get()
        , 'Notebook page: ' + varDayNotebook.get()
        , 'Other comments: '
        ] )
    dayFileData.append( '# ' + varDayOther1.get() )
    dayFileData.append( '# ' + varDayOther2.get() )
    dayFileData.append( '# ' + varDayOther3.get() )
    #
    #
    dayf = open('dayfile.txt','w')
    dayf.write( '\n'.join(dayFileData) )
    dayf.close()
    #
    print('makeDayFile(): CREATED')
    print('dayFileData: {}'.format(dayFileData))
    return
#
def checkDayFile():
    '''Read 'dayfile.txt' and if not created today, update it.

    Loads the file contents into the global dayFileData.  A new file is
    created (via makeDayFile) when the file is missing, empty, or its
    first line is not stamped with today's date.'''
    global dayFileData
    #
    try:
        # context manager guarantees the handle is closed on any path
        with open('dayfile.txt','r') as dayf:
            dayFileData = dayf.read().splitlines()
    except OSError:
        # file missing or unreadable (IOError is a subclass of OSError)
        print('dayfile.txt does not exist, CREATE (and write) it.')
        makeDayFile()
        return
    #
    # file have data ?
    if len(dayFileData) < 1: # not one line !
        makeDayFile() # create a new file
        return
    #
    # examine the previous date - first line of file, e.g. "DATE: 2015-01-29"
    date = dayFileData[0].strip()
    if date.startswith( 'DATE: ' + getDateToday()) :
        print('checkDayFile() CURRENT')
        return # file has current data
    # stale date - create a new file
    makeDayFile()
    return
#
## Settings Read (default settings, etc.) for measurement
#
def readSettings():
    '''Read 'settings.txt' and recover default values.

    First loads hard-coded "factory" defaults into the StringVars, then
    overwrites them with any recognized "keyword: value" lines found in
    'settings.txt'.  Ends with scanSanityCheck() so out-of-range values
    are tinted immediately.'''
    if jjltest:
        print('readSettings()')
    #
    # "Factory Default Settings" (if no others are established)
    #
    varEXinc.set('1')       # EX Inc Wavelength (nm)
    varEMinc.set('1')       # EM Inc Wavelength (nm)
    varTMinc.set('0.1')     # TM Inc time (s)
    varEXslit.set('2.9')    # EX slit width (nm)
    varEMslit.set('2.9')    # EM slit width (nm)
    varEMhv.set('-900')     # EM PMT high voltage (v)
    varREFdiode.set('0')    # REF DIODE Gain setting [0,1,2,3]
    # NOTE(review): an earlier comment documented the REF PMT factory
    # default as -450, but the code has always set '0'; behavior is
    # preserved here - confirm the intended value.
    varREFhv.set('0')       # REF PMT high voltage (v)
    # CALIBRATION SETTINGS:
    varEXstepsNm.set('10')  # EX Stepper Motor Cal: steps/nm
    varEMstepsNm.set('10')  # EM Stepper Motor Cal: steps/nm
    #
    # keyword -> StringVar dispatch for parsing 'settings.txt' lines of
    # the form "EXinc: 1", "EMhv: -900", etc.
    settingVars = {
        'EXinc:': varEXinc,
        'EMinc:': varEMinc,
        'TMinc:': varTMinc,
        'EXslit:': varEXslit,
        'EMslit:': varEMslit,
        'EMhv:': varEMhv,
        'REFdiode:': varREFdiode,
        'REFhv:': varREFhv,
        'EXstepsNm:': varEXstepsNm,
        'EMstepsNm:': varEMstepsNm,
    }
    #
    # Now OVER-WRITE FACTORY defaults with the SITE'S SETTINGS
    try:
        with open('settings.txt','r') as fin:
            tmpFile = fin.read().splitlines()
    except OSError:
        # no SITE SETTINGS WERE SAVED - factory defaults remain in force
        if jjltest:
            print('settings.txt does not exist!')
    else:
        for line in tmpFile:
            items = line.split()
            # skip blank/comment/unrecognized lines instead of aborting
            if len(items) >= 2 and items[0] in settingVars:
                settingVars[items[0]].set(items[1])
    #
    scanSanityCheck() # verify ranges are 'reasonable', tint any not so
    return
#
## Settings Edit (default settings, etc.) for measurement
def editSettings():
    '''Edit 'settings.txt' to alter default values.

    Shows a modal Toplevel dialog (transient to siWin).  Pressing DONE
    writes the values back to 'settings.txt', re-applies them via
    readSettings(), and closes the dialog.'''
    #
    edset = Toplevel()
    edset.geometry(transGeom)
    edset.title("Spectrometer Settings")
    #
    #-------
    edsetTop = Frame(edset, bg = TANBG)
    edsetTop.grid()
    #
    # User Default Settings SETTINGS - defaults to load for editing
    #
    # varEXinc = StringVar() # Setting EX Inc Wavelength (nm)
    # varEMinc = StringVar() # Setting EM Inc Wavelength (nm)
    # varTMinc = StringVar() # Setting TM Inc time (s)
    # varEXslit = StringVar() # Setting EX Slit Opening (nm)
    # varEMslit = StringVar() # Setting EM Slit Opening (nm)
    # varEMhv = StringVar() # Setting EM PMT high voltage (v)
    # varREFdiode = StringVar() # Setting for REF DIODE Gain
    # varREFhv = StringVar() # Setting REF PMT high voltage (v)
    #
    #-------
    edsetPf = LabelFrame(edsetTop, text="Site Default Settings."
                         ,bg = TANBG, font=monoFont16
                         ,borderwidth=6)
    edsetPf.grid(row=0, padx=4, pady=4, sticky=EW)
    #
    # EX default increment (nm)
    #-------
    EXiPL = Label(edsetPf, text = "EX default increment (nm):"
                  , bg = TANBG, font=monoFont14)
    EXiPL.grid(row=0, column=0, padx=4, sticky=W)
    #-------
    EXiPE = Entry(edsetPf, textvariable = varEXinc, font=monoFont14)
    EXiPE.grid(row=0, column=1, padx=4, sticky=E)
    #
    # EM default increment (nm)
    #-------
    EMiPL = Label(edsetPf, text = "EM default increment (nm):"
                  , bg = TANBG, font=monoFont14)
    EMiPL.grid(row=1, column=0, padx=4, sticky=W)
    #-------
    EMiPE = Entry(edsetPf, textvariable = varEMinc, font=monoFont14)
    EMiPE.grid(row=1, column=1, padx=4, sticky=E)
    #
    # TM default increment (S)
    #-------
    TMiPL = Label(edsetPf, text = "TM default increment (S):"
                  , bg = TANBG, font=monoFont14)
    TMiPL.grid(row=2, column=0, padx=4, sticky=W)
    #-------
    TMiPE = Entry(edsetPf, textvariable = varTMinc, font=monoFont14)
    TMiPE.grid(row=2, column=1, padx=4, sticky=E)
    #
    # Site Established Settings - due to instrument setup. I.E.
    # CALIBRATION SETTINGS - measured during calibration of spectrometer
    # {stepper motor calibration values - should not need changing}
    # varEXstepsNm = StringVar() # EX StepMotor steps per (nm)
    # varEMstepsNm = StringVar() # EM StepMotor steps per (nm)
    #
    #-------
    edsetCf = LabelFrame(edsetTop, text="Site Established Settings."
                         ,bg = TANBG, font=monoFont16
                         ,borderwidth=6)
    edsetCf.grid(row=1, padx=4, pady=4, sticky=EW)
    #
    # EX Slit size (nm)
    # NOTE(review): the local names EXiPL/EXiPE are re-bound below for
    # new widgets (harmless in Tk, but confusing to read).
    #-------
    EXiPL = Label(edsetCf, text = "EX Slit size (nm):"
                  , bg = TANBG, font=monoFont14)
    EXiPL.grid(row=0, column=0, padx=4, sticky=W)
    #-------
    EXiPE = Entry(edsetCf, textvariable = varEXslit, font=monoFont14)
    EXiPE.grid(row=0, column=1, padx=4, sticky=E)
    #
    # EM Slit size (nm)
    #-------
    EMiPL = Label(edsetCf, text = "EM Slit size (nm):"
                  , bg = TANBG, font=monoFont14)
    EMiPL.grid(row=1, column=0, padx=4, sticky=W)
    #-------
    EMiPE = Entry(edsetCf, textvariable = varEMslit, font=monoFont14)
    EMiPE.grid(row=1, column=1, padx=4, sticky=E)
    #
    # EM PMT high voltage (v)
    #-------
    EMhvL = Label(edsetCf, text = "EM PMT high voltage (v):"
                  , bg = TANBG, font=monoFont14)
    EMhvL.grid(row=2, column=0, padx=4, sticky=W)
    #-------
    EMhvE = Entry(edsetCf, textvariable = varEMhv, font=monoFont14)
    EMhvE.grid(row=2, column=1, padx=4, sticky=E)
    #
    # REF DIODE Gain setting [0,1,2,3]
    # NOTE(review): REFhvL/REFhvE are also re-bound for the next pair of
    # widgets below.
    #-------
    REFhvL = Label(edsetCf, text = "REF DIODE Gain Setting:"
                   , bg = TANBG, font=monoFont14)
    REFhvL.grid(row=3, column=0, padx=4, sticky=W)
    #-------
    # varREFdiode = StringVar() # REF DIODE Gain setting [0,1,2,3]
    REFhvE = Entry(edsetCf, textvariable = varREFdiode, font=monoFont14)
    REFhvE.grid(row=3, column=1, padx=4, sticky=E)
    #
    # REF PMT high voltage (v)
    #-------
    REFhvL = Label(edsetCf, text = "REF PMT high voltage (v):"
                   , bg = TANBG, font=monoFont14)
    REFhvL.grid(row=4, column=0, padx=4, sticky=W)
    #-------
    REFhvE = Entry(edsetCf, textvariable = varREFhv, font=monoFont14)
    REFhvE.grid(row=4, column=1, padx=4, sticky=E)
    #
    # EX Steper Motor Cal: steps/nm
    #-------
    EXnmCL = Label(edsetCf, text = "EX motor steps/nm:"
                   , bg = TANBG, font=monoFont14)
    EXnmCL.grid(row=5, column=0, padx=4, sticky=W)
    #-------
    EXnmCE = Entry(edsetCf, textvariable = varEXstepsNm, font=monoFont14)
    EXnmCE.grid(row=5, column=1, padx=4, sticky=E)
    #
    # EM Steper Motor Cal: steps/nm
    #-------
    EMnmCL = Label(edsetCf, text = "EM motor steps/nm:"
                   , bg = TANBG, font=monoFont14)
    EMnmCL.grid(row=6, column=0, padx=4, sticky=W)
    #-------
    EMnmCE = Entry(edsetCf, textvariable = varEMstepsNm, font=monoFont14)
    EMnmCE.grid(row=6, column=1, padx=4, sticky=E)
    #
    #
    # DONE
    def edsetDone(x=None):
        # Write out Settings to 'settings.txt', re-apply them, and close.
        # The 'x' parameter absorbs an event object if bound to a key.
        fo = open('settings.txt','w')
        tempData = [ '# site default settings'
                   , 'EXinc: ' + varEXinc.get()
                   , 'EMinc: ' + varEMinc.get()
                   , 'TMinc: ' + varTMinc.get()
                   , '# site calibrated settings'
                   , 'EXslit: ' + varEXslit.get()
                   , 'EMslit: ' + varEMslit.get()
                   , 'EMhv: ' + varEMhv.get()
                   , 'REFdiode: ' + varREFdiode.get()
                   , 'REFhv: ' + varREFhv.get()
                   , 'EXstepsNm: ' + varEXstepsNm.get()
                   , 'EMstepsNm: ' + varEMstepsNm.get()
                   ]
        #
        fo.write( '\n'.join(tempData) )
        fo.close()
        # next read in (apply) settings
        readSettings()
        # lastly Close Edit window
        edset.destroy()
        return # ignore
    #
    bDone = Button(edsetTop, text = 'DONE', bg = TANBG, borderwidth=4
                   ,command = edsetDone
                   ,activebackground=ACTIVB, font=monoFont16)
    bDone.grid(row=2,padx=4, pady=2, sticky=W)
    #
    # Run modally: block here until the dialog is destroyed.
    edset.transient(siWin)
    edset.grab_set()
    siWin.wait_window(edset)
    # if jjltest:
    # print( 'editSettings(): edsetDone!')
    return
#
## initialize hardware for RetroSPEX controller
#
def RetroSPEXinit():
    '''Load initial settings into the RetroSPEX controller.

    Intended power-up state (hardware side):
      * High Voltage off (0 volts)
      * Gain set to preset value
      * front-panel LED blink and then OFF, etc.

    Currently a placeholder - performs no hardware I/O.'''
    return
#
## Calibration Input (odometer settings) for monochrometer
#
# varEXposition = StringVar() # EX monochrometer position (nm)
# varEMposition = StringVar() # EM monochrometer position (nm)
#
def monoCal():
    '''Get 'odometer' values for the monochrometers.
    (i.e. Calibrate SPEX monochrometers; EX and EM.)

    Modal dialog: DONE is accepted only when both entered positions are
    within [MINnm, MAXnm]; the verified positions are then saved via
    writePositions().'''
    #
    cal = Toplevel()
    cal.geometry(transGeom)
    cal.title("Monochronometer Calibration")
    #
    calTop = Frame(cal, bg = TANBG)
    calTop.grid()
    #
    calf = LabelFrame(calTop, text="Verify odometer values."
                      ,bg = TANBG, font=monoFont16
                      ,borderwidth=6)
    calf.grid(padx=4,pady=4)
    #
    lEX = Label(calf, text = "EXcitation:"
                , bg = TANBG, font=monoFont14)
    lEX.grid(row=0, column=0, padx=4, sticky=E)
    eEX = Entry(calf, textvariable = varEXposition, font=monoFont14)
    eEX.grid(row=0, column=1, padx=4, sticky=E)
    def eEXchk(x=None):
        # any edit clears a previous warning tint
        eEX['bg'] = 'white'
        return
    eEX.bind('<KeyRelease>',eEXchk)
    eEX.focus_set()
    #
    lEM = Label(calf, text = "EMission:"
                , bg = TANBG, font=monoFont14)
    lEM.grid(row=1, column=0, padx=4, sticky=E)
    eEM = Entry(calf, textvariable = varEMposition, font=monoFont14)
    eEM.grid(row=1, column=1, padx=4, sticky=E)
    def eEMchk(x=None):
        # any edit clears a previous warning tint
        eEM['bg'] = 'white'
        return
    eEM.bind('<KeyRelease>',eEMchk)
    #
    #
    def monoCheck(val, ent):
        '''True if val in 'legal' range, False otherwise.
        Sets Entry field pink when val is outside 'legal'.'''
        n = getVarInt(val)
        if n >= MINnm and n<= MAXnm:
            ent['bg'] = 'white' # 'legal' value
            return True
        else:
            ent['bg'] = WARNC # 'illegal' value
            ent.focus_set()
            return False
    #
    def monoCalDone(x=None):
        # Close window if both values are in 'normal' range
        if monoCheck(varEXposition, eEX) and monoCheck(varEMposition, eEM):
            writePositions() # save Verified positions to file
            cal.destroy()
        return # ignore
    #
    bDone = Button(calTop, text = 'DONE', bg = TANBG, borderwidth=4
                   ,command = monoCalDone
                   ,activebackground=ACTIVB, font=monoFont16)
    bDone.grid(row=1, column=0, padx=4, pady=2, sticky=W)
    #
    # Run modally: block here until the dialog is destroyed.
    cal.transient(siWin)
    cal.grab_set()
    siWin.wait_window(cal)
    print( '\nmonoCal(): done!')
#
## Power Up - operations to sequence initialization of hardware/software
#
def PowerUp():
    '''Load "settings" and calibrate SPEX.

    Power-up sequence: read site settings, scan for the controller's
    serial port, then (when not OFFLINE) initialize the controller,
    verify monochrometer positions, and refresh the day file.'''
    #
    readSettings() # load the Default settings for the spectrometer
    #
    #TODO establish serial connection to RetroSPEX controller
    print("TODO: establish serial connection to RetroSPEX controller")
    print("TODO: or, set to 'offline' mode to look at files, etc.")
    #
    portScan() # search for serialPort to spectrometer
    #
    # portName is a module-level global set by portScan()
    if portName != 'OFFLINE':
        #
        #TODO log "run time" (bulb life? - i.e. need start time)
        #
        #TODO if connected: Initialize RetroSPEX controller settings
        #TODO i.e. HV levels (0 volts initially), 'G'ain setting, etc.
        print("TODO: if connected: Initialize RetroSPEX controller settings")
        RetroSPEXinit()
        #
        #TODO if connected: Move Monochrometers by 10nm (anti-backlash)
        print("TODO: if connected: Monochrometers by 10nm (anti-backlash)")
        readPositions()
        # #TODO recover monochrometer positions 'positions.txt' file
        # print("TODO: recover monochrometer positions 'positions.txt' file")
        #
        # perform monochrometer calibration (verification)
        monoCal()
        #TODO ( => always move POS dir (or sept NEG val+10 and then POS 10)
        #TODO ( => real time display values initialize)
    #
    checkDayFile()
    #
    return
#
## Power Down - operations to sequence shutdown of hardware/software
#
def PowerDown():
    '''Shutdown sequence for the spectrometer hardware.

    When a controller port is connected (portName != 'OFFLINE') the
    controller is returned to its initial (safe) state; most steps are
    still TODO placeholders.'''
    #
    if portName != 'OFFLINE':
        #
        #TODO stop scan if one is in process
        print("TODO: scan if one is in process")
        #
        #TODO if connected: Initialize RetroSPEX controller settings
        #TODO i.e. HV levels (0 volts initially), 'G'ain setting, etc.
        print("TODO: if connected: Initialize RetroSPEX controller settings")
        RetroSPEXinit()
        #
        #TODO log "run time" (bulb life? - i.e. need start time)
        #
        #TODO log data such as monochrometer position on shutdown
        print("TODO: log data such as monochrometer position on shutdown")
    #
    return
#====================================
## Scan Control Frame
#
#-------
# Left-hand column that holds all the control button groups.
controlsFrame = Frame(siWin, bg = TANBG, borderwidth=0)
controlsFrame.grid(row=0,column=0, sticky=N)
#
#-------
scfScanControlFrame = LabelFrame(controlsFrame,text='Control',
                                 bg = TANBG, borderwidth=4)
scfScanControlFrame.grid(row=0,column=0, sticky=N)
## Scan; START/STOP - Spectrometer scan control
#
scanStopIcon = PhotoImage(file='icons/icon_scanSTOP.gif')
scanStartIcon = PhotoImage(file='icons/icon_scanSTART.gif')
# runOn tracks whether a scan is in progress; toggled by toggleScan().
runOn = False # default == OFF
#
def toggleScan():
    '''Scan Start/Stop - Spectrometer scan control'''
    global runOn
    if not runOn:
        # START up a scan - only when parameters pass the sanity check
        sane = scanSanityCheck( warn = True )
        if jjltest:
            print('STARTING A SCAN NOT IMPLEMENTED YET ;-)')
            sane = False # test mode: never actually start
        if sane:
            runOn = True
            runScfB00['image'] = scanStopIcon
        return
    # otherwise a scan is running - STOP it
    if jjltest:
        print('STOPPING NOT IMPLEMENTED YET ;-)')
    runOn = False
    runScfB00['image'] = scanStartIcon
    return
#
#-------
# Scan Start/Stop button; its image flips between the start/stop icons
# inside toggleScan().
runScfB00 = Button(scfScanControlFrame,image=scanStartIcon
                   ,borderwidth = 0,activebackground=ACTIVB
                   ,bg = TANBG, command = toggleScan )
runScfB00.grid(column=0,row=0, padx=2)
## HV - On/Off - High Voltage (red: safety concern)
#
hvOffIcon = PhotoImage(file='icons/icon_hvOff.gif')
hvOnIcon = PhotoImage(file='icons/icon_hvOn.gif')
# hvOn tracks the High Voltage toggle state; flipped by toggleHV().
hvOn = False # default == OFF
#
def toggleHV():
    '''HV - On/Off - High Voltage (red: safety concern)

    Delegates to toggleBtnVar, which flips the boolean and swaps the
    button icon accordingly.'''
    global hvOn
    hvOn = toggleBtnVar(hvOn, hvScfB01, hvOffIcon, hvOnIcon)
    return
#
#-------
# High Voltage on/off button (starts in the OFF state).
hvScfB01 = Button(scfScanControlFrame, image = hvOffIcon
                  ,activebackground=ACTIVB
                  ,borderwidth = 0, bg = TANBG, command = toggleHV)
hvScfB01.grid(column=0,row=1)
#====================================
## Scan Data Frame -- Load previous Scan Data for Reference or Settings recall
#
#-------
# Scan Data group: load a previous scan for reference (LOAD = data+plot)
# or recall only its measurement settings (MGET).
filesFrame = LabelFrame(controlsFrame,text='Scan Data',
                        bg = TANBG, borderwidth=4)
filesFrame.grid(row=1,column=0, padx=2, sticky=NW)
#
# LOAD experimental settings from disk
dataLoadIcon = PhotoImage(file='icons/icon_dataLOAD.gif')
#
#-------
fileFileDataLoad = Button(filesFrame, image=dataLoadIcon
                          , bg = TANBG, activebackground=ACTIVB
                          ,command = dataFileLOAD
                          ,borderwidth = 0, font=monoFont14 )
fileFileDataLoad.grid(row=0, column=0, sticky=NW)
#
#
dataMgetIcon = PhotoImage(file='icons/icon_dataMGET.gif')
#
#-------
fileSettingsGet = Button(filesFrame, image=dataMgetIcon, bg = TANBG
                         ,command = dataFileMGET,activebackground=ACTIVB
                         ,borderwidth = 0, font=monoFont14 )
fileSettingsGet.grid(row=1, column=0,sticky=NW)
#====================================
## Macro Files Frame
#
#-------
# Macro Files group.
# NOTE(review): neither macro button has a command= yet - presumably
# still to be implemented; confirm before release.
macroFrame = LabelFrame(controlsFrame,text='Macro Files',
                        bg = TANBG, borderwidth=4)
macroFrame.grid(row=2,column=0, padx=2, sticky=NW)
#
# LOAD scan settings from disk
macroLoadIcon = PhotoImage(file='icons/icon_macroLOAD.gif')
#
#-------
macroFileLoad = Button(macroFrame, image=macroLoadIcon, bg = TANBG
                       ,borderwidth = 0
                       ,activebackground=ACTIVB, font=monoFont14 )
macroFileLoad.grid(row=0, column=0,sticky=NW)
#
#
macroEditIcon = PhotoImage(file='icons/icon_macroEDIT.gif')
#
#-------
macroFileEdit = Button(macroFrame, image=macroEditIcon, bg = TANBG
                       , borderwidth = 0
                       ,activebackground=ACTIVB, font=monoFont14 )
macroFileEdit.grid(row=1, column=0,sticky=NW)
#====================================
## Settings Frame
#
#-------
# Settings group: single button opening the editSettings() dialog.
settingsFrame = LabelFrame(controlsFrame,text='Settings',
                           bg = TANBG, borderwidth=4)
settingsFrame.grid(row=12,column=0, sticky=S)
#
#
settingsIcon = PhotoImage(file='icons/icon_settings.gif')
#
#-------
settingsBtn = Button(settingsFrame, image=settingsIcon, bg = TANBG
                     ,borderwidth = 0, command = editSettings
                     ,activebackground=ACTIVB, font=monoFont14 )
settingsBtn.grid()
#====================================
## Quit Frame
#
def quitCommand():
    '''Shut down the equipment (PowerDown) then close the main window.'''
    #
    # Shutdown equipment
    #
    PowerDown()
    #
    siWin.destroy()
#-------
quitFrame = LabelFrame(controlsFrame,text='Quit',
                       bg = TANBG, borderwidth=4)
quitFrame.grid(row=13,column=0, sticky=S)
#
#
quitIcon = PhotoImage(file='icons/icon_quit.gif')
#
#-------
quitBtn = Button(quitFrame, image=quitIcon, bg = TANBG, borderwidth = 0
                 ,command = quitCommand
                 ,activebackground=ACTIVB, font=monoFont14 )
quitBtn.grid()
#====================================
## Experiment Frame -- Window to right of Control frame
#
#-------
# Experiment area - everything to the right of the control column.
efFrame = Frame(siWin, bg = TANBG, borderwidth=0)
efFrame.grid(row=0,column=1,sticky=NW)
#====================================
## Experiment Settings Frame
#
#-------
esfFrame = Frame(efFrame, bg = TANBG, borderwidth=0)
esfFrame.grid(row=0,column=0,sticky=NW)
#====================================
## Spectrometer / Specimen Box Frame
#
#-------
ssbFrame = Frame(esfFrame, bg = TANBG, borderwidth=0)
ssbFrame.grid(row=0,column=0,sticky=EW)
#====================================
## Spectrometer Settings Frame
#
#-------
ssfFrame = LabelFrame(ssbFrame,text='Spectrometer Settings',
                      bg = TANBG, borderwidth=4)
ssfFrame.grid(row=0,column=0,sticky=NW)
#====================================
## Spectrometer EX Frame - EXcitation
#
# EX scan
#
#-------
sEXfFrame = Frame(ssfFrame, bg = TANBG)
sEXfFrame.grid(row=0,column=0,sticky=NW)
#
#
# Forward references for the three scan-mode buttons; resolved to the
# real Button objects as each one is created below.
sEXfB00_FR = NotImplemented # forward reference to Button
sEMfB00_FR = NotImplemented # forward reference to Button
sTMfB00_FR = NotImplemented # forward reference to Button
#
# Mode-button icons: 'T'/'t' = selected (true), 'F'/'f' = unselected.
exIconT = PhotoImage(file='icons/icon_modeEXt.gif')
exIconF = PhotoImage(file='icons/icon_modeEXf.gif')
#
emIconT = PhotoImage(file='icons/icon_modeEMt.gif')
emIconF = PhotoImage(file='icons/icon_modeEMf.gif')
#
tmIconT = PhotoImage(file='icons/icon_modeTMt.gif')
tmIconF = PhotoImage(file='icons/icon_modeTMf.gif')
#
def buttonEX():
    '''Display/Change scanning mode: to EXcitation.'''
    setScanMode(EXscan)
    return
#
#-------
# EX mode button - starts selected (exIconT).
sEXfB00 = Button(sEXfFrame, image = exIconT, bg = TANBG
                 ,borderwidth=0, command = buttonEX,activebackground=ACTIVB)
sEXfB00.grid(row=0,column=0,sticky=W)
sEXfB00_FR = sEXfB00 # resolve the forward reference to this button
#
# Wavelength Setting (frame)
#-------
sEXwavFrame = Frame(sEXfFrame, bg = TANBG)
sEXwavFrame.grid(row=0,column=2,sticky=NW)
#
# Wavelength Start - Label
#-------
sEXwavSLabel = Label(sEXwavFrame, text='Start (nm)', font=monoFont12, bg = TANBG )
sEXwavSLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# Wavelength End - Label
#-------
sEXwavELabel = Label(sEXwavFrame, text='End (nm)', font=monoFont12, bg = TANBG )
sEXwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Wavelength Inc - Label
#-------
sEXwavILabel = Label(sEXwavFrame, text='Inc (nm)', font=monoFont12, bg = TANBG )
sEXwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
# Start wavelength - Enter
#
def validateEXwaveStart(eventKeyRelease):
    # editing clears any warning tint left by scanSanityCheck()
    sEXwavSEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEXwavSEntry = Entry(sEXwavFrame, textvariable=varEXwaveStart,
                     border=2, relief=SUNKEN, width=8, font=monoFont14 )
sEXwavSEntry.grid(row=1, column=0, padx=4, pady=2, sticky=W)
sEXwavSEntry.bind('<KeyRelease>',validateEXwaveStart)
#
btnLookup['EXS'] = sEXwavSEntry # put button into dictionary by name
#
# End wavelength - Enter
#
def validateEXwaveEnd(eventKeyRelease):
    # editing clears any warning tint left by scanSanityCheck()
    sEXwavEEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEXwavEEntry = Entry(sEXwavFrame, textvariable=varEXwaveEnd,
                     border=2, relief=SUNKEN, width=7, font=monoFont14 )
sEXwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=W)
sEXwavEEntry.bind('<KeyRelease>',validateEXwaveEnd)
#
btnLookup['EXE'] = sEXwavEEntry # put button into dictionary by name
#
# Inc wavelength - Enter
#
def validateEXwaveInc(eventKeyRelease):
    # editing clears any warning tint left by scanSanityCheck()
    sEXwavIEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEXwavIEntry = Entry(sEXwavFrame, textvariable=varEXinc,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sEXwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=W)
sEXwavIEntry.bind('<KeyRelease>',validateEXwaveInc)
#
btnLookup['EXI'] = sEXwavIEntry # put button into dictionary by name
#====================================
## Spectrometer EM Frame - EMission
#
# EM scan
#
#-------
sEMfFrame = Frame(ssfFrame, bg = TANBG)
sEMfFrame.grid(row=0,column=1,sticky=NW)
#
def buttonEM():
    '''Display/Change scanning mode: to EMission.'''
    setScanMode(EMscan)
    return
#
#-------
# EM mode button - starts unselected (emIconF).
sEMfB00 = Button(sEMfFrame, image = emIconF, bg = TANBG
                 ,borderwidth=0, activebackground=ACTIVB, command = buttonEM)
sEMfB00.grid(row=0,column=0,sticky=W)
sEMfB00_FR = sEMfB00 # resolve the forward reference to this button
#
# Wavelength Setting (frame)
#-------
sEMwavFrame = Frame(sEMfFrame, bg = TANBG)
sEMwavFrame.grid(row=0,column=2,sticky=NW)
#
# Wavelength Start - Label
#-------
sEMwavSLabel = Label(sEMwavFrame, text='Start (nm)', font=monoFont12, bg = TANBG )
sEMwavSLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# Wavelength End - Label
#-------
sEMwavELabel = Label(sEMwavFrame, text='End (nm)', font=monoFont12, bg = TANBG )
sEMwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Wavelength Inc - Label
#-------
sEMwavILabel = Label(sEMwavFrame, text='Inc (nm)', font=monoFont12, bg = TANBG )
sEMwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
# Start wavelength - Enter
#
def validateEMwaveStart(eventKeyRelease):
    # editing clears any warning tint left by scanSanityCheck()
    sEMwavSEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEMwavSEntry = Entry(sEMwavFrame, textvariable=varEMwaveStart,
                     border=2, relief=SUNKEN, width=8, font=monoFont14 )
sEMwavSEntry.grid(row=1, column=0, padx=4, pady=2, sticky=E)
sEMwavSEntry.bind('<KeyRelease>',validateEMwaveStart)
#
btnLookup['EMS'] = sEMwavSEntry # put button into dictionary by name
#
# End wavelength - Enter
#
def validateEMwaveEnd(eventKeyRelease):
    # editing clears any warning tint left by scanSanityCheck()
    sEMwavEEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEMwavEEntry = Entry(sEMwavFrame, textvariable=varEMwaveEnd,
                     border=2, relief=SUNKEN, width=7, font=monoFont14 )
sEMwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=EW)
sEMwavEEntry.bind('<KeyRelease>',validateEMwaveEnd)
#
btnLookup['EME'] = sEMwavEEntry # put button into dictionary by name
#
# Inc wavelength - Enter
#
def validateEMwaveInc(eventKeyRelease):
    # editing clears any warning tint left by scanSanityCheck()
    sEMwavIEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEMwavIEntry = Entry(sEMwavFrame, textvariable=varEMinc,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sEMwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=EW)
sEMwavIEntry.bind('<KeyRelease>',validateEMwaveInc)
#
btnLookup['EMI'] = sEMwavIEntry # put button into dictionary by name
#====================================
## Spectrometer TM Frame - TiMe
#
# TM scan
#
#-------
sTMfFrame = Frame(ssfFrame, bg = TANBG)
sTMfFrame.grid(row=0,column=2,sticky=NW)
#
def buttonTM():
    '''Display/Change scanning mode: to TiMe (kinetics) scan.'''
    setScanMode(TMscan)
    return
#
#-------
# Icon button: clicking selects TiMe scan mode.
sTMfB00 = Button(sTMfFrame, image = tmIconF, bg = TANBG,
                 borderwidth=0,activebackground=ACTIVB, command = buttonTM)
sTMfB00.grid(row=0,column=0,sticky=W)
sTMfB00_FR = sTMfB00 # resolve the forward reference to this button
#
#
# Time Setting (frame)
#-------
sTMwavFrame = Frame(sTMfFrame, bg = TANBG)
sTMwavFrame.grid(row=0,column=1,sticky=NW)
#
# Pause step# - Label
#-------
sTMwavPLabel = Label(sTMwavFrame, text='Pause(S)'
                     , font=monoFont12, bg = TANBG )
sTMwavPLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# End step# - Label
#-------
sTMwavELabel = Label(sTMwavFrame, text='End (S)'
                     , font=monoFont12, bg = TANBG )
sTMwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Increment Time - Label
#-------
sTMwavILabel = Label(sTMwavFrame, text='Inc (S)'
                     , font=monoFont12, bg = TANBG )
sTMwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
#
# Pause (step#) - Enter
#
def validateTMwavePause(eventKeyRelease):
    '''Mark the Pause entry as edited (white background).'''
    sTMwavPEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sTMwavPEntry = Entry(sTMwavFrame, textvariable=varTMwavePause,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavPEntry.grid(row=1, column=0, padx=4, pady=2, sticky=EW)
sTMwavPEntry.bind('<KeyRelease>',validateTMwavePause)
#
btnLookup['TMP'] = sTMwavPEntry # put button into dictionary by name
#
# End step# - Enter
#
def validateTMwaveEnd(eventKeyRelease):
    '''Mark the End-time entry as edited (white background).'''
    sTMwavEEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sTMwavEEntry = Entry(sTMwavFrame, textvariable=varTMwaveEnd,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=EW)
sTMwavEEntry.bind('<KeyRelease>',validateTMwaveEnd)
#
btnLookup['TME'] = sTMwavEEntry # put button into dictionary by name
#
# Increment Time - Enter
#
def validateTMwaveInc(eventKeyRelease):
    '''Mark the Increment-time entry as edited (white background).'''
    sTMwavIEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sTMwavIEntry = Entry(sTMwavFrame, textvariable=varTMinc,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=W)
sTMwavIEntry.bind('<KeyRelease>',validateTMwaveInc)
#
btnLookup['TMI'] = sTMwavIEntry # put button into dictionary by name
#====================================
## S+R Frame - record Reference data?
#
# S+R
#
#-------
srFrame = Frame(ssfFrame, bg = TANBG)
srFrame.grid(row=0,column=3,sticky=NW)
#
# Reference Data - On/Off - 'S'(signal) alone or with 'R'(reference) too?
#
refOffIcon = PhotoImage(file='icons/icon_refOff.gif')
refOnIcon = PhotoImage(file='icons/icon_refOn.gif')
refOn = False # default == OFF (i.e. 'S' and 'R')
#
def toggleRef():
    '''Ref - On/Off - 'S'(signal) alone or with 'R'(reference) too?

    Flips the global refOn flag and swaps the button icon via
    toggleBtnVar().'''
    global refOn
    refOn = toggleBtnVar(refOn, refScfB02, refOffIcon, refOnIcon)
    return
#
#-------
# Toggle button for recording the reference channel alongside the signal.
refScfB02 = Button(srFrame, image = refOffIcon, borderwidth = 0
                   ,bg = TANBG,activebackground=ACTIVB, command = toggleRef)
refScfB02.grid(row=0,column=0,sticky=W)
#====================================
## Set 'scan mode' - complete forward reference
#
def setScanMode(mode):
    '''Select the type of spectrometer scan to perform (EXscan/EMscan/TMscan).

    Sets the EX, EM and TM icons to indicate the active scan type and
    enables/disables (NORMAL/DISABLED) the wavelength/time parameter
    widgets that apply to that scan type.  Does nothing when mode is
    already active.  Exits the program on an unknown mode value.
    '''
    #
    # any change?
    if varScanMode.get() == mode:
        if jjltest:
            print('setScanMode(): NO change.')
        return # no change
    #
    varScanMode.set(mode) # set the scan mode
    #
    # update icons: exactly one of EX/EM/TM shows its highlighted icon
    if varScanMode.get() == EXscan :
        sEXfB00_FR['image'] = exIconT # SCAN MODE - back to Default
        sEMfB00_FR['image'] = emIconF
        sTMfB00_FR['image'] = tmIconF
    elif varScanMode.get() == EMscan :
        sEXfB00_FR['image'] = exIconF
        sEMfB00_FR['image'] = emIconT # SCAN MODE
        sTMfB00_FR['image'] = tmIconF
    elif varScanMode.get() == TMscan :
        sEXfB00_FR['image'] = exIconF
        sEMfB00_FR['image'] = emIconF
        sTMfB00_FR['image'] = tmIconT # SCAN MODE
    else:
        if jjltest:
            print('Bad scan mode found in setScanMode(mode)')
        sys.exit(0)
    #
    updatePlot() # synchronize plot with scan mode
    #
    # set the correct 'state' for wavelength/time icons
    #
    if varScanMode.get() == EXscan:
        sEXwavSLabel['text'] = 'Start (nm)' # EXscan - Start wavelength
        sEXwavELabel['text'] = 'End (nm)' # - End label set
        sEXwavEEntry['state'] = NORMAL # - End entry enabled
        sEXwavILabel['text'] = 'Inc (nm)' # - Inc label set
        sEXwavIEntry['state'] = NORMAL # - Inc entry enabled
        sEMwavSLabel['text'] = 'Park (nm)' # EMscan - EM wavelength Parked
        sEMwavELabel['text'] = '' # - End label cleared
        sEMwavEEntry['state'] = DISABLED # - End entry disabled
        sEMwavILabel['text'] = '' # - Inc label cleared
        sEMwavIEntry['state'] = DISABLED # - Inc entry disabled
        sTMwavPLabel['text'] = '' # TMscan - Pause label cleared
        sTMwavPEntry['state'] = DISABLED # - Pause entry disabled
        sTMwavELabel['text'] = '' # - End label cleared
        sTMwavEEntry['state'] = DISABLED # - End entry disabled
    elif varScanMode.get() == EMscan:
        sEXwavSLabel['text'] = 'Park (nm)' # EXscan - EX wavelength Parked
        sEXwavELabel['text'] = '' # - End label cleared
        sEXwavEEntry['state'] = DISABLED # - End entry disabled
        sEXwavILabel['text'] = '' # - Inc label cleared
        sEXwavIEntry['state'] = DISABLED # - Inc entry disabled
        sEMwavSLabel['text'] = 'Start (nm)' # EMscan - EM wavelength set
        sEMwavELabel['text'] = 'End (nm)' # - End label set
        sEMwavEEntry['state'] = NORMAL # - End entry enabled
        sEMwavILabel['text'] = 'Inc (nm)' # - Inc label set
        sEMwavIEntry['state'] = NORMAL # - Inc entry enabled
        sTMwavPLabel['text'] = '' # TMscan - Pause label cleared
        sTMwavPEntry['state'] = DISABLED # - Pause entry disabled
        sTMwavELabel['text'] = '' # - End label cleared
        sTMwavEEntry['state'] = DISABLED # - End entry disabled
    elif varScanMode.get() == TMscan:
        sEXwavSLabel['text'] = 'Park (nm)' # EXscan - EX wavelength Parked
        sEXwavELabel['text'] = '' # - End label cleared
        sEXwavEEntry['state'] = DISABLED # - End entry disabled
        sEXwavILabel['text'] = '' # - Inc label cleared
        sEXwavIEntry['state'] = DISABLED # - Inc entry disabled
        sEMwavSLabel['text'] = 'Park (nm)' # EMscan - EM wavelength Parked
        sEMwavELabel['text'] = '' # - End label cleared
        sEMwavEEntry['state'] = DISABLED # - End entry disabled
        sEMwavILabel['text'] = '' # - Inc label cleared
        sEMwavIEntry['state'] = DISABLED # - Inc entry disabled
        sTMwavPLabel['text'] = 'Pause(S)' # TMscan - Pause label set
        sTMwavPEntry['state'] = NORMAL # - Pause entry enabled
        sTMwavELabel['text'] = 'End (S)' # - End label set
        sTMwavEEntry['state'] = NORMAL # - End entry enabled
    else:
        # BUG FIX: error message previously read 'Internal Errr:'
        err = 'Internal Error: undefined scan mode: {} !'
        mBox.showerror(title='Fatal Error'
                       ,message=err.format(varScanMode.get()))
        sys.exit(0)
    #
    scanSanityCheck() # update out-of-bounds parameter coloring
    return
#
setScanMode_FR = setScanMode # resolve the Forward Reference to function
#====================================
## Specimen Details Frame
#
#-------
# Free-text description of the specimen; the text is echoed into the
# plot title, so the plot is redrawn on every key release.
sdFrame = LabelFrame(ssbFrame,text='Specimen Details', bg = TANBG, borderwidth=0)
sdFrame.grid(row=1,column=0, pady=4, sticky=NW)
sdEntry = Entry(sdFrame, textvariable=varSpecimenDetails ,
                width=96, bg = 'white', border=2, relief=SUNKEN, font=monoFont14)
sdEntry.grid(row=0, column=0, padx=20, pady=2, sticky=EW)
sdEntry.bind('<KeyRelease>',updatePlot)
#====================================
## Real Time data Frame -- frame inside Experiment Frame
#
# Frame to hold real time data
#-------
rtdmFrame = LabelFrame(esfFrame, text='Live Data', bg = TANBG, borderwidth=4)
rtdmFrame.grid(row=0,column=1, padx=4, pady=2,sticky=NS+E)
#
#
# Real Time Data -- Row 0 => Signal
#-------
rtdmLabel00 = Label(rtdmFrame, text='S:', font=monoFont14, bg = TANBG )
rtdmLabel00.grid(row=0, column=0,sticky=E)
#-------
# NOTE(review): the name 'rtdmLabel00' is reused for this second widget
# (probably meant 'rtdmLabel01'); harmless at runtime, but the first
# Label can no longer be referenced afterwards.
rtdmLabel00 = Label(rtdmFrame, textvariable=varRTDsignal
                    ,border=0, relief=FLAT, bg='white'
                    ,width=15, font=monoFont12, anchor=E )
rtdmLabel00.grid(row=0, column=1, padx=4, pady=2, sticky=W)
#
# Real Time Data -- Row 1 => Reference
#-------
rtdmLabel10 = Label(rtdmFrame, text='R:', font=monoFont14, bg = TANBG )
rtdmLabel10.grid(row=1, column=0,sticky=E)
#-------
rtdmLabel11 = Label(rtdmFrame, textvariable=varRTDreference
                    ,border=0, relief=FLAT, bg='white'
                    ,width=15, font=monoFont12, anchor=E )
rtdmLabel11.grid(row=1, column=1, padx=4, pady=2, sticky=W)
#
# Real Time Data -- Row 2 => PCT (%) scan complete
#-------
rtdmLabel40 = Label(rtdmFrame, text='%:', font=monoFont14, bg = TANBG )
rtdmLabel40.grid(row=2, column=0,sticky=E)
rtdmProgress41 = Progressbar(rtdmFrame, orient='horizontal'
                             ,mode='determinate', variable=varPCTdone
                             ,length=124)
rtdmProgress41.grid(row=2, column=1, padx=4, pady=2,sticky=W)
#
#
# FRAME for Real Time Data2 -- EX/EM position and HV readings
#
rtdmFrame2 = Frame(rtdmFrame, bg = TANBG)
rtdmFrame2.grid(row=3,columnspan=2, padx=0, pady=0,sticky=NSEW)
#
# Real Time Data2 -- Row 0,[Col 0&1] => EX monochrometer position (nm)
#-------
rtdm2Label00 = Label(rtdmFrame2, text='EX:', font=monoFont14, bg = TANBG )
rtdm2Label00.grid(row=0, column=0,sticky=E)
#-------
rtdm2Label01 = Label(rtdmFrame2, textvariable=varEXposition
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label01.grid(row=0, column=1, padx=2, pady=2, sticky=W)
#
# Real Time Data -- Row 0,[Col 2&3] => EM monochrometer position (nm)
#-------
rtdm2Label02 = Label(rtdmFrame2, text='EM:', font=monoFont14, bg = TANBG )
rtdm2Label02.grid(row=0, column=2,sticky=E)
#-------
rtdm2Label03 = Label(rtdmFrame2, textvariable=varEMposition
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label03.grid(row=0, column=3, padx=2, pady=2, sticky=W)
#
# Real Time Data2 -- Row 1,[Col 0&1] => EM PMT HV readings (v)
#-------
rtdm2Label10 = Label(rtdmFrame2, text='HVm:', font=monoFont14, bg = TANBG )
rtdm2Label10.grid(row=1, column=0,sticky=E)
#-------
rtdm2Label11 = Label(rtdmFrame2, textvariable=varEMhv
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label11.grid(row=1, column=1, padx=2, pady=2, sticky=W)
#
# Real Time Data -- Row 1,[Col 2&3] => REF PMT HV readings (v)
#-------
rtdm2Label22 = Label(rtdmFrame2, text='HVr:', font=monoFont14, bg = TANBG )
rtdm2Label22.grid(row=1, column=2,sticky=E)
#-------
rtdm2Label23 = Label(rtdmFrame2, textvariable=varREFhv
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label23.grid(row=1, column=3, padx=2, pady=2, sticky=W)
#====================================
## Plotting Frame
#
#-------
plotFrame = Frame(efFrame, bg = TANBG, borderwidth=0)
plotFrame.grid(row=2,column=0, sticky=NSEW)
#
fig = Figure(figsize = (11.56,6), dpi=100) # TopLevel container for all plot elements
#
# initialize the "plot" element as "ax"
#
# NOTE(review): 'axisbg' was removed in matplotlib 2.0 in favour of
# 'facecolor'; this call only works with older matplotlib versions -
# confirm the pinned dependency before upgrading.
ax = fig.add_subplot(111, axisbg='w')
#
# Embed the matplotlib figure in the Tk plot frame.
canvas = FigureCanvasTkAgg(fig, master=plotFrame)
canvas.get_tk_widget().grid(row=0,column=0, padx=2)
#
def updatePlot(event=None):
    '''Redraw the spectrum plot to match the current scan mode and data.

    X-axis limits come from the EM Start/End entries (EMscan) or fall
    back to 200-1000 nm; the Y-axis is autoscaled to the larger of the
    scan and background data (minimum 5000 counts).  A previously loaded
    background waveform is drawn in green.

    event -- optional Tk event object; accepted (and ignored) so this
             function can be bound directly to widget events, e.g.
             sdEntry.bind('<KeyRelease>', updatePlot), where Tk always
             passes an event argument.  (BUG FIX: previously the
             zero-argument signature raised TypeError from such bindings.)
    '''
    global ax
    global scanDataX,scanDataY
    global backgroundDataX,backgroundDataY
    #
    # remove 'old' lines before re-draw
    while len(ax.lines):
        ax.lines.remove(ax.lines[-1])
    #
    # Get correct scaling for X axis
    #
    minX = 200
    maxX = 1000
    sm = varScanMode.get()
    if sm == EXscan:
        if jjltest:
            print('Error: EXscan not implemented.')
        else:
            mBox.showerror(message='Error: EXscan not implemented.')
        startX = minX
        endX = maxX
    elif sm == EMscan:
        # fall back to the full range unless Start/End span at least 2 nm
        if getVarInt(varEMwaveEnd) - getVarInt(varEMwaveStart) < 2:
            startX = minX
            endX = maxX
        else:
            startX = getVarInt(varEMwaveStart)
            endX = getVarInt(varEMwaveEnd)
    elif sm == TMscan:
        if jjltest:
            print('Error: TMscan not implemented.')
        else:
            mBox.showerror(message='Error: TMscan not implemented.')
        startX = minX
        endX = maxX
    else:
        mErr('Error: updatePlot() invalid varScanMode')
        sys.exit(0)
    #
    # Get correct scaling for Y axis
    #
    if len(scanDataY) < 2 :
        maxScanY = 5000 # default if NO scan data
    else:
        maxScanY = 1.1*max(scanDataY)
    #
    if len(backgroundDataY) < 2 :
        maxInputY = 5000 # default if NO input (reference) data
    else:
        maxInputY = 1.1*max(backgroundDataY)
    #
    maxY = max(5000, maxScanY, maxInputY)
    #
    # set the X & Y sizes for axes now
    #
    ax.axis([startX, endX, 0, maxY ])
    #
    ax.set_title( timeNow() + ' - Specimen Details:\n'
                  + varSpecimenDetails.get() )
    ax.set_ylabel('counts')
    #
    # plot "background" waveform (IF one has been loaded)
    if len(backgroundDataX) > 1:
        if jjltest:
            print('\nbefore: len(ax.lines)={}'.format(len(ax.lines)))
        if jjltest:
            print('mid: len(ax.lines)={}'.format(len(ax.lines)))
        ax.plot(backgroundDataX, backgroundDataY, 'g')
        if jjltest:
            print('after: len(ax.lines)={}'.format(len(ax.lines)))
    if jjltest:
        txt_ = 'len(backgroundDataX):{}, len(backgroundDataY):{}'
        print(txt_.format(len(backgroundDataX),len(backgroundDataY)))
    #
    # xlabel depends upon type of scan: (varScanMode)
    # EXscan = 0, EMscan = 1, TMscan = 2;
    #
    if varScanMode.get() == TMscan:
        ax.set_xlabel('time (S)') # scan by time
    else:
        ax.set_xlabel('wavelength (nm)') # scan by wavelength
    #
    # set up "cursor" to display values from plot
    #
    # NOTE(review): 'cursor' is a local variable - the Cursor widget may
    # be garbage collected when this function returns; keep a module
    # level reference if the crosshair stops responding.
    cursor = Cursor(ax, horizOn=False, useblit=True, color='red', linewidth=2 )
    #
    # NOTE(review): FigureCanvasTkAgg.show() is deprecated in newer
    # matplotlib (use draw()); kept for the pinned matplotlib version.
    canvas.show()
#
updatePlot_FR = updatePlot # resolve the Forward Reference to updatePlot()
# ========================
#=================
## Start up Window
#
setScanMode(EMscan) # establish the default scan type (EMission)
updatePlot() # draw the graph
#
PowerUp() # initialize settings & calibrate SPEX
#
siWin.mainloop() # enter the Tk event loop (blocks until the window closes)
| jluscher/SCANIT | scanit_v032.py | Python | cc0-1.0 | 81,068 | [
"VisIt"
] | 1ffccdb492845519d02ce1dcd41eb9ce527a78f321cde46ea2fd97bffd7ca613 |
#!/usr/bin/env python
from __future__ import print_function
from operator import itemgetter
import math
import numpy
import vtk
import math
class CoarsenSurface:
    """Coarsen a polygonal surface by collapsing cells with small areas.

    The operation works in place on a deep copy of the vtkPolyData passed
    to the constructor; nodal point data are averaged onto the collapsed
    vertex positions.
    """

    EPS = 1.23456789e-10    # areas at/below this are treated as zero
    TWOPI = 2.0 * math.pi   # full angle, used to detect internal points

    def __init__(self, pdata):
        """
        Constructor
        @param pdata vtkPolyData instance
        """
        self.polydata = vtk.vtkPolyData()
        self.polydata.DeepCopy(pdata)
        # will need to be able to interpolate the nodal data to the
        # new vertices
        self.pointData = self.polydata.GetPointData()
        self.numPointData = self.pointData.GetNumberOfArrays()
        # old point id -> new point id
        self.degeneratePtIdMap = {}
        # required so we can get the connectivity between points and
        # cells
        self.polydata.BuildLinks()
        self.polydata.BuildCells()

    def getVtkPolyData(self):
        """
        Get the vtkPolyData instance associated with the grid
        @return vtkPolyData instance
        """
        return self.polydata

    def getPolygonVectorArea(self, ptIds):
        """
        Get the polygon vector area (fan triangulation around vertex 0)
        @param ptIds boundary points of the cell
        @return vector
        """
        points = self.polydata.GetPoints()
        areaVec = numpy.zeros((3,), numpy.float64)
        p0 = numpy.array(points.GetPoint(ptIds.GetId(0)))
        numPts = ptIds.GetNumberOfIds()
        for i in range(1, numPts - 1):
            p1 = numpy.array(points.GetPoint(ptIds.GetId(i)))
            p2 = numpy.array(points.GetPoint(ptIds.GetId(i + 1)))
            areaVec += numpy.cross(p1 - p0, p2 - p0)
        return areaVec

    def getPolygonArea(self, ptIds):
        """
        Compute the (scalar) area of a polygon
        @param ptIds list of point indices
        @return area
        """
        # NOTE(review): this is the norm of the summed cross products,
        # i.e. twice the geometric area for a planar polygon; it is used
        # consistently in coarsen()/deleteZeroPolys(), so left unchanged.
        areaVec = self.getPolygonVectorArea(ptIds)
        return numpy.linalg.norm(areaVec)

    def getPolygonNormalVector(self, ptIds):
        """
        Compute the normal vector of the polygon
        @param ptIds list of point indices
        @return unit normal vector (undefined for zero-area polygons)
        """
        areaVec = self.getPolygonVectorArea(ptIds)
        area = numpy.linalg.norm(areaVec)
        return areaVec / area

    def getTotalAngle(self, ptId):
        """
        Compute the sum of the angles between this point and the surrounding edges
        @param ptId point Id
        @return total angle in radiants, should be about 2*pi for an internal point
        """
        cellIds = vtk.vtkIdList()
        # get the cell Ids sharing this point
        ptIds = vtk.vtkIdList()
        self.polydata.GetPointCells(ptId, cellIds)
        pt0 = numpy.zeros((3,), numpy.float64)
        edge1 = numpy.zeros((3,), numpy.float64)
        edge2 = numpy.zeros((3,), numpy.float64)
        points = self.polydata.GetPoints()
        # get the point coordinates of ptId
        points.GetPoint(ptId, pt0)
        # iterate over the cells sharing ptId
        angle = 0.
        for iCell in range(cellIds.GetNumberOfIds()):
            # get the points of this cell
            self.polydata.GetCellPoints(cellIds.GetId(iCell), ptIds)
            # find the two edges incident to ptId
            n = ptIds.GetNumberOfIds()
            for j in range(n):
                p1 = ptIds.GetId(j)
                p2 = ptIds.GetId((j + 1) % n)
                if p1 == ptId:
                    self.polydata.GetPoints().GetPoint(p2, edge1)
                elif p2 == ptId:
                    self.polydata.GetPoints().GetPoint(p1, edge2)
            # the two edges, relative to ptId
            edge1 -= pt0
            edge2 -= pt0
            # add the angle between edge1 and edge2
            crossProduct = numpy.linalg.norm(numpy.cross(edge1, edge2))
            dotProduct = numpy.dot(edge1, edge2)
            angle += math.atan2(crossProduct, dotProduct)
        return angle

    def collapsePolygon(self, cellId):
        """
        Collapse the vertices of a cell onto a single location
        @param cellId Id of the polygon
        @return list of point Ids that have been moved
        """
        # points of the cell
        ptIds = vtk.vtkIdList()
        self.polydata.GetCellPoints(cellId, ptIds)
        npts = ptIds.GetNumberOfIds()
        # typically the center of the cell, in some
        # cases the center of an edge
        center = numpy.zeros( (3,), numpy.float64 )
        # scratch buffer for one point's x, y, z coordinates
        # (BUG FIX: was numpy.zeros((npts,), ...), which breaks for npts != 3)
        pt = numpy.zeros( (3,), numpy.float64 )
        points = self.polydata.GetPoints()
        pointsToMove = []
        # determine which points are internal/boundary
        internalPointIds = []
        boundaryPointIds = []
        for i in range(npts):
            ptId = ptIds.GetId(i)
            totalAngle = self.getTotalAngle(ptId)
            # internal points see a full 2*pi angle of incident cells
            if abs(totalAngle - self.TWOPI) < 1.e-6: #0.01:
                internalPointIds.append(ptId)
            else:
                boundaryPointIds.append(ptId)
        # compute the (central) point where the cell collapses to
        numBoundaryPoints = len(boundaryPointIds)
        if numBoundaryPoints == 0:
            # all points are internal, average the (internal) points
            for ptId in internalPointIds:
                points.GetPoint(ptId, pt)
                center += pt
            center /= float(len(internalPointIds))
            pointsToMove = internalPointIds
        #elif numBoundaryPoints > 1 and numBoundaryPoints < npts:
        elif numBoundaryPoints > npts and numBoundaryPoints < npts: # always False: branch deliberately disabled (was unreliable)
            # average the boundary points and move all points to this position
            for ptId in boundaryPointIds:
                points.GetPoint(ptId, pt)
                center += pt
            center /= float(numBoundaryPoints)
            pointsToMove = internalPointIds + boundaryPointIds
        else:
            # likely all the points are boundary or there are more than two boundary
            # edges, nothing to do
            pass
        # move the selected points to the center
        for ptId in pointsToMove:
            points.SetPoint(ptId, center)
        # store in memory which point ids are degenerate
        for ptId in pointsToMove[1:]:
            self.degeneratePtIdMap[ptId] = pointsToMove[0]
        # average the nodal data at the new vertex location
        self.averagePointData(pointsToMove)
        return pointsToMove

    def coarsen(self, min_cell_area = 1.e-10):
        """
        Coarsen surface by collapsing small polygons
        @param min_cell_area cell area tolerance
        @note operation is in place
        """
        polys = self.polydata.GetPolys()
        numPolys = polys.GetNumberOfCells()
        ptIds = vtk.vtkIdList()
        polys.InitTraversal()
        # NOTE(review): the traversal index is used as the polydata cell Id;
        # this holds when the dataset contains polygons only - confirm for
        # inputs with verts/lines/strips.
        for cellId in range(numPolys):
            polys.GetNextCell(ptIds)
            area = abs(self.getPolygonArea(ptIds))
            if area < min_cell_area and area > self.EPS:
                # collapse the cell and get the point Ids that have moved
                movedPointIds = self.collapsePolygon(cellId)
                # clean up the connectivity by removing degenerate point Ids
                self.removeDegeneratePointsInConnectivity(movedPointIds)
        self.deleteZeroPolys()

    def removeDegeneratePointsInConnectivity(self, movedPointIds):
        """
        Remove the degenerate point Ids from the connectivity
        @param movedPointIds list of point Ids that have moved
        """
        neighCellIds = vtk.vtkIdList()
        neighCellPointIds = vtk.vtkIdList()
        if len(movedPointIds) > 1:
            # all other point Ids will be replaced by newPointId
            newPointId = movedPointIds[0]
            for oldPointId in movedPointIds[1:]:
                # get all the cells that contain oldPointId
                self.polydata.GetPointCells(oldPointId, neighCellIds)
                numNeighCells = neighCellIds.GetNumberOfIds()
                for j in range(numNeighCells):
                    # BUG FIX: the loop index was previously used directly as
                    # a cell Id; look up the actual neighbour cell Id instead
                    neighCellId = neighCellIds.GetId(j)
                    # extract the ptIds of that cell, requires BuildCells
                    # to be called
                    self.polydata.GetCellPoints(neighCellId, neighCellPointIds)
                    npts = neighCellPointIds.GetNumberOfIds()
                    for i in range(npts):
                        pi = neighCellPointIds.GetId(i)
                        if pi == oldPointId:
                            # replace point Id
                            self.polydata.ReplaceCellPoint(neighCellId, oldPointId, newPointId)

    def deleteZeroPolys(self):
        """
        Delete all the polygons whose areas are (nearly) zero
        """
        ptIds = vtk.vtkIdList()
        numPolys = self.polydata.GetPolys().GetNumberOfCells()
        for polyId in range(numPolys):
            self.polydata.GetCellPoints(polyId, ptIds)
            if abs(self.getPolygonArea(ptIds)) <= self.EPS:
                self.polydata.DeleteCell(polyId)
        self.polydata.RemoveDeletedCells()
        self.polydata.BuildLinks() # not sure if this is required
        self.polydata.BuildCells()

    def averagePointData(self, pids):
        """
        Average the field at the point locations and set the nodal field values
        to the average value
        @param pids list of point Ids
        """
        numPts = len(pids)
        if numPts == 0: return
        for el in range(self.numPointData):
            arr = self.pointData.GetArray(el)
            numComps = arr.GetNumberOfComponents()
            vals = numpy.zeros((numComps,), numpy.float64)
            baryVals = numpy.zeros((numComps,), numpy.float64)
            # mid cell values
            for i in range(numPts):
                ptId = pids[i]
                vals[:] = arr.GetTuple(ptId)
                baryVals += vals
            baryVals /= float(numPts)
            # set the field values to the mid cell values
            for j in range(numPts):
                arr.SetTuple(pids[j], baryVals)
##############################################################################

def printVtkPolyData(pdata):
    """Print point Ids and coordinates of every polygon in pdata (debug aid)."""
    points = pdata.GetPoints()
    polys = pdata.GetPolys()
    ptIds = vtk.vtkIdList()
    numPolys = polys.GetNumberOfCells()
    print('Number of polygons: {0}'.format(numPolys))
    polys.InitTraversal()
    for i in range(numPolys):
        # BUG FIX: GetNextCell was never called, so ptIds stayed empty and
        # every cell printed zero points
        polys.GetNextCell(ptIds)
        numPts = ptIds.GetNumberOfIds()
        print('\tCell {0} has {1} points: '.format(i, numPts))
        for j in range(numPts):
            ptId = ptIds.GetId(j)
            pt = points.GetPoint(ptId)
            print('\t\t{0} -> {1}'.format(ptId, pt))
def testNoCoarsening():
    """A single large triangle must survive coarsening untouched."""
    pts = vtk.vtkPoints()
    for xyz in ((0., 0., 0.), (1., 0., 0.), (1., 1., 0.)):
        pts.InsertNextPoint(xyz)
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(pts)
    conn = vtk.vtkIdList()
    for ptId in (0, 1, 2):
        conn.InsertNextId(ptId)
    pdata.Allocate(1, 1)
    pdata.InsertNextCell(vtk.VTK_POLYGON, conn)
    surf = CoarsenSurface(pdata)
    surf.coarsen(min_cell_area=1.e-5)
    result = surf.getVtkPolyData()
    assert(result.GetNumberOfPolys() == 1)
def testAddingThreePointsThenMore():
    """A large 2x1 triangle must not be removed by coarsening."""
    pts = vtk.vtkPoints()
    for xyz in ((0., 0., 0.), (2., 0., 0.), (2., 1., 0.)):
        pts.InsertNextPoint(xyz)
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(pts)
    conn = vtk.vtkIdList()
    for ptId in (0, 1, 2):
        conn.InsertNextId(ptId)
    pdata.Allocate(1, 1)
    pdata.InsertNextCell(vtk.VTK_POLYGON, conn)
    surf = CoarsenSurface(pdata)
    surf.coarsen(min_cell_area=1.e-5)
    assert(surf.getVtkPolyData().GetNumberOfPolys() == 1)
def testStartingWithTwoCells():
    """Two large triangles sharing an edge must both survive coarsening."""
    pts = vtk.vtkPoints()
    for xyz in ((0., 0., 0.), (2., 0., 0.), (2., 1., 0.), (0., 1., 0.)):
        pts.InsertNextPoint(xyz)
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(pts)
    pdata.Allocate(2, 1)
    conn = vtk.vtkIdList()
    conn.SetNumberOfIds(3)
    for triangle in ((0, 1, 2), (2, 3, 0)):
        for slot, ptId in enumerate(triangle):
            conn.SetId(slot, ptId)
        pdata.InsertNextCell(vtk.VTK_POLYGON, conn)
    surf = CoarsenSurface(pdata)
    surf.coarsen(min_cell_area=1.e-5)
    assert(surf.getVtkPolyData().GetNumberOfPolys() == 2)
if __name__ == '__main__':
    # Run the module's smoke tests when executed directly.
    testNoCoarsening()
    testAddingThreePointsThenMore()
    testStartingWithTwoCells()
| gregvonkuster/icqsol | shapes/icqCoarsenSurface.py | Python | mit | 12,617 | [
"VTK"
] | 98b6b9fc86d6873e18e8a59d22efc006a20e6781674278de6be46ac43c588181 |
"""
Script for an easy installation/compilation of aRMSD from command line
License: MIT
(c) 2016 by Arne Wagner
*Recent changes:
- Installer writes PyInstaller .spec file on-the-fly
- Added support for openbabel (writes hook if none exists)
- PyInstaller imports/excludes can be modified in the functions
- Added optional openbabel flag for compilation
"""
from __future__ import print_function
from distutils.sysconfig import get_python_lib
import os
import sys
import shutil
import subprocess
import shlex
from codecs import open
import time
inst_version = '1.4' # Version of the installer
def pyinstaller_data(name, platform, obf_files):
    """ Assemble the parameter dictionary written into the PyInstaller
    .spec file.

    name      -- executable name (e.g. 'aRMSD_1.0_Win64')
    platform  -- platform tag (e.g. 'Win64')
    obf_files -- list of openbabel .obf files, or None when openbabel
                 support is disabled; when given, 'openbabel' is added
                 to the hidden imports
    """
    # Modules PyInstaller cannot discover on its own
    hidden_imports = ['matplotlib', 'vtk', 'uncertainties']
    if obf_files is not None:
        hidden_imports.append('openbabel')
    # No data excludes, extra binaries or extra datas are currently needed
    return {'name': name,
            'platform': platform,
            'hiddenimports': hidden_imports,
            'data_excludes': [],
            'binaries': [],
            'extra_datas': []}
def analyze_arguments(arguments):
    """ Check the given command-line arguments and pass the recognized
    ones on to the compilation script.

    arguments -- iterable of '--key=value' strings; unknown keys and
                 malformed entries are silently ignored
    @return tuple (use_openbabel, use_cython, cython_compiler, overwrite)
            where the three flags are real booleans and cython_compiler
            is a string (default 'msvc')
    """
    accepted_arg_prefix = ['--use_openbabel', '--use_cython', '--cython_compiler', '--overwrite']

    def _split(arg):
        # Split '--key=value' into (key, value); reject unknown keys.
        prefix, sep, suffix = arg.partition('=')
        if not sep or prefix not in accepted_arg_prefix:
            return (None, None)
        return (prefix, suffix)

    def _to_bool(value):
        # BUG FIX: the raw strings were returned before, so e.g.
        # '--use_cython=False' yielded the truthy string 'False'.
        if isinstance(value, str):
            return value.strip().lower() in ('true', '1', 'yes')
        return bool(value)

    # Default compiler arguments
    use_openbabel = False
    use_cython = False
    cython_compiler = 'msvc'
    overwrite = False

    for entry in arguments:
        prefix, value = _split(entry)
        if prefix == '--use_cython':
            use_cython = _to_bool(value)
        elif prefix == '--cython_compiler':
            cython_compiler = value
        elif prefix == '--use_openbabel':
            use_openbabel = _to_bool(value)
        elif prefix == '--overwrite':
            overwrite = _to_bool(value)

    return use_openbabel, use_cython, cython_compiler, overwrite
def check_for_ext(pyx_file_path, ext, default):
    """ Return ext if the file pyx_file_path+ext exists, otherwise default """
    candidate = pyx_file_path + ext
    if os.path.isfile(candidate):
        return ext
    return default
def check_for_pyd_so(file_path):
    """ Return True when a compiled extension (.pyd or .so) exists for file_path """
    return any(os.path.isfile(file_path + ext) for ext in ('.pyd', '.so'))
def get_current_version(armsd_dir):
    """ Determine the executable name and platform tag for the build.

    Reads '__aRMSD_version__' from <armsd_dir>/aRMSD.py and combines it
    with the detected operating system and pointer size.

    @param armsd_dir directory that contains aRMSD.py
    @return tuple (name, platform), e.g. ('aRMSD_1.0_Win64', 'Win64')
    @raise ValueError if no version line is found (previously this
           surfaced as a NameError)
    """
    version = None

    # Determine the version of aRMSD; 'with' closes the file handle
    # (it was previously leaked), and os.path.join also works on POSIX
    # (the old hard-coded '\\' separator was Windows-only).
    with open(os.path.join(armsd_dir, 'aRMSD.py')) as infile:
        contents = infile.readlines()

    for index, line in enumerate(contents):
        if '__aRMSD_version__' in line and len(line.split()) == 3:
            version = eval(line.split()[-1])
        if 'is_compiled' in line and len(line.split()) > 7:
            # Let the program know that it is compiled.
            # BUG FIX: str.replace returns a new string; the result was
            # previously discarded.
            # NOTE(review): 'contents' is still never written back to
            # disk, so this only changes the in-memory copy - confirm
            # whether a write-back was intended.
            contents[index] = contents[index].replace('False', 'True')

    if version is None:
        raise ValueError('No __aRMSD_version__ found in aRMSD.py')

    # First: operating system
    if sys.platform == 'win32':
        platform = 'Win'
    elif sys.platform == 'darwin':
        platform = 'Mac'
    elif sys.platform.startswith('linux'):
        # BUG FIX: the old exact check ('linux2') only matched Python 2;
        # Python 3 reports 'linux'
        platform = 'Lin'
    else:
        platform = 'Os'

    # Second: 32 or 64 bit
    if sys.maxsize > 2 ** 32:
        platform += '64'
    else:
        platform += '32'

    name = 'aRMSD_{}_{}'.format(version, platform)
    return name, platform
def has_module(mod, site_packages_path):
    """ Report whether a folder named mod exists in the site-packages path """
    module_dir = site_packages_path + '\\' + mod
    return os.path.isdir(module_dir)
def copy_obfiles(build_dir, site_packages_path):
    """ Copy the openbabel .obf format libraries into the build folder.

    @param build_dir destination folder of the build
    @param site_packages_path site-packages folder that holds 'openbabel'
    @return list of the copied .obf file names
    """
    # .obf files providing openbabel's file-format support; add or
    # remove entries here as needed
    obf_files = ['formats_cairo.obf', 'formats_common.obf', 'formats_compchem.obf',
                 'formats_misc.obf', 'formats_utility.obf', 'formats_xml.obf']

    babel_dir = site_packages_path + '\\openbabel'  # location of the .obf files

    for file_name in obf_files:
        shutil.copyfile(babel_dir + '\\' + file_name, build_dir + '\\' + file_name)

    return obf_files
def write_ob_hook(site_packages_path, overwrite):
    """ Writes a working pyinstaller hook for openbabel if there is none.

    @param site_packages_path path of the Python site-packages folder
    @param overwrite when truthy, an existing hook file is replaced
    """
    hook_path = site_packages_path+'\\PyInstaller\\hooks' # Path of the PyInstaller hooks
    babel_data = site_packages_path+'\\openbabel\\data' # Path of the openbabel data files
    if not os.path.isfile(hook_path+'\\hook-openbabel.py') or overwrite: # Don't overwrite files
        data_files = os.listdir(babel_data) # All files in the directory
        # If these files are not included openbabel will give a warning and fall back to the internal data
        dat_names = ['space-groups', 'element', 'types', 'resdata', 'bondtyp', 'aromatic', 'atomtyp']
        datas = []
        # Collect (source, destination) pairs for the hook's 'datas' list
        for entry in range(len(data_files)):
            if data_files[entry].split('.')[0] in dat_names: # If a file in dat_names is found, add it to datas
                datas.append((site_packages_path+'\\openbabel\\data\\'+data_files[entry], 'openbabel\\data'))
        os.chdir(hook_path)
        # Write hook file
        with open('hook-openbabel.py', 'w') as outfile:
            outfile.write("""
# This hook has been created by aRMSD
# It may not work for the compilation of other executables
from PyInstaller.utils.hooks import collect_dynamic_libs
binaries = collect_dynamic_libs('openbabel')
datas """+"= "+str(datas))
        print('>> An openbabel hook for PyInstaller has been created!')
        outfile.close()  # redundant: the 'with' block already closed the file
    else:
        print('>> A preexisting openbabel hook was found and will be used!')
def write_spec_file(build_dir, pyinst_dict, obf_files):
    """ Writes a .spec file for PyInstaller.

    @param build_dir folder in which 'aRMSD.spec' is created (becomes cwd)
    @param pyinst_dict parameter dictionary from pyinstaller_data()
    @param obf_files list of openbabel .obf files or None
    @return name of the written spec file ('aRMSD.spec')
    """
    def _write_obf(obf_files, build_dir):
        # Builds the "a.binaries + [...]" expression that injects the .obf
        # format libraries as extra BINARY entries into the EXE() call of
        # the generated spec file.
        return_string = 'a.binaries'
        if obf_files is not None:
            if len(obf_files) == 1: # Only one .obf files
                return_string += " + [('"+obf_files[0]+"', "+repr(build_dir+'\\'+obf_files[0])+", 'BINARY')],"
            else:
                for entry in range(len(obf_files) - 1):
                    return_string += " + [('"+obf_files[entry]+"', "+repr(build_dir+'\\'+obf_files[entry])+", 'BINARY')]"
                return_string += " + [('"+obf_files[-1]+"', "+repr(build_dir+'\\'+obf_files[-1])+", 'BINARY')],"
        else:
            return_string += ','
        return return_string
    os.chdir(build_dir) # Change to build directory and create a new file
    spec_file = 'aRMSD.spec'
    # NOTE(review): obf_str is unused; the template below calls
    # _write_obf() again directly.
    obf_str = _write_obf(obf_files, build_dir) # Write additional binary string for .spec file
    # Write temporary setup file
    with open(spec_file, 'w') as outfile:
        outfile.write("""
# Automatically created aRMSD 'spec' file for a PyInstaller based compilation
# This file deletes itself after the installation.
# Authors: Arne Wagner
# License: MIT
block_cipher = None
import os
folder = os.getcwd() # Get current working directory
binaries = """+str(pyinst_dict['binaries'])+"""
extra_datas = """+str(pyinst_dict['extra_datas'])+"""
exclude_datas = """+str(pyinst_dict['data_excludes'])+"\n\n"+"""hiddenimports = """+str(pyinst_dict['hiddenimports'])+"""
a = Analysis(['aRMSD.py'],
pathex = [folder],
binaries = binaries,
datas = extra_datas,
hiddenimports = hiddenimports,
hookspath = [],
runtime_hooks = [],
excludes = [],
win_no_prefer_redirects = False,
win_private_assemblies = False,
cipher = block_cipher)
# Setup platform and program name """+"\n"+"platform, name = '"+pyinst_dict['platform']+"', '"+pyinst_dict['name']+"'\n")
        outfile.write("""
# Exclude some binaries
#a.binaries = [x for x in a.binaries if not x[0].startswith("zmq")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("IPython")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("docutils")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("pytz")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("wx")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("libQtWebKit")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("libQtDesigner")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("PySide")]
#a.binaries = [x for x in a.binaries if not x[0].startswith("libtk")]
# Exclude selected data
for exclude_data in exclude_datas:
a.datas = [x for x in a.datas if exclude_data not in x[0]]
# Setup pyz
pyz = PYZ(a.pure, a.zipped_data,
cipher = block_cipher)
exe = EXE(pyz,
a.scripts,\n """+_write_obf(obf_files, build_dir)+"""
a.zipfiles,
a.datas,
name = name,
debug = False,
strip = False,
upx = True,
console = True,
icon = folder+"""+r"'\\aRMSD_icon.ico')"+"""
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip = False,
upx = True,
name = name)
""")
    outfile.close()  # redundant: the 'with' block already closed the file
    return spec_file
def package_cython_modules(build_dir, list_of_files, cython_compiler):
""" Compiles .pyx/.py files to .pyd/.so files inplace """
os.chdir(build_dir) # Change to build directory and create a new file
setup_file = 'cythonize_modules.py'
# Write temporary setup file
with open(setup_file, 'w') as outfile:
outfile.write("""
from setuptools import setup
from setuptools import Extension
from Cython.Build import cythonize
setup(name = 'acore', ext_modules = cythonize('"""+list_of_files[0]+"""'),)
setup(name = 'aplot', ext_modules = cythonize('"""+list_of_files[1]+"""'),)
setup(name = 'alog', ext_modules = cythonize('"""+list_of_files[2]+"""'),)
""")
t0 = time.clock() # Start time
# Cythonize modules and compile them to .pyd/.so libraries
subprocess.call(r'python {setup} build_ext --inplace --compiler={cython_compiler}'.format(setup=setup_file,
cython_compiler=cython_compiler))
t1 = time.clock() # End time
print('\n>> Modules were successfully compiled by Cython!')
print('Compilation time: '+str(round((t1 - t0) / 60.0, 1))+' min')
def run_compilation(use_openbabel, use_cython, cython_compiler, overwrite):
""" Runs the pyinstaller compilation with the given flags for cython and the c compiler """
print('\n *** This the official installer for aRMSD (Installer version: '+inst_version+') ***')
print('==============================================================================')
print('It will create a standalone executable using PyInstaller.')
print('\nNote: The compilation process will take some time (see below),')
print(' open/close additional windows and create temporary files.')
if not use_cython: # Estimates the compilation time based on tests - but may be different on other machines
print('\n\t -- Estimated total compilation time: 30 min --')
else:
print('\n\t --Estimated total compilation time: 35 min --')
print('------------------------------------------------------------------------------')
print('Info: You can customize the build by adjusting the')
print(' aRMSD.spec and compile_aRMSD.py files')
print('------------------------------------------------------------------------------')
# Determine site packages path
site_packages_path = get_python_lib()
# Check for PyInstaller and openbabel
has_pyinst = has_module('pyinstaller', site_packages_path)
has_obabel = has_module('openbabel', site_packages_path)
obf_files = None # Will be checked and updated if .obf files are copyied
if has_pyinst:
# Names of the build folder and the core modules (without extensions)
build_folder_name = 'build'
name_core, name_log, name_plot = 'acore', 'alog', 'aplot'
basic_dir = os.getcwd() # Determine the initial working directory
armsd_dir = basic_dir+'\\armsd' # aRMSD folder in the working directory
build_dir = basic_dir+'\\'+build_folder_name # Build folder directory
# Check for pre-compiled .pyd files
comp_core = check_for_pyd_so(armsd_dir + '\\' + name_core)
comp_plot = check_for_pyd_so(armsd_dir + '\\' + name_plot)
comp_log = check_for_pyd_so(armsd_dir + '\\' + name_log)
if True in [comp_core, comp_plot, comp_log]: # If a single pre-compiled module exists, don't compile
print('\n>> Pre-compiled modules found...')
use_cython = False
# Check if .pyx files of the three modules exist
ext_core = check_for_ext(armsd_dir+'\\'+name_core, '.pyx', '.py')
ext_plot = check_for_ext(armsd_dir+'\\'+name_plot, '.pyx', '.py')
ext_log = check_for_ext(armsd_dir+'\\'+name_log, '.pyx', '.py')
print('\n>> Installer was called as...')
print('\npython compile_aRMSD.py --use_openbabel='+str(use_openbabel)+' --use_cython='+str(use_cython)+
' --cython_compiler='+str(cython_compiler)+' --overwrite='+str(overwrite))
print('\n>> Creating temporary directory... '+build_folder_name)
if os.path.isdir(build_folder_name): # Remove build folder if it exists
shutil.rmtree(build_dir)
print('\n>> Build directory already exists... it will be removed!')
os.makedirs(build_folder_name) # Make temporary build directory
os.chdir(build_folder_name) # Change to build folder
# Copy core files to build directory
shutil.copyfile(armsd_dir+'\\'+name_core+ext_core, build_dir+'\\'+name_core+ext_core)
shutil.copyfile(armsd_dir+'\\'+name_plot+ext_plot, build_dir+'\\'+name_plot+ext_plot)
shutil.copyfile(armsd_dir+'\\'+name_log+ext_log, build_dir+'\\'+name_log+ext_log)
if overwrite:
print('\n>> INFO: All existing files (hooks, etc.) will be overwritten')
if use_openbabel and has_obabel: # Copy obenbabel files
print('\n>> Copying openbabel files...')
obf_files = copy_obfiles(build_dir, site_packages_path)
write_ob_hook(site_packages_path, overwrite)
elif use_openbabel and not has_obabel:
print('\n>> ERROR: Openbabel was not found on your system, will continue without it!')
else:
print('\n>> INFO: Openbabel will not be used!')
print('\n>> Copying core modules...')
print('\t... '+name_core+ext_core)
print('\t... '+name_plot+ext_plot)
print('\t... '+name_log+ext_log)
if use_cython: # Cythonize pyx files or py files
print('\n>> Attempting to use Cython in the compilation')
try:
# Import required modules
from setuptools import setup
from setuptools import Extension
from Cython.Build import cythonize
print('\n>> Cython and setuptools found, starting compilation...')
will_use_cython = True
except ImportError: # Something went wrong, most likely no Cython installation
print('\n>> ERROR: Will continue without cythonization!')
will_use_cython = False
if will_use_cython:
print('\npython cythonize_modules.py build_ext --inplace --compiler='+cython_compiler)
# Combine modules in list and compile to libraries
sourcefiles = [name_core+ext_core, name_plot+ext_plot, name_log+ext_log]
package_cython_modules(build_dir, sourcefiles, cython_compiler)
# Remove .pyx/.py and .c files - the program will be automatically compiled with the cythonized files
os.remove(name_core+ext_core)
os.remove(name_core+'.c')
os.remove(name_plot+ext_plot)
os.remove(name_plot+'.c')
os.remove(name_log+ext_log)
os.remove(name_log+'.c')
else:
print('\n>> INFO: Cython will not be used!')
print('\n>> Copying main program files...')
# Gets the file name of the created executable
file_name_dir, platform = get_current_version(armsd_dir)
# Copy main file and icon to build directory
shutil.copyfile(armsd_dir+'\\aRMSD.py', build_dir+'\\aRMSD.py')
shutil.copyfile(basic_dir+'\\aRMSD_icon.ico', build_dir+'\\aRMSD_icon.ico')
# Load PyInstaller information (modules can be adjusted in the respective function)
pyinst_dict = pyinstaller_data(file_name_dir, sys.platform, obf_files)
# Write .spec file for compilation
spec_file = write_spec_file(build_dir, pyinst_dict, obf_files)
pyinstaller_cmd = 'pyinstaller --onefile '+spec_file
print('\n>> Calling PyInstaller...')
print('\n'+build_dir+'> '+pyinstaller_cmd)
t0 = time.clock() # Start time
# Compile files with PyInstaller - this should work on every system
pyinstaller_args = shlex.split(pyinstaller_cmd+' '+spec_file)
subprocess.call(pyinstaller_args)
t1 = time.clock() # End time
# Copy executable to 'armsd' folder and delete all temporary files
os.chdir(basic_dir)
shutil.rmtree(build_dir+'\\dist\\'+file_name_dir)
prg_file_name = os.listdir(build_dir+'\\dist')[0] # List file (only one should be there) in distribution directory
shutil.copyfile(build_dir+'\\dist\\'+prg_file_name, armsd_dir+'\\'+prg_file_name)
shutil.rmtree(build_dir)
# Echo successful creation, print compilation time
print('Executable -- '+prg_file_name+' -- was created successfully!')
print('Compilation time: '+str(round((t1 - t0) / 60.0, 1))+' min')
print('Cleaning up files and directories')
print('\nClean up complete, executable has been moved to:\n'+armsd_dir)
print('\n>> Compilation complete!')
print('\n-----------------------------------------------------------------------------')
print('In order to use the executable, copy...')
print('settings.cfg, the xsf folder and '+prg_file_name)
print('to any directory of your choice and start the program.')
print('It is recommended to call aRMSD from command line')
print('e.g. Path\\to\\exe> '+prg_file_name)
print('to catch potential errors. The start of the program may take a few seconds!')
else:
print('\n>> ERROR: PyInstaller was not found, install the package and run again!')
print('--> from command line: pip install pyinstaller')
if __name__ == '__main__': # Run the program
arguments = sys.argv[1:] # Get arguments
use_openbabel, use_cython, cython_compiler, overwrite = analyze_arguments(arguments) # Check arguments and set variables
run_compilation(use_openbabel, use_cython, cython_compiler, overwrite)
| armsd/aRMSD | compile_aRMSD.py | Python | mit | 20,550 | [
"Pybel",
"VTK"
] | df3fc877cee394075c3b7bbeaad0e8bda950372f492e60dab10afd712ed9e72d |
# test_corrector.py
from malaprop.correction.corrector import *
from recluse.nltk_based_segmenter_tokeniser import *
from malaprop.correction.HMM import *
from DamerauLevenshteinDerivor.cderivor import Derivor
from BackOffTrigramModel.BackOffTrigramModelPipe import BackOffTMPipe
import unittest, StringIO, subprocess
class MatchCaseTest(unittest.TestCase):
def test_match_case(self):
result = match_case('This', 'that')
self.assertEqual(result, 'That'), result
result = match_case('this', 'that')
self.assertEqual(result, 'that'), result
result = match_case('THIS', 'that')
self.assertEqual(result, 'THAT'), result
result = match_case('MacGregor', 'macgregor')
self.assertEqual(result, 'MacGregor'), result
result = match_case('MacGregor', 'macdregor')
self.assertEqual(result, 'MacDregor'), result
result = match_case('McGregor', 'macgregor')
self.assertEqual(result, 'MacGregor'), result
result = match_case('MacGregor', 'mcgregor')
self.assertEqual(result, 'McGregor'), result
result = match_case('OrC', 'or')
self.assertEqual(result, 'Or'), result
result = match_case('OrC', 'orca')
self.assertEqual(result, 'OrCa'), result
class CorrectorTest(unittest.TestCase):
def setUp(self):
training_text_file = open('malaprop/test/data/segmenter_training', 'r')
segmenter_tokeniser = NLTKBasedSegmenterTokeniser(training_text_file)
path_to_botmp = subprocess.check_output(['which', 'BackOffTrigramModelPipe']).strip()
arpa_file_name = 'malaprop/test/data/trigram_model_2K.arpa'
botmp = BackOffTMPipe(path_to_botmp, arpa_file_name)
error_rate = 0.3
d = Derivor('malaprop/test/data/1K_test_real_word_vocab')
hmm = HMM(d.variations, botmp, error_rate, 2)
self.c = Corrector(segmenter_tokeniser, hmm)
def test_correct(self):
# Regression tests: these results are consistent with the
# probabilities of their input, but their passing is not a
# guarantee of correctness.
sentence = 'It is therefore a more specific from of the term reflectivity.'
result = self.c.correct(sentence)
expected_result = [[6, 0, 'from', 'form'], [9, 0, 'term', 'team']]
self.assertListEqual(result, expected_result), result
result = self.c.correct(sentence, output='sentence')
expected_result = 'It is therefore a more specific form of the team reflectivity.'
self.assertEqual(result, expected_result), result
sentence = 'Most land areas are in in albedo range of 0.1 to 0.4.'
result = self.c.correct(sentence)
expected_result = [[2,0, 'areas', 'area'], [4,0, 'in', 'win']]
self.assertListEqual(result, expected_result), result
result = self.c.correct(sentence, output='sentence')
expected_result = 'Most land area are win in albedo range of 0.1 to 0.4.'
self.assertEqual(result, expected_result), result
if __name__ == '__main__':
unittest.main()
| ambimorph/malaprop | malaprop/test/test_corrector.py | Python | agpl-3.0 | 3,109 | [
"ORCA"
] | 9a8ca44bb8ac0c78cfc1a15b5af7e8a971f5e3cfd3aab2a41374a19350d9bd9a |
from noaaclass import noaaclass
import itertools
import sys
import os
from urllib import urlretrieve
import threading
from Queue import Queue
from datetime import datetime
from goescalibration import instrument as calibrator
from netcdf import netcdf as nc
import logging
import logging.handlers
logger = logging.getLogger('goesdownloader')
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
"log_goesdownloader.out", maxBytes=20, backupCount=5)
logger.addHandler(handler)
def calculate_destiny(url, destfolder):
name = calibrator.short(url, 1, None)
dest = os.path.join(destfolder, name)
return dest
class DownloadThread(threading.Thread):
def __init__(self, queue, destfolder):
super(DownloadThread, self).__init__()
self.queue = queue
self.destfolder = destfolder
self.daemon = True
def run(self):
while True:
url = self.queue.get()
try:
self.download_url(url)
except Exception, e:
logger.error(" Error: %s" % e)
self.queue.task_done()
def download_url(self, url):
# change it to a different format if you require
ftp, http = url
dest = calculate_destiny(http, self.destfolder)
msg = "[%s] %s %s -> %s"
logger.info(msg % ("Downloading", self.ident, ftp, dest))
try:
# Try ftp...
urlretrieve(ftp, dest)
except Exception:
logger.info(msg % ("Alternative downloading", self.ident,
http, dest))
# Try http...
urlretrieve(http, dest)
calibrator.calibrate(dest)
class DownloadManager(object):
def __init__(self, destfolder, numthreads=4):
self.queue = Queue()
self.downloaders = map(lambda i: DownloadThread(self.queue, destfolder),
range(numthreads))
def start(self):
for d in self.downloaders:
d.start()
def join(self):
self.queue.join()
def localsize(localfile):
with open(localfile, 'rb') as f:
size = len(f.read())
return size
def only_incompleted(url, destfolder):
dest = calculate_destiny(url, destfolder)
completed = False
if os.path.exists(dest):
try:
with nc.loader(dest) as root:
nc.getvar(root, 'data')
nc.getvar(root, 'lat')
nc.getvar(root, 'lon')
completed = True
except (OSError, IOError, Exception):
logger.error("The file %s was broken." % dest)
return not completed
def download(username, password, folder, suscription_id=None, name=None,
datetime_filter=None):
noaa = noaaclass.connect(username, password)
manager = DownloadManager(folder)
suscriptions = noaa.subscribe.gvar_img.get(async=True, hours = 2,
append_files=True)
compare = (lambda sus: sus['id'] == suscription_id if suscription_id else
sus['name'] == name)
suscription = filter(lambda s: compare(s), suscriptions)[0]
orders = filter(lambda o: o['status'] == 'ready', suscription['orders'])
http_files = map(lambda o: o['files']['http'], orders)
ftp_files = map(lambda o: o['files']['ftp'], orders)
files = zip(itertools.chain(*ftp_files), itertools.chain(*http_files))
urls = filter(lambda filename: filename[0][-3:] == '.nc',
files)
if datetime_filter:
get_datetime = lambda f: datetime.strptime(calibrator.short(f),
"%Y.%j.%H%M%S")
urls = filter(lambda f: datetime_filter(get_datetime(f[0])), urls)
urls = filter(lambda u: only_incompleted(u[0], folder), urls)
map(manager.queue.put, urls)
manager.start()
manager.join()
downloaded = map(lambda u: "%s/%s" % (folder, calibrator.short(u[1], 1, None)),
urls)
return downloaded
| gersolar/goesdownloader | goesdownloader/instrument.py | Python | mit | 4,027 | [
"NetCDF"
] | 76db1255355205ad58466e6f31ca7037c0a5168b00821a9eed2d8e627e2733cb |
import unittest
import time
import os, sys, numpy, pysam
from margin.marginCallerLib import vcfRead
from margin.utils import ReadAlignmentStats, pathToBaseNanoporeDir, getFastaDictionary
from cPecan.cPecanEm import Hmm
from sonLib.bioio import system, parseSuiteTestOptions, logger, getBasicOptionParser
import vcf
import numpy
"""Basic system level tests for marginAlign, marginCaller and marginStats scripts.
"""
longTests = False
class TestCase(unittest.TestCase):
def setUp(self):
self.marginAlign = self.getFile("marginAlign") #Path to marginAlign binary
self.marginCaller = self.getFile("marginCaller") #Path to marginCall binary
self.marginStats = self.getFile("marginStats") #Path to marginStats binary
self.modifyHmm = self.getFile("scripts/modifyHmm")
#The following are files used
self.readFastqFile1 = self.getFile("tests/reads.fq") if longTests else \
self.getFile("tests/lessReads.fq")
self.referenceFastaFile1 = self.getFile("tests/references.fa")
self.outputSamFile = self.getFile("tests/test.sam")
self.inputHmmFile = self.getFile("tests/input.hmm")
self.outputHmmFile = self.getFile("tests/output.hmm")
self.inputSamFile1 = self.getFile("tests/input.sam")
self.outputVcfFile = self.getFile("tests/output.vcf")
self.jobTree = self.getFile("tests/testJobTree")
#For testing margin caller
self.mutationsFile = self.getFile("tests/mutations.txt")
self.mutatedReferenceFastaFile = self.getFile("tests/referencesMutated.fa")
self.inputSamFileForMutatedReferenceFile = self.getFile("tests/inputBigMutations.sam") #This is aligned against the mutated reference
self.inputSamFileForMutatedReferenceFileLast = self.getFile("tests/inputBigMutationsLast.sam")
self.inputSamFileForMutatedReferenceFileBwa = self.getFile("tests/inputBigMutationsBwa.sam")
self.inputSamFileForMutatedReferenceFileGraphMap = self.getFile("tests/inputBigMutationsBwa.sam")
self.readFastqFile2 = self.getFile("tests/reads.fq")
unittest.TestCase.setUp(self)
def getFile(self, file):
return os.path.join(pathToBaseNanoporeDir(), file)
def tearDown(self):
unittest.TestCase.tearDown(self)
# Clean up
system("rm -rf %s %s %s %s" % (self.outputSamFile, self.outputHmmFile,
self.outputVcfFile, self.jobTree))
def validateSam(self, samFile, readFastqFile, referenceFastaFile):
"""Checks if a sam file is valid.
"""
# Check if samfile exists
self.assertTrue(os.path.isfile(samFile))
#The call calculate identity will run a lot of internal consistency checks
#as it calculates the alignment identity.
return ReadAlignmentStats.getReadAlignmentStats(samFile, readFastqFile,
referenceFastaFile, globalAlignment=True)
def validateVcf(self, vcfFile, referenceFastaFile, mutationsFile):
#Load reference sequences
referenceSequences = getFastaDictionary(referenceFastaFile)
#Load mutations
mutations = set(map(lambda x : (x[0], int(x[1])+1, x[2]), \
map(lambda x : x.split(), open(mutationsFile, 'r'))))
#Load VCF mutations
imputedMutations = vcfRead(vcfFile)
#print "Known mutations", sorted(list(mutations))
#print "Imputed mutations", sorted(list(imputedMutations))
#Compare mutation sets
intersectionSize = float(len(mutations.intersection(imputedMutations)))
#Return precision, recall, number of mutations called, number of known mutations
return intersectionSize/len(imputedMutations) if len(imputedMutations) else 0.0, \
intersectionSize/len(mutations) if len(mutations) else 0.0, len(imputedMutations), len(mutations)
def checkHmm(self, hmmFile):
Hmm.loadHmm(hmmFile) #This performs a bunch of internal consistency checks
def runMarginAlign(self, readFastqFile, referenceFastaFile, args=""):
startTime = time.time()
system("\t".join([ self.marginAlign, readFastqFile,
referenceFastaFile, self.outputSamFile, "--jobTree=%s" % self.jobTree, args ]))
runTime = time.time() - startTime
readAlignmentStats = self.validateSam(self.outputSamFile, readFastqFile, referenceFastaFile)
#Get some stats to print
identity = numpy.average(map(lambda rAS : rAS.identity(), readAlignmentStats))
mismatchesPerAlignedBase = numpy.average(map(lambda rAS : rAS.mismatchesPerAlignedBase(), readAlignmentStats))
insertionsPerReadBase = numpy.average(map(lambda rAS : rAS.insertionsPerReadBase(), readAlignmentStats))
deletionsPerReadBase = numpy.average(map(lambda rAS : rAS.deletionsPerReadBase(), readAlignmentStats))
logger.info("Ran marginAlign with args: %s, with reference: %s and reads: %s. \
Got identity: %s, Mismatches per aligned base: %s, Insertions per read base: %s, \
Deletions per read base: %s, Took: %s seconds" % \
(args, readFastqFile, referenceFastaFile, identity,
mismatchesPerAlignedBase, insertionsPerReadBase,
deletionsPerReadBase, runTime))
system("rm -rf %s" % self.jobTree)
###The following functions test marginAlign
def testMarginAlignDefaults(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1)
def testMarginAlignNoChain(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--noChain")
def testMarginAlignNoRealign(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--noRealign")
def testMarginAlignNoRealignNoChain(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--noRealign --noChain")
def testMarginAlignEm(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--em --outputModel %s" % self.outputHmmFile)
self.checkHmm(self.outputHmmFile)
def testMarginAlignEmNoChain(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--noChain --em --outputModel %s" % self.outputHmmFile)
self.checkHmm(self.outputHmmFile)
def testMarginAlignLoadCustomInputModel(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--inputModel %s" % self.inputHmmFile)
def testMarginAlignBwa(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--bwa")
def testMarginAlignBwaNoRealign(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--bwa --noRealign")
def testMarginAlignGraphMap(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--graphmap")
def testMarginAlignGraphMapNoRealign(self):
self.runMarginAlign(self.readFastqFile1, self.referenceFastaFile1, "--graphmap --noRealign")
#The following tests marginCaller
def runMarginCaller(self, samFile, referenceFastaFile, mutationsFile, args=""):
startTime = time.time()
system("\t".join([ self.marginCaller, samFile, referenceFastaFile,
self.outputVcfFile, "--jobTree=%s" % self.jobTree, args ]))
runTime = time.time() - startTime
precision, recall, numberOfCalls, numberOfKnownMutations = self.validateVcf(self.outputVcfFile,
referenceFastaFile, mutationsFile)
logger.info("Ran marginCaller with args: %s, with reference: %s and sam: %s. \
Got: %s precision, Got: %s recall, Number of calls: %s, Number of known mutations: %s,\
Took: %s seconds" % (args, samFile, referenceFastaFile, precision, recall,
numberOfCalls, numberOfKnownMutations, runTime))
system("rm -rf %s" % self.jobTree)
def testMarginCallerDefaults(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFile,
self.mutatedReferenceFastaFile,
self.mutationsFile)
def testMarginCallerNoMargin(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFile,
self.mutatedReferenceFastaFile,
self.mutationsFile, "--noMargin")
def testMarginCallerNoMarginLast(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFileLast,
self.mutatedReferenceFastaFile,
self.mutationsFile)
def testMarginCallerNoMarginBwa(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFileBwa,
self.mutatedReferenceFastaFile,
self.mutationsFile)
def testMarginCallerNoMarginGraphMap(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFileGraphMap,
self.mutatedReferenceFastaFile,
self.mutationsFile)
def testMarginCallerNoMarginLastNoMargin(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFileLast,
self.mutatedReferenceFastaFile,
self.mutationsFile, "--noMargin")
def testMarginCallerNoMarginBwaNoMargin(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFileBwa,
self.mutatedReferenceFastaFile,
self.mutationsFile, "--noMargin")
def testMarginCallerNoMarginGraphMapNoMargin(self):
self.runMarginCaller(self.inputSamFileForMutatedReferenceFileGraphMap,
self.mutatedReferenceFastaFile,
self.mutationsFile, "--noMargin")
#Full integrative test that runs EM to train a model, then uses the resulting
#model and alignment to calculate SNPs
def testMarginAlignAndCallerTogether(self):
if longTests:
#This will run margin-align to get the output-model
self.runMarginAlign(self.readFastqFile2, self.mutatedReferenceFastaFile, \
args="--em --outputModel %s" % self.outputHmmFile)
#Establish the accuracy of the alignment
self.runMarginCaller(self.outputSamFile,
self.mutatedReferenceFastaFile,
self.mutationsFile, "--noMargin")
#Establish the accuracy of the model before modification
self.runMarginCaller(self.outputSamFile,
self.mutatedReferenceFastaFile,
self.mutationsFile, "--alignmentModel=%s --errorModel=%s" %
(self.outputHmmFile, self.outputHmmFile))
#Modify the HMM to make it more tolerant other substitutions / normalise the GC content / simplify emission probs
system("%s %s %s --gcContent=0.5 --substitutionRate=0.2 --setFlatIndelEmissions" % (self.modifyHmm, self.outputHmmFile, self.outputHmmFile))
#Establish the accuracy of the model after modification
self.runMarginCaller(self.outputSamFile,
self.mutatedReferenceFastaFile,
self.mutationsFile, "--alignmentModel=%s --errorModel=%s" %
(self.outputHmmFile, self.outputHmmFile))
#This runs margin stats (just to ensure it runs without falling over)
def testMarginStats(self):
system("%s %s %s %s --identity --mismatchesPerAlignedBase --readCoverage \
--deletionsPerReadBase --insertionsPerReadBase --printValuePerReadAlignment" % \
(self.marginStats, self.inputSamFile1, self.readFastqFile1, self.referenceFastaFile1))
def main():
parser = getBasicOptionParser()
parser.add_option("--longTests", dest="longTests", action="store_true",
help="Run longer, more complete tests (with more reads)",
default=False)
options, args = parseSuiteTestOptions(parser)
global longTests
longTests = options.longTests
sys.argv = sys.argv[:1]
unittest.main()
if __name__ == '__main__':
main() | isovic/marginAlign | tests/tests.py | Python | mit | 12,560 | [
"BWA",
"pysam"
] | 70fc20fe85a7e55887cc4530d281768f58e136f28b82f69c7bb119ae90857925 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
Tests specific functionality of the bands plot.
Different inputs are tested (siesta .bands and sisl Hamiltonian).
"""
from sisl.viz.plots.geometry import GeometryPlot
from sisl.messages import SislWarning
import numpy as np
import pytest
import sisl
from sisl.viz.plots.tests.conftest import _TestPlot
pytestmark = [pytest.mark.viz, pytest.mark.plotly]
def test_cross_product():
cell = np.eye(3) * 2
z_dir = np.array([0, 0, 1])
products = [
["x", "y", z_dir], ["-x", "y", -z_dir], ["-x", "-y", z_dir],
["b", "c", cell[0]], ["c", "b", -cell[0]],
np.eye(3)
]
for v1, v2, result in products:
assert np.all(GeometryPlot._cross_product(v1, v2, cell) == result)
class TestGeometry(_TestPlot):
@pytest.fixture(scope="class", params=["sisl_geom", "ghost_atoms"])
def init_func_and_attrs(self, request):
name = request.param
if name == "sisl_geom":
init_func = sisl.geom.graphene(orthogonal=True).plot
elif name == "ghost_atoms":
init_func = sisl.Geometry([[0, 0, 1], [1, 0, 0]], atoms=[sisl.Atom(6), sisl.Atom(-6)]).plot
attrs = {}
return init_func, attrs
@pytest.fixture(scope="class", params=[None, *sisl.viz.GeometryPlot.get_class_param("backend").options])
def backend(self, request):
return request.param
@pytest.fixture(params=[1, 2, 3])
def ndim(self, request, backend):
if backend == "matplotlib" and request.param == 3:
pytest.skip("Matplotlib 3D representations are not available yet")
return request.param
@pytest.fixture(params=["cartesian", "lattice", "explicit"])
def axes(self, request, ndim):
if request.param == "cartesian":
return {1: "x", 2: "x-y", 3: "xyz"}[ndim]
elif request.param == "lattice":
# We don't test the 3D case because it doesn't work
if ndim == 3:
pytest.skip("3D view doesn't support fractional coordinates")
return {1: "a", 2: "a-b"}[ndim]
elif request.param == "explicit":
if ndim == 3:
pytest.skip("3D view doesn't support explicit directions")
return {
1: [[1, 1, 0]],
2: [[1, 1, 0], [0, 1, 1]],
}[ndim]
@pytest.fixture(params=["Unit cell", "supercell"])
def nsc(self, request):
return {"Unit cell": [1, 1, 1], "supercell": [2, 1, 1]}[request.param]
def _check_all_atomic_props_shape(self, backend_info, na, nsc_val):
na_sc = na*nsc_val[0]*nsc_val[1]*nsc_val[2]
for key, value in backend_info["atoms_props"].items():
if not isinstance(value, np.ndarray):
continue
assert value.shape[0] == na_sc, f"'{key}' doesn't have the appropiate shape"
if key == "xy":
assert value.shape[1] == 2
elif key == "xyz":
assert value.shape[1] == 3
@pytest.mark.parametrize("atoms, na", [([], 0), (0, 1), (None, "na")])
def test_atoms(self, plot, axes, nsc, atoms, na):
plot.update_settings(axes=axes, nsc=nsc, show_bonds=False, show_cell=False, atoms=atoms)
if na == "na":
na = plot.geometry.na
backend_info = plot._for_backend
self._check_all_atomic_props_shape(backend_info, na, nsc)
@pytest.mark.parametrize("show_bonds", [False, True])
def test_toggle_bonds(self, plot, axes, ndim, nsc, show_bonds, test_attrs):
plot.update_settings(axes=axes, nsc=nsc, show_bonds=show_bonds, bind_bonds_to_ats=True, show_cell=False, atoms=[])
assert len(plot._for_backend["bonds_props"]) == 0
plot.update_settings(bind_bonds_to_ats=False)
backend_info = plot._for_backend
bonds_props = backend_info["bonds_props"]
if not test_attrs.get("no_bonds", False):
n_bonds = len(bonds_props)
if show_bonds and ndim > 1:
assert n_bonds > 0
if ndim == 2:
assert bonds_props[0]["xys"].shape == (2, 2)
elif ndim == 3:
assert bonds_props[0]["xyz1"].shape == (3,)
assert bonds_props[0]["xyz2"].shape == (3,)
else:
assert n_bonds == 0
@pytest.mark.parametrize("show_cell", [False, "box", "axes"])
def test_cell(self, plot, axes, show_cell):
plot.update_settings(axes=axes, show_cell=show_cell)
assert plot._for_backend["show_cell"] == show_cell
@pytest.mark.parametrize("show_cell", [False, "box", "axes"])
def test_cell_styles(self, plot, axes, show_cell):
cell_style = {"color": "red", "width": 2, "opacity": 0.6}
plot.update_settings(axes=axes, show_cell=show_cell, cell_style=cell_style)
assert plot._for_backend["cell_style"] == cell_style
def test_atoms_sorted_2d(self, plot):
plot.update_settings(atoms=None, axes="yz", nsc=[1, 1, 1])
# Check that atoms are sorted along x
assert np.allclose(plot.geometry.xyz[:, 1:][plot.geometry.xyz[:, 0].argsort()], plot._for_backend["atoms_props"]["xy"])
def test_atoms_style(self, plot, axes, ndim, nsc):
plot.update_settings(atoms=None, axes=axes, nsc=nsc)
rand_values = np.random.random(plot.geometry.na)
atoms_style = {"color": rand_values, "size": rand_values, "opacity": rand_values}
new_atoms_style = {"atoms": 0, "color": 2, "size": 2, "opacity": 0.3}
if ndim == 2:
depth_vector = plot._cross_product(*plot.get_setting("axes"), plot.geometry.cell)
sorted_atoms = np.concatenate(plot.geometry.sort(vector=depth_vector, ret_atoms=True)[1])
else:
sorted_atoms = plot.geometry._sanitize_atoms(None)
# Try both passing a dictionary and a list with one dictionary
for i, atoms_style_val in enumerate((atoms_style, [atoms_style], [atoms_style, new_atoms_style])):
plot.update_settings(atoms_style=atoms_style_val)
backend_info = plot._for_backend
self._check_all_atomic_props_shape(backend_info, plot.geometry.na, nsc)
if i != 2:
for key in atoms_style:
if not (ndim == 3 and key == "color"):
assert np.allclose(
backend_info["atoms_props"][key].astype(float),
np.tile(atoms_style[key][sorted_atoms], nsc[0]*nsc[1]*nsc[2])
)
else:
for key in atoms_style:
if not (ndim == 3 and key == "color"):
assert np.isclose(
backend_info["atoms_props"][key].astype(float),
np.tile(atoms_style[key][sorted_atoms], nsc[0]*nsc[1]*nsc[2])
).sum() == (plot.geometry.na - 1) * nsc[0]*nsc[1]*nsc[2]
def test_bonds_style(self, plot, axes, ndim, nsc):
    """Custom bond width/opacity must reach the backend's bonds_props."""
    # Bonds are not drawn in 1D views; nothing to check there.
    if ndim == 1:
        return
    requested = {"width": 2, "opacity": 0.6}
    plot.update_settings(atoms=None, axes=axes, nsc=nsc, bonds_style=requested)
    first_bond = plot._for_backend["bonds_props"][0]
    assert first_bond["width"] == 2
    assert first_bond["opacity"] == 0.6
    # Reset so later tests are unaffected by the custom style.
    plot.update_settings(bonds_style={})
def test_arrows(self, plot, axes, ndim, nsc):
    """Arrow data must be transferred, filtered by atom, and NaN-padded."""
    # Check that arrows accepts both a dictionary and a list and the data is properly transferred
    for arrows in ({"data": [0, 0, 2]}, [{"data": [0, 0, 2]}]):
        plot.update_settings(axes=axes, arrows=arrows, atoms=None, nsc=nsc, atoms_style=[])
        arrow_data = plot._for_backend["arrows"][0]["data"]
        assert arrow_data.shape == (plot.geometry.na * nsc[0]*nsc[1]*nsc[2], ndim)
        assert not np.isnan(arrow_data).any()
    # Now check that atom selection works
    plot.update_settings(arrows=[{"atoms": 0, "data": [0, 0, 2]}])
    arrow_data = plot._for_backend["arrows"][0]["data"]
    assert arrow_data.shape == (plot.geometry.na * nsc[0]*nsc[1]*nsc[2], ndim)
    # Atoms without arrow data are NaN-padded; atom 0 carries real data.
    assert np.isnan(arrow_data).any()
    assert not np.isnan(arrow_data[0]).any()
    # Check that if atoms is provided, data is only stored for those atoms that are going to be
    # displayed
    plot.update_settings(atoms=0, arrows=[{"atoms": 0, "data": [0, 0, 2]}])
    arrow_data = plot._for_backend["arrows"][0]["data"]
    assert arrow_data.shape == (nsc[0]*nsc[1]*nsc[2], ndim)
    assert not np.isnan(arrow_data).any()
    # Check that if no data is provided for the atoms that are displayed, arrow data is not stored
    # We also check that a warning is being raised because we are providing arrow data for atoms that
    # are not being displayed.
    with pytest.warns(SislWarning):
        plot.update_settings(atoms=1, arrows=[{"atoms": 0, "data": [0, 0, 2]}])
    assert len(plot._for_backend["arrows"]) == 0
    # Finally, check that multiple arrows are passed to the backend
    plot.update_settings(atoms=None, arrows=[{"data": [0, 0, 2]}, {"data": [1, 0, 0]}])
    assert len(plot._for_backend["arrows"]) == 2
| zerothi/sisl | sisl/viz/plots/tests/test_geometry.py | Python | mpl-2.0 | 9,456 | [
"SIESTA"
] | 9ac31046f9fd8fb1b33f84cbd1e8266c9d96989a71cf129efa30a53ebbda2851 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import inspect
import os.path
import shutil
from spack import *
class Pexsi(MakefilePackage):
    """The PEXSI library is written in C++, and uses message passing interface
    (MPI) to parallelize the computation on distributed memory computing
    systems and achieve scalability on more than 10,000 processors.

    The Pole EXpansion and Selected Inversion (PEXSI) method is a fast
    method for electronic structure calculation based on Kohn-Sham density
    functional theory. It efficiently evaluates certain selected elements
    of matrix functions, e.g., the Fermi-Dirac function of the KS Hamiltonian,
    which yields a density matrix. It can be used as an alternative to
    diagonalization methods for obtaining the density, energy and forces
    in electronic structure calculations.
    """

    homepage = 'https://math.berkeley.edu/~linlin/pexsi/index.html'
    url = 'https://math.berkeley.edu/~linlin/pexsi/download/pexsi_v0.9.0.tar.gz'

    # version('1.0', '4600b03e235935fe623acf500df0edfa')
    version('0.10.2', '012f6800098671ec39c2ed7b38935e27')
    version('0.9.2', '0ce491a3a922d271c4edf9b20aa93076')
    version('0.9.0', '0c1a2de891ba1445dfc184b2fa270ed8')

    depends_on('parmetis')
    # Each PEXSI release builds only against a narrow superlu-dist range.
    depends_on('superlu-dist@3.3:3.999', when='@:0.9.0')
    depends_on('superlu-dist@4.3:4.999', when='@0.9.2')
    depends_on('superlu-dist@5.1.2:', when='@0.10.2:')

    variant(
        'fortran', default=False, description='Builds the Fortran interface'
    )

    # The bundled Makefile is not safe for parallel builds.
    parallel = False

    def edit(self, spec, prefix):
        # Keys are placeholders in the make.inc template shipped next to this
        # package file; values are substituted in via filter_file below.
        substitutions = {
            '@MPICC': self.spec['mpi'].mpicc,
            '@MPICXX': self.spec['mpi'].mpicxx,
            '@MPIFC': self.spec['mpi'].mpifc,
            '@MPICXX_LIB': self.spec['mpi:cxx'].libs.joined(),
            '@RANLIB': 'ranlib',
            '@PEXSI_STAGE': self.stage.source_path,
            '@SUPERLU_PREFIX': self.spec['superlu-dist'].prefix,
            '@METIS_PREFIX': self.spec['metis'].prefix,
            '@PARMETIS_PREFIX': self.spec['parmetis'].prefix,
            '@LAPACK_PREFIX': self.spec['lapack'].prefix,
            '@BLAS_PREFIX': self.spec['blas'].prefix,
            '@LAPACK_LIBS': self.spec['lapack'].libs.joined(),
            '@BLAS_LIBS': self.spec['blas'].libs.joined(),
            # FIXME : what to do with compiler provided libraries ?
            '@STDCXX_LIB': ' '.join(self.compiler.stdcxx_libs),
            '@FLDFLAGS': ''
        }

        if '@0.9.2' in self.spec:
            substitutions['@FLDFLAGS'] = '-Wl,--allow-multiple-definition'

        template = join_path(
            os.path.dirname(inspect.getmodule(self).__file__),
            'make.inc'
        )
        makefile = join_path(
            self.stage.source_path,
            'make.inc'
        )
        shutil.copy(template, makefile)
        for key, value in substitutions.items():
            filter_file(key, value, makefile)

    def build(self, spec, prefix):
        super(Pexsi, self).build(spec, prefix)
        # The Fortran interface lives in a separate subdirectory makefile.
        if '+fortran' in self.spec:
            make('-C', 'fortran')

    def install(self, spec, prefix):
        # 'make install' does not exist, despite what documentation says
        mkdirp(self.prefix.lib)
        install(
            join_path(self.stage.source_path, 'src', 'libpexsi_linux.a'),
            join_path(self.prefix.lib, 'libpexsi.a')
        )
        install_tree(
            join_path(self.stage.source_path, 'include'),
            self.prefix.include
        )
        # fortran "interface"
        if '+fortran' in self.spec:
            install_tree(
                join_path(self.stage.source_path, 'fortran'),
                join_path(self.prefix, 'fortran')
            )
| EmreAtes/spack | var/spack/repos/builtin/packages/pexsi/package.py | Python | lgpl-2.1 | 4,943 | [
"DIRAC"
] | b0370b087c8d29683d5d1bb43a4b589bd19421a620c49d6aeab49c177a179dc1 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import textwrap
import time
from typing import Any, Dict, List, Optional, Union
from urllib.parse import urlencode
import markdown
import sqlalchemy as sqla
from flask import Response, request, url_for
from flask.helpers import flash
from flask_appbuilder.forms import FieldConverter
from flask_appbuilder.models.filters import BaseFilter
from flask_appbuilder.models.sqla import filters as fab_sqlafilters
from flask_appbuilder.models.sqla.filters import get_field_setup_query, set_value_to_type
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext
from markupsafe import Markup
from pendulum.datetime import DateTime
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.orm import Session
from airflow import models
from airflow.models import errors
from airflow.models.taskinstance import TaskInstance
from airflow.utils import timezone
from airflow.utils.code_utils import get_python_source
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.json import AirflowJsonEncoder
from airflow.utils.state import State, TaskInstanceState
from airflow.www.forms import DateTimeWithTimezoneField
from airflow.www.widgets import AirflowDateTimePickerWidget
def datetime_to_string(value: Optional[DateTime]) -> Optional[str]:
    """Serialize a datetime to its ISO-8601 string, passing ``None`` through."""
    return None if value is None else value.isoformat()
def get_mapped_instances(task_instance, session):
    """Fetch all mapped TaskInstances for the same dag/run/task.

    Expanded copies of a mapped task have ``map_index >= 0``; the unmapped
    parent itself (map_index == -1) is excluded.
    """
    same_task = (
        TaskInstance.dag_id == task_instance.dag_id,
        TaskInstance.run_id == task_instance.run_id,
        TaskInstance.task_id == task_instance.task_id,
    )
    query = session.query(TaskInstance).filter(*same_task, TaskInstance.map_index >= 0)
    return query.all()
def get_instance_with_map(task_instance, session):
    """Serialize a TaskInstance, summarizing its mapped instances when mapped."""
    # map_index == -1 means the task is not mapped: plain serialization.
    if task_instance.map_index == -1:
        return alchemy_to_dict(task_instance)
    return get_mapped_summary(task_instance, get_mapped_instances(task_instance, session))
def get_mapped_summary(parent_instance, task_instances):
    """Aggregate a mapped task's instances into one summary row.

    The summary state is the "most urgent" state present among the mapped
    instances (per the priority order below); start/end dates span the group.
    """
    # Ordered from most to least urgent: the first state in this list that
    # appears among the mapped instances becomes the group state.
    priority = [
        TaskInstanceState.FAILED,
        TaskInstanceState.UPSTREAM_FAILED,
        TaskInstanceState.UP_FOR_RETRY,
        TaskInstanceState.UP_FOR_RESCHEDULE,
        TaskInstanceState.QUEUED,
        TaskInstanceState.SCHEDULED,
        TaskInstanceState.DEFERRED,
        TaskInstanceState.SENSING,
        TaskInstanceState.RUNNING,
        TaskInstanceState.SHUTDOWN,
        TaskInstanceState.RESTARTING,
        TaskInstanceState.REMOVED,
        TaskInstanceState.SUCCESS,
        TaskInstanceState.SKIPPED,
    ]

    mapped_states = [ti.state for ti in task_instances]

    group_state = None
    for state in priority:
        if state in mapped_states:
            group_state = state
            break

    # Earliest start / latest end across the group; None when no instance
    # has the corresponding date set.
    group_start_date = datetime_to_string(
        min((ti.start_date for ti in task_instances if ti.start_date), default=None)
    )
    group_end_date = datetime_to_string(
        max((ti.end_date for ti in task_instances if ti.end_date), default=None)
    )

    return {
        'task_id': parent_instance.task_id,
        'run_id': parent_instance.run_id,
        'state': group_state,
        'start_date': group_start_date,
        'end_date': group_end_date,
        'mapped_states': mapped_states,
        'operator': parent_instance.operator,
        'execution_date': datetime_to_string(parent_instance.execution_date),
        'try_number': parent_instance.try_number,
    }
def encode_ti(
    task_instance: Optional[TaskInstance], is_mapped: Optional[bool], session: Optional[Session]
) -> Optional[Dict[str, Any]]:
    """Serialize a TaskInstance to a JSON-friendly dict (summary when mapped)."""
    if not task_instance:
        return None

    if is_mapped:
        mapped = get_mapped_instances(task_instance, session)
        return get_mapped_summary(task_instance, task_instances=mapped)

    ti = task_instance
    return {
        'task_id': ti.task_id,
        'dag_id': ti.dag_id,
        'run_id': ti.run_id,
        'state': ti.state,
        'duration': ti.duration,
        'start_date': datetime_to_string(ti.start_date),
        'end_date': datetime_to_string(ti.end_date),
        'operator': ti.operator,
        'execution_date': datetime_to_string(ti.execution_date),
        'try_number': ti.try_number,
    }
def encode_dag_run(dag_run: Optional[models.DagRun]) -> Optional[Dict[str, Any]]:
    """Serialize a DagRun to a JSON-friendly dict (``None`` passes through)."""
    if not dag_run:
        return None

    run = dag_run
    return {
        'dag_id': run.dag_id,
        'run_id': run.run_id,
        'start_date': datetime_to_string(run.start_date),
        'end_date': datetime_to_string(run.end_date),
        'state': run.state,
        'execution_date': datetime_to_string(run.execution_date),
        'data_interval_start': datetime_to_string(run.data_interval_start),
        'data_interval_end': datetime_to_string(run.data_interval_end),
        'run_type': run.run_type,
    }
def check_import_errors(fileloc, session):
    """Flash one "Broken DAG" message for each import error recorded for *fileloc*."""
    found = session.query(errors.ImportError).filter(errors.ImportError.filename == fileloc).all()
    for ie in found:
        # Same message text as before; category drives UI styling.
        flash(f"Broken DAG: [{ie.filename}] {ie.stacktrace}", "dag_import_error")
def get_sensitive_variables_fields():
    """Deprecated shim: delegate to ``airflow.utils.log.secrets_masker``."""
    import warnings

    from airflow.utils.log.secrets_masker import get_sensitive_variables_fields as _delegate

    warnings.warn(
        "This function is deprecated. Please use "
        "`airflow.utils.log.secrets_masker.get_sensitive_variables_fields`",
        DeprecationWarning,
        stacklevel=2,
    )
    return _delegate()
def should_hide_value_for_key(key_name):
    """Deprecated shim: delegate to ``airflow.utils.log.secrets_masker``."""
    import warnings

    from airflow.utils.log.secrets_masker import should_hide_value_for_key as _delegate

    warnings.warn(
        "This function is deprecated. Please use "
        "`airflow.utils.log.secrets_masker.should_hide_value_for_key`",
        DeprecationWarning,
        stacklevel=2,
    )
    return _delegate(key_name)
def get_params(**kwargs):
    """Return the keyword arguments URL-encoded, dropping any ``None`` values."""
    filtered = {key: val for key, val in kwargs.items() if val is not None}
    # doseq=True expands sequence values into repeated key=value pairs.
    return urlencode(filtered, True)
def generate_pages(current_page, num_of_pages, search=None, status=None, tags=None, window=7):
    """
    Generates the HTML for a paging component using a similar logic to the paging
    auto-generated by Flask managed views. The paging component defines a number of
    pages visible in the pager (window) and once the user goes to a page beyond the
    largest visible, it would scroll to the right the page numbers and keeps the
    current one in the middle of the pager component. When in the last pages,
    the pages won't scroll and just keep moving until the last page. Pager also contains
    <first, previous, ..., next, last> pages.
    This component takes into account custom parameters such as search, status, and tags
    which could be added to the pages link in order to maintain the state between
    client and server. It also allows to make a bookmark on a specific paging state.

    :param current_page: the current page number, 0-indexed
    :param num_of_pages: the total number of pages
    :param search: the search query string, if any
    :param status: 'all', 'active', or 'paused'
    :param tags: array of strings of the current filtered tags
    :param window: the number of pages to be shown in the paging component (7 default)
    :return: the HTML string of the paging component
    """
    void_link = 'javascript:void(0)'
    # Markup templates for the five kinds of pager entries; the {placeholders}
    # are filled via .format() below.
    first_node = Markup(
        """<li class="paginate_button {disabled}" id="dags_first">
    <a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">«</a>
</li>"""
    )

    previous_node = Markup(
        """<li class="paginate_button previous {disabled}" id="dags_previous">
    <a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">‹</a>
</li>"""
    )

    next_node = Markup(
        """<li class="paginate_button next {disabled}" id="dags_next">
    <a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">›</a>
</li>"""
    )

    last_node = Markup(
        """<li class="paginate_button {disabled}" id="dags_last">
    <a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">»</a>
</li>"""
    )

    page_node = Markup(
        """<li class="paginate_button {is_active}">
    <a href="{href_link}" aria-controls="dags" data-dt-idx="2" tabindex="0">{page_num}</a>
</li>"""
    )

    output = [Markup('<ul class="pagination" style="margin-top:0;">')]

    # "first" and "previous" are disabled on the first page.
    is_disabled = 'disabled' if current_page <= 0 else ''

    first_node_link = (
        void_link if is_disabled else f'?{get_params(page=0, search=search, status=status, tags=tags)}'
    )
    output.append(
        first_node.format(
            href_link=first_node_link,
            disabled=is_disabled,
        )
    )

    page_link = void_link
    if current_page > 0:
        page_link = f'?{get_params(page=current_page - 1, search=search, status=status, tags=tags)}'

    output.append(previous_node.format(href_link=page_link, disabled=is_disabled))

    mid = int(window / 2)
    last_page = num_of_pages - 1

    # Decide which page numbers are visible: pinned to the left edge,
    # centered around the current page, or pinned to the right edge.
    if current_page <= mid or num_of_pages < window:
        pages = list(range(0, min(num_of_pages, window)))
    elif mid < current_page < last_page - mid:
        pages = list(range(current_page - mid, current_page + mid + 1))
    else:
        pages = list(range(num_of_pages - window, last_page + 1))

    def is_current(current, page):
        return page == current

    for page in pages:
        vals = {
            'is_active': 'active' if is_current(current_page, page) else '',
            # The current page links nowhere; page numbers are shown 1-indexed.
            'href_link': void_link
            if is_current(current_page, page)
            else f'?{get_params(page=page, search=search, status=status, tags=tags)}',
            'page_num': page + 1,
        }
        output.append(page_node.format(**vals))

    # "next" and "last" are disabled on the last page; is_disabled is
    # deliberately recomputed here for the right-hand controls.
    is_disabled = 'disabled' if current_page >= num_of_pages - 1 else ''

    page_link = (
        void_link
        if current_page >= num_of_pages - 1
        else f'?{get_params(page=current_page + 1, search=search, status=status, tags=tags)}'
    )

    output.append(next_node.format(href_link=page_link, disabled=is_disabled))

    last_node_link = (
        void_link
        if is_disabled
        else f'?{get_params(page=last_page, search=search, status=status, tags=tags)}'
    )
    output.append(
        last_node.format(
            href_link=last_node_link,
            disabled=is_disabled,
        )
    )

    output.append(Markup('</ul>'))

    return Markup('\n'.join(output))
def epoch(dttm):
    """Returns an epoch-type date (tuple with no timezone)"""
    # NOTE(review): time.mktime interprets the timetuple in the LOCAL
    # timezone — preserved as-is to keep behavior identical.
    millis = int(time.mktime(dttm.timetuple())) * 1000
    return (millis,)
def json_response(obj):
    """Returns a json response from a json serializable python object"""
    payload = json.dumps(obj, indent=4, cls=AirflowJsonEncoder)
    return Response(response=payload, status=200, mimetype="application/json")
def make_cache_key(*args, **kwargs):
    """Used by cache to get a unique key per URL"""
    # frozenset makes the key independent of query-arg order.
    args_hash = str(hash(frozenset(request.args.items())))
    return (request.path + args_hash).encode('ascii', 'ignore')
def task_instance_link(attr):
    """Generates a URL to the Graph view for a TaskInstance."""
    dag_id = attr.get('dag_id')
    task_id = attr.get('task_id')
    # Prefer the related dag_run's execution date, fall back to the row's own
    # column, and finally to "now" so the link is always well-formed.
    execution_date = attr.get('dag_run.execution_date') or attr.get('execution_date') or timezone.utcnow()
    url = url_for('Airflow.task', dag_id=dag_id, task_id=task_id, execution_date=execution_date.isoformat())
    # Second link filters the Graph view on this task and its upstream tasks.
    url_root = url_for(
        'Airflow.graph', dag_id=dag_id, root=task_id, execution_date=execution_date.isoformat()
    )
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
            <span class="material-icons" style="margin-left:0;"
                aria-hidden="true">filter_alt</span>
        </a>
        </span>
        """
    ).format(url=url, task_id=task_id, url_root=url_root)
def state_token(state):
    """Returns a formatted string with HTML for a given State"""
    # Badge colors come from the State lookup tables so they match the UI.
    color = State.color(state)
    fg_color = State.color_fg(state)
    return Markup(
        """
        <span class="label" style="color:{fg_color}; background-color:{color};"
            title="Current State: {state}">{state}</span>
        """
    ).format(color=color, state=state, fg_color=fg_color)
def state_f(attr):
    """Gets 'state' & returns a formatted string with HTML for a given State"""
    return state_token(attr.get('state'))
def nobr_f(attr_name):
    """Returns a formatted string with HTML with a Non-breaking Text element"""

    def nobr(attr):
        value = attr.get(attr_name)
        # Markup.format escapes the value while keeping the <nobr> wrapper.
        return Markup("<nobr>{}</nobr>").format(value)

    return nobr
def datetime_f(attr_name):
    """Returns a formatted string with HTML for given DataTime"""

    def dt(attr):
        return datetime_html(attr.get(attr_name))

    return dt
def datetime_html(dttm: Optional[DateTime]) -> str:
    """Return an HTML formatted string with time element to support timezone changes in UI"""
    if not dttm:
        return Markup('')
    as_iso = dttm.isoformat()
    # Drop the "YYYY-" prefix when the year matches the current UTC year,
    # keeping the display compact.
    if timezone.utcnow().isoformat()[:4] == as_iso[:4]:
        as_iso = as_iso[5:]
    # The empty title will be replaced in JS code when non-UTC dates are displayed
    return Markup('<nobr><time title="" datetime="{}">{}</time></nobr>').format(as_iso, as_iso)
def json_f(attr_name):
    """Returns a formatted string with HTML for given JSON serializable"""

    def json_(attr):
        serialized = json.dumps(attr.get(attr_name))
        return Markup('<nobr>{}</nobr>').format(serialized)

    return json_
def dag_link(attr):
    """Generates a URL to the Graph view for a Dag."""
    dag_id = attr.get('dag_id')
    if not dag_id:
        # Row without a DAG id: render a plain placeholder.
        return Markup('None')
    url = url_for('Airflow.graph', dag_id=dag_id, execution_date=attr.get('execution_date'))
    return Markup('<a href="{}">{}</a>').format(url, dag_id)
def dag_run_link(attr):
    """Generates a URL to the Graph view for a DagRun.

    ``attr`` is the row mapping provided by FAB; the related run's execution
    date is preferred, with the row's own column as fallback.
    """
    dag_id = attr.get('dag_id')
    run_id = attr.get('run_id')
    # Fix: the key was misspelled 'dag_run.exectuion_date', so the related
    # run's execution date was never found and the fallback was always used
    # (task_instance_link uses the correctly spelled key).
    execution_date = attr.get('dag_run.execution_date') or attr.get('execution_date')
    url = url_for('Airflow.graph', dag_id=dag_id, run_id=run_id, execution_date=execution_date)
    return Markup('<a href="{url}">{run_id}</a>').format(url=url, run_id=run_id)
def pygment_html_render(s, lexer=lexers.TextLexer):
    """Highlight text using a given Lexer"""
    formatter = HtmlFormatter(linenos=True)
    return highlight(s, lexer(), formatter)
def render(obj, lexer):
    """Render a given Python object with a given Pygments lexer"""
    if isinstance(obj, str):
        return Markup(pygment_html_render(obj, lexer))

    out = ""
    if isinstance(obj, (tuple, list)):
        # One labelled <div> per list element.
        for idx, item in enumerate(obj):
            out += Markup("<div>List item #{}</div>").format(idx)
            out += Markup("<div>" + pygment_html_render(item, lexer) + "</div>")
    elif isinstance(obj, dict):
        # One labelled <div> per key/value pair.
        for key, val in obj.items():
            out += Markup('<div>Dict item "{}"</div>').format(key)
            out += Markup("<div>" + pygment_html_render(val, lexer) + "</div>")
    # Unsupported types render as an empty string.
    return out
def json_render(obj, lexer):
    """Render a given Python object with json lexer"""
    if isinstance(obj, str):
        return Markup(pygment_html_render(obj, lexer))
    if isinstance(obj, (dict, list)):
        pretty = json.dumps(obj, sort_keys=True, indent=4)
        return Markup(pygment_html_render(pretty, lexer))
    # Anything else renders as an empty string.
    return ""
def wrapped_markdown(s, css_class='rich_doc'):
    """Convert a Markdown string to HTML."""
    if s is None:
        return None
    rendered = markdown.markdown(textwrap.dedent(s), extensions=['tables'])
    return Markup(f'<div class="{css_class}" >' + rendered + "</div>")
def get_attr_renderer():
    """Return Dictionary containing different Pygments Lexers for Rendering & Highlighting"""
    # Keys are template_field renderer names; values take the raw field value
    # and return highlighted HTML. Callables ('py', 'python_callable') are
    # first converted to their source text.
    return {
        'bash': lambda x: render(x, lexers.BashLexer),
        'bash_command': lambda x: render(x, lexers.BashLexer),
        'doc': lambda x: render(x, lexers.TextLexer),
        'doc_json': lambda x: render(x, lexers.JsonLexer),
        'doc_md': wrapped_markdown,
        'doc_rst': lambda x: render(x, lexers.RstLexer),
        'doc_yaml': lambda x: render(x, lexers.YamlLexer),
        'hql': lambda x: render(x, lexers.SqlLexer),
        'html': lambda x: render(x, lexers.HtmlLexer),
        'jinja': lambda x: render(x, lexers.DjangoLexer),
        'json': lambda x: json_render(x, lexers.JsonLexer),
        'md': wrapped_markdown,
        'mysql': lambda x: render(x, lexers.MySqlLexer),
        'postgresql': lambda x: render(x, lexers.PostgresLexer),
        'powershell': lambda x: render(x, lexers.PowerShellLexer),
        'py': lambda x: render(get_python_source(x), lexers.PythonLexer),
        'python_callable': lambda x: render(get_python_source(x), lexers.PythonLexer),
        'rst': lambda x: render(x, lexers.RstLexer),
        'sql': lambda x: render(x, lexers.SqlLexer),
        'tsql': lambda x: render(x, lexers.TransactSqlLexer),
        'yaml': lambda x: render(x, lexers.YamlLexer),
    }
def get_chart_height(dag):
    """
    We use the number of tasks in the DAG as a heuristic to
    approximate the size of generated chart (otherwise the charts are tiny and unreadable
    when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
    charts, that is charts that take up space based on the size of the components within.
    TODO(aoen): See [AIRFLOW-1263]
    """
    base_height = 600
    height_per_task = 10
    return base_height + height_per_task * len(dag.tasks)
class UtcAwareFilterMixin:
    """Mixin for filter for UTC time."""

    def apply(self, query, value):
        """Apply the filter."""
        # Normalize the incoming string to a UTC datetime before delegating
        # to the concrete FAB filter.
        parsed = timezone.parse(value, timezone=timezone.utc)
        return super().apply(query, parsed)
class FilterGreaterOrEqual(BaseFilter):
    """Greater than or Equal filter."""

    name = lazy_gettext("Greater than or Equal")
    arg_name = "gte"

    def apply(self, query, value):
        query, field = get_field_setup_query(query, self.model, self.column_name)
        typed_value = set_value_to_type(self.datamodel, self.column_name, value)
        # A value that fails type conversion filters nothing.
        return query if typed_value is None else query.filter(field >= typed_value)
class FilterSmallerOrEqual(BaseFilter):
    """Smaller than or Equal filter."""

    name = lazy_gettext("Smaller than or Equal")
    arg_name = "lte"

    def apply(self, query, value):
        query, field = get_field_setup_query(query, self.model, self.column_name)
        typed_value = set_value_to_type(self.datamodel, self.column_name, value)
        # A value that fails type conversion filters nothing.
        return query if typed_value is None else query.filter(field <= typed_value)
# UTC-aware variants of the stock FAB filters: UtcAwareFilterMixin parses the
# incoming value as a UTC datetime before delegating to the base filter.
class UtcAwareFilterSmallerOrEqual(UtcAwareFilterMixin, FilterSmallerOrEqual):
    """Smaller than or Equal filter for UTC time."""


class UtcAwareFilterGreaterOrEqual(UtcAwareFilterMixin, FilterGreaterOrEqual):
    """Greater than or Equal filter for UTC time."""


class UtcAwareFilterEqual(UtcAwareFilterMixin, fab_sqlafilters.FilterEqual):
    """Equality filter for UTC time."""


class UtcAwareFilterGreater(UtcAwareFilterMixin, fab_sqlafilters.FilterGreater):
    """Greater Than filter for UTC time."""


class UtcAwareFilterSmaller(UtcAwareFilterMixin, fab_sqlafilters.FilterSmaller):
    """Smaller Than filter for UTC time."""


class UtcAwareFilterNotEqual(UtcAwareFilterMixin, fab_sqlafilters.FilterNotEqual):
    """Not Equal To filter for UTC time."""


class UtcAwareFilterConverter(fab_sqlafilters.SQLAFilterConverter):
    """Retrieve conversion tables for UTC-Aware filters."""
class AirflowFilterConverter(fab_sqlafilters.SQLAFilterConverter):
    """Retrieve conversion tables for Airflow-specific filters."""

    # Each entry maps a datamodel predicate method name to the filters offered
    # for columns matching it; our entries are prepended so they take
    # precedence over FAB's defaults.
    conversion_table = (
        (
            'is_utcdatetime',
            [
                UtcAwareFilterEqual,
                UtcAwareFilterGreater,
                UtcAwareFilterSmaller,
                UtcAwareFilterNotEqual,
                UtcAwareFilterSmallerOrEqual,
                UtcAwareFilterGreaterOrEqual,
            ],
        ),
        # FAB will try to create filters for extendedjson fields even though we
        # exclude them from all UI, so we add this here to make it ignore them.
        (
            'is_extendedjson',
            [],
        ),
    ) + fab_sqlafilters.SQLAFilterConverter.conversion_table
class CustomSQLAInterface(SQLAInterface):
    """
    FAB does not know how to handle columns with leading underscores because
    they are not supported by WTForm. This hack will remove the leading
    '_' from the key to lookup the column names.
    """

    def __init__(self, obj, session=None):
        super().__init__(obj, session=session)

        def clean_column_names():
            # Strip leading underscores so FAB/WTForms can address the columns.
            if self.list_properties:
                self.list_properties = {k.lstrip('_'): v for k, v in self.list_properties.items()}
            if self.list_columns:
                self.list_columns = {k.lstrip('_'): v for k, v in self.list_columns.items()}

        clean_column_names()
        # Support for AssociationProxy in search and list columns
        for desc in self.obj.__mapper__.all_orm_descriptors:
            if not isinstance(desc, AssociationProxy):
                continue
            proxy_instance = getattr(self.obj, desc.value_attr)
            # Expose the proxied remote column under the proxy's attribute name
            # so FAB can list/search on it.
            self.list_columns[desc.value_attr] = proxy_instance.remote_attr.prop.columns[0]
            self.list_properties[desc.value_attr] = proxy_instance.remote_attr.prop

    def is_utcdatetime(self, col_name):
        """Check if the datetime is a UTC one."""
        from airflow.utils.sqlalchemy import UtcDateTime

        if col_name in self.list_columns:
            obj = self.list_columns[col_name].type
            # Match either the type itself or a TypeDecorator wrapping it.
            return (
                isinstance(obj, UtcDateTime)
                or isinstance(obj, sqla.types.TypeDecorator)
                and isinstance(obj.impl, UtcDateTime)
            )
        return False

    def is_extendedjson(self, col_name):
        """Checks if it is a special extended JSON type"""
        from airflow.utils.sqlalchemy import ExtendedJSON

        if col_name in self.list_columns:
            obj = self.list_columns[col_name].type
            # Match either the type itself or a TypeDecorator wrapping it.
            return (
                isinstance(obj, ExtendedJSON)
                or isinstance(obj, sqla.types.TypeDecorator)
                and isinstance(obj.impl, ExtendedJSON)
            )
        return False

    def get_col_default(self, col_name: str) -> Any:
        """Return the column default, or None for non-column attributes."""
        if col_name not in self.list_columns:
            # Handle AssociationProxy etc, or anything that isn't a "real" column
            return None
        return super().get_col_default(col_name)

    filter_converter_class = AirflowFilterConverter
# This class is used directly (i.e. we can't tell Fab to use a different
# subclass) so we have no other option than to edit the conversion table in
# place. Prepending our entry makes FAB pick the timezone-aware field and
# widget for columns that answer True to is_utcdatetime().
FieldConverter.conversion_table = (
    ('is_utcdatetime', DateTimeWithTimezoneField, AirflowDateTimePickerWidget),
) + FieldConverter.conversion_table
class UIAlert:
    """
    Helper for alerts messages shown on the UI

    :param message: The message to display, either a string or Markup
    :param category: The category of the message, one of "info", "warning", "error", or any custom category.
        Defaults to "info".
    :param roles: List of roles that should be shown the message. If ``None``, show to all users.
    :param html: Whether the message has safe html markup in it. Defaults to False.

    For example, show a message to all users:

    .. code-block:: python

        UIAlert("Welcome to Airflow")

    Or only for users with the User role:

    .. code-block:: python

        UIAlert("Airflow update happening next week", roles=["User"])

    You can also pass html in the message:

    .. code-block:: python

        UIAlert('Visit <a href="https://airflow.apache.org">airflow.apache.org</a>', html=True)

        # or safely escape part of the message
        # (more details: https://markupsafe.palletsprojects.com/en/2.0.x/formatting/)
        UIAlert(Markup("Welcome <em>%s</em>") % ("John & Jane Doe",))
    """

    def __init__(
        self,
        message: Union[str, Markup],
        category: str = "info",
        roles: Optional[List[str]] = None,
        html: bool = False,
    ):
        self.category = category
        self.roles = roles
        self.html = html
        # Only wrap in Markup when the caller explicitly vouches for the HTML.
        self.message = Markup(message) if html else message

    def should_show(self, securitymanager) -> bool:
        """Determine if the user should see the message based on their role membership"""
        if not self.roles:
            # No role restriction: visible to every user.
            return True
        user_roles = {role.name for role in securitymanager.current_user.roles}
        return bool(user_roles & set(self.roles))
| apache/airflow | airflow/www/utils.py | Python | apache-2.0 | 26,115 | [
"VisIt"
] | ef66c5d948f8bb7df5f40e0812ee761879507f9d67a206acea66b1f3a9b0e536 |
"""
KeepNote
Classic three-paned viewer for KeepNote.
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import gettext
import os
import subprocess
import traceback
# pygtk imports
import pygtk
pygtk.require('2.0')
from gtk import gdk
import gtk
import gobject
# keepnote imports
import keepnote
from keepnote import unicode_gtk, KeepNoteError
from keepnote.notebook import NoteBookError
from keepnote.gui import \
add_actions, \
dialog_image_resize, \
get_resource, \
get_resource_image, \
get_resource_pixbuf, \
get_accel_file, \
Action, \
ToggleAction, \
FileChooserDialog, \
CONTEXT_MENU_ACCEL_PATH
from keepnote.history import NodeHistory
from keepnote import notebook as notebooklib
from keepnote.gui import richtext
from keepnote.gui.richtext import RichTextView, RichTextImage, RichTextError
from keepnote.gui.treeview import KeepNoteTreeView
from keepnote.gui.listview import KeepNoteListView
from keepnote.gui.editor import KeepNoteEditor
from keepnote.gui.editor_richtext import RichTextEditor
from keepnote.gui.editor_text import TextEditor
from keepnote.gui.editor_multi import ContentEditor
from keepnote.gui.icon_menu import IconMenu
from keepnote import notebook as notebooklib
from keepnote.gui.treemodel import iter_children
from keepnote.gui.viewer import Viewer
from keepnote.gui.icons import \
lookup_icon_filename
# Gettext-style translation helper used for user-visible strings below.
_ = keepnote.translate

# Default positions (pixels) of the pane-divider sashes.
DEFAULT_VSASH_POS = 200
DEFAULT_HSASH_POS = 200
# Default pane layout — presumably "vertical" vs "horizontal"; confirm against
# ThreePaneViewer's view-mode handling.
DEFAULT_VIEW_MODE = "vertical"
class ThreePaneViewer (Viewer):
    """A viewer with a treeview, listview, and editor.

    The three panes are laid out as a horizontal paned widget (treeview on
    the left) whose right side is a second paned widget holding the listview
    and the editor; the second pane's orientation is switchable via
    set_view_mode ("vertical" or "horizontal").
    """
    def __init__(self, app, main_window, viewerid=None):
        Viewer.__init__(self, app, main_window, viewerid,
                        viewer_name="three_pane_viewer")
        # True once add_ui() has installed actions/toolbars on the window
        self._ui_ready = False
        # node selections
        self._current_page = None # current page in editor
        self._treeview_sel_nodes = [] # current selected nodes in treeview
        self._queue_list_select = [] # nodes to select in listview after treeview change
        self._new_page_occurred = False
        self.back_button = None
        self._view_mode = DEFAULT_VIEW_MODE
        self.connect("history-changed", self._on_history_changed)
        #=========================================
        # widgets
        # treeview
        self.treeview = KeepNoteTreeView()
        self.treeview.set_get_node(self._app.get_node)
        self.treeview.connect("select-nodes", self._on_tree_select)
        self.treeview.connect("delete-node", self.on_delete_node)
        self.treeview.connect("error", lambda w,t,e: self.emit("error", t, e))
        self.treeview.connect("edit-title", self._on_edit_title)
        self.treeview.connect("goto-node", self.on_goto_node)
        self.treeview.connect("activate-node", self.on_activate_node)
        self.treeview.connect("drop-file", self._on_attach_file)
        # listview
        self.listview = KeepNoteListView()
        self.listview.set_get_node(self._app.get_node)
        self.listview.connect("select-nodes", self._on_list_select)
        self.listview.connect("delete-node", self.on_delete_node)
        self.listview.connect("goto-node", self.on_goto_node)
        self.listview.connect("activate-node", self.on_activate_node)
        self.listview.connect("goto-parent-node",
                              lambda w: self.on_goto_parent_node())
        self.listview.connect("error", lambda w,t,e: self.emit("error", t, e))
        self.listview.connect("edit-title", self._on_edit_title)
        self.listview.connect("drop-file", self._on_attach_file)
        self.listview.on_status = self.set_status # TODO: clean up
        # editor: a ContentEditor that dispatches to per-content-type editors,
        # with the rich-text editor as the default
        #self.editor = KeepNoteEditor(self._app)
        #self.editor = RichTextEditor(self._app)
        self.editor = ContentEditor(self._app)
        rich_editor = RichTextEditor(self._app)
        self.editor.add_editor("text/xhtml+xml", rich_editor)
        self.editor.add_editor("text", TextEditor(self._app))
        self.editor.set_default_editor(rich_editor)
        self.editor.connect("view-node", self._on_editor_view_node)
        self.editor.connect("child-activated", self._on_child_activated)
        self.editor.connect("visit-node", lambda w, n: self.goto_node(n, False))
        self.editor.connect("error", lambda w,t,e: self.emit("error", t, e))
        self.editor.connect("window-request", lambda w,t:
                            self.emit("window-request", t))
        self.editor.view_pages([])
        self.editor_pane = gtk.VBox(False, 5)
        self.editor_pane.pack_start(self.editor, True, True, 0)
        #=====================================
        # layout
        # TODO: make sure to add underscore for these variables
        # create a horizontal paned widget
        self.hpaned = gtk.HPaned()
        self.pack_start(self.hpaned, True, True, 0)
        self.hpaned.set_position(DEFAULT_HSASH_POS)
        # layout major widgets
        self.paned2 = gtk.VPaned()
        self.hpaned.add2(self.paned2)
        self.paned2.set_position(DEFAULT_VSASH_POS)
        # treeview and scrollbars
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        sw.set_shadow_type(gtk.SHADOW_IN)
        sw.add(self.treeview)
        self.hpaned.add1(sw)
        # listview with scrollbars
        self.listview_sw = gtk.ScrolledWindow()
        self.listview_sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.listview_sw.set_shadow_type(gtk.SHADOW_IN)
        self.listview_sw.add(self.listview)
        self.paned2.add1(self.listview_sw)
        #self.paned2.child_set_property(self.listview_sw, "shrink", True)
        # layout editor
        self.paned2.add2(self.editor_pane)
        self.treeview.grab_focus()
    def set_notebook(self, notebook):
        """Set the notebook for the viewer"""
        # add/remove reference to notebook
        self._app.ref_notebook(notebook)
        if self._notebook is not None:
            self._app.unref_notebook(self._notebook)
        # deregister last notebook, if it exists
        if self._notebook:
            self._notebook.node_changed.remove(
                self.on_notebook_node_changed)
        # setup listeners
        if notebook:
            notebook.node_changed.add(self.on_notebook_node_changed)
        # set notebook
        self._notebook = notebook
        self.editor.set_notebook(notebook)
        self.listview.set_notebook(notebook)
        self.treeview.set_notebook(notebook)
        # icon menus only exist after add_ui() has run; guard on the popup
        if self.treeview.get_popup_menu():
            self.treeview.get_popup_menu().iconmenu.set_notebook(notebook)
            self.listview.get_popup_menu().iconmenu.set_notebook(notebook)
        # restore selections
        self._load_selections()
        # put focus on treeview
        self.treeview.grab_focus()
    def load_preferences(self, app_pref, first_open=False):
        """Load application preferences"""
        p = app_pref.get("viewers", "three_pane_viewer", define=True)
        self.set_view_mode(p.get("view_mode", DEFAULT_VIEW_MODE))
        self.paned2.set_property("position-set", True)
        self.hpaned.set_property("position-set", True)
        self.paned2.set_position(p.get("vsash_pos", DEFAULT_VSASH_POS))
        self.hpaned.set_position(p.get("hsash_pos", DEFAULT_HSASH_POS))
        self.listview.set_date_formats(app_pref.get("timestamp_formats"))
        self.listview.set_rules_hint(
            app_pref.get("look_and_feel", "listview_rules",
                         default=True))
        try:
            # if this version of GTK doesn't have tree-lines, ignore it
            self.treeview.set_property(
                "enable-tree-lines",
                app_pref.get("look_and_feel", "treeview_lines", default=True))
        except:
            pass
        self.editor.load_preferences(app_pref, first_open)
        # reload ui
        if self._ui_ready:
            self.remove_ui(self._main_window)
            self.add_ui(self._main_window)
    def save_preferences(self, app_pref):
        """Save application preferences"""
        p = app_pref.get("viewers", "three_pane_viewer")
        p["view_mode"] = self._view_mode
        p["vsash_pos"] = self.paned2.get_position()
        p["hsash_pos"] = self.hpaned.get_position()
        self.editor.save_preferences(app_pref)
    def save(self):
        """Save the current notebook"""
        self.editor.save()
        self._save_selections()
    def on_notebook_node_changed(self, nodes):
        """Callback for when notebook node is changed"""
        #if self._current_page in nodes:
        #    self.emit("current-node", self._current_page)
        self.emit("modified", True)
    def undo(self):
        """Undo the last action in the viewer"""
        self.editor.undo()
    def redo(self):
        """Redo the last action in the viewer"""
        self.editor.redo()
    def get_editor(self):
        """Return the viewer's content editor widget"""
        return self.editor
    def set_status(self, text, bar="status"):
        """Set a status message"""
        self.emit("status", text, bar)
    def set_view_mode(self, mode):
        """
        Sets view mode for ThreePaneViewer
        modes:
        "vertical"
        "horizontal"
        """
        # preserve the sash position across the re-pack below
        vsash = self.paned2.get_position()
        # detach widgets
        self.paned2.remove(self.listview_sw)
        self.paned2.remove(self.editor_pane)
        self.hpaned.remove(self.paned2)
        # remake paned2
        if mode == "vertical":
            # create a vertical paned widget
            self.paned2 = gtk.VPaned()
        else:
            # create a horizontal paned widget
            self.paned2 = gtk.HPaned()
        self.paned2.set_position(vsash)
        self.paned2.show()
        self.hpaned.add2(self.paned2)
        self.hpaned.show()
        self.paned2.add1(self.listview_sw)
        self.paned2.add2(self.editor_pane)
        # record preference
        self._view_mode = mode
    def _load_selections(self):
        """Load previous node selections from notebook preferences"""
        if self._notebook:
            info = self._notebook.pref.get("viewers", "ids",
                                           self._viewerid, define=True)
            # load selections; nodeids that no longer resolve are dropped
            nodes = [node for node in (
                self._notebook.get_node_by_id(i)
                for i in info.get(
                    "selected_treeview_nodes", []))
                     if node is not None]
            self.treeview.select_nodes(nodes)
            nodes = [node for node in (
                self._notebook.get_node_by_id(i)
                for i in info.get(
                    "selected_listview_nodes", []))
                     if node is not None]
            self.listview.select_nodes(nodes)
    def _save_selections(self):
        """Save node selections into notebook preferences"""
        if self._notebook is not None:
            info = self._notebook.pref.get("viewers", "ids",
                                           self._viewerid, define=True)
            # save selections
            info["selected_treeview_nodes"] = [
                node.get_attr("nodeid")
                for node in self.treeview.get_selected_nodes()]
            info["selected_listview_nodes"] = [
                node.get_attr("nodeid")
                for node in self.listview.get_selected_nodes()]
            self._notebook.set_preferences_dirty()
    #===============================================
    # node operations
    def get_current_page(self):
        """Returns the currently focused page"""
        return self._current_page
    def get_selected_nodes(self):
        """
        Returns a list of selected nodes.

        Prefers the treeview selection when the treeview has focus, or when
        the listview has no selection of its own.
        """
        if self.treeview.is_focus():
            return self.treeview.get_selected_nodes()
        else:
            nodes = self.listview.get_selected_nodes()
            if len(nodes) == 0:
                return self.treeview.get_selected_nodes()
            else:
                return nodes
    def _on_history_changed(self, viewer, history):
        """Callback for when node browse history changes"""
        if self._ui_ready and self.back_button:
            self.back_button.set_sensitive(history.has_back())
            self.forward_button.set_sensitive(history.has_forward())
    def get_focused_widget(self, default=None):
        """Returns the currently focused widget"""
        if self.treeview.is_focus():
            return self.treeview
        if self.listview.is_focus():
            return self.listview
        else:
            return default
    def on_delete_node(self, widget, nodes=None):
        """Callback for deleting a node"""
        # get node to delete
        if nodes is None:
            nodes = self.get_selected_nodes()
        if len(nodes) == 0:
            return
        if self._main_window.confirm_delete_nodes(nodes):
            # change selection: pick the next sibling, or the parent if the
            # deleted node was the last child
            if len(nodes) == 1:
                node = nodes[0]
                widget = self.get_focused_widget(self.listview)
                parent = node.get_parent()
                children = parent.get_children()
                i = children.index(node)
                if i < len(children) - 1:
                    widget.select_nodes([children[i+1]])
                else:
                    widget.select_nodes([parent])
            else:
                widget = self.get_focused_widget(self.listview)
                widget.select_nodes([])
            # perform delete
            try:
                for node in nodes:
                    node.trash()
            except NoteBookError, e:
                self.emit("error", e.msg, e)
    def _on_editor_view_node(self, editor, node):
        """Callback for when editor views a node"""
        # record node in history
        self._history.add(node.get_attr("nodeid"))
        self.emit("history-changed", self._history)
    def _on_child_activated(self, editor, textview, child):
        """Callback for when child widget in editor is activated"""
        # double-clicked image: open it in the external image viewer
        if self._current_page and isinstance(child, richtext.RichTextImage):
            filename = self._current_page.get_file(child.get_filename())
            self._app.run_external_app("image_viewer", filename)
    def _on_tree_select(self, treeview, nodes):
        """Callback for treeview selection change"""
        # do nothing if selection is unchanged
        if self._treeview_sel_nodes == nodes:
            return
        # remember which nodes are selected in the treeview
        self._treeview_sel_nodes = nodes
        # view the children of these nodes in the listview
        self.listview.view_nodes(nodes)
        # if nodes are queued for selection in listview (via goto parent)
        # then select them here
        if len(self._queue_list_select) > 0:
            self.listview.select_nodes(self._queue_list_select)
            self._queue_list_select = []
        # make sure nodes are also selected in listview
        self.listview.select_nodes(nodes)
    def _on_list_select(self, listview, pages):
        """Callback for listview selection change"""
        # remember the selected node (only when exactly one is selected)
        if len(pages) == 1:
            self._current_page = pages[0]
        else:
            self._current_page = None
        try:
            self.editor.view_pages(pages)
        except RichTextError, e:
            self.emit("error",
                      "Could not load page '%s'." % pages[0].get_title(), e)
        self.emit("current-node", self._current_page)
    def on_goto_node(self, widget, node):
        """Focus view on a node"""
        self.goto_node(node, direct=False)
    def on_activate_node(self, widget, node):
        """Focus view on a node"""
        if self.viewing_search():
            # if we are in a search, goto node, but not directly
            self.goto_node(node, direct=False)
        else:
            if node and node.has_attr("payload_filename"):
                # open attached file
                self._main_window.on_view_node_external_app("file_launcher",
                                                            node,
                                                            kind="file")
            else:
                # goto node directly
                self.goto_node(node, direct=True)
    def on_goto_parent_node(self, node=None):
        """Focus view on a node's parent"""
        if node is None:
            nodes = self.get_selected_nodes()
            if len(nodes) == 0:
                return
            node = nodes[0]
        # get parent
        parent = node.get_parent()
        if parent is not None:
            self.goto_node(parent, direct=False)
    def _on_edit_title(self, widget, node, title):
        """Callback for title edit finishing"""
        # move cursor to editor after new page has been created
        if self._new_page_occurred:
            self._new_page_occurred = False
            if node.get_attr("content_type") != notebooklib.CONTENT_TYPE_DIR:
                self.goto_editor()
    def _on_attach_file(self, widget, parent, index, uri):
        """Attach document"""
        self._app.attach_file(uri, parent, index)
    def _on_attach_file_menu(self):
        """Callback for attach file action"""
        nodes = self.get_selected_nodes()
        if len(nodes) > 0:
            node = nodes[0]
            self._app.on_attach_file(node, self.get_toplevel())
    def new_node(self, kind, pos, parent=None):
        """Add a new node to the notebook"""
        # TODO: think about where this goes
        if self._notebook is None:
            return
        self.treeview.cancel_editing()
        self.listview.cancel_editing()
        # default parent: the single selected node, else the notebook root
        if parent is None:
            nodes = self.get_selected_nodes()
            if len(nodes) == 1:
                parent = nodes[0]
            else:
                parent = self._notebook
        node = Viewer.new_node(self, kind, pos, parent)
        self._view_new_node(node)
    def on_new_dir(self):
        """Add new folder near selected nodes"""
        self.new_node(notebooklib.CONTENT_TYPE_DIR, "sibling")
    def on_new_page(self):
        """Add new page near selected nodes"""
        self.new_node(notebooklib.CONTENT_TYPE_PAGE, "sibling")
    def on_new_child_page(self):
        """Add new page as child of selected nodes"""
        self.new_node(notebooklib.CONTENT_TYPE_PAGE, "child")
    def _view_new_node(self, node):
        """View a node particular widget"""
        self._new_page_occurred = True
        self.goto_node(node)
        # start inline title editing in whichever view shows the node
        if node in self.treeview.get_selected_nodes():
            self.treeview.edit_node(node)
        else:
            self.listview.edit_node(node)
        #widget = self.get_focused_widget()
        #
        #if widget == self.treeview:
        #    self.treeview.expand_node(node.get_parent())
        #    self.treeview.edit_node(node)
        #else:
        #    self.listview.expand_node(node.get_parent())
        #    self.listview.edit_node(node)
    def _on_rename_node(self):
        """Callback for renaming a node"""
        nodes = self.get_selected_nodes()
        if len(nodes) == 0:
            return
        widget = self.get_focused_widget()
        if widget == self.treeview:
            self.treeview.edit_node(nodes[0])
        else:
            self.listview.edit_node(nodes[0])
    def goto_node(self, node, direct=False):
        """Move view focus to a particular node"""
        if node is None:
            # default node is the one selected in the listview
            nodes = self.listview.get_selected_nodes()
            if len(nodes) == 0:
                return
            node = nodes[0]
        treenodes = self.treeview.get_selected_nodes()
        if direct:
            # direct goto: open up treeview all the way to the node
            self.treeview.select_nodes([node])
        else:
            # indirect goto: donot open up treeview, only listview
            # get path to root
            path = []
            ptr = node
            while ptr:
                if ptr in treenodes:
                    # if parent path is allready selected then quit
                    path = []
                    break
                path.append(ptr)
                ptr = ptr.get_parent()
            # find first node that is collapsed
            node2 = None
            for node2 in reversed(path):
                if not self.treeview.is_node_expanded(node2):
                    break
            # make selections
            if node2:
                self.treeview.select_nodes([node2])
            self.listview.select_nodes([node])
    def goto_next_node(self):
        """Move focus to the 'next' node"""
        widget = self.get_focused_widget(self.treeview)
        path, col = widget.get_cursor()
        if path:
            # advance the last index of the cursor path by one, staying
            # within the number of siblings at that level
            path2 = path[:-1] + (path[-1] + 1,)
            if len(path) > 1:
                it = widget.get_model().get_iter(path[:-1])
                nchildren = widget.get_model().iter_n_children(it)
            else:
                nchildren = widget.get_model().iter_n_children(None)
            if path2[-1] < nchildren:
                widget.set_cursor(path2)
    def goto_prev_node(self):
        """Move focus to the 'previous' node"""
        widget = self.get_focused_widget()
        path, col = widget.get_cursor()
        if path and path[-1] > 0:
            path2 = path[:-1] + (path[-1] - 1,)
            widget.set_cursor(path2)
    def expand_node(self, all=False):
        """Expand the tree beneath the focused node"""
        widget = self.get_focused_widget(self.treeview)
        path, col = widget.get_cursor()
        if path:
            widget.expand_row(path, all)
    def collapse_node(self, all=False):
        """Collapse the tree beneath the focused node"""
        widget = self.get_focused_widget(self.treeview)
        path, col = widget.get_cursor()
        if path:
            if all:
                # recursively collapse all notes
                widget.collapse_all_beneath(path)
            else:
                widget.collapse_row(path)
    def on_copy_tree(self):
        """Callback for copy on whole tree"""
        widget = self._main_window.get_focus()
        # only forward if the focused widget supports the signal
        if gobject.signal_lookup("copy-tree-clipboard", widget) != 0:
            widget.emit("copy-tree-clipboard")
    #============================================
    # Search
    def start_search_result(self):
        """Start a new search result"""
        self.treeview.select_nodes([])
        self.listview.view_nodes([], nested=False)
    def add_search_result(self, node):
        """Add a search result"""
        self.listview.append_node(node)
    def end_search_result(self):
        """End a search result"""
        # select top result
        try:
            self.listview.get_selection().select_path((0,))
        except:
            # don't worry if there isn't anything to select
            pass
    def viewing_search(self):
        """Returns True if we are currently viewing a search result"""
        # a search leaves the treeview unselected while the listview shows hits
        return (len(self.treeview.get_selected_nodes()) == 0 and
                len(self.listview.get_selected_nodes()) > 0)
    #=============================================
    # Goto functions
    def goto_treeview(self):
        """Switch focus to TreeView"""
        self.treeview.grab_focus()
    def goto_listview(self):
        """Switch focus to ListView"""
        self.listview.grab_focus()
    def goto_editor(self):
        """Switch focus to Editor"""
        self.editor.grab_focus()
    #===========================================
    # ui
    def add_ui(self, window):
        """Add the view's UI to a window"""
        assert window == self._main_window
        self._ui_ready = True
        self._action_group = gtk.ActionGroup("Viewer")
        self._uis = []
        add_actions(self._action_group, self._get_actions())
        self._main_window.get_uimanager().insert_action_group(
            self._action_group, 0)
        for s in self._get_ui():
            self._uis.append(
                self._main_window.get_uimanager().add_ui_from_string(s))
        uimanager = self._main_window.get_uimanager()
        uimanager.ensure_update()
        # setup toolbar
        self.back_button = uimanager.get_widget("/main_tool_bar/Viewer/Back")
        self.forward_button = uimanager.get_widget("/main_tool_bar/Viewer/Forward")
        # setup editor
        self.editor.add_ui(window)
        # TODO: Try to add accellerator to popup menu
        #menu = viewer.editor.get_textview().get_popup_menu()
        #menu.set_accel_group(self._accel_group)
        #menu.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
        # treeview context menu
        menu1 = uimanager.get_widget(
            "/popup_menus/treeview_popup").get_submenu()
        self.treeview.set_popup_menu(menu1)
        menu1.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
        menu1.set_accel_group(uimanager.get_accel_group())
        menu1.iconmenu = self._setup_icon_menu()
        item = uimanager.get_widget(
            "/popup_menus/treeview_popup/Change Note Icon")
        item.set_submenu(menu1.iconmenu)
        item.show()
        # listview context menu
        menu2 = uimanager.get_widget(
            "/popup_menus/listview_popup").get_submenu()
        self.listview.set_popup_menu(menu2)
        menu2.set_accel_group(uimanager.get_accel_group())
        menu2.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
        menu2.iconmenu = self._setup_icon_menu()
        item = uimanager.get_widget(
            "/popup_menus/listview_popup/Change Note Icon")
        item.set_submenu(menu2.iconmenu)
        item.show()
    def _setup_icon_menu(self):
        """Setup the icon menu"""
        iconmenu = IconMenu()
        iconmenu.connect("set-icon",
                         lambda w, i: self._app.on_set_icon(
                             i, u"", self.get_selected_nodes()))
        iconmenu.new_icon.connect("activate",
                                  lambda w: self._app.on_new_icon(
                                      self.get_selected_nodes(), self._notebook,
                                      self._main_window))
        iconmenu.set_notebook(self._notebook)
        return iconmenu
    def remove_ui(self, window):
        """Remove the view's UI from a window"""
        assert self._main_window == window
        self._ui_ready = False
        self.editor.remove_ui(self._main_window)
        # remove in reverse order of addition
        for ui in reversed(self._uis):
            self._main_window.get_uimanager().remove_ui(ui)
        self._uis = []
        self._main_window.get_uimanager().ensure_update()
        self._main_window.get_uimanager().remove_action_group(self._action_group)
        self._action_group = None
    def _get_ui(self):
        """Returns the UI XML"""
        # NOTE: I use a dummy menubar popup_menus so that I can have
        # accelerators on the menus.  It is a hack.
        return ["""
        <ui>
        <menubar name="main_menu_bar">
        <menu action="File">
        <placeholder name="Viewer">
        <menuitem action="New Page"/>
        <menuitem action="New Child Page"/>
        <menuitem action="New Folder"/>
        </placeholder>
        </menu>
        <menu action="Edit">
        <placeholder name="Viewer">
        <menuitem action="Attach File"/>
        <separator/>
        <placeholder name="Editor"/>
        </placeholder>
        </menu>
        <placeholder name="Viewer">
        <placeholder name="Editor"/>
        <menu action="View">
        <menuitem action="View Note in File Explorer"/>
        <menuitem action="View Note in Text Editor"/>
        <menuitem action="View Note in Web Browser"/>
        <menuitem action="Open File"/>
        </menu>
        </placeholder>
        <menu action="Go">
        <placeholder name="Viewer">
        <menuitem action="Back"/>
        <menuitem action="Forward"/>
        <separator/>
        <menuitem action="Go to Note"/>
        <menuitem action="Go to Parent Note"/>
        <menuitem action="Go to Next Note"/>
        <menuitem action="Go to Previous Note"/>
        <menuitem action="Expand Note"/>
        <menuitem action="Collapse Note"/>
        <menuitem action="Expand All Child Notes"/>
        <menuitem action="Collapse All Child Notes"/>
        <separator/>
        <menuitem action="Go to Tree View"/>
        <menuitem action="Go to List View"/>
        <menuitem action="Go to Editor"/>
        <placeholder name="Editor"/>
        </placeholder>
        </menu>
        <menu action="Tools">
        </menu>
        </menubar>
        <toolbar name="main_tool_bar">
        <placeholder name="Viewer">
        <toolitem action="New Folder"/>
        <toolitem action="New Page"/>
        <separator/>
        <toolitem action="Back"/>
        <toolitem action="Forward"/>
        <separator/>
        <placeholder name="Editor"/>
        </placeholder>
        </toolbar>
        <menubar name="popup_menus">
        <menu action="treeview_popup">
        <menuitem action="New Page"/>
        <menuitem action="New Child Page"/>
        <menuitem action="New Folder"/>
        <menuitem action="Attach File"/>
        <placeholder name="New"/>
        <separator/>
        <menuitem action="Cut"/>
        <menuitem action="Copy"/>
        <menuitem action="Copy Tree"/>
        <menuitem action="Paste"/>
        <separator/>
        <menuitem action="Delete Note"/>
        <menuitem action="Rename Note"/>
        <menuitem action="Change Note Icon"/>
        <separator/>
        <menuitem action="View Note in File Explorer"/>
        <menuitem action="View Note in Text Editor"/>
        <menuitem action="View Note in Web Browser"/>
        <menuitem action="Open File"/>
        </menu>
        <menu action="listview_popup">
        <menuitem action="Go to Note"/>
        <menuitem action="Go to Parent Note"/>
        <separator/>
        <menuitem action="New Page"/>
        <menuitem action="New Child Page"/>
        <menuitem action="New Folder"/>
        <menuitem action="Attach File"/>
        <placeholder name="New"/>
        <separator/>
        <menuitem action="Cut"/>
        <menuitem action="Copy"/>
        <menuitem action="Copy Tree"/>
        <menuitem action="Paste"/>
        <separator/>
        <menuitem action="Delete Note"/>
        <menuitem action="Rename Note"/>
        <menuitem action="Change Note Icon"/>
        <separator/>
        <menuitem action="View Note in File Explorer"/>
        <menuitem action="View Note in Text Editor"/>
        <menuitem action="View Note in Web Browser"/>
        <menuitem action="Open File"/>
        </menu>
        </menubar>
        </ui>
        """]
    def _get_actions(self):
        """Returns actions for view's UI"""
        return map(lambda x: Action(*x), [
            ("treeview_popup", None, "", "", None, lambda w: None),
            ("listview_popup", None, "", "", None, lambda w: None),
            ("Copy Tree", gtk.STOCK_COPY, _("Copy _Tree"),
             "<control><shift>C", _("Copy entire tree"),
             lambda w: self.on_copy_tree()),
            ("New Page", gtk.STOCK_NEW, _("New _Page"),
             "<control>N", _("Create a new page"),
             lambda w: self.on_new_page(), "note-new.png"),
            ("New Child Page", gtk.STOCK_NEW, _("New _Child Page"),
             "<control><shift>N", _("Create a new child page"),
             lambda w: self.on_new_child_page(),
             "note-new.png"),
            ("New Folder", gtk.STOCK_DIRECTORY, _("New _Folder"),
             "<control><shift>M", _("Create a new folder"),
             lambda w: self.on_new_dir(),
             "folder-new.png"),
            ("Attach File", gtk.STOCK_ADD, _("_Attach File..."),
             "", _("Attach a file to the notebook"),
             lambda w: self._on_attach_file_menu()),
            ("Back", gtk.STOCK_GO_BACK, _("_Back"), "", None,
             lambda w: self.visit_history(-1)),
            ("Forward", gtk.STOCK_GO_FORWARD, _("_Forward"), "", None,
             lambda w: self.visit_history(1)),
            ("Go to Note", gtk.STOCK_JUMP_TO, _("Go to _Note"),
             "", None,
             lambda w: self.on_goto_node(None, None)),
            ("Go to Parent Note", gtk.STOCK_GO_BACK, _("Go to _Parent Note"),
             "<shift><alt>Left", None,
             lambda w: self.on_goto_parent_node()),
            ("Go to Next Note", gtk.STOCK_GO_DOWN, _("Go to Next N_ote"),
             "<alt>Down", None,
             lambda w: self.goto_next_node()),
            ("Go to Previous Note", gtk.STOCK_GO_UP, _("Go to _Previous Note"),
             "<alt>Up", None,
             lambda w: self.goto_prev_node()),
            ("Expand Note", gtk.STOCK_ADD, _("E_xpand Note"),
             "<alt>Right", None,
             lambda w: self.expand_node()),
            ("Collapse Note", gtk.STOCK_REMOVE, _("_Collapse Note"),
             "<alt>Left", None,
             lambda w: self.collapse_node()),
            ("Expand All Child Notes", gtk.STOCK_ADD, _("Expand _All Child Notes"),
             "<shift><alt>Right", None,
             lambda w: self.expand_node(True)),
            ("Collapse All Child Notes", gtk.STOCK_REMOVE, _("Collapse A_ll Child Notes"),
             "<shift><alt>Left", None,
             lambda w: self.collapse_node(True)),
            ("Go to Tree View", None, _("Go to _Tree View"),
             "<control>T", None,
             lambda w: self.goto_treeview()),
            ("Go to List View", None, _("Go to _List View"),
             "<control>Y", None,
             lambda w: self.goto_listview()),
            ("Go to Editor", None, _("Go to _Editor"),
             "<control>D", None,
             lambda w: self.goto_editor()),
            ("Delete Note", gtk.STOCK_DELETE, _("_Delete"),
             "", None, self.on_delete_node),
            ("Rename Note", gtk.STOCK_EDIT, _("_Rename"),
             "", None,
             lambda w: self._on_rename_node()),
            ("Change Note Icon", None, _("_Change Note Icon"),
             "", None, lambda w: None,
             lookup_icon_filename(None, u"folder-red.png")),
        ])
| reshadh/Keepnote-LaTeX | keepnote/gui/three_pane_viewer.py | Python | gpl-2.0 | 36,245 | [
"VisIt"
] | 4f857ab012976eb6cd7fc3b6bad04b532d9837aa88d53c6c015801910694db78 |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# Eric Chang <ericchang2017@u.northwestern.edu>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy import stats
from scipy import optimize
from scipy.special import boxcox
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.deprecation import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
_check_sample_weight,
FLOAT_DTYPES, _deprecate_positional_args)
from ._encoders import OneHotEncoder
# NOTE(review): numerical tolerance constant; presumably used as a bound
# tolerance by the transformers defined later in this module -- confirm
# against the full file.
BOUNDS_THRESHOLD = 1e-7
# Public API of this module.
__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'QuantileTransformer',
    'PowerTransformer',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
    'quantile_transform',
    'power_transform',
]
def _is_constant_feature(var, mean, n_samples):
"""Detect if a feature is indistinguishable from a constant feature.
The detection is based on its computed variance and on the theoretical
error bounds of the '2 pass algorithm' for variance computation.
See "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
"""
# In scikit-learn, variance is always computed using float64 accumulators.
eps = np.finfo(np.float64).eps
upper_bound = n_samples * eps * var + (n_samples * mean * eps)**2
return var <= upper_bound
def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):
"""Set scales of near constant features to 1.
The goal is to avoid division by very small or zero values.
Near constant features are detected automatically by identifying
scales close to machine precision unless they are precomputed by
the caller and passed with the `constant_mask` kwarg.
Typically for standard scaling, the scales are the standard
deviation while near constant features are better detected on the
computed variances which are closer to machine precision by
construction.
"""
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if constant_mask is None:
# Detect near constant values to avoid dividing by a very small
# value that could lead to suprising results and numerical
# stability issues.
constant_mask = scale < 10 * np.finfo(scale.dtype).eps
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[constant_mask] = 1.0
return scale
@_deprecate_positional_args
def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis.
    Center to the mean and component wise scale to unit variance.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to center and scale.
    axis : int, default=0
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : bool, default=True
        If True, center the data before scaling.
    with_std : bool, default=True
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : bool, default=True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSC matrix and if axis is 1).
    Returns
    -------
    X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The transformed data.
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSC matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSC matrix.
    NaNs are treated as missing values: disregarded to compute the statistics,
    and maintained during the data transformation.
    We use a biased estimator for the standard deviation, equivalent to
    `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
    affect model performance.
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    .. warning:: Risk of data leak
        Do not use :func:`~sklearn.preprocessing.scale` unless you know
        what you are doing. A common mistake is to apply it to the entire data
        *before* splitting into training and test sets. This will bias the
        model evaluation because information would have leaked from the test
        set to the training set.
        In general, we recommend using
        :class:`~sklearn.preprocessing.StandardScaler` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`.
    See Also
    --------
    StandardScaler : Performs scaling to unit variance using the Transformer
        API (e.g. as part of a preprocessing
        :class:`~sklearn.pipeline.Pipeline`).
    """  # noqa
    X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
                    estimator='the scale function', dtype=FLOAT_DTYPES,
                    force_all_finite='allow-nan')
    if sparse.issparse(X):
        # Sparse input: only per-feature (axis=0) variance scaling is
        # supported; centering would densify the matrix.
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        if with_std:
            _, var = mean_variance_axis(X, axis=0)
            var = _handle_zeros_in_scale(var, copy=False)
            inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        # nan-aware statistics: NaNs are ignored when computing mean/std
        if with_mean:
            mean_ = np.nanmean(X, axis)
        if with_std:
            scale_ = np.nanstd(X, axis)
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = np.nanmean(Xr, axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            scale_ = _handle_zeros_in_scale(scale_, copy=False)
            Xr /= scale_
            if with_mean:
                mean_2 = np.nanmean(Xr, axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
                # if mean_1 was close to zero. The problem is thus essentially
                # due to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(TransformerMixin, BaseEstimator):
    """Scale each feature to a given range by an affine transformation.

    Each feature is independently shifted and rescaled so that, on the
    training set, it lies inside ``feature_range`` (by default ``[0, 1]``).

    The transformation applied is::

        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min

    where ``min, max = feature_range``.

    This is a common alternative to zero-mean / unit-variance scaling.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    feature_range : tuple (min, max), default=(0, 1)
        Target interval for the transformed data.

    copy : bool, default=True
        If False, try to scale in place instead of returning a copy
        (only possible when the input is already a numpy array).

    clip : bool, default=False
        If True, clip transformed values of held-out data to the
        provided `feature range`.

        .. versionadded:: 0.24

    Attributes
    ----------
    min_ : ndarray of shape (n_features,)
        Per-feature offset. Equivalent to
        ``min - X.min(axis=0) * self.scale_``.

    scale_ : ndarray of shape (n_features,)
        Per-feature multiplier. Equivalent to
        ``(max - min) / (X.max(axis=0) - X.min(axis=0))``.

        .. versionadded:: 0.17
           *scale_* attribute.

    data_min_ : ndarray of shape (n_features,)
        Per-feature minimum observed during fitting.

        .. versionadded:: 0.17
           *data_min_*

    data_max_ : ndarray of shape (n_features,)
        Per-feature maximum observed during fitting.

        .. versionadded:: 0.17
           *data_max_*

    data_range_ : ndarray of shape (n_features,)
        Per-feature range ``(data_max_ - data_min_)`` observed during fitting.

        .. versionadded:: 0.17
           *data_range_*

    n_samples_seen_ : int
        Number of samples processed so far. Reset by new calls to ``fit``,
        accumulated across ``partial_fit`` calls.

    Examples
    --------
    >>> from sklearn.preprocessing import MinMaxScaler
    >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
    >>> scaler = MinMaxScaler()
    >>> print(scaler.fit(data))
    MinMaxScaler()
    >>> print(scaler.data_max_)
    [ 1. 18.]
    >>> print(scaler.transform(data))
    [[0.   0.  ]
     [0.25 0.25]
     [0.5  0.5 ]
     [1.   1.  ]]
    >>> print(scaler.transform([[2, 2]]))
    [[1.5 0. ]]

    See Also
    --------
    minmax_scale : Equivalent function without the estimator API.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    @_deprecate_positional_args
    def __init__(self, feature_range=(0, 1), *, copy=True, clip=False):
        self.feature_range = feature_range
        self.copy = copy
        self.clip = clip

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # All fitted attributes are created together in partial_fit, so
        # testing for one of them is sufficient.
        if not hasattr(self, 'scale_'):
            return
        for attr in ('scale_', 'min_', 'n_samples_seen_',
                     'data_min_', 'data_max_', 'data_range_'):
            delattr(self, attr)

    def fit(self, X, y=None):
        """Compute the per-feature minimum and maximum for later scaling.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data from which the per-feature extrema are computed.
        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted scaler.
        """
        # Fitting from scratch: drop any state accumulated by partial_fit.
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of min and max on X for later scaling.

        All of X is processed as a single batch. This is intended for cases
        when :meth:`fit` is not feasible due to very large number of
        `n_samples` or because X is read from a continuous stream.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data used to update the running per-feature extrema.
        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted scaler.
        """
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))

        if sparse.issparse(X):
            raise TypeError("MinMaxScaler does not support sparse input. "
                            "Consider using MaxAbsScaler instead.")

        first_pass = not hasattr(self, 'n_samples_seen_')
        X = self._validate_data(X, reset=first_pass,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite="allow-nan")

        batch_min = np.nanmin(X, axis=0)
        batch_max = np.nanmax(X, axis=0)

        if first_pass:
            self.n_samples_seen_ = X.shape[0]
        else:
            # Fold this batch into the running per-feature extrema.
            batch_min = np.minimum(self.data_min_, batch_min)
            batch_max = np.maximum(self.data_max_, batch_max)
            self.n_samples_seen_ += X.shape[0]

        batch_range = batch_max - batch_min
        lo, hi = feature_range
        # Constant features (zero range) get a scale of 1 so they map to `lo`.
        self.scale_ = (hi - lo) / _handle_zeros_in_scale(batch_range,
                                                         copy=True)
        self.min_ = lo - batch_min * self.scale_
        self.data_min_ = batch_min
        self.data_max_ = batch_max
        self.data_range_ = batch_range
        return self

    def transform(self, X):
        """Scale features of X according to feature_range.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Input data that will be transformed.

        Returns
        -------
        Xt : ndarray of shape (n_samples, n_features)
            Transformed data.
        """
        check_is_fitted(self)

        X = self._validate_data(X, copy=self.copy, dtype=FLOAT_DTYPES,
                                force_all_finite="allow-nan", reset=False)

        # Affine map, applied in place: X * scale_ + min_.
        X *= self.scale_
        X += self.min_
        if self.clip:
            X.clip(self.feature_range[0], self.feature_range[1], out=X)
        return X

    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Input data that will be transformed. It cannot be sparse.

        Returns
        -------
        Xt : ndarray of shape (n_samples, n_features)
            Transformed data.
        """
        check_is_fitted(self)

        X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
                        force_all_finite="allow-nan")

        # Inverse of the affine map, applied in place.
        X -= self.min_
        X /= self.scale_
        return X

    def _more_tags(self):
        return dict(allow_nan=True)
@_deprecate_positional_args
def minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True):
    """Scale each feature (or sample) of X to a given range.

    Each feature is shifted and rescaled independently so that it lies
    inside ``feature_range`` (by default ``[0, 1]``).

    The transformation is given by (when ``axis=0``)::

        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min

    where ``min, max = feature_range``, or equivalently::

        X_scaled = scale * X + min - X.min(axis=0) * scale

    with ``scale = (max - min) / (X.max(axis=0) - X.min(axis=0))``.

    This is a common alternative to zero-mean / unit-variance scaling.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    .. versionadded:: 0.17
       *minmax_scale* function interface
       to :class:`~sklearn.preprocessing.MinMaxScaler`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data.

    feature_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data.

    axis : int, default=0
        Axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.

    copy : bool, default=True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Returns
    -------
    X_tr : ndarray of shape (n_samples, n_features)
        The transformed data.

    .. warning:: Risk of data leak

        Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know
        what you are doing. A common mistake is to apply it to the entire data
        *before* splitting into training and test sets. This will bias the
        model evaluation because information would have leaked from the test
        set to the training set.
        In general, we recommend using
        :class:`~sklearn.preprocessing.MinMaxScaler` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`.

    See Also
    --------
    MinMaxScaler : Performs scaling to a given range using the Transformer
        API (e.g. as part of a preprocessing
        :class:`~sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """  # noqa
    # Unlike the estimator, this function accepts 1d input. The copy, when
    # requested, is made inside the scaler object, hence copy=False here.
    X = check_array(X, copy=False, ensure_2d=False,
                    dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(-1, 1)

    scaler = MinMaxScaler(feature_range=feature_range, copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        # Scale samples instead of features by working on the transpose.
        X = scaler.fit_transform(X.T).T

    if was_1d:
        X = X.ravel()
    return X
class StandardScaler(TransformerMixin, BaseEstimator):
    """Standardize features by removing the mean and scaling to unit variance
    The standard score of a sample `x` is calculated as:
        z = (x - u) / s
    where `u` is the mean of the training samples or zero if `with_mean=False`,
    and `s` is the standard deviation of the training samples or one if
    `with_std=False`.
    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using
    :meth:`transform`.
    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).
    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.
    This scaler can also be applied to sparse CSR or CSC matrices by passing
    `with_mean=False` to avoid breaking the sparsity structure of the data.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    copy : bool, default=True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.
    with_mean : bool, default=True
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_std : bool, default=True
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    Attributes
    ----------
    scale_ : ndarray of shape (n_features,) or None
        Per feature relative scaling of the data to achieve zero mean and unit
        variance. Generally this is calculated using `np.sqrt(var_)`. If a
        variance is zero, we can't achieve unit variance, and the data is left
        as-is, giving a scaling factor of 1. `scale_` is equal to `None`
        when `with_std=False`.
        .. versionadded:: 0.17
           *scale_*
    mean_ : ndarray of shape (n_features,) or None
        The mean value for each feature in the training set.
        Equal to ``None`` when ``with_mean=False``.
    var_ : ndarray of shape (n_features,) or None
        The variance for each feature in the training set. Used to compute
        `scale_`. Equal to ``None`` when ``with_std=False``.
    n_samples_seen_ : int or ndarray of shape (n_features,)
        The number of samples processed by the estimator for each feature.
        If there are no missing samples, the ``n_samples_seen`` will be an
        integer, otherwise it will be an array of dtype int. If
        `sample_weights` are used it will be a float (if no missing data)
        or an array of dtype float that sums the weights seen so far.
        Will be reset on new calls to fit, but increments across
        ``partial_fit`` calls.
    Examples
    --------
    >>> from sklearn.preprocessing import StandardScaler
    >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
    >>> scaler = StandardScaler()
    >>> print(scaler.fit(data))
    StandardScaler()
    >>> print(scaler.mean_)
    [0.5 0.5]
    >>> print(scaler.transform(data))
    [[-1. -1.]
     [-1. -1.]
     [ 1.  1.]
     [ 1.  1.]]
    >>> print(scaler.transform([[2, 2]]))
    [[3. 3.]]
    See Also
    --------
    scale : Equivalent function without the estimator API.
    :class:`~sklearn.decomposition.PCA` : Further removes the linear
        correlation across features with 'whiten=True'.
    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.
    We use a biased estimator for the standard deviation, equivalent to
    `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
    affect model performance.
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """  # noqa
    @_deprecate_positional_args
    def __init__(self, *, copy=True, with_mean=True, with_std=True):
        # Parameters are stored unmodified; statistics are computed in fit.
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy
    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.
        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.mean_
            del self.var_
    def fit(self, X, y=None, sample_weight=None):
        """Compute the mean and std to be used for later scaling.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : None
            Ignored.
        sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample.
            .. versionadded:: 0.24
               parameter *sample_weight* support to StandardScaler.
        Returns
        -------
        self : object
            Fitted scaler.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y, sample_weight)
    def partial_fit(self, X, y=None, sample_weight=None):
        """
        Online computation of mean and std on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when :meth:`fit` is not feasible due to very large number of
        `n_samples` or because X is read from a continuous stream.
        The algorithm for incremental mean and std is given in Equation 1.5a,b
        in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
        for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247:
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y : None
            Ignored.
        sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample.
            .. versionadded:: 0.24
               parameter *sample_weight* support to StandardScaler.
        Returns
        -------
        self : object
            Fitted scaler.
        """
        first_call = not hasattr(self, "n_samples_seen_")
        X = self._validate_data(X, accept_sparse=('csr', 'csc'),
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan', reset=first_call)
        n_features = X.shape[1]
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X,
                                                 dtype=X.dtype)
        # Even in the case of `with_mean=False`, we update the mean anyway
        # This is needed for the incremental computation of the var
        # See incr_mean_variance_axis and _incremental_mean_variance_axis
        # if n_samples_seen_ is an integer (i.e. no missing values), we need to
        # transform it to a NumPy array of shape (n_features,) required by
        # incr_mean_variance_axis and _incremental_variance_axis
        # Unweighted counts are integers; weighted "counts" are sums of
        # float weights, hence the dtype switch below.
        dtype = np.int64 if sample_weight is None else X.dtype
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = np.zeros(n_features, dtype=dtype)
        elif np.size(self.n_samples_seen_) == 1:
            self.n_samples_seen_ = np.repeat(
                self.n_samples_seen_, X.shape[1])
            self.n_samples_seen_ = \
                self.n_samples_seen_.astype(dtype, copy=False)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            sparse_constructor = (sparse.csr_matrix
                                  if X.format == 'csr' else sparse.csc_matrix)
            if self.with_std:
                # First pass
                if not hasattr(self, 'scale_'):
                    self.mean_, self.var_, self.n_samples_seen_ = \
                        mean_variance_axis(X, axis=0, weights=sample_weight,
                                           return_sum_weights=True)
                # Next passes
                else:
                    self.mean_, self.var_, self.n_samples_seen_ = \
                        incr_mean_variance_axis(X, axis=0,
                                                last_mean=self.mean_,
                                                last_var=self.var_,
                                                last_n=self.n_samples_seen_,
                                                weights=sample_weight)
                # We force the mean and variance to float64 for large arrays
                # See https://github.com/scikit-learn/scikit-learn/pull/12338
                self.mean_ = self.mean_.astype(np.float64, copy=False)
                self.var_ = self.var_.astype(np.float64, copy=False)
            else:
                self.mean_ = None  # as with_mean must be False for sparse
                self.var_ = None
                # Count non-NaN samples per feature: total weight minus the
                # weight carried by NaN entries (found via a sparse mask of
                # the stored values).
                weights = _check_sample_weight(sample_weight, X)
                sum_weights_nan = weights @ sparse_constructor(
                    (np.isnan(X.data), X.indices, X.indptr),
                    shape=X.shape)
                self.n_samples_seen_ += (
                    (np.sum(weights) - sum_weights_nan).astype(dtype)
                )
        else:
            # First pass
            if not hasattr(self, 'scale_'):
                self.mean_ = .0
                if self.with_std:
                    self.var_ = .0
                else:
                    self.var_ = None
            if not self.with_mean and not self.with_std:
                # Identity transform: only the per-feature sample counts
                # (excluding NaNs) need to be tracked.
                self.mean_ = None
                self.var_ = None
                self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
            else:
                self.mean_, self.var_, self.n_samples_seen_ = \
                    _incremental_mean_and_var(X, self.mean_, self.var_,
                                              self.n_samples_seen_,
                                              sample_weight=sample_weight)
        # for backward-compatibility, reduce n_samples_seen_ to an integer
        # if the number of samples is the same for each feature (i.e. no
        # missing values)
        if np.ptp(self.n_samples_seen_) == 0:
            self.n_samples_seen_ = self.n_samples_seen_[0]
        if self.with_std:
            # Extract the list of near constant features on the raw variances,
            # before taking the square root.
            constant_mask = _is_constant_feature(
                self.var_, self.mean_, self.n_samples_seen_)
            self.scale_ = _handle_zeros_in_scale(
                np.sqrt(self.var_), copy=False, constant_mask=constant_mask)
        else:
            self.scale_ = None
        return self
    def transform(self, X, copy=None):
        """Perform standardization by centering and scaling
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data used to scale along the features axis.
        copy : bool, default=None
            Copy the input X or not.
        Returns
        -------
        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
            Transformed array.
        """
        check_is_fitted(self)
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=False,
                                accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            # scale_ is None when with_std=False (see partial_fit); in that
            # case sparse data passes through unchanged.
            if self.scale_ is not None:
                inplace_column_scale(X, 1 / self.scale_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.scale_
        return X
    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data used to scale along the features axis.
        copy : bool, default=None
            Copy the input X or not.
        Returns
        -------
        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
            Transformed array.
        """
        check_is_fitted(self)
        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
                        dtype=FLOAT_DTYPES, force_all_finite="allow-nan")
        if sparse.issparse(X):
            if self.with_mean:
                # NOTE(review): the message is missing a period after
                # "instead"; left unchanged to preserve behavior.
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if self.scale_ is not None:
                inplace_column_scale(X, self.scale_)
        else:
            # Undo the forward transform in reverse order: rescale first,
            # then add back the mean.
            if self.with_std:
                X *= self.scale_
            if self.with_mean:
                X += self.mean_
        return X
    def _more_tags(self):
        return {'allow_nan': True,
                'preserves_dtype': [np.float64, np.float32]}
class MaxAbsScaler(TransformerMixin, BaseEstimator):
    """Scale each feature by its maximum absolute value.

    Each feature is rescaled independently so that the largest absolute
    value observed in the training set becomes 1.0. The data is neither
    shifted nor centered, so sparsity is preserved.

    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : bool, default=True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray of shape (n_features,)
        Per-feature divisor used for scaling.

        .. versionadded:: 0.17
           *scale_* attribute.

    max_abs_ : ndarray of shape (n_features,)
        Per-feature maximum absolute value observed during fitting.

    n_samples_seen_ : int
        Number of samples processed so far. Reset by new calls to ``fit``,
        accumulated across ``partial_fit`` calls.

    Examples
    --------
    >>> from sklearn.preprocessing import MaxAbsScaler
    >>> X = [[ 1., -1.,  2.],
    ...      [ 2.,  0.,  0.],
    ...      [ 0.,  1., -1.]]
    >>> transformer = MaxAbsScaler().fit(X)
    >>> transformer
    MaxAbsScaler()
    >>> transformer.transform(X)
    array([[ 0.5, -1. ,  1. ],
           [ 1. ,  0. ,  0. ],
           [ 0. ,  1. , -0.5]])

    See Also
    --------
    maxabs_scale : Equivalent function without the estimator API.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    @_deprecate_positional_args
    def __init__(self, *, copy=True):
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # All fitted attributes are created together in partial_fit, so
        # testing for one of them is sufficient.
        if not hasattr(self, 'scale_'):
            return
        for attr in ('scale_', 'n_samples_seen_', 'max_abs_'):
            delattr(self, attr)

    def fit(self, X, y=None):
        """Compute the per-feature maximum absolute value for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data from which the per-feature maxima are computed.
        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted scaler.
        """
        # Fitting from scratch: drop any state accumulated by partial_fit.
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """
        Online computation of max absolute value of X for later scaling.

        All of X is processed as a single batch. This is intended for cases
        when :meth:`fit` is not feasible due to very large number of
        `n_samples` or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data used to update the running per-feature maxima.
        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted scaler.
        """
        first_pass = not hasattr(self, 'n_samples_seen_')
        X = self._validate_data(X, reset=first_pass,
                                accept_sparse=('csr', 'csc'), estimator=self,
                                dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')

        if sparse.issparse(X):
            # Take NaN-aware per-column min and max, then combine their
            # magnitudes to get the per-feature absolute maximum.
            mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
            batch_max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            batch_max_abs = np.nanmax(np.abs(X), axis=0)

        if first_pass:
            self.n_samples_seen_ = X.shape[0]
        else:
            # Fold this batch into the running per-feature maxima.
            batch_max_abs = np.maximum(self.max_abs_, batch_max_abs)
            self.n_samples_seen_ += X.shape[0]

        self.max_abs_ = batch_max_abs
        # Features that are identically zero get a scale of 1 so division
        # leaves them unchanged.
        self.scale_ = _handle_zeros_in_scale(batch_max_abs, copy=True)
        return self

    def transform(self, X):
        """Scale the data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data that should be scaled.

        Returns
        -------
        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
            Transformed array.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse=('csr', 'csc'),
                                copy=self.copy, reset=False,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')

        if sparse.issparse(X):
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data that should be transformed back.

        Returns
        -------
        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
            Transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES,
                        force_all_finite='allow-nan')

        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X

    def _more_tags(self):
        return dict(allow_nan=True)
@_deprecate_positional_args
def maxabs_scale(X, *, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    Each feature is rescaled independently so that its maximal absolute
    value in the training set becomes 1.0. No shifting or centering is
    performed.

    This scaler can also be applied to sparse CSR or CSC matrices.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data.

    axis : int, default=0
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.

    copy : bool, default=True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Returns
    -------
    X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The transformed data.

    .. warning:: Risk of data leak

        Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know
        what you are doing. A common mistake is to apply it to the entire data
        *before* splitting into training and test sets. This will bias the
        model evaluation because information would have leaked from the test
        set to the training set.
        In general, we recommend using
        :class:`~sklearn.preprocessing.MaxAbsScaler` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`.

    See Also
    --------
    MaxAbsScaler : Performs scaling to the [-1, 1] range using
        the Transformer API (e.g. as part of a preprocessing
        :class:`~sklearn.pipeline.Pipeline`).

    Notes
    -----
    NaNs are treated as missing values: disregarded to compute the statistics,
    and maintained during the data transformation.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """  # noqa
    # Unlike the estimator, this function accepts 1d input. The copy, when
    # requested, is made inside the scaler object, hence copy=False here.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES,
                    force_all_finite='allow-nan')
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(-1, 1)

    scaler = MaxAbsScaler(copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        # Scale samples instead of features by working on the transpose.
        X = scaler.fit_transform(X.T).T

    if was_1d:
        X = X.ravel()
    return X
class RobustScaler(TransformerMixin, BaseEstimator):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature by
computing the relevant statistics on the samples in the training
set. Median and interquartile range are then stored to be used on
later data using the ``transform`` method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : bool, default=True
If True, center the data before scaling.
This will cause ``transform`` to raise an exception when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : bool, default=True
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, \
default=(25.0, 75.0), == (1st quantile, 3rd quantile), == IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : bool, default=True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
unit_variance : bool, default=False
If True, scale data so that normally distributed features have a
variance of 1. In general, if the difference between the x-values of
``q_max`` and ``q_min`` for a standard normal distribution is greater
than 1, the dataset will be scaled down. If less than 1, the dataset
will be scaled up.
.. versionadded:: 0.24
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
Examples
--------
>>> from sklearn.preprocessing import RobustScaler
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> transformer = RobustScaler().fit(X)
>>> transformer
RobustScaler()
>>> transformer.transform(X)
array([[ 0. , -2. , 0. ],
[-1. , 0. , 0.4],
[ 1. , 0. , -1.6]])
See Also
--------
robust_scale : Equivalent function without the estimator API.
:class:`~sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median
https://en.wikipedia.org/wiki/Interquartile_range
"""
@_deprecate_positional_args
def __init__(self, *, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True, unit_variance=False):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.unit_variance = unit_variance
self.copy = copy
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the median and quantiles
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
# at fit, convert sparse matrices to csc for optimized computation of
# the quantiles
X = self._validate_data(X, accept_sparse='csc', estimator=self,
dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
if self.with_centering:
if sparse.issparse(X):
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
self.center_ = np.nanmedian(X, axis=0)
else:
self.center_ = None
if self.with_scaling:
quantiles = []
for feature_idx in range(X.shape[1]):
if sparse.issparse(X):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
else:
column_data = X[:, feature_idx]
quantiles.append(np.nanpercentile(column_data,
self.quantile_range))
quantiles = np.transpose(quantiles)
self.scale_ = quantiles[1] - quantiles[0]
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
if self.unit_variance:
adjust = (stats.norm.ppf(q_max / 100.0) -
stats.norm.ppf(q_min / 100.0))
self.scale_ = self.scale_ / adjust
else:
self.scale_ = None
return self
def transform(self, X):
"""Center and scale the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the specified axis.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=('csr', 'csc'),
copy=self.copy, estimator=self,
dtype=FLOAT_DTYPES, reset=False,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The rescaled data to be transformed back.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def _more_tags(self):
return {'allow_nan': True}
@_deprecate_positional_args
def robust_scale(X, *, axis=0, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True, unit_variance=False):
    """Standardize a dataset along any axis.

    Center to the median and component wise scale
    according to the interquartile range.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_sample, n_features)
        The data to center and scale.

    axis : int, default=0
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.

    with_centering : bool, default=True
        If True, center the data before scaling.

    with_scaling : bool, default=True
        If True, scale the data to the given quantile range
        (by default the interquartile range).

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        default=(25.0, 75.0), == (1st quantile, 3rd quantile), == IQR
        Quantile range used to calculate ``scale_``.

        .. versionadded:: 0.18

    copy : bool, default=True
        set to False to perform inplace scaling and avoid a copy
        (if the input is already a numpy array or a scipy.sparse
        CSR matrix and the requested operations allow it).

    unit_variance : bool, default=False
        If True, scale data so that normally distributed features have a
        variance of 1. In general, if the difference between the x-values of
        ``q_max`` and ``q_min`` for a standard normal distribution is greater
        than 1, the dataset will be scaled down. If less than 1, the dataset
        will be scaled up.

        .. versionadded:: 0.24

    Returns
    -------
    X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The transformed data.

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    .. warning:: Risk of data leak

        Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know
        what you are doing. A common mistake is to apply it to the entire data
        *before* splitting into training and test sets. This will bias the
        model evaluation because information would have leaked from the test
        set to the training set.
        In general, we recommend using
        :class:`~sklearn.preprocessing.RobustScaler` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`.

    See Also
    --------
    RobustScaler : Performs centering and scaling using the Transformer API
        (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
    """
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES,
                    force_all_finite='allow-nan')
    original_ndim = X.ndim

    if original_ndim == 1:
        X = X.reshape(X.shape[0], 1)

    s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
                     quantile_range=quantile_range,
                     unit_variance=unit_variance, copy=copy)
    if axis == 0:
        X = s.fit_transform(X)
    else:
        # Scale each sample: transpose, scale the features of X.T, undo.
        X = s.fit_transform(X.T).T

    if original_ndim == 1:
        X = X.ravel()

    return X
@_deprecate_positional_args
def normalize(X, norm='l2', *, axis=1, copy=True, return_norm=False):
    """Scale input vectors individually to unit norm (vector length).

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    norm : {'l1', 'l2', 'max'}, default='l2'
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).

    axis : {0, 1}, default=1
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.

    copy : bool, default=True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    return_norm : bool, default=False
        whether to return the computed norms

    Returns
    -------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        Normalized input X.

    norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )
        An array of norms along given axis for X.
        When X is sparse, a NotImplementedError will be raised
        for norm 'l1' or 'l2'.

    See Also
    --------
    Normalizer : Performs normalization using the Transformer API
        (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)

    # Choose the sparse layout whose stored values are contiguous along the
    # axis being normalized.
    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    X = check_array(X, accept_sparse=sparse_format, copy=copy,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    # Feature-wise normalization (axis=0) is implemented as sample-wise
    # normalization of the transpose; the transpose is undone below.
    if axis == 0:
        X = X.T

    if sparse.issparse(X):
        if return_norm and norm in ('l1', 'l2'):
            raise NotImplementedError("return_norm=True is not implemented "
                                      "for sparse matrices with norm 'l1' "
                                      "or norm 'l2'")
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        elif norm == 'max':
            # Row-wise max of |X|: zeros are implicit, so the max of the
            # absolute value is max(|row minimum|, row maximum).
            mins, maxes = min_max_axis(X, 1)
            norms = np.maximum(abs(mins), maxes)
            # Repeat each row's norm once per stored value of that row, so
            # the division can be done directly on X.data.
            norms_elementwise = norms.repeat(np.diff(X.indptr))
            mask = norms_elementwise != 0
            X.data[mask] /= norms_elementwise[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        elif norm == 'max':
            norms = np.max(abs(X), axis=1)
        # Rows with zero norm are left unchanged (their norm is mapped to 1).
        norms = _handle_zeros_in_scale(norms, copy=False)
        X /= norms[:, np.newaxis]

    # Undo the transpose performed for axis=0.
    if axis == 0:
        X = X.T

    if return_norm:
        return X, norms
    else:
        return X
class Normalizer(TransformerMixin, BaseEstimator):
    """Normalize samples individually to unit norm.

    Each sample (i.e. each row of the data matrix) with at least one
    non-zero component is rescaled independently of the other samples so
    that its norm (l1, l2 or inf) equals one.

    Both dense numpy arrays and scipy.sparse matrices are supported (use
    CSR format to avoid the burden of a copy / conversion).

    Scaling inputs to unit norm is a common operation for text
    classification or clustering: for instance, the dot product of two
    l2-normalized TF-IDF vectors is the cosine similarity of the vectors,
    the base similarity metric of the Vector Space Model commonly used by
    the Information Retrieval community.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : {'l1', 'l2', 'max'}, default='l2'
        The norm used to rescale each non-zero sample. With norm='max',
        values are divided by the maximum of the absolute values.

    copy : bool, default=True
        Set to False to attempt inplace row normalization and avoid a
        copy (possible when the input is already a numpy array or a
        scipy.sparse CSR matrix).

    Examples
    --------
    >>> from sklearn.preprocessing import Normalizer
    >>> X = [[4, 1, 2, 2],
    ...      [1, 3, 9, 3],
    ...      [5, 7, 5, 1]]
    >>> transformer = Normalizer().fit(X)  # fit does nothing.
    >>> transformer
    Normalizer()
    >>> transformer.transform(X)
    array([[0.8, 0.2, 0.4, 0.4],
           [0.1, 0.3, 0.9, 0.3],
           [0.5, 0.7, 0.5, 0.1]])

    Notes
    -----
    This estimator is stateless (besides constructor parameters): ``fit``
    performs no computation but is kept so the class can be used in a
    pipeline.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See Also
    --------
    normalize : Equivalent function without the estimator API.
    """

    @_deprecate_positional_args
    def __init__(self, norm='l2', *, copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method only validates ``X``; it exists so the estimator can
        participate in pipelines.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data to estimate the normalization parameters.

        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        self._validate_data(X, accept_sparse='csr')
        return self

    def transform(self, X, copy=None):
        """Scale each non-zero row of X to unit norm.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data to normalize, row by row. scipy.sparse matrices should
            be in CSR format to avoid an un-necessary copy.

        copy : bool, default=None
            Copy the input X or not; defaults to ``self.copy``.

        Returns
        -------
        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
            Transformed array.
        """
        if copy is None:
            copy = self.copy
        X = self._validate_data(X, accept_sparse='csr', reset=False)
        return normalize(X, norm=self.norm, axis=1, copy=copy)

    def _more_tags(self):
        # No state is learned during fit.
        return {'stateless': True}
@_deprecate_positional_args
def binarize(X, *, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix.

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.

    threshold : float, default=0.0
        Feature values below or equal to this are replaced by 0, above it
        by 1. Threshold may not be less than 0 for operations on sparse
        matrices.

    copy : bool, default=True
        Set to False to perform inplace binarization and avoid a copy
        (possible when the input is already a numpy array or a
        scipy.sparse CSR / CSC matrix).

    Returns
    -------
    X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The transformed data.

    See Also
    --------
    Binarizer : Performs binarization using the Transformer API
        (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)

    if sparse.issparse(X):
        # A negative threshold would force implicit zeros to become ones,
        # densifying the matrix, so it is disallowed.
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        above = X.data > threshold
        X.data[above] = 1
        X.data[~above] = 0
        # Values that fell to 0 should become implicit zeros again.
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[~above] = 0

    return X
class Binarizer(TransformerMixin, BaseEstimator):
    """Binarize data (set feature values to 0 or 1) according to a threshold.

    Values strictly greater than the threshold map to 1; values less than
    or equal to it map to 0. With the default threshold of 0, only
    positive values map to 1.

    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences, for instance.
    It can also be used as a pre-processing step for estimators that
    consider boolean random variables (e.g. modelled using the Bernoulli
    distribution in a Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, default=0.0
        Feature values below or equal to this are replaced by 0, above it
        by 1. Threshold may not be less than 0 for operations on sparse
        matrices.

    copy : bool, default=True
        Set to False to perform inplace binarization and avoid a copy
        (possible when the input is already a numpy array or a
        scipy.sparse CSR matrix).

    Examples
    --------
    >>> from sklearn.preprocessing import Binarizer
    >>> X = [[ 1., -1.,  2.],
    ...      [ 2.,  0.,  0.],
    ...      [ 0.,  1., -1.]]
    >>> transformer = Binarizer().fit(X)  # fit does nothing.
    >>> transformer
    Binarizer()
    >>> transformer.transform(X)
    array([[1., 0., 1.],
           [1., 0., 0.],
           [0., 1., 0.]])

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters): ``fit``
    performs no computation but is kept so the class can be used in a
    pipeline.

    See Also
    --------
    binarize : Equivalent function without the estimator API.
    """

    @_deprecate_positional_args
    def __init__(self, *, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method only validates ``X``; it exists so the estimator can
        participate in pipelines.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data.

        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        self._validate_data(X, accept_sparse='csr')
        return self

    def transform(self, X, copy=None):
        """Binarize each element of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.

        copy : bool
            Copy the input X or not; defaults to ``self.copy``.

        Returns
        -------
        X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
            Transformed array.
        """
        if copy is None:
            copy = self.copy
        # The copy is taken here so binarize() can then work fully in
        # place.  TODO: refactor, binarize also calls check_array.
        X = self._validate_data(X, accept_sparse=['csr', 'csc'], copy=copy,
                                reset=False)
        return binarize(X, threshold=self.threshold, copy=False)

    def _more_tags(self):
        # No state is learned during fit.
        return {'stateless': True}
class KernelCenterer(TransformerMixin, BaseEstimator):
    r"""Center an arbitrary kernel matrix :math:`K`.

    Let define a kernel :math:`K` such that:

    .. math::
        K(X, Y) = \phi(X) . \phi(Y)^{T}

    :math:`\phi(X)` is a function mapping of rows of :math:`X` to a
    Hilbert space and :math:`K` is of shape `(n_samples, n_samples)`.

    This class allows to compute :math:`\tilde{K}(X, Y)` such that:

    .. math::
        \tilde{K(X, Y)} = \tilde{\phi}(X) . \tilde{\phi}(Y)^{T}

    :math:`\tilde{\phi}(X)` is the centered mapped data in the Hilbert
    space.

    `KernelCenterer` centers the features without explicitly computing the
    mapping :math:`\phi(\cdot)`. Working with centered kernels is sometime
    expected when dealing with algebra computation such as eigendecomposition
    for :class:`~sklearn.decomposition.KernelPCA` for instance.

    Read more in the :ref:`User Guide <kernel_centering>`.

    Attributes
    ----------
    K_fit_rows_ : ndarray of shape (n_samples,)
        Average of each column of kernel matrix.

    K_fit_all_ : float
        Average of kernel matrix.

    References
    ----------
    .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
       "Nonlinear component analysis as a kernel eigenvalue problem."
       Neural computation 10.5 (1998): 1299-1319.
       <https://www.mlpack.org/papers/kpca.pdf>`_

    Examples
    --------
    >>> from sklearn.preprocessing import KernelCenterer
    >>> from sklearn.metrics.pairwise import pairwise_kernels
    >>> X = [[ 1., -2.,  2.],
    ...      [ -2.,  1.,  3.],
    ...      [ 4.,  1., -2.]]
    >>> K = pairwise_kernels(X, metric='linear')
    >>> K
    array([[  9.,   2.,  -2.],
           [  2.,  14., -13.],
           [ -2., -13.,  21.]])
    >>> transformer = KernelCenterer().fit(K)
    >>> transformer
    KernelCenterer()
    >>> transformer.transform(K)
    array([[  5.,   0.,  -5.],
           [  0.,  14., -14.],
           [ -5., -14.,  19.]])
    """

    def __init__(self):
        # Needed for backported inspect.signature compatibility with PyPy
        pass

    def fit(self, K, y=None):
        """Fit KernelCenterer.

        Parameters
        ----------
        K : ndarray of shape (n_samples, n_samples)
            Kernel matrix.

        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        K = self._validate_data(K, dtype=FLOAT_DTYPES)

        if K.shape[0] != K.shape[1]:
            raise ValueError("Kernel matrix must be a square matrix."
                             " Input is a {}x{} matrix."
                             .format(K.shape[0], K.shape[1]))

        n = K.shape[0]
        # Column means and the grand mean of the training kernel.
        self.K_fit_rows_ = np.sum(K, axis=0) / n
        self.K_fit_all_ = self.K_fit_rows_.sum() / n
        return self

    def transform(self, K, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : ndarray of shape (n_samples1, n_samples2)
            Kernel matrix.

        copy : bool, default=True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : ndarray of shape (n_samples1, n_samples2)
        """
        check_is_fitted(self)

        K = self._validate_data(K, copy=copy, dtype=FLOAT_DTYPES, reset=False)

        n_fit = self.K_fit_rows_.shape[0]
        pred_col_means = (np.sum(K, axis=1) / n_fit)[:, np.newaxis]

        # Double-centering: subtract training column means and prediction
        # row means, then add back the training grand mean.
        K -= self.K_fit_rows_
        K -= pred_col_means
        K += self.K_fit_all_
        return K

    def _more_tags(self):
        # Input is a kernel (pairwise) matrix, not a data matrix.
        return {'pairwise': True}

    # TODO: Remove in 1.1
    # mypy error: Decorated property not supported
    @deprecated("Attribute _pairwise was deprecated in "  # type: ignore
                "version 0.24 and will be removed in 1.1.")
    @property
    def _pairwise(self):
        return True
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Data.

    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[1., 0., 1.],
           [1., 1., 0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    out_shape = (n_samples, n_features + 1)

    if not sparse.issparse(X):
        # Dense: simply prepend a constant column.
        return np.hstack((np.full((n_samples, 1), value), X))

    if sparse.isspmatrix_coo(X):
        # The dummy feature occupies column 0 in every row, so the existing
        # columns shift right by one.
        new_col = np.concatenate((np.zeros(n_samples), X.col + 1))
        new_row = np.concatenate((np.arange(n_samples), X.row))
        new_data = np.concatenate((np.full(n_samples, value), X.data))
        return sparse.coo_matrix((new_data, (new_row, new_col)), out_shape)

    if sparse.isspmatrix_csc(X):
        # Column 0 holds n_samples dummy entries, so every index pointer
        # moves up by n_samples (and indptr[0] must stay 0).
        new_indptr = np.concatenate((np.array([0]), X.indptr + n_samples))
        new_indices = np.concatenate((np.arange(n_samples), X.indices))
        new_data = np.concatenate((np.full(n_samples, value), X.data))
        return sparse.csc_matrix((new_data, new_indices, new_indptr),
                                 out_shape)

    # Any other sparse format: round-trip through COO, then convert back.
    return X.__class__(add_dummy_feature(X.tocoo(), value))
class QuantileTransformer(TransformerMixin, BaseEstimator):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
.. versionadded:: 0.19
Parameters
----------
n_quantiles : int, default=1000 or n_samples
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : {'uniform', 'normal'}, default='uniform'
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, default=False
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, default=1e5
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling and smoothing
noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`
copy : bool, default=True
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
n_quantiles_ : int
The actual number of quantiles used to discretize the cumulative
distribution function.
quantiles_ : ndarray of shape (n_quantiles, n_features)
The values corresponding the quantiles of reference.
references_ : ndarray of shape (n_quantiles, )
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X)
array([...])
See Also
--------
quantile_transform : Equivalent function without the estimator API.
PowerTransformer : Perform mapping to a normal distribution using a power
transform.
StandardScaler : Perform standardization that is faster, but less robust
to outliers.
RobustScaler : Perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
@_deprecate_positional_args
def __init__(self, *, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
references = self.references_ * 100
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(np.nanpercentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
# Due to floating-point precision error in `np.nanpercentile`,
# make sure that quantiles are monotonically increasing.
# Upstream issue in numpy:
# https://github.com/numpy/numpy/issues/14685
self.quantiles_ = np.maximum.accumulate(self.quantiles_)
    def _sparse_fit(self, X, random_state):
        """Compute percentiles for sparse matrices.

        Parameters
        ----------
        X : sparse matrix of shape (n_samples, n_features)
            The data used to scale along the features axis. The sparse matrix
            needs to be nonnegative. If a sparse matrix is provided,
            it will be converted into a sparse ``csc_matrix``.

        random_state : RandomState instance
            Generator used to subsample a column's non-zero entries when it
            stores more than ``self.subsample`` values.
        """
        n_samples, n_features = X.shape
        # references_ lies in [0, 1]; np.nanpercentile expects [0, 100].
        references = self.references_ * 100

        self.quantiles_ = []
        for feature_idx in range(n_features):
            # Stored (non-zero) values of this column; contiguous because X
            # is CSC.
            column_nnz_data = X.data[X.indptr[feature_idx]:
                                     X.indptr[feature_idx + 1]]
            if len(column_nnz_data) > self.subsample:
                # Subsample the stored values, preserving the ratio of
                # explicit values to implicit zeros.
                column_subsample = (self.subsample * len(column_nnz_data) //
                                    n_samples)
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=column_subsample,
                                           dtype=X.dtype)
                else:
                    # Trailing zeros stand in for the implicit zeros.
                    column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
                column_data[:column_subsample] = random_state.choice(
                    column_nnz_data, size=column_subsample, replace=False)
            else:
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=len(column_nnz_data),
                                           dtype=X.dtype)
                else:
                    # Trailing zeros stand in for the implicit zeros.
                    column_data = np.zeros(shape=n_samples, dtype=X.dtype)
                column_data[:len(column_nnz_data)] = column_nnz_data

            if not column_data.size:
                # if no nnz, an error will be raised for computing the
                # quantiles. Force the quantiles to be zeros.
                self.quantiles_.append([0] * len(references))
            else:
                self.quantiles_.append(
                        np.nanpercentile(column_data, references))
        self.quantiles_ = np.transpose(self.quantiles_)
        # due to floating-point precision error in `np.nanpercentile`,
        # make sure the quantiles are monotonically increasing
        # Upstream issue in numpy:
        # https://github.com/numpy/numpy/issues/14685
        self.quantiles_ = np.maximum.accumulate(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X, in_fit=True, copy=False)
n_samples = X.shape[0]
if self.n_quantiles > n_samples:
warnings.warn("n_quantiles (%s) is greater than the total number "
"of samples (%s). n_quantiles is set to "
"n_samples."
% (self.n_quantiles, n_samples))
self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles_,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
    def _transform_col(self, X_col, quantiles, inverse):
        """Private function to transform a single feature.

        Maps ``X_col`` through the empirical CDF defined by ``quantiles``
        (forward) or through its inverse (``inverse=True``), optionally
        composing with the normal ppf/cdf for a Gaussian output.
        ``X_col`` is modified in place for the finite entries.
        """
        output_distribution = self.output_distribution

        # Forward maps data-space [q_0, q_-1] to probability-space [0, 1];
        # inverse goes the other way.
        if not inverse:
            lower_bound_x = quantiles[0]
            upper_bound_x = quantiles[-1]
            lower_bound_y = 0
            upper_bound_y = 1
        else:
            lower_bound_x = 0
            upper_bound_x = 1
            lower_bound_y = quantiles[0]
            upper_bound_y = quantiles[-1]
            # for inverse transform, match a uniform distribution
            with np.errstate(invalid='ignore'):  # hide NaN comparison warnings
                if output_distribution == 'normal':
                    X_col = stats.norm.cdf(X_col)
                # else output distribution is already a uniform distribution

        # find index for lower and higher bounds
        with np.errstate(invalid='ignore'):  # hide NaN comparison warnings
            if output_distribution == 'normal':
                # Tolerance band needed because the normal cdf/ppf round-trip
                # is not exact at the extremes.
                lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
                                    lower_bound_x)
                upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
                                    upper_bound_x)
            if output_distribution == 'uniform':
                lower_bounds_idx = (X_col == lower_bound_x)
                upper_bounds_idx = (X_col == upper_bound_x)

        # NaNs are passed through untouched; only finite values are mapped.
        isfinite_mask = ~np.isnan(X_col)
        X_col_finite = X_col[isfinite_mask]
        if not inverse:
            # Interpolate in one direction and in the other and take the
            # mean. This is in case of repeated values in the features
            # and hence repeated quantiles
            #
            # If we don't do this, only one extreme of the duplicated is
            # used (the upper when we do ascending, and the
            # lower for descending). We take the mean of these two
            X_col[isfinite_mask] = .5 * (
                np.interp(X_col_finite, quantiles, self.references_)
                - np.interp(-X_col_finite, -quantiles[::-1],
                            -self.references_[::-1]))
        else:
            X_col[isfinite_mask] = np.interp(X_col_finite,
                                             self.references_, quantiles)

        # Pin out-of-range values to the exact output bounds.
        X_col[upper_bounds_idx] = upper_bound_y
        X_col[lower_bounds_idx] = lower_bound_y
        # for forward transform, match the output distribution
        if not inverse:
            with np.errstate(invalid='ignore'):  # hide NaN comparison warnings
                if output_distribution == 'normal':
                    X_col = stats.norm.ppf(X_col)
                    # find the value to clip the data to avoid mapping to
                    # infinity. Clip such that the inverse transform will be
                    # consistent
                    clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1))
                    clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD -
                                                   np.spacing(1)))
                    X_col = np.clip(X_col, clip_min, clip_max)
                # else output distribution is uniform and the ppf is the
                # identity function so we let X_col unchanged

        return X_col
def _check_inputs(self, X, in_fit, accept_sparse_negative=False,
copy=False):
"""Check inputs before fit and transform."""
X = self._validate_data(X, reset=in_fit,
accept_sparse='csc', copy=copy,
dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
# we only accept positive sparse matrix when ignore_implicit_zeros is
# false and that we call fit or transform.
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
if (not accept_sparse_negative and not self.ignore_implicit_zeros
and (sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts'
' non-negative sparse matrices.')
# check the output distribution
if self.output_distribution not in ('normal', 'uniform'):
raise ValueError("'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(
self.output_distribution))
return X
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, default=False
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray of shape (n_samples, n_features)
Projected data.
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx],
X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx],
inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx],
inverse)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(X, in_fit=False, copy=self.copy)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : {ndarray, sparse matrix} of (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(X, in_fit=False, accept_sparse_negative=True,
copy=self.copy)
return self._transform(X, inverse=True)
def _more_tags(self):
return {'allow_nan': True}
@_deprecate_positional_args
def quantile_transform(X, *, axis=0, n_quantiles=1000,
                       output_distribution='uniform',
                       ignore_implicit_zeros=False,
                       subsample=int(1e5),
                       random_state=None,
                       copy=True):
    """Transform features using quantiles information.

    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.

    The transformation is applied on each feature independently. First an
    estimate of the cumulative distribution function of a feature is
    used to map the original values to a uniform distribution. The obtained
    values are then mapped to the desired output distribution using the
    associated quantile function. Features values of new/unseen data that fall
    below or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to transform.

    axis : int, default=0
        Axis used to compute the means and standard deviations along. If 0,
        transform each feature, otherwise (if 1) transform each sample.

    n_quantiles : int, default=1000 or n_samples
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
        If n_quantiles is larger than the number of samples, n_quantiles is set
        to the number of samples as a larger number of quantiles does not give
        a better approximation of the cumulative distribution function
        estimator.

    output_distribution : {'uniform', 'normal'}, default='uniform'
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.

    ignore_implicit_zeros : bool, default=False
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.

    subsample : int, default=1e5
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for subsampling and smoothing
        noise.
        Please see ``subsample`` for more details.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`

    copy : bool, default=True
        Set to False to perform inplace transformation and avoid a copy (if the
        input is already a numpy array). If True, a copy of `X` is transformed,
        leaving the original `X` unchanged

        .. versionchanged:: 0.23
            The default value of `copy` changed from False to True in 0.23.

    Returns
    -------
    Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The transformed data.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import quantile_transform
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
    array([...])

    See Also
    --------
    QuantileTransformer : Performs quantile-based scaling using the
        Transformer API (e.g. as part of a preprocessing
        :class:`~sklearn.pipeline.Pipeline`).
    power_transform : Maps data to a normal distribution using a
        power transformation.
    scale : Performs standardization that is faster, but less robust
        to outliers.
    robust_scale : Performs robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    .. warning:: Risk of data leak

        Do not use :func:`~sklearn.preprocessing.quantile_transform` unless
        you know what you are doing. A common mistake is to apply it
        to the entire data *before* splitting into training and
        test sets. This will bias the model evaluation because
        information would have leaked from the test set to the
        training set.

        In general, we recommend using
        :class:`~sklearn.preprocessing.QuantileTransformer` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking: `pipe = make_pipeline(QuantileTransformer(),
        LogisticRegression())`.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    # Reject an invalid axis up front; constructing the transformer has no
    # side effects, so this is observationally equivalent to checking later.
    if axis not in (0, 1):
        raise ValueError("axis should be either equal to 0 or 1. Got"
                         " axis={}".format(axis))

    transformer = QuantileTransformer(
        n_quantiles=n_quantiles,
        output_distribution=output_distribution,
        subsample=subsample,
        ignore_implicit_zeros=ignore_implicit_zeros,
        random_state=random_state,
        copy=copy)

    if axis == 0:
        return transformer.fit_transform(X)
    # axis == 1: transform samples by transposing in and back out.
    return transformer.fit_transform(X.T).T
class PowerTransformer(TransformerMixin, BaseEstimator):
    """Apply a power transform featurewise to make data more Gaussian-like.

    Power transforms are a family of parametric, monotonic transformations
    that are applied to make data more Gaussian-like. This is useful for
    modeling issues related to heteroscedasticity (non-constant variance),
    or other situations where normality is desired.

    Currently, PowerTransformer supports the Box-Cox transform and the
    Yeo-Johnson transform. The optimal parameter for stabilizing variance and
    minimizing skewness is estimated through maximum likelihood.

    Box-Cox requires input data to be strictly positive, while Yeo-Johnson
    supports both positive or negative data.

    By default, zero-mean, unit-variance normalization is applied to the
    transformed data.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    .. versionadded:: 0.20

    Parameters
    ----------
    method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
        The power transform method. Available methods are:

        - 'yeo-johnson' [1]_, works with positive and negative values
        - 'box-cox' [2]_, only works with strictly positive values

    standardize : bool, default=True
        Set to True to apply zero-mean, unit-variance normalization to the
        transformed output.

    copy : bool, default=True
        Set to False to perform inplace computation during transformation.

    Attributes
    ----------
    lambdas_ : ndarray of float of shape (n_features,)
        The parameters of the power transformation for the selected features.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import PowerTransformer
    >>> pt = PowerTransformer()
    >>> data = [[1, 2], [3, 2], [4, 5]]
    >>> print(pt.fit(data))
    PowerTransformer()
    >>> print(pt.lambdas_)
    [ 1.386... -3.100...]
    >>> print(pt.transform(data))
    [[-1.316... -0.707...]
     [ 0.209... -0.707...]
     [ 1.106...  1.414...]]

    See Also
    --------
    power_transform : Equivalent function without the estimator API.

    QuantileTransformer : Maps data to a standard normal distribution with
        the parameter `output_distribution='normal'`.

    Notes
    -----
    NaNs are treated as missing values: disregarded in ``fit``, and maintained
    in ``transform``.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    References
    ----------
    .. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
           improve normality or symmetry." Biometrika, 87(4), pp.954-959,
           (2000).

    .. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
           of the Royal Statistical Society B, 26, 211-252 (1964).
    """
    @_deprecate_positional_args
    def __init__(self, method='yeo-johnson', *, standardize=True, copy=True):
        self.method = method
        self.standardize = standardize
        self.copy = copy

    def fit(self, X, y=None):
        """Estimate the optimal parameter lambda for each feature.

        The optimal lambda parameter for minimizing skewness is estimated on
        each feature independently using maximum likelihood.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data used to estimate the optimal transformation parameters.

        y : None
            Ignored.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        self._fit(X, y=y, force_transform=False)
        return self

    def fit_transform(self, X, y=None):
        """Fit to ``X``, then transform it in a single pass."""
        return self._fit(X, y, force_transform=True)

    def _fit(self, X, y=None, force_transform=False):
        X = self._check_input(X, in_fit=True, check_positive=True,
                              check_method=True)

        if not self.copy and not force_transform:  # if call from fit()
            X = X.copy()  # force copy so that fit does not change X inplace

        optim_function = {'box-cox': self._box_cox_optimize,
                          'yeo-johnson': self._yeo_johnson_optimize
                          }[self.method]
        with np.errstate(invalid='ignore'):  # hide NaN warnings
            self.lambdas_ = np.array([optim_function(col) for col in X.T])

        if self.standardize or force_transform:
            transform_function = {'box-cox': boxcox,
                                  'yeo-johnson': self._yeo_johnson_transform
                                  }[self.method]
            for i, lmbda in enumerate(self.lambdas_):
                with np.errstate(invalid='ignore'):  # hide NaN warnings
                    X[:, i] = transform_function(X[:, i], lmbda)

        if self.standardize:
            # The scaler is fitted on the power-transformed data so that
            # transform/inverse_transform can reuse it later.
            self._scaler = StandardScaler(copy=False)
            if force_transform:
                X = self._scaler.fit_transform(X)
            else:
                self._scaler.fit(X)

        return X

    def transform(self, X):
        """Apply the power transform to each feature using the fitted lambdas.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to be transformed using a power transformation.

        Returns
        -------
        X_trans : ndarray of shape (n_samples, n_features)
            The transformed data.
        """
        check_is_fitted(self)
        X = self._check_input(X, in_fit=False, check_positive=True,
                              check_shape=True)

        transform_function = {'box-cox': boxcox,
                              'yeo-johnson': self._yeo_johnson_transform
                              }[self.method]
        for i, lmbda in enumerate(self.lambdas_):
            with np.errstate(invalid='ignore'):  # hide NaN warnings
                X[:, i] = transform_function(X[:, i], lmbda)

        if self.standardize:
            X = self._scaler.transform(X)

        return X

    def inverse_transform(self, X):
        """Apply the inverse power transformation using the fitted lambdas.

        The inverse of the Box-Cox transformation is given by::

            if lambda_ == 0:
                X = exp(X_trans)
            else:
                X = (X_trans * lambda_ + 1) ** (1 / lambda_)

        The inverse of the Yeo-Johnson transformation is given by::

            if X >= 0 and lambda_ == 0:
                X = exp(X_trans) - 1
            elif X >= 0 and lambda_ != 0:
                X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1
            elif X < 0 and lambda_ != 2:
                X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))
            elif X < 0 and lambda_ == 2:
                X = 1 - exp(-X_trans)

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The transformed data.

        Returns
        -------
        X : ndarray of shape (n_samples, n_features)
            The original data.
        """
        check_is_fitted(self)
        X = self._check_input(X, in_fit=False, check_shape=True)

        if self.standardize:
            X = self._scaler.inverse_transform(X)

        # NOTE: the historical method name 'tranform' (sic) is kept as-is
        # because it is private and referenced only here.
        inv_fun = {'box-cox': self._box_cox_inverse_tranform,
                   'yeo-johnson': self._yeo_johnson_inverse_transform
                   }[self.method]
        for i, lmbda in enumerate(self.lambdas_):
            with np.errstate(invalid='ignore'):  # hide NaN warnings
                X[:, i] = inv_fun(X[:, i], lmbda)

        return X

    def _box_cox_inverse_tranform(self, x, lmbda):
        """Return inverse-transformed input x following Box-Cox inverse
        transform with parameter lambda.
        """
        if lmbda == 0:
            x_inv = np.exp(x)
        else:
            x_inv = (x * lmbda + 1) ** (1 / lmbda)

        return x_inv

    def _yeo_johnson_inverse_transform(self, x, lmbda):
        """Return inverse-transformed input x following Yeo-Johnson inverse
        transform with parameter lambda.
        """
        x_inv = np.zeros_like(x)
        pos = x >= 0

        # when x >= 0
        if abs(lmbda) < np.spacing(1.):
            x_inv[pos] = np.exp(x[pos]) - 1
        else:  # lmbda != 0
            x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1

        # when x < 0
        if abs(lmbda - 2) > np.spacing(1.):
            x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
                                       1 / (2 - lmbda))
        else:  # lmbda == 2
            x_inv[~pos] = 1 - np.exp(-x[~pos])

        return x_inv

    def _yeo_johnson_transform(self, x, lmbda):
        """Return transformed input x following Yeo-Johnson transform with
        parameter lambda.
        """
        out = np.zeros_like(x)
        pos = x >= 0  # binary mask

        # when x >= 0
        if abs(lmbda) < np.spacing(1.):
            out[pos] = np.log1p(x[pos])
        else:  # lmbda != 0
            out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda

        # when x < 0
        if abs(lmbda - 2) > np.spacing(1.):
            out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
        else:  # lmbda == 2
            out[~pos] = -np.log1p(-x[~pos])

        return out

    def _box_cox_optimize(self, x):
        """Find and return optimal lambda parameter of the Box-Cox transform by
        MLE, for observed data x.

        We here use scipy builtins which uses the brent optimizer.
        """
        # the computation of lambda is influenced by NaNs so we need to
        # get rid of them
        _, lmbda = stats.boxcox(x[~np.isnan(x)], lmbda=None)

        return lmbda

    def _yeo_johnson_optimize(self, x):
        """Find and return optimal lambda parameter of the Yeo-Johnson
        transform by MLE, for observed data x.

        Like for Box-Cox, MLE is done via the brent optimizer.
        """

        def _neg_log_likelihood(lmbda):
            """Return the negative log likelihood of the observed data x as a
            function of lambda."""
            x_trans = self._yeo_johnson_transform(x, lmbda)
            n_samples = x.shape[0]

            loglike = -n_samples / 2 * np.log(x_trans.var())
            loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()

            return -loglike

        # the computation of lambda is influenced by NaNs so we need to
        # get rid of them
        x = x[~np.isnan(x)]
        # choosing bracket -2, 2 like for boxcox
        return optimize.brent(_neg_log_likelihood, brack=(-2, 2))

    def _check_input(self, X, in_fit, check_positive=False, check_shape=False,
                     check_method=False):
        """Validate the input before fit and transform.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        in_fit : bool
            Whether or not `_check_input` is called from `fit` or other
            methods, e.g. `predict`, `transform`, etc.

        check_positive : bool, default=False
            If True, check that all data is positive and non-zero (only if
            ``self.method=='box-cox'``).

        check_shape : bool, default=False
            If True, check that n_features matches the length of self.lambdas_

        check_method : bool, default=False
            If True, check that the transformation method is valid.
        """
        X = self._validate_data(X, ensure_2d=True, dtype=FLOAT_DTYPES,
                                copy=self.copy, force_all_finite='allow-nan',
                                reset=in_fit)

        # FIX: `np.warnings` was an undocumented alias of the stdlib
        # `warnings` module and was removed in NumPy 1.25; use the stdlib
        # module directly. A function-local import keeps this change
        # self-contained.
        import warnings
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', r'All-NaN (slice|axis) encountered')
            if (check_positive and self.method == 'box-cox' and
                    np.nanmin(X) <= 0):
                raise ValueError("The Box-Cox transformation can only be "
                                 "applied to strictly positive data")

        if check_shape and not X.shape[1] == len(self.lambdas_):
            raise ValueError("Input data has a different number of features "
                             "than fitting data. Should have {n}, data has {m}"
                             .format(n=len(self.lambdas_), m=X.shape[1]))

        valid_methods = ('box-cox', 'yeo-johnson')
        if check_method and self.method not in valid_methods:
            raise ValueError("'method' must be one of {}, "
                             "got {} instead."
                             .format(valid_methods, self.method))

        return X

    def _more_tags(self):
        return {'allow_nan': True}
@_deprecate_positional_args
def power_transform(X, method='yeo-johnson', *, standardize=True, copy=True):
    """Apply a power transform featurewise to make data more Gaussian-like.

    Power transforms are a family of parametric, monotonic transformations
    that are applied to make data more Gaussian-like. This is useful for
    modeling issues related to heteroscedasticity (non-constant variance),
    or other situations where normality is desired.

    Currently, power_transform supports the Box-Cox transform and the
    Yeo-Johnson transform. The optimal parameter for stabilizing variance and
    minimizing skewness is estimated through maximum likelihood.

    Box-Cox requires input data to be strictly positive, while Yeo-Johnson
    supports both positive or negative data.

    By default, zero-mean, unit-variance normalization is applied to the
    transformed data.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data to be transformed using a power transformation.

    method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
        The power transform method. Available methods are:

        - 'yeo-johnson' [1]_, works with positive and negative values
        - 'box-cox' [2]_, only works with strictly positive values

        .. versionchanged:: 0.23
            The default value of the `method` parameter changed from
            'box-cox' to 'yeo-johnson' in 0.23.

    standardize : bool, default=True
        Set to True to apply zero-mean, unit-variance normalization to the
        transformed output.

    copy : bool, default=True
        Set to False to perform inplace computation during transformation.

    Returns
    -------
    X_trans : ndarray of shape (n_samples, n_features)
        The transformed data.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import power_transform
    >>> data = [[1, 2], [3, 2], [4, 5]]
    >>> print(power_transform(data, method='box-cox'))
    [[-1.332... -0.707...]
     [ 0.256... -0.707...]
     [ 1.076...  1.414...]]

    .. warning:: Risk of data leak.
        Do not use :func:`~sklearn.preprocessing.power_transform` unless you
        know what you are doing. A common mistake is to apply it to the entire
        data *before* splitting into training and test sets. This will bias the
        model evaluation because information would have leaked from the test
        set to the training set.
        In general, we recommend using
        :class:`~sklearn.preprocessing.PowerTransformer` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),
        LogisticRegression())`.

    See Also
    --------
    PowerTransformer : Equivalent transformation with the
        Transformer API (e.g. as part of a preprocessing
        :class:`~sklearn.pipeline.Pipeline`).

    quantile_transform : Maps data to a standard normal distribution with
        the parameter `output_distribution='normal'`.

    Notes
    -----
    NaNs are treated as missing values: disregarded in ``fit``, and maintained
    in ``transform``.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    References
    ----------
    .. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
           improve normality or symmetry." Biometrika, 87(4), pp.954-959,
           (2000).

    .. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
           of the Royal Statistical Society B, 26, 211-252 (1964).
    """
    transformer = PowerTransformer(method=method, standardize=standardize,
                                   copy=copy)
    return transformer.fit_transform(X)
| glemaitre/scikit-learn | sklearn/preprocessing/_data.py | Python | bsd-3-clause | 115,697 | [
"Gaussian"
] | 5a2f5dfb8f2e10dfc2cd39c7c90d1ce6ab87f0211472deb3bad66127ef9f4c26 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import re
import math
from collections import defaultdict
from .exceptions import *
from . import qcformat
#import molpro_basissets
from . import options
from .pdict import PreservingDict
from .psivarrosetta import useme2psivar
def harvest_output(outtext):
    """Harvest results from the text of a Psi4 output file *outtext*.

    Returns
    -------
    (psivar, psivar_coord, psivar_grad)
        *psivar* is a PreservingDict of harvested quantities; geometry and
        gradient are not parsed here and are always returned as None.
    """
    psivar = PreservingDict()
    psivar_coord = None
    psivar_grad = None

    NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"

    # Process SAPT
    mobj = re.search(
        r'^\s+' + r'SAPT Results' + r'\s*' +
        r'^\s*(?:-+)\s*' +
        r'^\s+' + r'Electrostatics' +
        r'(?:.*?)' +
        r'^\s+' + r'Exchange' +
        r'(?:.*?)' +
        r'^\s+' + r'Induction' +
        r'(?:.*?)' +
        r'^\s+' + r'Dispersion' +
        r'(?:.*?)' +
        r'^\s+' + r'Total' +
        r'(?:.*?)' +
        r'^(?:\s*?)$',
        outtext, re.MULTILINE | re.DOTALL)
    if mobj:
        print('matched sapt')
        for pv in mobj.group(0).split('\n'):
            # FIX: the unit tags were previously written as r'[mEh]' and
            # r'[kcal/mol]', which regex parses as single-character classes,
            # so table rows printed with literal bracketed units (e.g.
            # "-13.065 [mEh]   -8.198 [kcal/mol]") could never match. The
            # brackets must be escaped to match literally.
            submobj = re.search(r'^\s+' + r'(.+?)' + r'\s+' +
                                NUMBER + r'\s+' + r'\[mEh\]' + r'\s+' +
                                NUMBER + r'\s+' + r'\[kcal/mol\]' + r'\s*$', pv)
            if submobj:
                try:
                    # Whitespace is collapsed so the table label matches the
                    # rosetta lookup key.
                    key = ''.join(submobj.group(1).split())
                    useme = useme2psivar[key]
                except KeyError:
                    # Unrecognized rows are skipped silently.
                    pass
                else:
                    psivar['%s' % (useme)] = submobj.group(2)

    # Process PsiVariables
    mobj = re.search(r'^(?:  Variable Map:)\s*' +
                     r'^\s*(?:-+)\s*' +
                     r'^(.*?)' +
                     r'^(?:\s*?)$',
                     outtext, re.MULTILINE | re.DOTALL)
    if mobj:
        for pv in mobj.group(1).split('\n'):
            submobj = re.search(r'^\s+' + r'"(.+?)"' + r'\s+=>\s+' + NUMBER + r'\s*$', pv)
            if submobj:
                psivar['%s' % (submobj.group(1))] = submobj.group(2)

    # Process Completion
    mobj = re.search(r'Psi4 exiting successfully. Buy a developer a beer!',
                     outtext, re.MULTILINE)
    if mobj:
        psivar['SUCCESS'] = True

    return psivar, psivar_coord, psivar_grad
class Infile(qcformat.InputFormat2):
    """Builder for a Psi4 input file from (memory, molecule, method, dertype, options)."""

    def __init__(self, mem, mol, mtd, der, opt):
        qcformat.InputFormat2.__init__(self, mem, mol, mtd, der, opt)

        # SAPT methods are only defined for a two-fragment (dimer) system.
        if 'sapt' in self.method and self.molecule.nactive_fragments() != 2:
            raise FragmentCountError("""Requested molecule has %d, not 2, fragments.""" % (self.molecule.nactive_fragments()))

    def format_infile_string(self):
        """Assemble and return the full text of the Psi4 input file."""
        # comment line plus memory directive
        memcmd = """# %s\n\nmemory %d mb\n\n""" % (self.molecule.tagline, self.memory)
        memkw = {}

        # molecule and basis set
        molcmd = self.molecule.format_molecule_for_psi4()
        molkw = {}

        # global convergence directives
        _cdscmd, cdskw = muster_cdsgroup_options()

        # calculation type and quantum chemical method
        mdccmd, mdckw = procedures['energy'][self.method](self.method, self.dertype)

        # reconcile driver defaults against user options, then render them
        userkw = self.options
        userkw = options.reconcile_options2(userkw, cdskw)
        userkw = options.reconcile_options2(userkw, mdckw)
        optcmd = options.prepare_options_for_psi4(userkw)

        # literal text passed through untouched to psi4
        litcmd = """\nprint_variables()\n\n"""

        return memcmd + molcmd + optcmd + mdccmd + litcmd
#'hf'
#'df-hf'
#'b3lyp'
#'blyp'
#'bp86'
#'fno-ccsd(t)'
#'df-ccsd(t)'
#'fno-df-ccsd(t)'
#'df-b97-d'
#'df-b97-d3'
#'pbe0-2'
#'dsd-pbep86'
#'wb97x-2'
#'DLdf+d'
#'DLdf+d09'
#'df-b3lyp'
#'df-b3lyp-d'
#'df-b3lyp-d3'
#'df-wb97x-d'
def muster_cdsgroup_options():
    """Return ``(text, options)`` carrying baseline convergence/SCF settings.

    ``text`` is always empty; the settings travel in a nested
    ``options[module][keyword]['value']`` dictionary.
    """
    opts = defaultdict(lambda: defaultdict(dict))
    opts['GLOBALS']['E_CONVERGENCE']['value'] = 8
    opts['SCF']['GUESS']['value'] = 'sad'
    opts['SCF']['MAXITER']['value'] = 200
    return '', opts
def muster_modelchem(name, dertype):
    """Transform calculation method *name* and derivative level *dertype*
    into a psithon ``energy()`` command plus Psi4 keyword options.

    Returns
    -------
    (text, options) : (str, defaultdict)
        *text* is the psithon command line(s); *options* is a nested
        ``options[module][keyword]['value']`` dictionary.

    Raises
    ------
    ValidationError
        If *dertype* is not 0 or *name* is not a recognized method.
    """
    text = ''
    lowername = name.lower()
    options = defaultdict(lambda: defaultdict(dict))

    # only single-point energies are supported by this builder
    if dertype == 0:
        text += """energy('"""
    else:
        raise ValidationError("""Requested Psi4 dertype %d is not available.""" % (dertype))

    if lowername == 'mp2':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['SCF']['SCF_TYPE']['value'] = 'direct'
        options['MP2']['MP2_TYPE']['value'] = 'conv'
        text += """mp2')\n\n"""

    elif lowername == 'df-mp2':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['MP2']['MP2_TYPE']['value'] = 'df'
        text += """mp2')\n\n"""

    elif lowername == 'sapt0':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['SCF']['SCF_TYPE']['value'] = 'df'
        text += """sapt0')\n\n"""

    elif lowername == 'sapt2+':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SAPT']['NAT_ORBS_T2']['value'] = True
        options['SAPT']['NAT_ORBS_T3']['value'] = True
        options['SAPT']['NAT_ORBS_V4']['value'] = True
        options['SAPT']['OCC_TOLERANCE']['value'] = 1.0e-6
        text += """sapt2+')\n\n"""

    elif lowername == 'sapt2+(3)':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SAPT']['NAT_ORBS_T2']['value'] = True
        options['SAPT']['NAT_ORBS_T3']['value'] = True
        options['SAPT']['NAT_ORBS_V4']['value'] = True
        options['SAPT']['OCC_TOLERANCE']['value'] = 1.0e-6
        text += """sapt2+(3)')\n\n"""

    elif lowername == 'sapt2+3(ccd)':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SAPT']['NAT_ORBS_T2']['value'] = True
        options['SAPT']['NAT_ORBS_T3']['value'] = True
        options['SAPT']['NAT_ORBS_V4']['value'] = True
        options['SAPT']['OCC_TOLERANCE']['value'] = 1.0e-6
        options['SAPT']['DO_MBPT_DISP']['value'] = True
        text += """sapt2+3(ccd)')\n\n"""

    elif lowername == 'df-b97-d3':
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
        options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
        text += """b97-d3')\n\n"""

    elif lowername == 'df-wb97x-d':
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
        options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
        text += """wb97x-d')\n\n"""

    elif lowername == 'df-b3lyp-d3':
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
        options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
        text += """b3lyp-d3')\n\n"""

    elif lowername == 'dfdf-b2plyp-d3':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['DFMP2']['MP2_TYPE']['value'] = 'df'
        options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
        options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
        text += """b2plyp-d3')\n\n"""

    elif lowername == 'df-wpbe':
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
        options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
        text += """wpbe')\n\n"""

    elif lowername == 'ccsd-polarizability':
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        # note: replaces, not appends, the energy() stub built above
        text = """property('ccsd', properties=['polarizability'])\n\n"""

    elif lowername == 'mrccsdt(q)':
        options['SCF']['SCF_TYPE']['value'] = 'pk'
        options['GLOBALS']['FREEZE_CORE']['value'] = True
        options['GLOBALS']['NAT_ORBS']['value'] = True  # needed by mrcc but not recognized by mrcc
        options['FNOCC']['OCC_TOLERANCE']['value'] = 6
        text += """mrccsdt(q)')\n\n"""

    elif lowername == 'c4-ccsdt(q)':
        options['CFOUR']['CFOUR_SCF_CONV']['value'] = 11
        options['CFOUR']['CFOUR_CC_CONV']['value'] = 10
        options['CFOUR']['CFOUR_FROZEN_CORE']['value'] = True
        text += """c4-ccsdt(q)')\n\n"""

    elif lowername == 'df-m05-2x':
        options['SCF']['SCF_TYPE']['value'] = 'df'
        options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
        options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
        text += """m05-2x')\n\n"""

    else:
        # FIX: the format spec was '%d', which raises TypeError when
        # interpolating the string method name; '%s' reports it correctly.
        raise ValidationError("""Requested Psi4 computational method %s is not available.""" % (lowername))

    return text, options
# Registry of supported single-point methods. Every entry currently routes
# through muster_modelchem; the tuple preserves the historical ordering.
_energy_methods = (
    'df-b97-d3', 'df-wb97x-d', 'df-b3lyp-d3', 'mp2', 'df-mp2',
    'sapt0', 'sapt2+', 'sapt2+(3)', 'sapt2+3(ccd)', 'mrccsdt(q)',
    'c4-ccsdt(q)', 'ccsd-polarizability', 'dfdf-b2plyp-d3', 'df-wpbe',
    'df-m05-2x',
)

procedures = {
    'energy': {mtd: muster_modelchem for mtd in _energy_methods}
}

qcmtdIN = procedures['energy']
def psi4_list():
    """Return the Psi4 method names with energies, sorted alphabetically."""
    return sorted(procedures['energy'])
| CDSherrill/psi4 | psi4/driver/qcdb/qcprog_psi4.py | Python | lgpl-3.0 | 12,630 | [
"CFOUR",
"Psi4"
] | ee9b698de4043ff6ac95c8cd6eb021e9fc20c8cef125ff69d4a1f6d0aea4a210 |
#!/home-1/asierak1@jhu.edu/ParaView-4.2.0-Linux-64bit/bin/pvpython
# README
#
# Dependencies (I have tested it with these; others may also work):
# - ParaView-4.2.0
# - ffmpeg release/2.8 (git://source.ffmpeg.org/ffmpeg.git)
#
# Usage:
# - Change the path given in Line 1 to point to pvpython inside your Paraview
# installation
# - Use ParaView to set up the scene as you like
# - Make sure to rename the filter containing the data as 'flow' and 'part'
# - Save the ParaView state file (File->Save State)
# - Create a directory for storing the image files
# - Run animate-cgns.py and follow the instructions to generate the animation
#
# Don't be afraid to edit this script for your own purposes (e.g., if you don't
# need to read a 'part' file, comment out those lines below.
#
# 2015 Adam Sierakowski sierakowski@jhu.edu
import sys
from paraview.simple import *
import glob, os
import subprocess
import math
import re
def sorted_nicely( l ):
    """Sort the given iterable in natural (human) order, so that e.g.
    'flow-2' sorts before 'flow-10'.

    Required arguments:
    l -- The iterable to be sorted.

    courtesy http://stackoverflow.com/questions/2669059/...
    .../how-to-sort-alpha-numeric-set-in-python
    """
    def natural_key(key):
        # Split on digit runs (the capturing group keeps them) and compare
        # digit runs numerically, everything else lexically.
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split('([0-9]+)', key)]
    return sorted(l, key=natural_key)
# TODO accept starting and ending time command line input
print "CGNS animation utility"
print ""
# Gather all run parameters interactively.
root = raw_input("Simulation root: ")
if not root.endswith('/'):
    root = root + '/'
state = raw_input("ParaView state file (.pvsm): ")
if not state.endswith('.pvsm'):
    state = state + '.pvsm'
ts = raw_input("Animation start time: ")
te = raw_input("Animation end time: ")
img = raw_input("Image output directory: ")
if not img.endswith('/'):
    img = img + '/'
anim = raw_input("Animation output name (include extension): ")
fps = raw_input("Animation frame rate: ")
# All paths are taken relative to the simulation root.
state = root + state
img = root + img
anim = root + anim
print "\nSummary:"
print "  Simulation root: " + root
print "  ParaView state file: " + state
print "  Animation time: " + ts + " to " + te
print "  Image output directory: " + img
print "  Animation output name: " + anim
print "  Animation frame rate: " + fps
print ""
run = raw_input("Continue? (y/N) ")
print ""
if run == 'y' or run == 'Y':
    # load state file
    # make sure to build state file using the filter names 'flow' and 'part'
    LoadState(state)
    view = GetRenderView()
    flow = FindSource('flow')
    if flow == None:
        print "Make sure to build the state file using the filter name \'flow\'"
    part = FindSource('part')
    if part == None:
        print "Make sure to build the state file using the filter name \'part\'"
    # Write (and immediately delete) a scratch image so the render view is
    # fully initialized before the frame loop starts.
    view.WriteImage(img + "tmp.png", "vtkPNGWriter", 1)
    os.remove(img + "tmp.png")
    # determine zero padding length
    # NOTE(review): assumes te > 0 -- math.log10 raises for te <= 0.
    mag = int(math.floor(math.log10(float(te))))
    # go through all files
    for fname in sorted_nicely(glob.glob(root + "/output/flow*.cgns")):
        # Extract the timestamp embedded in 'flow-<time>.cgns'.
        time = fname.split('/')[-1]
        if time.endswith('.cgns'):
            time = time[:-5]
        if time.startswith('flow-'):
            time = time[5:]
        if float(time) >= float(ts) and float(time) <= float(te):
            # change to file given by time
            flow.FileName = root + "/output/flow-" + time + ".cgns"
            flow.FileNameChanged()
            part.FileName = root + "/output/part-" + time + ".cgns"
            part.FileNameChanged()
            print "Saving image for t = " + time
            # pad image output time stamp for ffmpeg
            ztime = time.zfill(mag + 5)
            # save screen shot
            view.WriteImage(img + "img-" + ztime + ".png", "vtkPNGWriter", 1)
    # stitch together using ffmpeg
    print ""
    print "Stitching images together into " + anim
    print ""
    # Rescale presentation timestamps so playback matches the requested fps.
    pts = "setpts=" + str(30./float(fps)) + "*PTS"
    subprocess.call(["ffmpeg", "-r", "30", "-f", "image2", "-pattern_type",
                     "glob", "-i", img + "*.png", "-qscale:v", "4", "-vcodec",
                     "msmpeg4v2", "-r", "30", "-vf", pts, anim])
| teethfish/bluebottle | tools/animate-cgns.py | Python | apache-2.0 | 3,991 | [
"ParaView"
] | 8390189abb3e73a1de1fc6a3bc075b5945cd1a4b3365fe502c025d694df5d69d |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Code to interact with the ever-so-useful EMBOSS programs.
"""
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/Emboss/__init__.py | Python | gpl-2.0 | 236 | [
"Biopython"
] | f006b6ed087cbc5d3578c0d8ceaf830cbbd838301eff9be9e6aaa13e17b6c1ef |
# -*- coding:utf-8 -*-
import test_core
import demjson
import datetime
test_core.title("登录测试")
# Load the shared test configuration file.
with open("testconfig.json", 'r') as config_file:
    config_text = config_file.read()
config = demjson.decode(config_text)
if config["url"] == "":
    test_core.terr("错误: 'testconfig.json' 配置不完全。")
    exit()
login_url = config["url"] + "nyalogin.php"
credentials = {
    'user': "testmail@uuu.moe",
    'password': "testpassword",
    'ua': "test"
}
# POST the login request and capture the parsed response.
response = test_core.postarray(login_url, credentials, True)
# Persist the session token plus a timestamp back into the config file
# so later tests can reuse the session.
config["update"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
config["token"] = response["token"]
with open("testconfig.json", 'w') as config_file:
    config_file.write(demjson.encode(config))
"MOE"
] | 04d888187c48d6d7c5b981d5fbeaf772bbc16e22b30a44a2277816a0ad92cf8f |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for operators used in Gluon dispatched by F=ndarray."""
from __future__ import absolute_import
import numpy as np
from ...context import current_context
from . import _internal as _npi
from ..ndarray import NDArray
from ...base import numeric_types
__all__ = ['randint', 'uniform', 'normal']
def randint(low, high=None, size=None, dtype=None, **kwargs):
    """Return random integers from `low` (inclusive) to `high` (exclusive).

    Samples are drawn from the "discrete uniform" distribution over the
    half-open interval [`low`, `high`).  When `high` is None (the default)
    the interval is [0, `low`) instead.

    Parameters
    ----------
    low : int
        Lowest (signed) integer drawn from the distribution, or, when
        ``high`` is None, one above the highest such integer.
    high : int, optional
        If provided, one above the largest (signed) integer to be drawn.
    size : int or tuple of ints, optional
        Output shape; ``m * n * k`` samples are drawn for shape
        ``(m, n, k)``.  Default is None, producing a single value.
    dtype : dtype, optional
        Desired dtype of the result, selected by name ('int64', 'int', ...).
        The default value is 'np.int'.
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ndarray, optional
        The output ndarray (default is `None`).

    Returns
    -------
    out : ndarray of ints
        `size`-shaped array of random integers from the appropriate
        distribution, or a single such random int if `size` not provided.

    Examples
    --------
    >>> np.random.randint(2, size=10)
    array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
    >>> np.random.randint(5, size=(2, 4))
    array([[4, 0, 2, 1],
           [3, 2, 2, 0]])
    """
    # Optional keyword-only arguments; any other kwargs are silently
    # ignored, matching the historical behaviour of this wrapper.
    ctx = kwargs.pop('ctx', None)
    out = kwargs.pop('out', None)
    dtype = 'int' if dtype is None else dtype
    ctx = current_context() if ctx is None else ctx
    size = 1 if size is None else size
    if high is None:
        # Single-argument form: sample from [0, low).
        low, high = 0, low
    return _npi.random_randint(low, high, shape=size, dtype=dtype, ctx=ctx, out=out)
def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None):
    """Draw samples from a uniform distribution.
    Samples are uniformly distributed over the half-open interval
    ``[low, high)`` (includes low, but excludes high). In other words,
    any value within the given interval is equally likely to be drawn
    by `uniform`.
    Parameters
    ----------
    low : float, ndarray, optional
        Lower boundary of the output interval. All values generated will be
        greater than or equal to low. The default value is 0.
    high : float, ndarray, optional
        Upper boundary of the output interval. All values generated will be
        less than high. The default value is 1.0.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a scalar tensor containing a single value is returned if
        ``low`` and ``high`` are both scalars.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ndarray, optional
        Store output to an existing ndarray; its shape overrides `size`.
    Returns
    -------
    out : ndarray
        Drawn samples from the parameterized uniform distribution.
    """
    # Imported locally to avoid a circular import at module load time.
    from ...numpy import ndarray as np_ndarray
    # Record which of (low, high) are ndarrays: the backend op expects
    # ndarray bounds positionally and scalar bounds as keyword arguments.
    input_type = (isinstance(low, np_ndarray), isinstance(high, np_ndarray))
    if dtype is None:
        dtype = 'float32'
    if ctx is None:
        ctx = current_context()
    if out is not None:
        # An explicit output array fixes the sample shape.
        size = out.shape
    if size == ():
        # Empty tuple means scalar output: encoded as size=None downstream.
        size = None
    if input_type == (True, True):
        # Both bounds are ndarrays: pass both positionally.
        return _npi.uniform(low, high, low=None, high=None, size=size,
                            ctx=ctx, dtype=dtype, out=out)
    elif input_type == (False, True):
        # Only `high` is an ndarray: positional; scalar `low` as keyword.
        return _npi.uniform(high, low=low, high=None, size=size,
                            ctx=ctx, dtype=dtype, out=out)
    elif input_type == (True, False):
        # Only `low` is an ndarray: positional; scalar `high` as keyword.
        return _npi.uniform(low, low=None, high=high, size=size,
                            ctx=ctx, dtype=dtype, out=out)
    else:
        # Both bounds are plain scalars.
        return _npi.uniform(low=low, high=high, size=size,
                            ctx=ctx, dtype=dtype, out=out)
def normal(loc=0.0, scale=1.0, size=None, **kwargs):
    """Draw random samples from a normal (Gaussian) distribution.

    Samples are distributed according to a normal distribution with mean
    *loc* and standard deviation *scale*.

    Parameters
    ----------
    loc : float, optional
        Mean (centre) of the distribution.
    scale : float, optional
        Standard deviation (spread or "width") of the distribution.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., `(m, n, k)`, then
        `m * n * k` samples are drawn. If size is `None` (default), a
        scalar tensor containing a single value is returned if loc and
        scale are both scalars.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray
        Drawn samples from the parameterized normal distribution.

    Notes
    -----
    This function currently does not support ``loc`` and ``scale`` as
    ndarrays.
    """
    dtype = kwargs.pop('dtype', None)
    dtype = 'float32' if dtype is None else dtype
    ctx = kwargs.pop('ctx', None)
    ctx = current_context() if ctx is None else ctx
    out = kwargs.pop('out', None)
    if size is None and out is None:
        # Neither a shape nor an output array: produce a scalar tensor.
        size = ()
    if not (isinstance(loc, numeric_types) and isinstance(scale, numeric_types)):
        raise NotImplementedError('np.random.normal only supports loc and scale of '
                                  'numeric types for now')
    return _npi.random_normal(loc, scale, shape=size, dtype=dtype, ctx=ctx, out=out, **kwargs)
def multinomial(n, pvals, size=None):
    """Draw samples from a multinomial distribution.

    The multinomial distribution is a multivariate generalisation of the
    binomial distribution.  Each drawn sample represents `n` experiments,
    each of which falls into one of ``len(pvals)`` outcomes; entry ``i`` of
    a sample counts how many of the `n` experiments produced outcome ``i``.

    Parameters
    ----------
    n : int
        Number of experiments.
    pvals : sequence of floats, length p
        Probabilities of each of the p different outcomes. These should
        sum to 1.  May also be an MXNet ndarray.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. Default is None, in which case a
        single value is returned.

    Returns
    -------
    out : ndarray
        The drawn samples, of shape size, if that was provided. If not,
        the shape is ``(N,)``.

    Examples
    --------
    >>> np.random.multinomial(1000, [1/6.]*6, size=2)
    array([[164, 161, 179, 158, 150, 188],
           [178, 162, 177, 143, 163, 177]])
    >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3])
    array([32, 68])
    """
    # ndarray probabilities are handed to the backend positionally.
    if isinstance(pvals, NDArray):
        return _npi.multinomial(pvals, pvals=None, n=n, size=size)
    # Plain sequences: reject inputs the backend cannot digest.
    if isinstance(pvals, np.ndarray):
        raise ValueError('numpy ndarray is not supported!')
    if any(isinstance(i, list) for i in pvals):
        raise ValueError('object too deep for desired array')
    return _npi.multinomial(n=n, pvals=pvals, size=size)
| reminisce/mxnet | python/mxnet/ndarray/numpy/random.py | Python | apache-2.0 | 9,554 | [
"Gaussian"
] | f1460b452400ca080bb0c50d00cd14628fdc226e502494af13b8b1f029872a32 |
# archan.py ---
#
# Filename: archan.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Mon Apr 27 15:34:07 2009 (+0530)
# Version:
# Last-Updated: Fri Oct 21 16:04:17 2011 (+0530)
# By: Subhasis Ray
# Update #: 10
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
#
#
# Code:
import config
import moose
from numpy import exp, linspace
from channel import ChannelBase
class AR(ChannelBase):
    """Combined cation current (AR channel).

    Hodgkin-Huxley-style channel with a single activation gate: one 'x'
    gate power and no 'y' gate, as passed to ChannelBase.__init__.
    """
    # Shared membrane-voltage axis over which the gate tables are tabulated.
    v = ChannelBase.v_array
    # Steady-state activation; v * 1e3 converts the axis from V to mV.
    m_inf = 1 / ( 1 + exp( ( v * 1e3 + 75 ) / 5.5 ) )
    # Activation time constant; the leading 1e-3 converts ms to s.
    tau_m = 1e-3 / ( exp( -14.6 - 0.086 * v * 1e3) + exp( -1.87 + 0.07 * v * 1e3))
    def __init__(self, name, parent, Ek=-35e-3):
        # One 'x' gate power, zero 'y' gate powers.
        ChannelBase.__init__(self, name, parent, 1, 0)
        # Fill the gate tables with (tau, inf) and let tweakTau() convert
        # them to the internal (A, B) rate representation.
        # NOTE(review): assumes xGate.tweakTau() interprets A as tau and
        # B as m_inf -- confirm against the MOOSE HHGate documentation.
        for i in range(len(self.xGate.A)):
            self.xGate.A[i] = AR.tau_m[i]
            self.xGate.B[i] = AR.m_inf[i]
        self.xGate.tweakTau()
        self.X = 0.25  # initial activation state
        self.Ek = Ek   # reversal potential in volts
#
# archan.py ends here
| BhallaLab/moose-thalamocortical | DEMOS/pymoose/traub2005/py/archan.py | Python | lgpl-2.1 | 966 | [
"MOOSE"
] | a2cde406b0695f896ec1d721a71590b087df4b0d28966ceefbb7426917b2ec67 |
from gpaw import GPAW
from ase import *
from gpaw.utilities import equal
a = 7.0
Be_solid = Atoms([Atom('Be', (0.0, 0.0, 0.0), magmom=0)],
cell=(a, a, a), pbc=True)
Be_alone = Atoms([Atom('Be', (a/2., a/2., a/2.), magmom=0)],
cell=(a, a, a), pbc=False)
Be_solidC = Atoms([Atom('Be', (0.0, 0.0, 0.0), magmom=1)],
cell=(a, a, a), pbc=True)
Be_aloneC = Atoms([Atom('Be', (a/2., a/2., a/2.), magmom=1)],
cell=(a, a, a), pbc=False)
Be_solid.set_calculator(GPAW(h=0.3, nbands=1))
E_solid_neutral = Be_solid.get_potential_energy()
Be_solidC.set_calculator(GPAW(h=0.3, charge=+1, nbands=1))
E_solid_charged = Be_solidC.get_potential_energy()
Be_alone.set_calculator(GPAW(h=0.3, nbands=1))
E_alone_neutral = Be_alone.get_potential_energy()
Be_aloneC.set_calculator(GPAW(h=0.3, charge=+1, nbands=1))
E_alone_charged = Be_aloneC.get_potential_energy()
print "A test for periodic charged calculations"
print "Be neutal solid: ", E_solid_neutral, " eV"
print "Be neutal alone: ", E_alone_neutral, " eV"
print "Be charged solid: ", E_solid_charged, " eV"
print "Be charged alone: ", E_alone_charged, " eV"
IPs = E_solid_neutral - E_solid_charged
IPa = E_alone_neutral - E_alone_charged
print "Ionization potential solid", IPs, " eV"
print "Ionization potential alone", IPa, " eV"
# Make sure that the ionization potential won't differ by more than 0.05eV
equal(Ips, IPa, 0.05)
from gpaw.utilities import equal
"""Some results:
WITH CORRECTION TURNED ON:
a = 12.0
Be neutal solid: 0.000241919665987 eV
Be neutal alone: 0.000520077168021 eV
Be charged solid: 8.95928916603 eV
Be charged alone: 9.03395959412 eV
Ionization potential solid -8.95904724637 eV
Ionization potential alone -9.03343951695 eV
Difference: 0.07eV
WITHOUT CORRECTION:
Be neutal solid: 0.000241919665987 eV
Be neutal alone: 0.000520077168021 eV
Be charged solid: 7.30611652236 eV !!!
Be charged alone: 9.03395959412 eV
Difference: 1.73eV
"""
| qsnake/gpaw | oldtest/Be_charged.py | Python | gpl-3.0 | 2,014 | [
"ASE",
"GPAW"
] | 5a04ab10649c40ecbd5a9af0047cd5c8464cf62ffe72d5302fcd034a96df755a |
"""DIRAC Administrator API Class
All administrative functionality is exposed through the DIRAC Admin API. Examples include
site banning and unbanning, WMS proxy uploading etc.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import six
import os
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.PromptUser import promptUser
from DIRAC.Core.Base.API import API
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.WorkloadManagementSystem.Client.JobManagerClient import JobManagerClient
from DIRAC.WorkloadManagementSystem.Client.WMSAdministratorClient import WMSAdministratorClient
from DIRAC.WorkloadManagementSystem.Client.PilotManagerClient import PilotManagerClient
# Determine the VO of the currently available proxy; left empty when no
# proxy (or no group) can be found.
# NOTE(review): module-level side effect -- importing this module calls
# getProxyInfo() and inspects the user's proxy.
voName = ''
ret = getProxyInfo(disableVOMS=True)
if ret['OK'] and 'group' in ret['Value']:
  voName = getVOForGroup(ret['Value']['group'])
# Configuration section used by this API module.
COMPONENT_NAME = '/Interfaces/API/DiracAdmin'
class DiracAdmin(API):
""" Administrative functionalities
"""
#############################################################################
  def __init__(self):
    """Internal initialization of the DIRAC Admin API.

    Reads logging and scratch-directory settings from the configuration
    and caches the RSS flag plus site-status helpers.
    """
    super(DiracAdmin, self).__init__()
    self.csAPI = CSAPI()
    # Debug flag mirrors the configured LogLevel (defaults to DEBUG -> True).
    self.dbg = False
    if gConfig.getValue(self.section + '/LogLevel', 'DEBUG') == 'DEBUG':
      self.dbg = True
    self.scratchDir = gConfig.getValue(self.section + '/ScratchDir', '/tmp')
    self.currentDir = os.getcwd()
    # True when the ResourceStatusSystem is the authority for site status.
    self.rssFlag = ResourceStatus().rssFlag
    self.sitestatus = SiteStatus()
#############################################################################
  def uploadProxy(self):
    """Upload the locally available proxy to the DIRAC ProxyManager.

    Example usage:

      >>> print diracAdmin.uploadProxy()
      {'OK': True, 'Value': 0L}

    :return: S_OK,S_ERROR
    """
    return gProxyManager.uploadProxy()
#############################################################################
  def setProxyPersistency(self, userDN, userGroup, persistent=True):
    """Set the persistence flag of a proxy in the Proxy Manager.

    Example usage:

      >>> gLogger.notice(diracAdmin.setProxyPersistency( 'some DN', 'dirac group', True ))
      {'OK': True }

    :param userDN: User DN
    :type userDN: string
    :param userGroup: DIRAC Group
    :type userGroup: string
    :param persistent: Persistent flag
    :type persistent: boolean
    :return: S_OK,S_ERROR
    """
    return gProxyManager.setPersistency(userDN, userGroup, persistent)
#############################################################################
  def checkProxyUploaded(self, userDN, userGroup, requiredTime):
    """Check whether a proxy with at least *requiredTime* of validity has
    been uploaded to the Proxy Manager for the given DN and group.

    (The previous docstring was copy-pasted from setProxyPersistency.)

    Example usage:

      >>> gLogger.notice(diracAdmin.checkProxyUploaded( 'some DN', 'dirac group', 1800 ))
      {'OK': True, 'Value' : True/False }

    :param userDN: User DN
    :type userDN: string
    :param userGroup: DIRAC Group
    :type userGroup: string
    :param requiredTime: required remaining lifetime of the uploaded proxy
                         (presumably seconds -- confirm against
                         gProxyManager.userHasProxy)
    :type requiredTime: int
    :return: S_OK,S_ERROR
    """
    return gProxyManager.userHasProxy(userDN, userGroup, requiredTime)
#############################################################################
def getSiteMask(self, printOutput=False, status='Active'):
"""Retrieve current site mask from WMS Administrator service.
Example usage:
>>> gLogger.notice(diracAdmin.getSiteMask())
{'OK': True, 'Value': 0L}
:return: S_OK,S_ERROR
"""
result = self.sitestatus.getSites(siteState=status)
if result['OK']:
sites = result['Value']
if printOutput:
sites.sort()
for site in sites:
gLogger.notice(site)
return result
#############################################################################
def getBannedSites(self, printOutput=False):
"""Retrieve current list of banned and probing sites.
Example usage:
>>> gLogger.notice(diracAdmin.getBannedSites())
{'OK': True, 'Value': []}
:return: S_OK,S_ERROR
"""
bannedSites = self.sitestatus.getSites(siteState='Banned')
if not bannedSites['OK']:
return bannedSites
probingSites = self.sitestatus.getSites(siteState='Probing')
if not probingSites['OK']:
return probingSites
mergedList = sorted(bannedSites['Value'] + probingSites['Value'])
if printOutput:
gLogger.notice('\n'.join(mergedList))
return S_OK(mergedList)
#############################################################################
def getSiteSection(self, site, printOutput=False):
"""Simple utility to get the list of CEs for DIRAC site name.
Example usage:
>>> gLogger.notice(diracAdmin.getSiteSection('LCG.CERN.ch'))
{'OK': True, 'Value':}
:return: S_OK,S_ERROR
"""
gridType = site.split('.')[0]
if not gConfig.getSections('/Resources/Sites/%s' % (gridType))['OK']:
return S_ERROR('/Resources/Sites/%s is not a valid site section' % (gridType))
result = gConfig.getOptionsDict('/Resources/Sites/%s/%s' % (gridType, site))
if printOutput and result['OK']:
gLogger.notice(self.pPrint.pformat(result['Value']))
return result
#############################################################################
def allowSite(self, site, comment, printOutput=False):
"""Adds the site to the site mask.
Example usage:
>>> gLogger.notice(diracAdmin.allowSite())
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
result = self._checkSiteIsValid(site)
if not result['OK']:
return result
result = self.getSiteMask(status='Active')
if not result['OK']:
return result
siteMask = result['Value']
if site in siteMask:
if printOutput:
gLogger.notice('Site %s is already Active' % site)
return S_OK('Site %s is already Active' % site)
if self.rssFlag:
result = self.sitestatus.setSiteStatus(site, 'Active', comment)
else:
result = WMSAdministratorClient().allowSite(site, comment)
if not result['OK']:
return result
if printOutput:
gLogger.notice('Site %s status is set to Active' % site)
return result
#############################################################################
def getSiteMaskLogging(self, site=None, printOutput=False):
"""Retrieves site mask logging information.
Example usage:
>>> gLogger.notice(diracAdmin.getSiteMaskLogging('LCG.AUVER.fr'))
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
result = self._checkSiteIsValid(site)
if not result['OK']:
return result
if self.rssFlag:
result = ResourceStatusClient().selectStatusElement('Site', 'History', name=site)
else:
result = WMSAdministratorClient().getSiteMaskLogging(site)
if not result['OK']:
return result
if printOutput:
if site:
gLogger.notice('\nSite Mask Logging Info for %s\n' % site)
else:
gLogger.notice('\nAll Site Mask Logging Info\n')
sitesLogging = result['Value']
if isinstance(sitesLogging, dict):
for siteName, tupleList in sitesLogging.items(): # can be an iterator
if not siteName:
gLogger.notice('\n===> %s\n' % siteName)
for tup in tupleList:
stup = str(tup[0]).ljust(8) + str(tup[1]).ljust(20)
stup += '( ' + str(tup[2]).ljust(len(str(tup[2]))) + ' ) "' + str(tup[3]) + '"'
gLogger.notice(stup)
gLogger.notice(' ')
elif isinstance(sitesLogging, list):
sitesLoggingList = [(sl[1], sl[3], sl[4]) for sl in sitesLogging]
for siteLog in sitesLoggingList:
gLogger.notice(siteLog)
return S_OK()
#############################################################################
def banSite(self, site, comment, printOutput=False):
"""Removes the site from the site mask.
Example usage:
>>> gLogger.notice(diracAdmin.banSite())
{'OK': True, 'Value': }
:return: S_OK,S_ERROR
"""
result = self._checkSiteIsValid(site)
if not result['OK']:
return result
mask = self.getSiteMask(status='Banned')
if not mask['OK']:
return mask
siteMask = mask['Value']
if site in siteMask:
if printOutput:
gLogger.notice('Site %s is already Banned' % site)
return S_OK('Site %s is already Banned' % site)
if self.rssFlag:
result = self.sitestatus.setSiteStatus(site, 'Banned', comment)
else:
result = WMSAdministratorClient().banSite(site, comment)
if not result['OK']:
return result
if printOutput:
gLogger.notice('Site %s status is set to Banned' % site)
return result
#############################################################################
  def getServicePorts(self, setup='', printOutput=False):
    """Checks the service ports for the specified setup. If not given this is
    taken from the current installation (/DIRAC/Setup).

    Example usage:

      >>> gLogger.notice(diracAdmin.getServicePorts())
      {'OK': True, 'Value':''}

    :param setup: DIRAC setup name; defaults to the installed /DIRAC/Setup
    :type setup: string
    :param printOutput: pretty-print the resulting mapping to the log
    :type printOutput: boolean
    :return: S_OK with a dict mapping 'System/Service' -> port, or S_ERROR
    """
    if not setup:
      setup = gConfig.getValue('/DIRAC/Setup', '')
    # Validate the setup name against the declared setups.
    setupList = gConfig.getSections('/DIRAC/Setups', [])
    if not setupList['OK']:
      return S_ERROR('Could not get /DIRAC/Setups sections')
    setupList = setupList['Value']
    if setup not in setupList:
      return S_ERROR('Setup %s is not in allowed list: %s' % (setup, ', '.join(setupList)))
    # Map of system name -> instance name for this setup.
    serviceSetups = gConfig.getOptionsDict('/DIRAC/Setups/%s' % setup)
    if not serviceSetups['OK']:
      return S_ERROR('Could not get /DIRAC/Setups/%s options' % setup)
    serviceSetups = serviceSetups['Value']  # dict
    systemList = gConfig.getSections('/Systems')
    if not systemList['OK']:
      return S_ERROR('Could not get Systems sections')
    systemList = systemList['Value']
    result = {}
    # Walk every system's service section and collect configured ports.
    for system in systemList:
      if system in serviceSetups:
        path = '/Systems/%s/%s/Services' % (system, serviceSetups[system])
        servicesList = gConfig.getSections(path)
        if not servicesList['OK']:
          self.log.warn('Could not get sections in %s' % path)
        else:
          servicesList = servicesList['Value']
          if not servicesList:
            servicesList = []
          self.log.verbose('System: %s ServicesList: %s' % (system, ', '.join(servicesList)))
          for service in servicesList:
            spath = '%s/%s/Port' % (path, service)
            servicePort = gConfig.getValue(spath, 0)
            if servicePort:
              self.log.verbose('Found port for %s/%s = %s' % (system, service, servicePort))
              result['%s/%s' % (system, service)] = servicePort
            else:
              self.log.warn('No port found for %s' % spath)
      else:
        self.log.warn('%s is not defined in /DIRAC/Setups/%s' % (system, setup))
    if printOutput:
      gLogger.notice(self.pPrint.pformat(result))
    return S_OK(result)
#############################################################################
  def getProxy(self, userDN, userGroup, validity=43200, limited=False):
    """Retrieve a proxy for the given DN/group with default 12hr validity.

    Example usage:

      >>> gLogger.notice(diracAdmin.getProxy())
      {'OK': True, 'Value': }

    :param userDN: User DN
    :type userDN: string
    :param userGroup: DIRAC Group
    :type userGroup: string
    :param validity: required remaining lifetime (43200 = 12h; presumably
                     seconds -- confirm against gProxyManager.downloadProxy)
    :type validity: int
    :param limited: request a limited proxy
    :type limited: boolean
    :return: S_OK,S_ERROR
    """
    return gProxyManager.downloadProxy(userDN, userGroup, limited=limited,
                                       requiredTimeLeft=validity)
#############################################################################
  def getVOMSProxy(self, userDN, userGroup, vomsAttr=False, validity=43200, limited=False):
    """Retrieve a proxy with VOMS extensions for the given DN/group, with
    default 12hr validity.

    Example usage:

      >>> gLogger.notice(diracAdmin.getVOMSProxy())
      {'OK': True, 'Value': }

    :param userDN: User DN
    :type userDN: string
    :param userGroup: DIRAC Group
    :type userGroup: string
    :param vomsAttr: required VOMS attribute (False = group default)
    :param validity: required remaining lifetime (43200 = 12h)
    :type validity: int
    :param limited: request a limited proxy
    :type limited: boolean
    :return: S_OK,S_ERROR
    """
    return gProxyManager.downloadVOMSProxy(userDN, userGroup, limited=limited,
                                           requiredVOMSAttribute=vomsAttr,
                                           requiredTimeLeft=validity)
#############################################################################
  def getPilotProxy(self, userDN, userGroup, validity=43200):
    """Retrieve a pilot proxy for the given DN/group with default 12hr
    validity.

    Example usage:

      >>> gLogger.notice(diracAdmin.getPilotProxy())
      {'OK': True, 'Value': }

    :param userDN: User DN
    :type userDN: string
    :param userGroup: DIRAC Group
    :type userGroup: string
    :param validity: required remaining lifetime (43200 = 12h)
    :type validity: int
    :return: S_OK,S_ERROR
    """
    return gProxyManager.getPilotProxyFromDIRACGroup(userDN, userGroup, requiredTimeLeft=validity)
#############################################################################
def resetJob(self, jobID):
"""Reset a job or list of jobs in the WMS. This operation resets the reschedule
counter for a job or list of jobs and allows them to run as new.
Example::
>>> gLogger.notice(dirac.reset(12345))
{'OK': True, 'Value': [12345]}
:param job: JobID
:type job: integer or list of integers
:return: S_OK,S_ERROR
"""
if isinstance(jobID, six.string_types):
try:
jobID = int(jobID)
except Exception as x:
return self._errorReport(str(x), 'Expected integer or convertible integer for existing jobID')
elif isinstance(jobID, list):
try:
jobID = [int(job) for job in jobID]
except Exception as x:
return self._errorReport(str(x), 'Expected integer or convertible integer for existing jobIDs')
result = JobManagerClient(useCertificates=False).resetJob(jobID)
return result
#############################################################################
  def getJobPilotOutput(self, jobID, directory=''):
    """Retrieve the pilot output for an existing job in the WMS.

    The output will be retrieved into a ``pilot_<jobID>`` subdirectory of
    the given directory (the current directory by default).

      >>> gLogger.notice(dirac.getJobPilotOutput(12345))
      {'OK': True, StdOut:'',StdError:''}

    :param jobID: JobID
    :type jobID: integer or string
    :param directory: parent directory for the output files
    :type directory: string
    :return: S_OK,S_ERROR
    """
    if not directory:
      directory = self.currentDir
    if not os.path.exists(directory):
      return self._errorReport('Directory %s does not exist' % directory)
    result = WMSAdministratorClient().getJobPilotOutput(jobID)
    if not result['OK']:
      return result
    outputPath = '%s/pilot_%s' % (directory, jobID)
    # Refuse to clobber a previous retrieval.
    if os.path.exists(outputPath):
      self.log.info('Remove %s and retry to continue' % outputPath)
      return S_ERROR('Remove %s and retry to continue' % outputPath)
    # NOTE(review): outputPath cannot exist here (guarded above), so this
    # condition is always true and simply creates the directory.
    if not os.path.exists(outputPath):
      self.log.verbose('Creating directory %s' % outputPath)
      os.mkdir(outputPath)
    outputs = result['Value']
    # Write std.out / std.err files when the service returned them.
    if 'StdOut' in outputs:
      stdout = '%s/std.out' % (outputPath)
      with open(stdout, 'w') as fopen:
        fopen.write(outputs['StdOut'])
      self.log.verbose('Standard output written to %s' % (stdout))
    else:
      self.log.warn('No standard output returned')
    if 'StdError' in outputs:
      stderr = '%s/std.err' % (outputPath)
      with open(stderr, 'w') as fopen:
        fopen.write(outputs['StdError'])
      self.log.verbose('Standard error written to %s' % (stderr))
    else:
      self.log.warn('No standard error returned')
    self.log.always('Outputs retrieved in %s' % outputPath)
    return result
#############################################################################
def getPilotOutput(self, gridReference, directory=''):
    """Retrieve the pilot output (std.out and std.err) for a pilot reference.

    >>> gLogger.notice(dirac.getPilotOutput(12345))
    {'OK': True, 'Value': {}}

    :param gridReference: pilot job reference
    :type gridReference: string
    :param directory: local directory in which to create the
                      pilot_<reference> output directory
                      (defaults to the current directory)
    :type directory: string
    :return: S_OK,S_ERROR
    """
    if not isinstance(gridReference, six.string_types):
        return self._errorReport('Expected string for pilot reference')
    if not directory:
        directory = self.currentDir
    if not os.path.exists(directory):
        return self._errorReport('Directory %s does not exist' % directory)
    result = PilotManagerClient().getPilotOutput(gridReference)
    if not result['OK']:
        return result
    # Use only the last path component of the reference as directory suffix.
    gridReferenceSmall = gridReference.split('/')[-1]
    if not gridReferenceSmall:
        gridReferenceSmall = 'reference'
    outputPath = '%s/pilot_%s' % (directory, gridReferenceSmall)
    if os.path.exists(outputPath):
        # Never overwrite a previous retrieval - the caller must clean up first.
        self.log.info('Remove %s and retry to continue' % outputPath)
        return S_ERROR('Remove %s and retry to continue' % outputPath)
    # The early return above guarantees outputPath does not exist here,
    # so the original redundant "if not os.path.exists" re-check was dropped.
    self.log.verbose('Creating directory %s' % outputPath)
    os.mkdir(outputPath)
    outputs = result['Value']
    if 'StdOut' in outputs:
        stdout = '%s/std.out' % (outputPath)
        with open(stdout, 'w') as fopen:
            fopen.write(outputs['StdOut'])
        self.log.info('Standard output written to %s' % (stdout))
    else:
        self.log.warn('No standard output returned')
    if 'StdErr' in outputs:
        stderr = '%s/std.err' % (outputPath)
        with open(stderr, 'w') as fopen:
            fopen.write(outputs['StdErr'])
        self.log.info('Standard error written to %s' % (stderr))
    else:
        self.log.warn('No standard error returned')
    self.log.always('Outputs retrieved in %s' % outputPath)
    return result
#############################################################################
def getPilotInfo(self, gridReference):
    """Retrieve info relative to a pilot reference.

    >>> gLogger.notice(dirac.getPilotInfo(12345))
    {'OK': True, 'Value': {}}

    :param gridReference: Pilot Job Reference
    :type gridReference: string
    :return: S_OK,S_ERROR
    """
    if not isinstance(gridReference, six.string_types):
        return self._errorReport('Expected string for pilot reference')
    return PilotManagerClient().getPilotInfo(gridReference)
#############################################################################
def killPilot(self, gridReference):
    """Kill the pilot specified by its reference.

    >>> gLogger.notice(dirac.killPilot(12345))
    {'OK': True, 'Value': {}}

    :param gridReference: Pilot Job Reference
    :return: S_OK,S_ERROR
    """
    if not isinstance(gridReference, six.string_types):
        return self._errorReport('Expected string for pilot reference')
    return PilotManagerClient().killPilot(gridReference)
#############################################################################
def getPilotLoggingInfo(self, gridReference):
    """Retrieve the pilot logging info for an existing job in the WMS.

    >>> gLogger.notice(dirac.getPilotLoggingInfo(12345))
    {'OK': True, 'Value': {"The output of the command"}}

    :param gridReference: Grid pilot job reference Id
    :type gridReference: string
    :return: S_OK,S_ERROR
    """
    if isinstance(gridReference, six.string_types):
        return PilotManagerClient().getPilotLoggingInfo(gridReference)
    return self._errorReport('Expected string for pilot reference')
#############################################################################
def getJobPilots(self, jobID):
    """Extract the list of submitted pilots and their status for a given
    jobID from the WMS.  Useful information is printed to the screen.

    >>> gLogger.notice(dirac.getJobPilots())
    {'OK': True, 'Value': {PilotID:{StatusDict}}}

    :param jobID: JobID
    :type jobID: integer or string
    :return: S_OK,S_ERROR
    """
    if isinstance(jobID, six.string_types):
        # Accept numeric strings; reject anything not convertible.
        try:
            jobID = int(jobID)
        except Exception as exc:
            return self._errorReport(str(exc), 'Expected integer or string for existing jobID')
    pilots = PilotManagerClient().getPilots(jobID)
    if pilots['OK']:
        gLogger.notice(self.pPrint.pformat(pilots['Value']))
    return pilots
#############################################################################
def getPilotSummary(self, startDate='', endDate=''):
    """Retrieve a summary of the pilots per CE from the WMS.  The summary is
    printed to the log, and the full dictionary of results is also returned.

    >>> gLogger.notice(dirac.getPilotSummary())
    {'OK': True, 'Value': {CE:{Status:Count}}}

    :param startDate: start of the period
    :param endDate: end of the period
    :return: S_OK,S_ERROR
    """
    result = PilotManagerClient().getPilotSummary(startDate, endDate)
    if not result['OK']:
        return result
    ceDict = result['Value']
    # Header width equals the largest number of states seen for any CE.
    maxStates = 0
    for summary in ceDict.values():
        maxStates = max(maxStates, len(summary))
    headers = 'CE'.ljust(28) + ('Status'.ljust(12) + 'Count'.ljust(12)) * maxStates
    gLogger.notice(headers)
    for ce, summary in ceDict.items():
        line = ce.ljust(28)
        for state in sorted(summary):
            line += state.ljust(12) + str(summary[state]).ljust(12)
        gLogger.notice(line)
    return result
#############################################################################
def setSiteProtocols(self, site, protocolsList, printOutput=False):
    """
    Allows to set the defined protocols for each SE for a given site.

    :param site: site name; the part before the first '.' is used as the grid name
    :param protocolsList: list of protocol names; each must already appear in
        /Resources/StorageElements/DefaultProtocols
    :param printOutput: if True, print the outcome of the CS commit
    :return: S_OK,S_ERROR
    """
    result = self._checkSiteIsValid(site)
    if not result['OK']:
        return result
    # SEs for a site live under /Resources/Sites/<grid>/<site>/SE
    siteSection = '/Resources/Sites/%s/%s/SE' % (site.split('.')[0], site)
    siteSEs = gConfig.getValue(siteSection, [])
    if not siteSEs:
        return S_ERROR('No SEs found for site %s in section %s' % (site, siteSection))
    defaultProtocols = gConfig.getValue('/Resources/StorageElements/DefaultProtocols', [])
    self.log.verbose('Default list of protocols are', ', '.join(defaultProtocols))
    # Refuse any requested protocol that is not in the default list.
    for protocol in protocolsList:
        if protocol not in defaultProtocols:
            return S_ERROR('Requested to set protocol %s in list but %s is not '
                           'in default list of protocols:\n%s' % (protocol, protocol, ', '.join(defaultProtocols)))
    modifiedCS = False
    # Interactive confirmation before mutating the CS.
    result = promptUser('Do you want to add the following default protocols:'
                        ' %s for SE(s):\n%s' % (', '.join(protocolsList), ', '.join(siteSEs)))
    if not result['OK']:
        return result
    if result['Value'].lower() != 'y':
        self.log.always('No protocols will be added')
        return S_OK()
    for se in siteSEs:
        sections = gConfig.getSections('/Resources/StorageElements/%s/' % (se))
        if not sections['OK']:
            return sections
        for section in sections['Value']:
            # Only access points declaring ProtocolName == 'SRM2' are updated.
            if gConfig.getValue('/Resources/StorageElements/%s/%s/ProtocolName' % (se, section), '') == 'SRM2':
                path = '/Resources/StorageElements/%s/%s/ProtocolsList' % (se, section)
                self.log.verbose('Setting %s to %s' % (path, ', '.join(protocolsList)))
                result = self.csSetOption(path, ', '.join(protocolsList))
                if not result['OK']:
                    return result
                modifiedCS = True
    if modifiedCS:
        # Push all accumulated option changes in a single commit
        # (False: users section not sorted).
        result = self.csCommitChanges(False)
        if not result['OK']:
            return S_ERROR('CS Commit failed with message = %s' % (result['Message']))
        else:
            if printOutput:
                gLogger.notice('Successfully committed changes to CS')
    else:
        if printOutput:
            gLogger.notice('No modifications to CS required')
    return S_OK()
#############################################################################
def csSetOption(self, optionPath, optionValue):
    """Set the value of an option in the CS at the given path."""
    outcome = self.csAPI.setOption(optionPath, optionValue)
    return outcome
#############################################################################
def csSetOptionComment(self, optionPath, comment):
    """
    Set the comment attached to an existing option in the CS
    (only the comment changes, not the option value).
    """
    return self.csAPI.setOptionComment(optionPath, comment)
#############################################################################
def csModifyValue(self, optionPath, newValue):
    """Modify an existing value in the CS at the given option path."""
    outcome = self.csAPI.modifyValue(optionPath, newValue)
    return outcome
#############################################################################
def csRegisterUser(self, username, properties):
    """
    Registers a user in the CS.

    :param username: username of the user
    :param properties: dict containing:
        - DN
        - groups : list/tuple of groups the user belongs to
        - <others> : more properties of the user, like mail
    :return: whatever CSAPI.addUser returns (S_OK/S_ERROR convention assumed
        - confirm against CSAPI)
    """
    return self.csAPI.addUser(username, properties)
#############################################################################
def csDeleteUser(self, user):
    """Delete a user (or a list of users) from the CS."""
    outcome = self.csAPI.deleteUsers(user)
    return outcome
#############################################################################
def csModifyUser(self, username, properties, createIfNonExistant=False):
    """Modify a user in the CS.  Takes the same params as addUser and
    applies the changes."""
    outcome = self.csAPI.modifyUser(username, properties, createIfNonExistant)
    return outcome
#############################################################################
def csListUsers(self, group=False):
    """List the users in the CS; with no group given, return all users."""
    outcome = self.csAPI.listUsers(group)
    return outcome
#############################################################################
def csDescribeUsers(self, mask=False):
    """List users and their properties in the CS.
    If a mask is given, only users in the mask are returned."""
    outcome = self.csAPI.describeUsers(mask)
    return outcome
#############################################################################
def csModifyGroup(self, groupname, properties, createIfNonExistant=False):
    """
    Modify a group in the CS. Takes the same params as in addGroup and applies
    the changes
    """
    return self.csAPI.modifyGroup(groupname, properties, createIfNonExistant)
#############################################################################
def csListHosts(self):
    """List the hosts registered in the CS."""
    outcome = self.csAPI.listHosts()
    return outcome
#############################################################################
def csDescribeHosts(self, mask=False):
    """Get extended info for the hosts in the CS (optionally filtered by mask)."""
    outcome = self.csAPI.describeHosts(mask)
    return outcome
#############################################################################
def csModifyHost(self, hostname, properties, createIfNonExistant=False):
    """Modify a host in the CS.  Takes the same params as addHost and
    applies the changes."""
    outcome = self.csAPI.modifyHost(hostname, properties, createIfNonExistant)
    return outcome
#############################################################################
def csListGroups(self):
    """List the groups defined in the CS."""
    outcome = self.csAPI.listGroups()
    return outcome
#############################################################################
def csDescribeGroups(self, mask=False):
    """List groups and their properties in the CS.
    If a mask is given, only groups in the mask are returned."""
    outcome = self.csAPI.describeGroups(mask)
    return outcome
#############################################################################
def csSyncUsersWithCFG(self, usersCFG):
    """Synchronize the CS users section with the contents of the given CFG."""
    outcome = self.csAPI.syncUsersWithCFG(usersCFG)
    return outcome
#############################################################################
def csCommitChanges(self, sortUsers=True):
    """
    Commit the cached changes to the CS.

    :param sortUsers: if True, sort the users section on commit
    :return: result of CSAPI.commitChanges
    """
    # Bug fix: the parameter was previously ignored (commitChanges was always
    # called with sortUsers=False); now the caller's choice is honoured.
    return self.csAPI.commitChanges(sortUsers=sortUsers)
#############################################################################
def sendMail(self, address, subject, body, fromAddress=None, localAttempt=True, html=False):
    """Send a mail with the given subject and body to the specified address."""
    client = NotificationClient()
    return client.sendMail(address, subject, body, fromAddress, localAttempt, html)
#############################################################################
def sendSMS(self, userName, body, fromAddress=None):
    """Send an SMS with the given body to the specified user.

    Bodies longer than 160 characters are rejected.
    """
    if len(body) > 160:
        return S_ERROR('Exceeded maximum SMS length of 160 characters')
    client = NotificationClient()
    return client.sendSMS(userName, body, fromAddress)
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| yujikato/DIRAC | src/DIRAC/Interfaces/API/DiracAdmin.py | Python | gpl-3.0 | 29,759 | [
"DIRAC"
] | 7d0ce51b2bfef9fe21fa95db571cd550aa958e06f6196219bc04eb6513416940 |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the semantics of
jumpahead(n) are weakened to simply jump to another distant state and rely
on the large period to avoid overlapping sequences.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
#from warnings import warn as _warn
#from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
#from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
#from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
#from os import urandom as _urandom
#from binascii import hexlify as _hexlify
#import hashlib as _hashlib
# Public API re-exported at module level (mirrors CPython's random module;
# several of these are bound to _notimplemented stubs further below).
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
           "randrange","shuffle","normalvariate","lognormvariate",
           "expovariate","vonmisesvariate","gammavariate","triangular",
           "gauss","betavariate","paretovariate","weibullvariate",
           "getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
           "SystemRandom"]
# Import _random wrapper from grumpy std lib.
# It is used as a replacement for the CPython random generator.
import _random
# NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
# TWOPI = 2.0*_pi
# LOG4 = _log(4.0)
# SG_MAGICCONST = 1.0 + _log(4.5)
# Bits of randomness produced per random() call, and its reciprocal,
# as exposed by the underlying generator.
BPF = _random.BPF
RECIP_BPF = _random.RECIP_BPF
class Random(_random.GrumpyRandom):
    """Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state.  Especially useful for multi-threaded programs, creating
    a different instance of Random for each thread, and using the jumpahead()
    method to ensure that the generated sequences seen by each thread don't
    overlap.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods:  random(), seed(), getstate(), setstate() and jumpahead().
    Optionally, implement a getrandbits() method so that randrange() can cover
    arbitrarily large ranges.
    """

    VERSION = 3     # used by getstate/setstate

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """
        self.seed(x)
        # Cached second value produced by the gauss() Box-Muller pair.
        self.gauss_next = None

    def seed(self, a=None):
        """Initialize internal state of the random number generator.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        If a is not None or is an int or long, hash(a) is used instead.
        Hash values for some types are nondeterministic when the
        PYTHONHASHSEED environment variable is enabled.
        """
        super(Random, self).seed(a)
        # Discard any cached gauss() value so reseeding restarts the stream.
        self.gauss_next = None
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.

## -------------------- integer methods  -------------------

def randrange(self, start, stop=None, step=1, _int=int, _maxwidth=1L<<BPF):
    """Choose a random item from range(start, stop[, step]).

    This fixes the problem with randint() which includes the
    endpoint; in Python this is usually not what you want.
    """
    # This code is a bit messy to make it fast for the
    # common case while still doing adequate error checking.
    istart = _int(start)
    if istart != start:
        raise ValueError, "non-integer arg 1 for randrange()"
    if stop is None:
        # One-argument form: randrange(stop) -> value in [0, istart).
        if istart > 0:
            if istart >= _maxwidth:
                # Ranges wider than the generator's resolution (BPF bits)
                # go through _randbelow() so every value stays reachable.
                return self._randbelow(istart)
            return _int(self.random() * istart)
        raise ValueError, "empty range for randrange()"

    # stop argument supplied.
    istop = _int(stop)
    if istop != stop:
        raise ValueError, "non-integer stop for randrange()"
    width = istop - istart
    if step == 1 and width > 0:
        # Note that
        #  int(istart + self.random()*width)
        # instead would be incorrect.  For example, consider istart
        # = -2 and istop = 0.  Then the guts would be in
        # -2.0 to 0.0 exclusive on both ends (ignoring that random()
        # might return 0.0), and because int() truncates toward 0, the
        # final result would be -1 or 0 (instead of -2 or -1).
        #  istart + int(self.random()*width)
        # would also be incorrect, for a subtler reason:  the RHS
        # can return a long, and then randrange() would also return
        # a long, but we're supposed to return an int (for backward
        # compatibility).
        if width >= _maxwidth:
            return _int(istart + self._randbelow(width))
        return _int(istart + _int(self.random()*width))
    if step == 1:
        raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)

    # Non-unit step argument supplied.
    istep = _int(step)
    if istep != step:
        raise ValueError, "non-integer step for randrange()"
    # n = number of distinct values in the stepped range.
    if istep > 0:
        n = (width + istep - 1) // istep
    elif istep < 0:
        n = (width + istep + 1) // istep
    else:
        raise ValueError, "zero step for randrange()"

    if n <= 0:
        raise ValueError, "empty range for randrange()"

    if n >= _maxwidth:
        return istart + istep*self._randbelow(n)
    return istart + istep*_int(self.random() * n)
def randint(self, a, b):
    """Return a random integer N such that a <= N <= b (both ends included)."""
    upper_exclusive = b + 1
    return self.randrange(a, upper_exclusive)
## -------------------- sequence methods  -------------------

def choice(self, seq):
    """Return a uniformly random element from the non-empty sequence *seq*."""
    # An empty sequence raises IndexError via the out-of-range index.
    position = int(self.random() * len(seq))
    return seq[position]
def shuffle(self, x, random=None):
    """x, random=random.random -> shuffle list x in place; return None.

    Optional arg random is a 0-argument function returning a random
    float in [0.0, 1.0); by default, the standard random.random.
    """
    if random is None:
        random = self.random
    _int = int  # local alias: faster lookup inside the loop
    # Fisher-Yates shuffle, walking from the end of the list backwards.
    for i in reversed(xrange(1, len(x))):
        # pick an element in x[:i+1] with which to exchange x[i]
        j = _int(random() * (i+1))
        x[i], x[j] = x[j], x[i]
# def sample(self, population, k):
# """Chooses k unique random elements from a population sequence.
# Returns a new list containing elements from the population while
# leaving the original population unchanged. The resulting list is
# in selection order so that all sub-slices will also be valid random
# samples. This allows raffle winners (the sample) to be partitioned
# into grand prize and second place winners (the subslices).
# Members of the population need not be hashable or unique. If the
# population contains repeats, then each occurrence is a possible
# selection in the sample.
# To choose a sample in a range of integers, use xrange as an argument.
# This is especially fast and space efficient for sampling from a
# large population: sample(xrange(10000000), 60)
# """
# # Sampling without replacement entails tracking either potential
# # selections (the pool) in a list or previous selections in a set.
# # When the number of selections is small compared to the
# # population, then tracking selections is efficient, requiring
# # only a small set and an occasional reselection. For
# # a larger number of selections, the pool tracking method is
# # preferred since the list takes less space than the
# # set and it doesn't suffer from frequent reselections.
# n = len(population)
# if not 0 <= k <= n:
# raise ValueError("sample larger than population")
# random = self.random
# _int = int
# result = [None] * k
# setsize = 21 # size of a small set minus size of an empty list
# if k > 5:
# setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
# if n <= setsize or hasattr(population, "keys"):
# # An n-length list is smaller than a k-length set, or this is a
# # mapping type so the other algorithm wouldn't work.
# pool = list(population)
# for i in xrange(k): # invariant: non-selected at [0,n-i)
# j = _int(random() * (n-i))
# result[i] = pool[j]
# pool[j] = pool[n-i-1] # move non-selected item into vacancy
# else:
# try:
# selected = set()
# selected_add = selected.add
# for i in xrange(k):
# j = _int(random() * n)
# while j in selected:
# j = _int(random() * n)
# selected_add(j)
# result[i] = population[j]
# except (TypeError, KeyError): # handle (at least) sets
# if isinstance(population, list):
# raise
# return self.sample(tuple(population), k)
# return result
## -------------------- real-valued distributions  -------------------

## -------------------- uniform distribution -------------------

def uniform(self, a, b):
    """Return a random float in [a, b) or [a, b] depending on rounding."""
    width = b - a
    return a + width * self.random()
## -------------------- triangular --------------------
# def triangular(self, low=0.0, high=1.0, mode=None):
# """Triangular distribution.
# Continuous distribution bounded by given lower and upper limits,
# and having a given mode value in-between.
# http://en.wikipedia.org/wiki/Triangular_distribution
# """
# u = self.random()
# try:
# c = 0.5 if mode is None else (mode - low) / (high - low)
# except ZeroDivisionError:
# return low
# if u > c:
# u = 1.0 - u
# c = 1.0 - c
# low, high = high, low
# return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
# def normalvariate(self, mu, sigma):
# """Normal distribution.
# mu is the mean, and sigma is the standard deviation.
# """
# # mu = mean, sigma = standard deviation
# # Uses Kinderman and Monahan method. Reference: Kinderman,
# # A.J. and Monahan, J.F., "Computer generation of random
# # variables using the ratio of uniform deviates", ACM Trans
# # Math Software, 3, (1977), pp257-260.
# random = self.random
# while 1:
# u1 = random()
# u2 = 1.0 - random()
# z = NV_MAGICCONST*(u1-0.5)/u2
# zz = z*z/4.0
# if zz <= -_log(u2):
# break
# return mu + z*sigma
## -------------------- lognormal distribution --------------------
# def lognormvariate(self, mu, sigma):
# """Log normal distribution.
# If you take the natural logarithm of this distribution, you'll get a
# normal distribution with mean mu and standard deviation sigma.
# mu can have any value, and sigma must be greater than zero.
# """
# return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
# def expovariate(self, lambd):
# """Exponential distribution.
# lambd is 1.0 divided by the desired mean. It should be
# nonzero. (The parameter would be called "lambda", but that is
# a reserved word in Python.) Returned values range from 0 to
# positive infinity if lambd is positive, and from negative
# infinity to 0 if lambd is negative.
# """
# # lambd: rate lambd = 1/mean
# # ('lambda' is a Python reserved word)
# # we use 1-random() instead of random() to preclude the
# # possibility of taking the log of zero.
# return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
# def vonmisesvariate(self, mu, kappa):
# """Circular data distribution.
# mu is the mean angle, expressed in radians between 0 and 2*pi, and
# kappa is the concentration parameter, which must be greater than or
# equal to zero. If kappa is equal to zero, this distribution reduces
# to a uniform random angle over the range 0 to 2*pi.
# """
# # mu: mean angle (in radians between 0 and 2*pi)
# # kappa: concentration parameter kappa (>= 0)
# # if kappa = 0 generate uniform random angle
# # Based upon an algorithm published in: Fisher, N.I.,
# # "Statistical Analysis of Circular Data", Cambridge
# # University Press, 1993.
# # Thanks to Magnus Kessler for a correction to the
# # implementation of step 4.
# random = self.random
# if kappa <= 1e-6:
# return TWOPI * random()
# s = 0.5 / kappa
# r = s + _sqrt(1.0 + s * s)
# while 1:
# u1 = random()
# z = _cos(_pi * u1)
# d = z / (r + z)
# u2 = random()
# if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
# break
# q = 1.0 / r
# f = (q + z) / (1.0 + q * z)
# u3 = random()
# if u3 > 0.5:
# theta = (mu + _acos(f)) % TWOPI
# else:
# theta = (mu - _acos(f)) % TWOPI
# return theta
## -------------------- gamma distribution --------------------
# def gammavariate(self, alpha, beta):
# """Gamma distribution. Not the gamma function!
# Conditions on the parameters are alpha > 0 and beta > 0.
# The probability distribution function is:
# x ** (alpha - 1) * math.exp(-x / beta)
# pdf(x) = --------------------------------------
# math.gamma(alpha) * beta ** alpha
# """
# # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# # Warning: a few older sources define the gamma distribution in terms
# # of alpha > -1.0
# if alpha <= 0.0 or beta <= 0.0:
# raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
# random = self.random
# if alpha > 1.0:
# # Uses R.C.H. Cheng, "The generation of Gamma
# # variables with non-integral shape parameters",
# # Applied Statistics, (1977), 26, No. 1, p71-74
# ainv = _sqrt(2.0 * alpha - 1.0)
# bbb = alpha - LOG4
# ccc = alpha + ainv
# while 1:
# u1 = random()
# if not 1e-7 < u1 < .9999999:
# continue
# u2 = 1.0 - random()
# v = _log(u1/(1.0-u1))/ainv
# x = alpha*_exp(v)
# z = u1*u1*u2
# r = bbb+ccc*v-x
# if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
# return x * beta
# elif alpha == 1.0:
# # expovariate(1)
# u = random()
# while u <= 1e-7:
# u = random()
# return -_log(u) * beta
# else: # alpha is between 0 and 1 (exclusive)
# # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
# while 1:
# u = random()
# b = (_e + alpha)/_e
# p = b*u
# if p <= 1.0:
# x = p ** (1.0/alpha)
# else:
# x = -_log((b-p)/alpha)
# u1 = random()
# if p > 1.0:
# if u1 <= x ** (alpha - 1.0):
# break
# elif u1 <= _exp(-x):
# break
# return x * beta
## -------------------- Gauss (faster alternative) --------------------
# def gauss(self, mu, sigma):
# """Gaussian distribution.
# mu is the mean, and sigma is the standard deviation. This is
# slightly faster than the normalvariate() function.
# Not thread-safe without a lock around calls.
# """
# # When x and y are two variables from [0, 1), uniformly
# # distributed, then
# #
# # cos(2*pi*x)*sqrt(-2*log(1-y))
# # sin(2*pi*x)*sqrt(-2*log(1-y))
# #
# # are two *independent* variables with normal distribution
# # (mu = 0, sigma = 1).
# # (Lambert Meertens)
# # (corrected version; bug discovered by Mike Miller, fixed by LM)
# # Multithreading note: When two threads call this function
# # simultaneously, it is possible that they will receive the
# # same return value. The window is very small though. To
# # avoid this, you have to use a lock around all calls. (I
# # didn't want to slow this down in the serial case by using a
# # lock here.)
# random = self.random
# z = self.gauss_next
# self.gauss_next = None
# if z is None:
# x2pi = random() * TWOPI
# g2rad = _sqrt(-2.0 * _log(1.0 - random()))
# z = _cos(x2pi) * g2rad
# self.gauss_next = _sin(x2pi) * g2rad
# return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
# def betavariate(self, alpha, beta):
# """Beta distribution.
# Conditions on the parameters are alpha > 0 and beta > 0.
# Returned values range between 0 and 1.
# """
# # This version due to Janne Sinkkonen, and matches all the std
# # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
# y = self.gammavariate(alpha, 1.)
# if y == 0:
# return 0.0
# else:
# return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
# def paretovariate(self, alpha):
# """Pareto distribution. alpha is the shape parameter."""
# # Jain, pg. 495
# u = 1.0 - self.random()
# return 1.0 / pow(u, 1.0/alpha)
## -------------------- Weibull --------------------
# def weibullvariate(self, alpha, beta):
# """Weibull distribution.
# alpha is the scale parameter and beta is the shape parameter.
# """
# # Jain, pg. 499; bug fix courtesy Bill Arms
# u = 1.0 - self.random()
# return alpha * pow(-_log(u), 1.0/beta)
## -------------------- test program --------------------
def _test_generator(n, func, args):
    """Call func(*args) n times; print timing and simple statistics."""
    import time
    print n, 'times', func.__name__
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = total/n
    # NOTE(review): _sqrt comes only from the commented-out math import at the
    # top of this module, so this line would raise NameError if executed.
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
          (avg, stddev, smallest, largest)
def _test(N=2000):
    """Exercise each exported distribution N times via _test_generator."""
    cases = [
        (random, ()),
        (normalvariate, (0.0, 1.0)),
        (lognormvariate, (0.0, 1.0)),
        (vonmisesvariate, (0.0, 1.0)),
        (gammavariate, (0.01, 1.0)),
        (gammavariate, (0.1, 1.0)),
        (gammavariate, (0.1, 2.0)),
        (gammavariate, (0.5, 1.0)),
        (gammavariate, (0.9, 1.0)),
        (gammavariate, (1.0, 1.0)),
        (gammavariate, (2.0, 1.0)),
        (gammavariate, (20.0, 1.0)),
        (gammavariate, (200.0, 1.0)),
        (gauss, (0.0, 1.0)),
        (betavariate, (3.0, 3.0)),
        (triangular, (0.0, 1.0, 1.0/3.0)),
    ]
    for func, args in cases:
        _test_generator(N, func, args)
# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
getrandbits = _inst.getrandbits
getstate = _inst.getstate
setstate = _inst.setstate
uniform = _inst.uniform

def _notimplemented(*args, **kwargs):
    """Stand-in for functionality not yet ported to this runtime."""
    raise NotImplementedError

# The following module-level functions are unsupported in this port and
# raise NotImplementedError when called (note: Random.shuffle exists on the
# class, but the module-level shuffle is still stubbed out here).
shuffle = _notimplemented
choices = _notimplemented
sample = _notimplemented
triangular = _notimplemented
normalvariate = _notimplemented
lognormvariate = _notimplemented
expovariate = _notimplemented
vonmisesvariate = _notimplemented
gammavariate = _notimplemented
gauss = _notimplemented
betavariate = _notimplemented
paretovariate = _notimplemented
weibullvariate = _notimplemented

if __name__ == '__main__':
    pass
#_test() | google/grumpy | third_party/stdlib/random.py | Python | apache-2.0 | 22,864 | [
"Gaussian"
] | d1f29fd27da70591b1ba3054551d81b271dc4d1986bee32a1fd818e4134b02a7 |
#!/usr/bin/env python
# Copyright (c) 2015, Matt Johansen
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Python script to parse through a Web log file and return the number of requests and unique IPs in a given time period"""
import sys
import fileinput
import re
import time
import datetime
import optparse
from collections import Counter

__version__ = '1.0.1'
__author__ = 'Matt Johansen <@mattjay>'
__license__ = 'BSD'

desc = "Python script to parse through a Web log file and return the number of requests and unique IPs in a given time period. It takes an Apache log file via stdin, the number of hours back you'd like to look -t (Default = 24 hours), and if you want the actual list of unique IPs -i"

# Command-line interface (optparse: this is a Python 2 script).
parser = optparse.OptionParser(description=desc)
parser.add_option('-t', help='number of hours back you want to look', dest='hours', default=24, type=int, action='store')
parser.add_option('-i', '--ips', help='shows list of unique IPs', dest='bool', default=False, action='store_true')
# NOTE(review): the -n option is parsed but never used below.
parser.add_option('-n', help='List the n most common IPs to visit in the given time period', dest='ips', default=0, type=int, action='store')
parser.add_option('-f', '--files', help='list of web log file paths', dest='files', type='string', action='store')
(opts, args) = parser.parse_args()

# NOTE(review): -f has no default, so opts.files is None when the flag is
# omitted and the split() below raises AttributeError.
filenames = [x.strip() for x in opts.files.split(',')]

# Cut-off timestamp: "now" minus the requested number of hours, round-tripped
# through the Apache common-log time format so it compares like the log lines.
hoursAgo = datetime.datetime.today() - datetime.timedelta(hours = opts.hours)
apacheHoursAgo = hoursAgo.strftime('%d/%b/%Y:%H:%M:%S')
t2 = time.strptime(apacheHoursAgo.split()[0], '%d/%b/%Y:%H:%M:%S')
d2 = datetime.datetime(*t2[:6])

requests = []        # timestamps of all requests newer than the cut-off
ips = Counter()      # request count per client field (first token of the line)

for f in filenames:
    with open(f) as fi:
        for line in fi:
            # Tokenize the line into quoted strings, [bracketed] fields and
            # bare words; m[0] is the first field, m[3] the timestamp field.
            m = map(''.join, re.findall(r'\"(.*?)\"|\[(.*?)\]|(\S+)', line))
            # NOTE(review): re.findall always returns a list, so this None
            # comparison can never be false.
            if m != None:
                t1 = time.strptime(m[3].split()[0], '%d/%b/%Y:%H:%M:%S')
                d1 = datetime.datetime(*t1[:6])
                if d1 > d2:
                    requests.append(d1)
                    ips[m[0]] += 1

# Optional ASCII-art banner; pyfiglet is a third-party extra.
try:
    from pyfiglet import Figlet
    fig = Figlet()
    print(fig.renderText("Web Log Stats"))
except ImportError:
    print "Install pyfiglet for a (useless) pretty cool header!"

print "Total Unique IP Addresses Since", d2, " : ", len(ips)
print "Total Requests Since", d2, " : ", len(requests)
if opts.bool == True:
    print "Unique Ips Since", d2, " : ", ips.keys()
if opts.ips > 0:
print "Top", opts.ips, "Visitor(s) :", ips.most_common(opts.ips)
try:
from ascii_graph import Pyasciigraph
graph = Pyasciigraph()
for line in graph.graph("Most Common Ips", ips.most_common(opts.ips)):
print line
except ImportError:
print "Install ascii_graph for a cool graph!"
| mattjay/WebLogStats | webStats.py | Python | mit | 3,876 | [
"VisIt"
] | b0b30a6febd0a50fe33840f02635abe1ee98ddeb04be51c32bb0d917450c2c17 |
"""Cliches are cliché."""
from proselint.tools import existence_check, memoize
@memoize
def check_cliches_garner(text):
    """Check the text.

    source: Garner's Modern American Usage
    source_url: http://bit.ly/1T4alrY
    """
    err = "cliches.garner"
    msg = "'{}' is cliché."
    # Phrases to flag.  Entries are used as regex fragments by
    # existence_check, so a few deliberately contain regex syntax
    # (e.g. "thought leaders?" and "whet (?:the|your) appetite").
    cliches = [
        "a fate worse than death",
        "alas and alack",
        "at the end of the day",
        "bald-faced lie",
        "between a rock and a hard place",
        "between Scylla and Charybdis",
        "between the devil and the deep blue sea",
        "betwixt and between",
        "blissful ignorance",
        "blow a fuse",
        "bulk large",
        "but that's another story",
        "cast aspersions",
        "chase a red herring",
        "comparing apples and oranges",
        "compleat",
        "conspicuous by its absence",
        "crystal clear",
        "cutting edge",
        "decision-making process",
        "dubious distinction",
        "duly authorized",
        "eyes peeled",
        "far be it from me",
        "fast and loose",
        "fills the bill",
        "first and foremost",
        "for free",
        "get with the program",
        "gilding the lily",
        "have a short fuse",
        "he's got his hands full",
        "his own worst enemy",
        "his work cut out for him",
        "hither and yon",
        "Hobson's choice",
        "horns of a dilemma",
        "if you catch my drift",
        "in light of",
        "in the final analysis",
        "in the last analysis",
        "innocent bystander",
        "it's not what you know, it's who you know",
        "last but not least",
        "make a mockery of",
        "male chauvinism",
        "moment of truth",
        "more in sorrow than in anger",
        "more sinned against than sinning",
        "my better half",
        "nip in the bud",
        "olden days",
        "on the same page",
        "presidential timber",
        "pulled no punches",
        "quantum jump",
        "quantum leap",
        "redound to one's credit",
        "redound to the benefit of",
        "sea change",
        "shirked his duties",
        "six of one, half a dozen of the other",
        "stretched to the breaking point",
        "than you can shake a stick at",
        "the cream of the crop",
        "the cream rises to the top",
        "the straw that broke the camel's back",
        "thick as thieves",
        "thinking outside the box",
        "thought leaders?",
        "throw the baby out with the bathwater",
        "various and sundry",
        "viable alternative",
        "wax eloquent",
        "wax poetic",
        "we've got a situation here",
        "whet (?:the|your) appetite",
        "wool pulled over our eyes",
        "writ large",
    ]
    # join=True asks existence_check to search all phrases in one combined
    # pass over the text (see proselint.tools.existence_check).
    return existence_check(text, cliches, err, msg, join=True)
@memoize
def check_cliches_write_good(text):
    """Check the text.

    source: write-good
    source_url: https://github.com/btford/write-good
    """
    err = "cliches.write_good"
    msg = "'{}' is a cliché."
    # Phrases to flag, taken from the write-good project.  Entries are used
    # as regex fragments by existence_check.
    # NOTE(review): "will wonders never cease?" ends in a bare '?', which a
    # regex engine reads as a quantifier, and "you're the boss " carries a
    # trailing space -- confirm both against upstream intent.
    cliches = [
        "a chip off the old block",
        "a clean slate",
        "a dark and stormy night",
        "a far cry",
        "a fine kettle of fish",
        "a loose cannon",
        "a penny saved is a penny earned",
        "a tough row to hoe",
        "a word to the wise",
        "ace in the hole",
        "acid test",
        "add insult to injury",
        "against all odds",
        "air your dirty laundry",
        "all fun and games",
        "all in a day's work",
        "all talk, no action",
        "all thumbs",
        "all your eggs in one basket",
        "all's fair in love and war",
        "all's well that ends well",
        "almighty dollar",
        "American as apple pie",
        "an axe to grind",
        "another day, another dollar",
        "armed to the teeth",
        "as luck would have it",
        "as old as time",
        "as the crow flies",
        "at loose ends",
        "at my wits end",
        "avoid like the plague",
        "babe in the woods",
        "back against the wall",
        "back in the saddle",
        "back to square one",
        "back to the drawing board",
        "bad to the bone",
        "badge of honor",
        "bald faced liar",
        "ballpark figure",
        "banging your head against a brick wall",
        "baptism by fire",
        "barking up the wrong tree",
        "bat out of hell",
        "be all and end all",
        "beat a dead horse",
        "beat around the bush",
        "been there, done that",
        "beggars can't be choosers",
        "behind the eight ball",
        "bend over backwards",
        "benefit of the doubt",
        "bent out of shape",
        "best thing since sliced bread",
        "bet your bottom dollar",
        "better half",
        "better late than never",
        "better mousetrap",
        "better safe than sorry",
        "between a rock and a hard place",
        "beyond the pale",
        "bide your time",
        "big as life",
        "big cheese",
        "big fish in a small pond",
        "big man on campus",
        "bigger they are the harder they fall",
        "bird in the hand",
        "bird's eye view",
        "birds and the bees",
        "birds of a feather flock together",
        "bit the hand that feeds you",
        "bite the bullet",
        "bite the dust",
        "bitten off more than he can chew",
        "black as coal",
        "black as pitch",
        "black as the ace of spades",
        "blast from the past",
        "bleeding heart",
        "blessing in disguise",
        "blind ambition",
        "blind as a bat",
        "blind leading the blind",
        "blood is thicker than water",
        "blood sweat and tears",
        "blow off steam",
        "blow your own horn",
        "blushing bride",
        "boils down to",
        "bolt from the blue",
        "bone to pick",
        "bored stiff",
        "bored to tears",
        "bottomless pit",
        "boys will be boys",
        "bright and early",
        "brings home the bacon",
        "broad across the beam",
        "broken record",
        "brought back to reality",
        "bull by the horns",
        "bull in a china shop",
        "burn the midnight oil",
        "burning question",
        "burning the candle at both ends",
        "burst your bubble",
        "bury the hatchet",
        "busy as a bee",
        "by hook or by crook",
        "call a spade a spade",
        "called onto the carpet",
        "calm before the storm",
        "can of worms",
        "can't cut the mustard",
        "can't hold a candle to",
        "case of mistaken identity",
        "cat got your tongue",
        "cat's meow",
        "caught in the crossfire",
        "caught red-handed",
        "checkered past",
        "chomping at the bit",
        "cleanliness is next to godliness",
        "clear as a bell",
        "clear as mud",
        "close to the vest",
        "cock and bull story",
        "cold shoulder",
        "come hell or high water",
        "cool as a cucumber",
        "cool, calm, and collected",
        "cost a king's ransom",
        "count your blessings",
        "crack of dawn",
        "crash course",
        "creature comforts",
        "cross that bridge when you come to it",
        "crushing blow",
        "cry like a baby",
        "cry me a river",
        "cry over spilt milk",
        "crystal clear",
        "curiosity killed the cat",
        "cut and dried",
        "cut through the red tape",
        "cut to the chase",
        "cute as a bugs ear",
        "cute as a button",
        "cute as a puppy",
        "cuts to the quick",
        "dark before the dawn",
        "day in, day out",
        "dead as a doornail",
        "devil is in the details",
        "dime a dozen",
        "divide and conquer",
        "dog and pony show",
        "dog days",
        "dog eat dog",
        "dog tired",
        "don't burn your bridges",
        "don't count your chickens",
        "don't look a gift horse in the mouth",
        "don't rock the boat",
        "don't step on anyone's toes",
        "don't take any wooden nickels",
        "down and out",
        "down at the heels",
        "down in the dumps",
        "down the hatch",
        "down to earth",
        "draw the line",
        "dressed to kill",
        "dressed to the nines",
        "drives me up the wall",
        "dull as dishwater",
        "dyed in the wool",
        "eagle eye",
        "ear to the ground",
        "early bird catches the worm",
        "easier said than done",
        "easy as pie",
        "eat your heart out",
        "eat your words",
        "eleventh hour",
        "even the playing field",
        "every dog has its day",
        "every fiber of my being",
        "everything but the kitchen sink",
        "eye for an eye",
        "face the music",
        "facts of life",
        "fair weather friend",
        "fall by the wayside",
        "fan the flames",
        "feast or famine",
        "feather your nest",
        "feathered friends",
        "few and far between",
        "fifteen minutes of fame",
        "filthy vermin",
        "fine kettle of fish",
        "fish out of water",
        "fishing for a compliment",
        "fit as a fiddle",
        "fit the bill",
        "fit to be tied",
        "flash in the pan",
        "flat as a pancake",
        "flip your lid",
        "flog a dead horse",
        "fly by night",
        "fly the coop",
        "follow your heart",
        "for all intents and purposes",
        "for the birds",
        "for what it's worth",
        "force of nature",
        "force to be reckoned with",
        "forgive and forget",
        "fox in the henhouse",
        "free and easy",
        "free as a bird",
        "fresh as a daisy",
        "full steam ahead",
        "fun in the sun",
        "garbage in, garbage out",
        "gentle as a lamb",
        "get a kick out of",
        "get a leg up",
        "get down and dirty",
        "get the lead out",
        "get to the bottom of",
        "get your feet wet",
        "gets my goat",
        "gilding the lily",
        "give and take",
        "go against the grain",
        "go at it tooth and nail",
        "go for broke",
        "go him one better",
        "go the extra mile",
        "go with the flow",
        "goes without saying",
        "good as gold",
        "good deed for the day",
        "good things come to those who wait",
        "good time was had by all",
        "good times were had by all",
        "greased lightning",
        "greek to me",
        "green thumb",
        "green-eyed monster",
        "grist for the mill",
        "growing like a weed",
        "hair of the dog",
        "hand to mouth",
        "happy as a clam",
        "happy as a lark",
        "hasn't a clue",
        "have a nice day",
        "have high hopes",
        "have the last laugh",
        "haven't got a row to hoe",
        "head honcho",
        "head over heels",
        "hear a pin drop",
        "heard it through the grapevine",
        "heart's content",
        "heavy as lead",
        "hem and haw",
        "high and dry",
        "high and mighty",
        "high as a kite",
        "hit paydirt",
        "hold your head up high",
        "hold your horses",
        "hold your own",
        "hold your tongue",
        "honest as the day is long",
        "horns of a dilemma",
        "horse of a different color",
        "hot under the collar",
        "hour of need",
        "I beg to differ",
        "icing on the cake",
        "if the shoe fits",
        "if the shoe were on the other foot",
        "in a jam",
        "in a jiffy",
        "in a nutshell",
        "in a pig's eye",
        "in a pinch",
        "in a word",
        "in hot water",
        "in the gutter",
        "in the nick of time",
        "in the thick of it",
        "in your dreams",
        "it ain't over till the fat lady sings",
        "it goes without saying",
        "it takes all kinds",
        "it takes one to know one",
        "it's a small world",
        "it's only a matter of time",
        "ivory tower",
        "Jack of all trades",
        "jockey for position",
        "jog your memory",
        "joined at the hip",
        "judge a book by its cover",
        "jump down your throat",
        "jump in with both feet",
        "jump on the bandwagon",
        "jump the gun",
        "jump to conclusions",
        "just a hop, skip, and a jump",
        "just the ticket",
        "justice is blind",
        "keep a stiff upper lip",
        "keep an eye on",
        "keep it simple, stupid",
        "keep the home fires burning",
        "keep up with the Joneses",
        "keep your chin up",
        "keep your fingers crossed",
        "kick the bucket",
        "kick up your heels",
        "kick your feet up",
        "kid in a candy store",
        "kill two birds with one stone",
        "kiss of death",
        "knock it out of the park",
        "knock on wood",
        "knock your socks off",
        "know him from Adam",
        "know the ropes",
        "know the score",
        "knuckle down",
        "knuckle sandwich",
        "knuckle under",
        "labor of love",
        "ladder of success",
        "land on your feet",
        "lap of luxury",
        "last but not least",
        "last hurrah",
        "last-ditch effort",
        "law of the jungle",
        "law of the land",
        "lay down the law",
        "leaps and bounds",
        "let sleeping dogs lie",
        "let the cat out of the bag",
        "let the good times roll",
        "let your hair down",
        "let's talk turkey",
        "letter perfect",
        "lick your wounds",
        "lies like a rug",
        "life's a bitch",
        "life's a grind",
        "light at the end of the tunnel",
        "lighter than a feather",
        "lighter than air",
        "like clockwork",
        "like father like son",
        "like taking candy from a baby",
        "like there's no tomorrow",
        "lion's share",
        "live and learn",
        "live and let live",
        "long and short of it",
        "long lost love",
        "look before you leap",
        "look down your nose",
        "look what the cat dragged in",
        "looking a gift horse in the mouth",
        "looks like death warmed over",
        "loose cannon",
        "lose your head",
        "lose your temper",
        "loud as a horn",
        "lounge lizard",
        "loved and lost",
        "low man on the totem pole",
        "luck of the draw",
        "luck of the Irish",
        "make hay while the sun shines",
        "make money hand over fist",
        "make my day",
        "make the best of a bad situation",
        "make the best of it",
        "make your blood boil",
        "man of few words",
        "man's best friend",
        "mark my words",
        "meaningful dialogue",
        "missed the boat on that one",
        "moment in the sun",
        "moment of glory",
        "moment of truth",
        "money to burn",
        "more power to you",
        "more than one way to skin a cat",
        "movers and shakers",
        "moving experience",
        "naked as a jaybird",
        "naked truth",
        "neat as a pin",
        "needle in a haystack",
        "needless to say",
        "neither here nor there",
        "never look back",
        "never say never",
        "nip and tuck",
        "nip it in the bud",
        "no guts, no glory",
        "no love lost",
        "no pain, no gain",
        "no skin off my back",
        "no stone unturned",
        "no time like the present",
        "no use crying over spilled milk",
        "nose to the grindstone",
        "not a hope in hell",
        "not a minute's peace",
        "not in my backyard",
        "not playing with a full deck",
        "not the end of the world",
        "not written in stone",
        "nothing to sneeze at",
        "nothing ventured nothing gained",
        "now we're cooking",
        "off the top of my head",
        "off the wagon",
        "off the wall",
        "old hat",
        "older and wiser",
        "older than dirt",
        "older than Methuselah",
        "on a roll",
        "on cloud nine",
        "on pins and needles",
        "on the bandwagon",
        "on the money",
        "on the nose",
        "on the rocks",
        "on the spot",
        "on the tip of my tongue",
        "on the wagon",
        "on thin ice",
        "once bitten, twice shy",
        "one bad apple doesn't spoil the bushel",
        "one born every minute",
        "one brick short",
        "one foot in the grave",
        "one in a million",
        "one red cent",
        "only game in town",
        "open a can of worms",
        "open and shut case",
        "open the flood gates",
        "opportunity doesn't knock twice",
        "out of pocket",
        "out of sight, out of mind",
        "out of the frying pan into the fire",
        "out of the woods",
        "out on a limb",
        "over a barrel",
        "over the hump",
        "pain and suffering",
        "pain in the",
        "panic button",
        "par for the course",
        "part and parcel",
        "party pooper",
        "pass the buck",
        "patience is a virtue",
        "pay through the nose",
        "penny pincher",
        "perfect storm",
        "pig in a poke",
        "pile it on",
        "pillar of the community",
        "pin your hopes on",
        "pitter patter of little feet",
        "plain as day",
        "plain as the nose on your face",
        "play by the rules",
        "play your cards right",
        "playing the field",
        "playing with fire",
        "pleased as punch",
        "plenty of fish in the sea",
        "point with pride",
        "poor as a church mouse",
        "pot calling the kettle black",
        "pretty as a picture",
        "pull a fast one",
        "pull your punches",
        "pulling your leg",
        "pure as the driven snow",
        "put it in a nutshell",
        "put one over on you",
        "put the cart before the horse",
        "put the pedal to the metal",
        "put your best foot forward",
        "put your foot down",
        "quick as a bunny",
        "quick as a lick",
        "quick as a wink",
        "quick as lightning",
        "quiet as a dormouse",
        "rags to riches",
        "raining buckets",
        "raining cats and dogs",
        "rank and file",
        "rat race",
        "reap what you sow",
        "red as a beet",
        "red herring",
        "reinvent the wheel",
        "rich and famous",
        "rings a bell",
        "ripe old age",
        "ripped me off",
        "rise and shine",
        "road to hell is paved with good intentions",
        "rob Peter to pay Paul",
        "roll over in the grave",
        "rub the wrong way",
        "ruled the roost",
        "running in circles",
        "sad but true",
        "sadder but wiser",
        "salt of the earth",
        "scared stiff",
        "scared to death",
        "sealed with a kiss",
        "second to none",
        "see eye to eye",
        "seen the light",
        "seize the day",
        "set the record straight",
        "set the world on fire",
        "set your teeth on edge",
        "sharp as a tack",
        "shoot for the moon",
        "shoot the breeze",
        "shot in the dark",
        "shoulder to the wheel",
        "sick as a dog",
        "sigh of relief",
        "signed, sealed, and delivered",
        "sink or swim",
        "six of one, half a dozen of another",
        "skating on thin ice",
        "slept like a log",
        "slinging mud",
        "slippery as an eel",
        "slow as molasses",
        "smart as a whip",
        "smooth as a baby's bottom",
        "sneaking suspicion",
        "snug as a bug in a rug",
        "sow wild oats",
        "spare the rod, spoil the child",
        "speak of the devil",
        "spilled the beans",
        "spinning your wheels",
        "spitting image of",
        "spoke with relish",
        "spread like wildfire",
        "spring to life",
        "squeaky wheel gets the grease",
        "stands out like a sore thumb",
        "start from scratch",
        "stick in the mud",
        "still waters run deep",
        "stitch in time",
        "stop and smell the roses",
        "straight as an arrow",
        "straw that broke the camel's back",
        "strong as an ox",
        "stubborn as a mule",
        "stuff that dreams are made of",
        "stuffed shirt",
        "sweating blood",
        "sweating bullets",
        "take a load off",
        "take one for the team",
        "take the bait",
        "take the bull by the horns",
        "take the plunge",
        "takes one to know one",
        "takes two to tango",
        "the more the merrier",
        "the real deal",
        "the real McCoy",
        "the red carpet treatment",
        "the same old story",
        "there is no accounting for taste",
        "thick as a brick",
        "thick as thieves",
        "thin as a rail",
        "think outside of the box",
        "third time's the charm",
        "this day and age",
        "this hurts me worse than it hurts you",
        "this point in time",
        "three sheets to the wind",
        "through thick and thin",
        "throw in the towel",
        "tie one on",
        "tighter than a drum",
        "time and time again",
        "time is of the essence",
        "tip of the iceberg",
        "tired but happy",
        "to coin a phrase",
        "to each his own",
        "to make a long story short",
        "to the best of my knowledge",
        "toe the line",
        "tongue in cheek",
        "too good to be true",
        "too hot to handle",
        "too numerous to mention",
        "touch with a ten foot pole",
        "tough as nails",
        "trial and error",
        "trials and tribulations",
        "tried and true",
        "trip down memory lane",
        "twist of fate",
        "two cents worth",
        "two peas in a pod",
        "ugly as sin",
        "under the counter",
        "under the gun",
        "under the same roof",
        "under the weather",
        "until the cows come home",
        "unvarnished truth",
        "up the creek",
        "uphill battle",
        "upper crust",
        "upset the applecart",
        "vain attempt",
        "vain effort",
        "vanquish the enemy",
        "vested interest",
        "waiting for the other shoe to drop",
        "wakeup call",
        "warm welcome",
        "watch your p's and q's",
        "watch your tongue",
        "watching the clock",
        "water under the bridge",
        "weather the storm",
        "weed them out",
        "week of Sundays",
        "went belly up",
        "wet behind the ears",
        "what goes around comes around",
        "what you see is what you get",
        "when it rains, it pours",
        "when push comes to shove",
        "when the cat's away",
        "when the going gets tough, the tough get going",
        "white as a sheet",
        "whole ball of wax",
        "whole hog",
        "whole nine yards",
        "wild goose chase",
        "will wonders never cease?",
        "wisdom of the ages",
        "wise as an owl",
        "wolf at the door",
        "words fail me",
        "work like a dog",
        "world weary",
        "worst nightmare",
        "worth its weight in gold",
        "wrong side of the bed",
        "yanking your chain",
        "yappy as a dog",
        "years young",
        "you are what you eat",
        "you can run but you can't hide",
        "you only live once",
        "you're the boss ",
        "young and foolish",
        "young and vibrant",
    ]
    # join=True asks existence_check to search all phrases in one combined
    # pass over the text (see proselint.tools.existence_check).
    return existence_check(text, cliches, err, msg, join=True)
@memoize
def check_cliches_gnu_diction(text):
    """Check the text.

    source: GNU diction
    source_url: https://directory.fsf.org/wiki/Diction
    """
    err = "cliches.gnu_diction"
    msg = "'{}' is a cliché."
    # Renamed from `list` to avoid shadowing the builtin of the same name,
    # and for consistency with the other checks in this module.
    # NOTE(review): "we are pleased to advice" is kept verbatim; presumably
    # it mirrors the upstream GNU diction entry -- confirm before "fixing".
    cliches = [
        "a matter of concern",
        "all things being equal",
        "as a last resort",
        "attached hereto",
        "by no means",
        "conspicuous by its absence",
        "easier said than done",
        "enclosed herewith",
        "if and when",
        "in reference to",
        "in short supply",
        "in the foreseeable future",
        "in the long run",
        "in the matter of",
        "it stands to reason",
        "many and diverse",
        "on the right track",
        "par for the course",
        "please feel free to",
        "pursuant to your request",
        "regarding the matter of",
        "slowly but surely",
        "this will acknowledge",
        "we are pleased to advice",
        "we regret to inform you",
        "we wish to state",
        "you are hereby advised that",
    ]
    # Unlike the other checks, this list is matched case-insensitively.
    return existence_check(text, cliches, err, msg, join=True, ignore_case=True)
| amperser/proselint | proselint/checks/cliches/misc.py | Python | bsd-3-clause | 25,334 | [
"BLAST",
"CRYSTAL"
] | 6ff7d19e6f185104cfe49411ec4f2b0f593e00a61d807a73b3721033a594f9ee |
#!/usr/bin/env python
#
# Copyright 2005, 2006, 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, modulation_utils
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import random, time, struct, sys, math
# from current dir
from transmit_path_lb import transmit_path
from receive_path_lb import receive_path
import fusb_options
class awgn_channel(gr.hier_block2):
    """Simulated channel: frequency offset followed by additive Gaussian noise.

    One complex input, one complex output.  The input is mixed with a
    complex sinusoid at `frequency_offset` (generated with a sample rate of
    1, i.e. normalized frequency) and then summed with Gaussian noise of
    amplitude `noise_voltage`.  When `seed` is true the noise source is
    seeded from the wall clock so each run differs; otherwise the default,
    repeatable seed is used.
    NOTE(review): `sample_rate` is accepted but never used here -- confirm
    whether the offset should instead be normalized by it.
    """

    def __init__(self, sample_rate, noise_voltage, frequency_offset, seed=False):
        gr.hier_block2.__init__(self, "awgn_channel",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
        # Create the Gaussian noise source
        if not seed:
            self.noise = gr.noise_source_c(gr.GR_GAUSSIAN, noise_voltage)
        else:
            rseed = int(time.time())
            self.noise = gr.noise_source_c(gr.GR_GAUSSIAN, noise_voltage, rseed)
        self.adder = gr.add_cc()
        # Create the frequency offset
        self.offset = gr.sig_source_c(1, gr.GR_SIN_WAVE,
                                      frequency_offset, 1.0, 0.0)
        self.mixer = gr.multiply_cc()
        # Connect the components: in -> mixer (x offset) -> adder (+ noise) -> out
        self.connect(self, (self.mixer, 0))
        self.connect(self.offset, (self.mixer, 1))
        self.connect(self.mixer, (self.adder, 0))
        self.connect(self.noise, (self.adder, 1))
        self.connect(self.adder, self)
class my_top_block(gr.top_block):
    """Loopback flowgraph: transmit path -> simulated channel -> receive path."""

    def __init__(self, mod_class, demod_class, rx_callback, options):
        gr.top_block.__init__(self)
        channelon = True;
        # Derive the noise amplitude from the requested SNR (in dB) and the
        # transmit amplitude: SNR = signal_power / noise_power, and the
        # noise source takes an amplitude (voltage), not a power.
        SNR = 10.0**(options.snr/10.0)
        frequency_offset = options.frequency_offset
        power_in_signal = abs(options.tx_amplitude)**2
        noise_power = power_in_signal/SNR
        noise_voltage = math.sqrt(noise_power)
        self.txpath = transmit_path(mod_class, options)
        # The throttle only regulates flow in the channel-less path below.
        self.throttle = gr.throttle(gr.sizeof_gr_complex, options.sample_rate)
        self.rxpath = receive_path(demod_class, rx_callback, options)
        if channelon:
            self.channel = awgn_channel(options.sample_rate, noise_voltage,
                                        frequency_offset, options.seed)
            if options.discontinuous:
                # Burst mode: mux packet bursts with long runs of zero samples.
                z = 20000*[0,]
                self.zeros = gr.vector_source_c(z, True)
                # Presumably five maximum-size framed packets, in bits -- 
                # confirm against the packet framing overhead.
                packet_size = 5*((4+8+4+1500+4) * 8)
                self.mux = gr.stream_mux(gr.sizeof_gr_complex, [packet_size-0, int(9e5)])
                # Connect components
                self.connect(self.txpath, (self.mux,0))
                self.connect(self.zeros, (self.mux,1))
                self.connect(self.mux, self.channel, self.rxpath)
            else:
                self.connect(self.txpath, self.channel, self.rxpath)
        else:
            # Connect components
            self.connect(self.txpath, self.throttle, self.rxpath)
# /////////////////////////////////////////////////////////////////////////////
#                                   main
# /////////////////////////////////////////////////////////////////////////////

def main():
    """Run the benchmark: push `megabytes` of numbered packets through the loopback."""
    global n_rcvd, n_right
    # Running totals, updated from the receive callback.
    n_rcvd = 0
    n_right = 0

    def rx_callback(ok, payload):
        # Invoked by the receive path for every decoded packet; `ok` is the
        # CRC verdict and the first two payload bytes carry the packet number.
        global n_rcvd, n_right
        (pktno,) = struct.unpack('!H', payload[0:2])
        n_rcvd += 1
        if ok:
            n_right += 1
        print "ok = %5s  pktno = %4d  n_rcvd = %4d  n_right = %4d" % (
            ok, pktno, n_rcvd, n_right)
        # print payload[2:len(payload)]

    def send_pkt(payload='', eof=False):
        # Thin wrapper so the send loop below reads naturally.
        return tb.txpath.send_pkt(payload, eof)

    mods = modulation_utils.type_1_mods()
    demods = modulation_utils.type_1_demods()

    # Command-line options: generic, expert, and channel-simulation groups.
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    expert_grp = parser.add_option_group("Expert")
    channel_grp = parser.add_option_group("Channel")
    parser.add_option("-m", "--modulation", type="choice", choices=mods.keys(),
                      default='dbpsk',
                      help="Select modulation from: %s [default=%%default]"
                           % (', '.join(mods.keys()),))
    parser.add_option("-s", "--size", type="eng_float", default=1500,
                      help="set packet size [default=%default]")
    parser.add_option("-M", "--megabytes", type="eng_float", default=1.0,
                      help="set megabytes to transmit [default=%default]")
    parser.add_option("","--discontinuous", action="store_true", default=False,
                      help="enable discontinous transmission (bursts of 5 packets)")
    channel_grp.add_option("", "--sample-rate", type="eng_float", default=1e5,
                           help="set speed of channel/simulation rate to RATE [default=%default]")
    channel_grp.add_option("", "--snr", type="eng_float", default=30,
                           help="set the SNR of the channel in dB [default=%default]")
    channel_grp.add_option("", "--frequency-offset", type="eng_float", default=0,
                           help="set frequency offset introduced by channel [default=%default]")
    channel_grp.add_option("", "--seed", action="store_true", default=False,
                           help="use a random seed for AWGN noise [default=%default]")

    transmit_path.add_options(parser, expert_grp)
    receive_path.add_options(parser, expert_grp)
    for mod in mods.values():
        mod.add_options(expert_grp)
    for demod in demods.values():
        demod.add_options(expert_grp)

    (options, args) = parser.parse_args ()
    if len(args) != 0:
        parser.print_help()
        sys.exit(1)

    # Realtime scheduling keeps the simulation from being starved; failure
    # to acquire it is non-fatal.
    r = gr.enable_realtime_scheduling()
    if r != gr.RT_OK:
        print "Warning: failed to enable realtime scheduling"

    # Create an instance of a hierarchical block
    tb = my_top_block(mods[options.modulation], demods[options.modulation], rx_callback, options)
    tb.start()

    # generate and send packets
    nbytes = int(1e6 * options.megabytes)
    n = 0
    pktno = 0
    pkt_size = int(options.size)
    while n < nbytes:
        # Payload: 2-byte big-endian packet number, padded out to pkt_size
        # with a repeating byte derived from the packet number.
        send_pkt(struct.pack('!H', pktno & 0xffff) + (pkt_size - 2) * chr(pktno & 0xff))
        n += pkt_size
        pktno += 1
    send_pkt(eof=True)
    tb.wait()
tb.wait()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C exit without a traceback.
        pass
| trnewman/VT-USRP-daughterboard-drivers_python | gnuradio-examples/python/digital/benchmark_loopback.py | Python | gpl-3.0 | 7,150 | [
"Gaussian"
] | c6024c0e2c819dc7d9e4e860bec75b1b175aa4a2ab17fcbeaa3ec02f9300c2c8 |
import textwrap
def print_intro():
    # Level - 1
    # TODO print an introduction to the game, with some helpful hints
    # Stub: currently just echoes a placeholder marker via pprint.
    pprint("intro")
def print_help():
    # Level - 2
    # TODO print help text
    # Stub: currently just echoes a placeholder marker via pprint.
    pprint("help")
def print_goodbye():
    # Level - 2
    global g_score
    # TODO print goodbye text, along with the player's current score
    # Bug fix: pprint() takes a single string argument, so the previous
    # call pprint("goodbye: ", g_score) raised a TypeError.  Build the
    # message as one string instead.
    pprint("goodbye: " + str(g_score))
def print_room_description(room_name):
    # Level - 5
    global g_rooms
    # TODO print the room's description and list the items it currently
    # contains, which means a loop that iterates over its item list,
    # (which you retrieve from the "items" entry in the g_rooms
    # dictionary), and then prints out the list. If the list contains
    # no items, skip that part.
    # Stub: placeholder output until the TODO above is implemented.
    pprint("room description")
def print_items():
    # Level - 5
    global g_player_items
    # TODO print a list of the item names that the player has, which
    # means a loop that iterates over the g_items list, and prints
    # out the list. If the player has nothing, print "You've got nothing"
    # Stub: currently always reports an empty inventory.
    pprint("You've got nothing")
def check_move_command(room_name, command):
    # Level - 4
    global g_rooms
    # TODO see if this is the name of a door in this room.
    # If so, call move_to_room() and return True. Otherwise return False.
    # Stub: no doors are recognized yet, so nothing is ever handled here.
    return False
def check_general_command(room_name, command):
    # Level - 2
    # Respond to general-purpose commands that need no game-state change
    # (e.g. scold the player for violent commands).  Returns True when the
    # command was handled here, otherwise False so other handlers can try.
    if not command.startswith("hit "):
        return False
    pprint("There's no violence allowed at Bitney!")
    return True
def move_to_room(room_name):
    # Level - 5
    global g_current_room_name
    global g_visited_room_names
    global g_score
    # TODO set the current_room to be room.
    # If this is the first time in this room (check visited_rooms) then
    # add the room's score to the player's score, and let them know they
    # just earned some additional points. Also print out the room's
    # description if it's the first time, otherwise just the room name.
    # Partial implementation: only the current-room pointer is updated;
    # scoring and first-visit output are still to be written.
    g_current_room_name = room_name
def take_item_command(room_name, command):
    # Level - 5
    global g_player_items
    global g_rooms
    # First use the handy-dandy get_item_name utility function to extract
    # the item name from the command.
    item_name = get_item_name("take", command)
    # TODO command will be "take xxx", where xxx is the name of an
    # item in the room. If xxx isn't the name of an item in the room, then
    # print an error message. Otherwise move the item from
    # the room's list of items, and put it into the player's list of items (g_player_items),
    # and print a string that says "you now have the xxx". Though
    # you only want to do this if the item is takeable!
    # NOTE(review): item_name is None when the command lacks the "take "
    # prefix, which would make the concatenation below raise -- confirm
    # callers only invoke this for "take ..." commands.
    pprint("I don't know how to take " + item_name)
def examine_item_command(room_name, command):
    # Level - 3
    global g_player_items
    global g_items
    # First use the handy-dandy get_item_name utility function to extract
    # the item name from the command.
    item_name = get_item_name("examine", command)
    # TODO if item_name isn't in the player's
    # list of items, print an error message. Otherwise print
    # the item's description. You can use the player_has_item(item_name)
    # utility function to help with this.
    # Stub: placeholder response until the TODO above is implemented.
    pprint("I don't know how to examine %s" % item_name)
def drop_item_command(room_name, command):
    # Level - 5
    global g_player_items
    global g_rooms
    # First use the handy-dandy get_item_name utility function to extract
    # the item name from the command.
    item_name = get_item_name("drop", command)
    # TODO command should be "drop xxx", where xxx is the name of an
    # item in the player's list of items. If xxx isn't the name of an item
    # in the player's list of items (call the handy-dandy player_has_item
    # utility to find out), then print an error message.
    # Otherwise move the item from the player's list of items to the room's
    # list of items, and print a string that says "you no longer have
    # the xxx"
    # Stub: placeholder response until the TODO above is implemented.
    pprint("I don't know how to drop %s" % item_name)
def check_item_command(room_name, command):
    # Level - 5
    # Handle commands aimed at items the player is carrying.  Returns True
    # when the command was recognized (and acted on), otherwise False so
    # other handlers get a chance at it.
    if command != "eat donut":
        # Not an item command we know about.
        return False
    if player_has_item("donut"):
        # TODO you need to remove the donut from the player's list of
        # items, because they've eaten it.
        pprint("Yum, that was tasty!")
    else:
        pprint("You don't have anything to eat")
    # The command was ours, whether or not the player had the donut.
    return True
def game_complete():
    # Level - 3
    global g_visited_room_names
    global g_score
    # TODO decide when to congratulate user and return True. This would
    # be the case for when they've visited every room. So you can either
    # compare their score against the sum of scores from every room, or
    # if the g_visited_room_names list length is == the number of rooms.
    # Skeleton behaviour: the game never auto-ends, so the main loop below
    # only exits when the player types "bye".
    return False
# Handy utility: given an action (like "take") and a full command (like
# "take key"), return just the item name (e.g. "key").  Returns None when
# the command does not start with "<action> ".
def get_item_name(action, command):
    prefix = action + " "
    if not command.startswith(prefix):
        return None
    return command[len(prefix):]
# Handy utility: returns True when the named item is in the player's
# inventory, otherwise False.
def player_has_item(item_name):
    global g_player_items
    # A plain membership test on the inventory list.
    return item_name in g_player_items
# This is a handy utility routine that wraps the text to be at most 80
# characters wide.
def pprint(text):
    # Python 2 print statement; textwrap.fill re-wraps `text` to fit an
    # 80-column terminal before printing it.
    print textwrap.fill(text, 80)
# Debugging helper: verify that every room can be reached from room_name
# and report any rooms the traversal never got to.
def check_room_graph(room_name):
    global g_rooms
    seen = set()
    # Depth-first walk of the door graph starting from the given room.
    check_room(seen, room_name)
    # Any room not reached by the traversal is unreachable from here.
    for name in g_rooms.keys():
        if name not in seen:
            pprint("We never visited %s" % name)
def check_room(visited_rooms, room):
    # Depth-first walk over the door graph, recording every room reached
    # in visited_rooms and flagging doors that lead to unknown rooms.
    if room in visited_rooms:
        return
    visited_rooms.add(room)
    if room not in g_rooms:
        pprint("%s is not a room name in the g_rooms dictionary" % room)
        return
    # Recurse through every door leading out of this room.
    for next_room in g_rooms[room]["doors"].values():
        check_room(visited_rooms, next_room)
# This is a debugging function that ensures all items are located in some
# room, but only one room.
def check_items():
    global g_items
    global g_rooms
    # We start off with a list of all items, and remove ones that we find,
    # so what's left will be the actual missing items.
    # BUG FIX: this previously read "g_game_items", a name that is never
    # defined anywhere (the items dictionary is called g_items), so the
    # "check" command crashed with a NameError.
    missing_items = set(g_items.keys())
    # We start off with no found items, and we add to this as we find items,
    # so we can check for the same item being in two different rooms.
    found_items = set()
    for room in g_rooms.keys():
        # BUG FIX: the old guard used hasattr(g_rooms[room], "items"), but
        # g_rooms[room] is a dict and every dict has an .items() *method*,
        # so hasattr() was always True and the warning could never fire.
        # Test for the dictionary key instead.
        if "items" not in g_rooms[room]:
            pprint("Room '%s' doesn't have an 'items' key in its dictionary" % room)
            continue
        room_items = g_rooms[room]["items"]
        for room_item in room_items:
            if room_item in found_items:
                pprint("%s is in two different rooms" % room_item)
            elif room_item not in missing_items:
                pprint("Item %s in room %s isn't a valid item name" % (room_item, room))
            else:
                # First sighting of this item: record it and cross it off.
                found_items.add(room_item)
                missing_items.remove(room_item)
    # Anything still unclaimed was never placed in any room.
    for missing_item in missing_items:
        pprint("%s is not in any room" % missing_item)
# This is a list of names of items that player has taken. It starts off
# as empty. When you take an item, it gets added to this list, and when
# you drop an item, it gets removed from this list.
g_player_items = []
# This is the name of the current room that the player is in.
# It gets its first real value from the move_to_room("Hallway") call below.
g_current_room_name = None
# This is a list of the names of all the rooms that the player has visited.
# It starts empty here; presumably move_to_room() appends each newly
# visited room (the starting room being the first) -- verify against
# move_to_room, which is defined earlier in the file.
g_visited_room_names = []
# This is the player's current score. They get points for visiting a room
# (but only the first time!)
g_score = 0
# rooms is a dictionary, where the key is the room name, and the value is a "room"
# Each room is also a dictionary, where the key is one of several possible values
# description -> string that describes the room. This should include all doors.
# items -> list of item names for items found in that room
# value -> points for visiting the room
# doors -> dictionary that maps from a door name ("north", "up", etc) to a room name
#
# You can also have other room-specific attributed, e.g. the computer lab could have
# a "locked": True attribute, and you have to unlock it first before you can go through
# that door. Use your imagination.
g_rooms = {"Computer Lab":
{"description": "The computer lab is filled with glowing screens and old chairs, your back is to a white board. There is a door to the east",
"items": ["Notebook"],
"value": 5,
"doors": {"east": "Hallway"}},
"Hallway":
{"description": "The hallway is filled with colorful murals, lockers line the western wall. There are hallways to the north and east, and a door to the east and west",
"items": ["Key"],
"value": 0,
"doors": {"west": "Computer Lab", "east": "Mr. Wood's Room", "north": "North Hallway", "south": "South Hallway"}},
"North Hallway":
{"description": "the North Hallway contains artwork, there is a door labeled 'Boys Bathroom' to your east. to your north appears to be a more open area. To the south there is the Hallway..",
"items": [],
"value": 0,
"doors": {"south": "Hallway", "north": "Atrium", "east": "Boys Bathroom"}},
"South Hallway":
{"description": "the south hallway also holds more artwork and murals on the walls. There is the Girls Bathroom to the east, a door to the west, and an open area to the south. to the north is the hallway.",
"items": [],
"value": 0,
"doors": {"north": "Hallway", "south": "South Area", "west": "Storage Room", "east": "Girls Bathroom"}},
"Boys Bathroom":
{"description": "The bathroom has a sink, a mirror, a urinal, and a stall. No surprises here. the stall is occupied. The exit is to your west.",
"items": [],
"value": 0,
"doors": {"west": "North Hallway"}},
"Girls Bathroom":
{"description": "A calming pink room with art in progress on the walls. It has 2 stalls, 2 sinks, 2 mirrors and a window. The exit is to your west.",
"items": ["Physics Binder"],
"value": 2,
"doors": {"west": "South Hallway"}},
"Storage Room":
{"description": "The storage room is locked. Head east to return to the South Hallway.",
"items": ["Hitler Doll"],
"value": 0,
"doors": {"east": "South Hallway"}},
"Atrium":
{"description": "A small room with a bench and some artwork. There is a door to the west, east, and north. The North Hallway is to the south...",
"items": ["Crumpled Note"],
"value": 5,
"doors": {"north": "Bistro", "west": "Math Room", "east": "Atrium Deck", "south": "North Hallway"}},
"South Area":
{"description": "An area well lit by the windows. There is a table with some chairs. There is a hallway to your north, a door to your east, south, and west. There is another door on the south wall, but it is locked",
"items": [],
"value": 0,
"doors": {"north": "South Hallway", "south": "Spanish Room", "west": "Mrs. Simpton's Room", "east": "Basketball Court"}},
"Mr. Wood's Room":
{"description": "The messy room of a maddened artist. This room is barely lit and has many tables. The walls are covered in propaganda all in different languages. There are two white bords one has a map of america and the other has a map of Russia. There is a life sized statue of George Washington in the room. There is a door to the north, east, and west",
"items": ["Statue"],
"value": 1,
"doors": {"north": "Art Closet", "west": "Hallway", "east": "Picnic Tables"}},
"Art Closet":
{"description": "A cluttered confusion of art supplies. The odor of paint fills your nose. the exit is to your south.",
"items": [],
"value": 0,
"doors": {"south": "Mr. Wood's Room"}},
"Math Room":
{"description": "There are some tables and... math books? There is a door to the north and a door to the east.",
"items": [],
"value": 0,
"doors": {"north": "Atrium Deck", "east": "Atrium"}},
"Bistro":
{"description": "The place to be at lunch time. The bistro is a small closet of a room with quotes on every inch of the wall. It contains an abundance of tasty lunch-time snacks. there is a door to the east and a door to the south.",
"items": ["Donut"],
"value": 10,
"doors": {"south": "Atrium", "east": "Atrium Deck"}},
"Spanish Room":
{"description": "Mrs. Phillips is single-handedly teaching Spanish to all grades in a small, rectangular room. the exit is to the north.",
"items": [],
"value": 3,
"doors": {"north": "South Area"}},
"Atrium Deck":
{"description": "You end up outside on a deck. To the east you see some teachers talking in their area. To the west is a door",
"items": [],
"value": 5,
"doors": {"east": "Teacher Area", "west": "Atrium"}},
"Picnic Tables":
{"description": "You are outside under a green tent, surrounded by green picknic tables whose tops are pocadotted with paint and peeled away paint. There is the remnence of a freshmans lunch on a table. to the north are some teachers talking in their area. to the west is a door. to the south is a basketball court that has a few cars parked on it.",
"items": [],
"value": 10,
"doors": {"north": "Teacher Area", "south": "Basketball Court", "west": "Mr. Wood's Room", "east": "Fence Post"}},
"Fence Post":
{"description": "You just whacked your head into a fence post. Head west or south to turn around a different direction",
"items": [],
"value": 50,
"doors": {"west": "Picnic Tables", "south": "Greenhouse"}},
"Greenhouse":
{"description": "you just whacked your head into the greenhouse.... that must have hurt.... head west to take a break at the Basketball Court tables",
"items": [],
"value": 40,
"doors": {"north": "Fence Post", "west": "Basketball Court"}},
"Teacher Area":
{"description": "you listen in on the teachers as they are discussing a student with low grades. they shoo you off claiming the conversation is confidential. to the north there is the parking area, to the south there is the Picnic tables. to the west there is a deck of sorts",
"items": [],
"value": 5,
"doors": {"north": "Parking Area", "south": "Picnic Tables", "west": "Atrium Deck"}},
"Mrs. Simpton's Room":
{"description": "A single windowed room with mysterious symbols on the walls. It smells strongly of body oder. the exit is to the east... better hurry! it smells!",
"items": [],
"value": 10,
"doors": {"east": "South Area"}},
"Science Room":
{"description": "A rather large room full of desks, chairs, and science tools. There are doors to the north, east, and south.",
"items": [],
"value": 10,
"doors": {"north": "Science Bathroom", "east": "Secret Hallway", "south": "Parking Area"}},
"Elkin's Car":
{"description": "A brown scion is parked, and Mr. Elkin is there, happily chewing on a sandwich. You look around, and notice a deck to your south, another door off to your north, and the base of some stairs to your west. There is also the smiley guys parking lot to the east. elkin warns you not to go because you may get run over, but you may try anyways.",
"items": [],
"value": 30,
"doors": {"east": "Smiley Guys Parking Lot", "south": "Office Porch", "west": "Base of Stairs", "north": "Humanities Hall"}},
"Smiley Guys Parking Lot":
{"description": "*crunch* *slam* *honk*. you just got hit by a car and are dead. But, seeing as you are new here, we'll give you a second chance at life. enter 'respawn' if you wish to try again",
"items": [],
"value": 100,
"doors": {"respawn": "Elkin's Car"}},
"Secret Hallway":
{"description": "A small unlit hallway with a door at either end, not very exciting. not sure why it's 'Secret'. there is a door to the west and east",
"items": [],
"value": 5,
"doors": {"east": "Humanities Hall", "west": "Science Room"}},
"Science Bathroom":
{"description": "A small bathroom with random scribblings on the wall, a painting made by Mr. Wood hangs above the toilet. the exit is to your east.",
"items": ["Random Number"],
"value": 20,
"doors": {"south": "Science Room"}},
"Humanities Bathroom":
{"description": "A cramped bathroom, the walls are painted a vibrant orange color. the exit is to the south.",
"items": [],
"value": 10,
"doors": {"south": "Humanities Hall"}},
"Humanities Hall":
{"description": "Several long tables form a 'U' shape facing a podium. There is an odd door to the north. there are also doors to the east, south, and west.",
"items": [],
"value": 20,
"doors": {"east": "Kill Room", "west": "Secret Hallway", "south": "Elkin's Car", "north": "Humanities Bathroom"}},
"Kill Room":
{"description": "Completely dark.... the clanking sounds of folded chairs can be hears. the exit is to the south.",
"items": [],
"value": 10,
"doors": {"south": "Humanities Hall"}},
"Parking Area":
{"description": "There are a bunch of parked cars around you. To the north you see a door labeled 'science'. to the east, you see a set of stairs. To the south you see a group of teachers talking.",
"items": [],
"value": 30,
"doors": {"north": "Science Room", "south": "Teacher Area", "east": "Base of Stairs"}},
"Base of Stairs":
{"description": "you find yourself at a base of stairs. you can either go south and go up, or you can west to the parking area, or east over the Mr. Elkin",
"items": [],
"value": 0,
"doors": {"south": "Back Porch", "east": "Elkin's Car", "west": "Parking Area"}},
"Back Porch":
{"description": "you find yourself on a porch to the back of the office building. you can either go north down the stairs in the direction of the parking area, or you can go east to a door that leads inside.",
"items": [],
"value": 10,
"doors": {"north": "Base of Stairs", "east": "Upstairs Area"}},
"Upstairs Area":
{"description": "There is a long table with many chairs around it. There are four doors, but two are labeled off limits. there is a door open to the north and south. there is also a door to the west. you also notice two sets of stairs heading down to a landing... you can go that way by commanding 'down'",
"items": [],
"value": 20,
"doors": {"north": "Russ' Office", "south": "Kitchen", "west": "Back Porch", "down": "Stair Landing"}},
"Parking Lot":
{"description": "*BRAAAAP*, you just got slammed by Dave on his bike. 'respawn' if you want to try and live again",
"items": [],
"value": 90,
"doors": {"respawn": "Office Porch"}},
"Office Porch":
{"description": "This is where the cool kids chill out. to the north you see Mr. elkin chewing on a sandwich. to the west there is a great wooden double door. to the east is the parking lot, which looks dangerous, but you may try to escape there for some food...",
"items": [],
"value": 40,
"doors": {"north": "Elkin's Car", "east": "Parking Lot", "west": "Lobby"}},
"Basketball Court":
{"description": "There is a basketball hoop with cars parked around it. not very good for playing basketball. there are picnic tables to the north, a door off the west, and a greenhouse to the east....",
"items": ["Basketball"],
"value": 10,
"doors": {"north": "Picnic Tables", "west": "South Area", "east": "Greenhouse"}},
"Lobby":
{"description": "The lobby is a place where people go to chill. There are stairs to the west, there is a door to the south and east, and an approachable desk to the north",
"items": [],
"value": 2,
"doors": {"west": "Stair Landing", "north": "Angelina's Desk Area", "south": "Mr. Young's Room", "east": "Office Porch"}},
"Angelina's Desk Area":
{"description": "part of the office lobby where Angelina resides. There is a door to the north and west. A lobby is to your south",
"items": [],
"value": 10,
"doors": {"west": "Teacher's Lounge", "north": "Dave's Office", "south": "Lobby"}},
"Stair Landing":
{"description": "there are two stairs leading up to an upper area. go east to return to the lobby, command 'up' if you wish to go up",
"items": [],
"value": 0,
"doors": {"up": "Upstairs Area", "east": "Lobby"}},
"Mr. Young's Room":
{"description": "Where the Pop Tart king resides. This room is full of light from the windows. there is a cart, a desk, and a projector. To the north and west there are doors",
"items": ["Meme"],
"value": 20,
"doors": {"north": "Lobby", "west": "Office Bathroom"}},
"Dave's Office":
{"description": "A small office with a round table, and a desk with a Mac on it. the exit is to the south",
"items": [],
"value": 20,
"doors": {"south": "Angelina's Desk Area"}},
"Teacher's Lounge":
{"description": "There is a table with chairs surrounding it, and a printer in the corner. There are several bookshelves. There is also a closet that has a sign saying 'KEEP OUT'. the exit is to the east.",
"items": [],
"value": 5,
"doors": {"east": "Angelina's Desk Area"}},
"Russ' Office":
{"description": "The room where Russ resides and handles the daily responsiblies of a principal which is upstairs of the office. the exit is to the south",
"items": [],
"value": 30,
"doors": {"south": "Upstairs Area"}},
"Kitchen":
{"description": "This room contains a refridgerator, stove, sink, and countertops. A couple windows. the exit is to the north.",
"items": [],
"value": 20,
"doors": {"north": "Upstairs Area"}},
"Office Bathroom":
{"description": "A bathroom that smells weird. there is a small window that is slightly ajar... not big enough to fit through. the exit is to the east.",
"items": [],
"value": 0,
"doors": {"east": "Mr. Young's Room"}},
}
# items is a dictionary, where the key is the item name, and the value is an "item"
# Each item is also a dictionary, where the key is one of several possible values
# description -> string that describes the item
# takeable -> boolean for whether the item can be taken or not.
#
# You can also have other item-specific attributed, e.g. a bottle of water could have
# an "empty": False attribute, and this changes to True after you've had a drink.
# Use your imagination.
g_items = {
"Notebook":
{"description":
'''notebook containing all kinds of complex diagrams, equations, assignments
(many with very low grades), etc. in a completely random order. None of the
pages have any students names on them, but Mr. Schneider has obviously written
in the name "Peggy???" in red ink on several of the graded assignments.''',
"takeable": True},
"Key":
{"description":
'''small, nondescript key''',
"takeable": True},
"Crumpled Note":
{"description":
'''loose-leaf sheet of paper that was crumpled up into a ball before it was seemingly discarded. It reads,
"I can't find my stupid physics binder anywhere! Mr. Schneider is going to kill me when I get to class."''',
"takeable": True},
"Physics Binder":
{"description":
'''notebook containing all kinds of complex diagrams, equations, assignments (many with very low grades),
etc. in a completely random order. None of the pages have any students names on them, but Mr. Schneider
has obviously written in the name "Peggy???" in red ink on several of the graded assignments.''',
"takeable": True},
"Donut":
{"description":
'''a chocolate donut with multicolored sprinkles''',
"takeable": True},
"Meme":
{"description":
'''an element of a culture or system of behavior that may be considered to be passed from one individual
to another by nongenetic means, especially imitation.''',
"takeable": True},
"Basketball":
{"description":
'''It's a basketball, 'nuff said''',
"takeable": True},
"Statue":
{"description":
'''A lifesized bronze statue of George Washington.''',
"takeable": False},
"Hitler Doll":
{"description":
'''It's a doll of Germany's favorite dictator''',
"takeable": True},
"Random Number":
{"description":
'''An unknown phone number written on the wall''',
"takeable": False},
}
# ============================================================
# Start of the main game
# ============================================================
# Print out the welcome message
print_intro()
# Start the player in the hallway. Which is why this room isn't worth
# any points, as you get there automatically.
move_to_room("Hallway")
# Keep looping until the game is complete (or the user enters "bye").
# Note: game_complete() currently always returns False, so typing "bye"
# is the only way out of this loop.
while not game_complete():
    # Print an empty line
    print("")
    # Get the user's command (Python 2: raw_input returns a str)
    command = raw_input("> ")
    # See if the command is one of our special commands.
    # The order of these checks is the dispatch priority: exact commands
    # first, then prefix commands, then doors, items, and general chatter.
    if command == "bye":
        # Print a goodbye message, and then break out of the loop, thus
        # ending the game.
        print_goodbye()
        break
    if command == "help":
        print_help()
        continue
    if command == "list":
        print_items()
        continue
    if command == "look":
        print_room_description(g_current_room_name)
        continue
    if command == "check":
        # Debug command: validate the room graph and item placement.
        check_room_graph(g_current_room_name)
        check_items()
        continue
    if command.startswith("take"):
        # Matches anything beginning with "take" (even "takeoff");
        # get_item_name() inside the handler rejects malformed commands.
        take_item_command(g_current_room_name, command)
        continue
    if command.startswith("drop"):
        drop_item_command(g_current_room_name, command)
        continue
    if command.startswith("examine"):
        examine_item_command(g_current_room_name, command)
        continue
    # See if the command is the name of a door
    if check_move_command(g_current_room_name, command):
        continue
    # See if the command is an action on an item the user
    # has, in the appropriate room. If so, take that action
    # on that item.
    if check_item_command(g_current_room_name, command):
        continue
    # See if the command is something we want to respond to
    # with special text.
    if check_general_command(g_current_room_name, command):
        continue
    # No idea what they want to do
    print("I don't understand that")
| kkrugler/bitney-adventure | adventure-skeleton.py | Python | apache-2.0 | 29,758 | [
"exciting"
] | 19b65cbd13bc4eeb92f4e8352f797dd12b0581491f80b4f2e25a5e3c17b582d7 |
# axes.py -- draw a set of labelled X/Y/Z axes as a PyMOL CGO object.
from pymol.cgo import *
from pymol import cmd
from pymol.vfont import plain

# Build the axes as three cylinders along X, Y and Z.  Each CGO cylinder
# is CYLINDER, x1,y1,z1, x2,y2,z2, radius, r1,g1,b1, r2,g2,b2: white at
# the origin, fading to red (X), green (Y) or blue (Z) at the tip.
axes_cgo = []
for tip, tip_color in [((10., 0., 0.), (1.0, 0.0, 0.)),
                       ((0., 10., 0.), (0., 1.0, 0.)),
                       ((0., 0., 10.), (0., 0.0, 1.0))]:
    axes_cgo.extend([CYLINDER, 0., 0., 0.])
    axes_cgo.extend(tip)
    axes_cgo.extend([0.2, 1.0, 1.0, 1.0])
    axes_cgo.extend(tip_color)

# Label the origin and the three axis tips.  cyl_text appends the letter
# geometry to the CGO list in place (requires pymol >= 0.8); the axes=
# argument supplies the local coordinate frame (and size) of the glyphs.
cyl_text(axes_cgo, plain, [-5., -5., -1], 'Origin', 0.20, axes=[[3, 0, 0], [0, 3, 0], [0, 0, 3]])
cyl_text(axes_cgo, plain, [10., 0., 0.], 'X', 0.20, axes=[[3, 0, 0], [0, 3, 0], [0, 0, 3]])
cyl_text(axes_cgo, plain, [0., 10., 0.], 'Y', 0.20, axes=[[3, 0, 0], [0, 3, 0], [0, 0, 3]])
cyl_text(axes_cgo, plain, [0., 0., 10.], 'Z', 0.20, axes=[[3, 0, 0], [0, 3, 0], [0, 0, 3]])

# Finally, load the finished CGO into PyMOL under the name "axes".
cmd.load_cgo(axes_cgo, 'axes')
| demharters/git_scripts | pymol_axes2.py | Python | apache-2.0 | 845 | [
"PyMOL"
] | 4334a1701742de7618cf347658a81f6d617882aff701bf10515153345aa2c0dc |
"""
Test for the various mlab source functions.
These tests are higher level than the tests testing directly the
MlabSource subclasses. They are meant to capture errors in the formatting
of the input arguments.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
import unittest
import numpy as np
from mayavi.tools import sources
################################################################################
# `BaseTestSource`
################################################################################
class BaseTestSource(unittest.TestCase):
    """ Shared assertion helpers for checking the arrays held by a
        source object's mlab_source attribute.
    """

    def setUp(self):
        return

    def tearDown(self):
        return

    def all_close(self, a, b):
        """ Similar to numpy's allclose, but works also for a=None.

            Both arguments must be None together, or be numerically close.
        """
        if a is None or b is None:
            self.assertIsNone(a)
            self.assertIsNone(b)
        else:
            # BUG FIX: this used to call np.allclose(a, a), comparing the
            # first argument with *itself*, which is vacuously true and
            # made the helper accept any pair of non-None arrays.
            # NOTE: assertTrue replaces the deprecated assert_ alias,
            # which was removed from unittest in Python 3.12.
            self.assertTrue(np.allclose(a, b))

    def check_positions(self, source, x, y, z):
        """ Check that the position vectors of the source do correspond
            to the given input positions
        """
        self.assertTrue(np.allclose(source.mlab_source.x, x))
        self.assertTrue(np.allclose(source.mlab_source.y, y))
        self.assertTrue(np.allclose(source.mlab_source.z, z))

    def check_vectors(self, source, u, v, w):
        """ Check that the vector data corresponds to the given arrays.
        """
        self.all_close(source.mlab_source.u, u)
        self.all_close(source.mlab_source.v, v)
        self.all_close(source.mlab_source.w, w)

    def check_scalars(self, source, s):
        """ Check that the scalar data corresponds to the given array.
        """
        self.all_close(source.mlab_source.scalars, s)
################################################################################
# `TestScalarScatter`
################################################################################
class TestScalarScatter(BaseTestSource):
    """ Exercise the accepted input signatures of sources.scalar_scatter. """

    def test_input_args(self):
        """ Check that scalar_scatter can take different input arguments """
        # Check for a single number as position vectors.
        ss = sources.scalar_scatter(0, 0, 0, figure=None)
        self.check_positions(ss, 0, 0, 0)
        self.check_scalars(ss, None)
        self.check_vectors(ss, None, None, None)
        # Check for a single number as scalar data, and no position
        # vectors.
        ss = sources.scalar_scatter(0, figure=None)
        self.check_positions(ss, 0, 0, 0)
        self.check_scalars(ss, 0)
        self.check_vectors(ss, None, None, None)
        # Check for a list as position vectors.
        ss = sources.scalar_scatter([0, 1], [0, 1], [0, 1], figure=None)
        self.check_positions(ss, [0, 1], [0, 1], [0, 1])
        self.check_scalars(ss, None)
        self.check_vectors(ss, None, None, None)
        # Check for a list as scalar data, and no position vectors.
        ss = sources.scalar_scatter([0, 1], figure=None)
        self.check_scalars(ss, [0, 1])
        self.check_vectors(ss, None, None, None)
        # Check for a 1D array as position vectors.
        a = np.array([0, 1])
        ss = sources.scalar_scatter(a, a, a, figure=None)
        self.check_positions(ss, a, a, a)
        self.check_scalars(ss, None)
        self.check_vectors(ss, None, None, None)
        # Check for a 1D array as a scalar data, and no position vectors.
        ss = sources.scalar_scatter(a, figure=None)
        self.check_scalars(ss, a)
        self.check_vectors(ss, None, None, None)
        # Check for a 2D array as position vectors.
        a = np.array([[0, 1], [2, 3]])
        ss = sources.scalar_scatter(a, a, a, figure=None)
        self.check_positions(ss, a, a, a)
        self.check_scalars(ss, None)
        self.check_vectors(ss, None, None, None)
        # Check for a 2D array as scalar data, and no position vectors.
        # (This case used to appear twice verbatim; the copy-paste
        # duplicate has been removed.)
        ss = sources.scalar_scatter(a, figure=None)
        self.check_scalars(ss, a)
        self.check_vectors(ss, None, None, None)
################################################################################
# `TestVectorScatter`
################################################################################
class TestVectorScatter(BaseTestSource):
    def test_input_args(self):
        """ Check that vector_scatter can take different input arguments """
        # Check for a single number as a position vector.
        ss = sources.vector_scatter(0, 0, 0, 0, 0, 0, figure=None)
        self.check_positions(ss, 0, 0, 0)
        self.check_scalars(ss, None)
        self.check_vectors(ss, 0, 0, 0)
        # Check for no position vectors, and single numbers for vector
        # data.
        ss = sources.vector_scatter(0, 0, 0, figure=None)
        self.check_positions(ss, 0, 0, 0)
        self.check_scalars(ss, None)
        self.check_vectors(ss, 0, 0, 0)
        # Check for a list as a position vector.
        ss = sources.vector_scatter([0, 1], [0, 1], [0, 1],
                                    [0, 1], [0, 1], [0, 1], figure=None)
        self.check_positions(ss, [0, 1], [0, 1], [0, 1])
        self.check_scalars(ss, None)
        self.check_vectors(ss, [0, 1], [0, 1], [0, 1])
        # Check for a lists as a vector data, and no position vectors
        ss = sources.vector_scatter([0, 1], [0, 1], [0, 1], figure=None)
        self.check_scalars(ss, None)
        self.check_vectors(ss, [0, 1], [0, 1], [0, 1])
        # Check for a 1D array as a position vector.
        a = np.array([0, 1])
        ss = sources.vector_scatter(a, a, a, a, a, a, figure=None)
        self.check_positions(ss, a, a, a)
        self.check_scalars(ss, None)
        self.check_vectors(ss, a, a, a)
        # Check for a 1D array as vector data, and no position vectors.
        ss = sources.vector_scatter(a, a, a, figure=None)
        self.check_scalars(ss, None)
        self.check_vectors(ss, a, a, a)
        # Check for a 2D array as a position vector.
        a = np.array([[0, 1], [2, 3]])
        ss = sources.vector_scatter(a, a, a, a, a, a, figure=None)
        self.check_positions(ss, a, a, a)
        self.check_scalars(ss, None)
        self.check_vectors(ss, a, a, a)
        # Check for a 2D array as vector data, and no position vectors.
        ss = sources.vector_scatter(a, a, a, figure=None)
        self.check_scalars(ss, None)
        self.check_vectors(ss, a, a, a)
        # Check for a 3D array as a position vector.
        x, y, z = np.mgrid[0:3, 0:3, 0:3]
        ss = sources.vector_scatter(x, y, z, x, y, z, figure=None)
        self.check_positions(ss, x, y, z)
        self.check_scalars(ss, None)
        self.check_vectors(ss, x, y, z)
        # Check for a 3D array as vector data, and no position vectors.
        # NOTE(review): the call below exercises scalar_scatter, not
        # vector_scatter, and then checks the implicit np.indices
        # positions -- this looks like a copy-paste from the scalar test;
        # confirm whether vector_scatter was intended here.
        x, y, z = np.mgrid[0:3, 0:3, 0:3]
        ss = sources.scalar_scatter(z, figure=None)
        self.check_scalars(ss, z)
        X, Y, Z = np.indices(z.shape)
        self.check_positions(ss, X, Y, Z)
################################################################################
# `TestArray2DSource`
################################################################################
class TestArray2DSource(BaseTestSource):
    def test_input_args(self):
        """ Check that array2d_source can take different input arguments """
        # Check for a single number as data and no position arrays.
        ss = sources.array2d_source(0, figure=None)
        self.check_scalars(ss, 0)
        # Check for a list as data, and no position arrays.
        ss = sources.array2d_source([0, 1], figure=None)
        self.check_scalars(ss, [0, 1])
        # Check for a 1D array as data, and no position arrays.
        a = np.array([0, 1])
        ss = sources.array2d_source(a, figure=None)
        self.check_scalars(ss, a)
        # Check for a 2D array as data, and no position arrays.
        a = np.array([[0, 1], [2, 3]])
        ss = sources.array2d_source(a, figure=None)
        self.check_scalars(ss, a)
        # Check for 2 lists as positions vectors, and a 2D list as data
        x = [0, 1]
        y = [0, 1]
        s = [[0, 1], [2, 3]]
        ss = sources.array2d_source(x, y, s, figure=None)
        self.check_scalars(ss, s)
        # Check for an ogrid as position vectors, and a function for the
        # scalars
        x, y = np.ogrid[-3:3, -3:3]
        f = lambda x, y: x**2 + y**2
        ss = sources.array2d_source(x, y, f, figure=None)
        self.check_scalars(ss, f(x, y))
        # Check for an mgrid as position vectors, and a 2D array for the
        # scalars
        x, y = np.mgrid[-3:3, -3:3]
        s = np.zeros_like(x)
        # NOTE(review): the data argument passed below is x, yet the
        # expected scalars are all zeros (s).  This only "passes" because
        # BaseTestSource.all_close compares its first argument with
        # itself; the intended data argument was probably s -- verify.
        ss = sources.array2d_source(x, y, x, figure=None)
        self.check_scalars(ss, s)
################################################################################
# `TestScalarField`
################################################################################
class TestScalarField(BaseTestSource):

    def test_input_args(self):
        """ Check that scalar_field can take different input arguments """
        # 2D position arrays together with a callable computing the data.
        def field_fn(x, y, z):
            return x**2 + y**2

        x, y = np.mgrid[-3:3, -3:3]
        z = np.zeros_like(x)
        src = sources.scalar_field(x, y, z, field_fn, figure=None)
        self.check_positions(src, x, y, z)
        self.check_scalars(src, field_fn(x, y, z))
        # A bare 2D array of data, with the positions left implicit.
        data = np.random.random((10, 10))
        src = sources.scalar_field(data, figure=None)
        self.check_scalars(src, data)
        # A bare 3D array of data, with the positions left implicit.
        data = np.random.random((10, 10, 10))
        src = sources.scalar_field(data, figure=None)
        self.check_scalars(src, data)
        # 3D position arrays together with a 3D data array.
        x, y, z = np.mgrid[-3:3, -3:3, -3:3]
        src = sources.scalar_field(x, y, z, z, figure=None)
        self.check_positions(src, x, y, z)
        self.check_scalars(src, z)
################################################################################
# `TestVectorField`
################################################################################
class TestVectorField(BaseTestSource):
    def test_input_args(self):
        """ Check that vector_field can take different input arguments """
        # Check for 2D arrays as positions vectors, and a function for
        # the data
        x, y = np.mgrid[-3:3, -3:3]
        z = np.zeros_like(x)
        # The callable returns the three vector components (u, v, w) as
        # a permutation of the position arrays.
        def f(x, y, z):
            return y, z, x
        ss = sources.vector_field(x, y, z, f, figure=None)
        self.check_scalars(ss, None)
        self.check_vectors(ss, y, z, x)
        # Check for a 2D array as data, and no position vectors
        u = np.random.random((10, 10))
        v = np.random.random((10, 10))
        w = np.random.random((10, 10))
        ss = sources.vector_field(u, v, w, figure=None)
        self.check_scalars(ss, None)
        self.check_vectors(ss, u, v, w)
        # Check for a 3D array as data, and no position vectors
        u = np.random.random((10, 10, 10))
        v = np.random.random((10, 10, 10))
        w = np.random.random((10, 10, 10))
        ss = sources.vector_field(u, v, w, figure=None)
        self.check_scalars(ss, None)
        self.check_vectors(ss, u, v, w)
        # Check for a 3D array as data, and 3D arrays as position
        x, y, z = np.mgrid[-3:3, -3:3, -3:3]
        ss = sources.vector_field(x, y, z, y, z, x, figure=None)
        self.check_scalars(ss, None)
        self.check_positions(ss, x, y, z)
        self.check_vectors(ss, y, z, x)
################################################################################
# `TestLineSource`
################################################################################
class TestLineSource(BaseTestSource):

    def test_input_args(self):
        """ Check that line_source can take different input arguments """
        # Plain numbers as position coordinates, no data.
        src = sources.line_source(0, 0, 0, figure=None)
        self.check_positions(src, 0, 0, 0)
        self.check_scalars(src, None)
        # Plain lists for both positions and data.
        src = sources.line_source([0, 1], [0, 1], [0, 1], [2, 3], figure=None)
        self.check_positions(src, [0, 1], [0, 1], [0, 1])
        self.check_scalars(src, [2, 3])
        # Arrays for positions, a callable for the data.
        x, y, z = np.random.random((3, 10))
        def total(x, y, z):
            return x + y + z
        src = sources.line_source(x, y, z, total, figure=None)
        self.check_positions(src, x, y, z)
        self.check_scalars(src, total(x, y, z))
################################################################################
# `TestVerticalVectorsSource`
################################################################################
class TestVerticalVectorsSource(BaseTestSource):

    def test_input_args(self):
        """ Check that vertical_vectors_source can take different input
            arguments """
        # Three numbers: (x, y, data) -- the z position defaults to 0.
        src = sources.vertical_vectors_source(0, 0, 1, figure=None)
        self.check_positions(src, 0, 0, 0)
        self.check_scalars(src, 1)
        self.check_vectors(src, 0, 0, 1)
        # Four numbers: (x, y, z, data).
        src = sources.vertical_vectors_source(0, 0, 1, 1, figure=None)
        self.check_positions(src, 0, 0, 1)
        self.check_scalars(src, 1)
        self.check_vectors(src, 0, 0, 1)
        # Plain lists as positions and data.
        src = sources.vertical_vectors_source([0, 1], [0, 1], [0, 1], [2, 3],
                        figure=None)
        self.check_positions(src, [0, 1], [0, 1], [0, 1])
        self.check_scalars(src, [2, 3])
        self.check_vectors(src, [0, 0], [0, 0], [2, 3])
        # Arrays as positions, a callable as data.
        x, y, z = np.random.random((3, 10))
        zeros = np.zeros_like(x)
        def total(x, y, z):
            return x + y + z
        src = sources.vertical_vectors_source(x, y, z, total, figure=None)
        self.check_positions(src, x, y, z)
        self.check_scalars(src, total(x, y, z))
        self.check_vectors(src, zeros, zeros, z)
        # Three arrays: the last one is the data, z position defaults to 0.
        src = sources.vertical_vectors_source(x, y, z, figure=None)
        self.check_positions(src, x, y, zeros)
        self.check_scalars(src, z)
        self.check_vectors(src, zeros, zeros, z)
################################################################################
# `TestSourceInfinite`
################################################################################
class TestSourceInfinite(unittest.TestCase):
    # BUG FIX: this class was a copy-paste named `TestVerticalVectorsSource`,
    # which shadowed the real TestVerticalVectorsSource defined above, so the
    # earlier class's tests were silently never collected or run.  The banner
    # comment above this class shows the intended name, `TestSourceInfinite`.

    def test_infinite(self):
        """ Check that passing in arrays with infinite values raises
        errors """
        # Some arrays
        x = np.random.random((10, 3, 4))
        y = np.random.random((10, 3, 4))
        z = np.random.random((10, 3, 4))
        u = np.random.random((10, 3, 4))
        v = np.random.random((10, 3, 4))
        w = np.random.random((10, 3, 4))
        s = np.random.random((10, 3, 4))
        # Add a few infinite values:
        u[2, 2, 1] = np.inf
        s[0, 0, 0] = -np.inf
        # Every source factory must reject non-finite input with ValueError.
        self.assertRaises(ValueError,
                        sources.grid_source, x[0], y[0], z[0], scalars=s[0],
                        figure=None)
        self.assertRaises(ValueError,
                        sources.vertical_vectors_source, x, y, z, s,
                        figure=None)
        self.assertRaises(ValueError,
                        sources.array2d_source, x[0], y[0], s[0],
                        figure=None)
        self.assertRaises(ValueError,
                        sources.scalar_field, x, y, z, s,
                        figure=None)
        self.assertRaises(ValueError,
                        sources.scalar_scatter, x, y, z, s,
                        figure=None)
        self.assertRaises(ValueError,
                        sources.vector_scatter, x, y, z, u, v, w,
                        figure=None)
        self.assertRaises(ValueError,
                        sources.vector_field, x, y, z, u, v, w,
                        figure=None)
        self.assertRaises(ValueError,
                        sources.line_source, x[0, 0], y[0, 0], z[0, 0], s[0, 0],
                        figure=None)
# Allow this test module to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| liulion/mayavi | mayavi/tests/test_mlab_source_integration.py | Python | bsd-3-clause | 16,642 | [
"Mayavi"
] | 3dc1d26ae3b94c15f4c0c5d702d38e6290c037a00c93d847bb29de770bf59379 |
#
# @file TestReadSBML.py
# @brief Read SBML unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestReadSBML.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
import re
# Flags recording which XML parser libSBML was configured with; populated
# by setXMLParser() from the build configuration file.
USE_LIBXML = 0
USE_EXPAT = 0
USE_XERCES = 0

def setXMLParser(make_config="../../../config/makefile-common-vars.mk"):
    """Detect the configured XML parser from the build makefile.

    Scans *make_config* for ``USE_EXPAT``/``USE_LIBXML``/``USE_XERCES``
    assignments and sets the corresponding module-level flags to 1.

    Parameters
    ----------
    make_config : str
        Path of the makefile-variables file to scan.  Defaults to the
        path the original no-argument call used, so existing callers
        are unaffected.

    Returns
    -------
    tuple of int
        ``(USE_EXPAT, USE_LIBXML, USE_XERCES)`` after scanning.
    """
    global USE_EXPAT
    global USE_LIBXML
    global USE_XERCES
    # re.X (verbose) makes the literal spaces in these patterns
    # insignificant, so they match e.g. "USE_EXPAT = 1" at line start.
    re_expat = re.compile(r'^ USE_EXPAT \s* = \s* 1', re.X)
    re_libxml = re.compile(r'^ USE_LIBXML \s* = \s* 1', re.X)
    re_xerces = re.compile(r'^ USE_XERCES \s* = \s* 1', re.X)
    # BUG FIX: the original opened the file and never closed it; the
    # with-statement releases the handle deterministically.
    with open(make_config) as f:
        for line in f:
            if re_expat.match(line):
                USE_EXPAT = 1
            if re_libxml.match(line):
                USE_LIBXML = 1
            if re_xerces.match(line):
                USE_XERCES = 1
    return (USE_EXPAT, USE_LIBXML, USE_XERCES)
def wrapString(s):
    """Return *s* unchanged (identity hook kept from the translated C suite)."""
    # DEAD-CODE FIX: the generated originals all carried an unreachable
    # `pass` after their `return`; those have been removed throughout.
    return s


def SBML_FOOTER():
    """Closing tags matching every SBML_HEADER_* opening fragment."""
    return "</model> </sbml>"


def SBML_HEADER_L1v1():
    """Opening tags of an SBML Level 1 Version 1 document."""
    return "<sbml xmlns='http://www.sbml.org/sbml/level1' level='1' version='1'> <model name='m'>\n"


def SBML_HEADER_L1v2():
    """Opening tags of an SBML Level 1 Version 2 document."""
    return "<sbml xmlns='http://www.sbml.org/sbml/level1' level='1' version='2'> <model name='m'>\n"


def SBML_HEADER_L2v1():
    """Opening tags of an SBML Level 2 Version 1 document."""
    return "<sbml xmlns='http://www.sbml.org/sbml/level2' level='2' version='1'> <model name='m'>\n"


def SBML_HEADER_L2v2():
    """Opening tags of an SBML Level 2 Version 2 document."""
    return "<sbml xmlns='http://www.sbml.org/sbml/level2/version2' level='2' version='2'> <model name='m'>\n"


def SBML_HEADER_L2v3():
    """Opening tags of an SBML Level 2 Version 3 document."""
    return "<sbml xmlns='http://www.sbml.org/sbml/level2/version3' level='2' version='3'> <model name='m'>\n"


def XML_HEADER():
    """The XML declaration prepended to every generated document."""
    return "<?xml version='1.0' encoding='UTF-8'?>\n"


def _wrap_sbml(header, s):
    # Shared assembly used by all wrapSBML_* helpers: XML declaration,
    # opening tags, test payload, closing tags.  Replaces five verbatim
    # copies of the same concatenation.
    return XML_HEADER() + header + s + SBML_FOOTER()


def wrapSBML_L1v1(s):
    """Embed *s* in a complete SBML L1v1 document string."""
    return _wrap_sbml(SBML_HEADER_L1v1(), s)


def wrapSBML_L1v2(s):
    """Embed *s* in a complete SBML L1v2 document string."""
    return _wrap_sbml(SBML_HEADER_L1v2(), s)


def wrapSBML_L2v1(s):
    """Embed *s* in a complete SBML L2v1 document string."""
    return _wrap_sbml(SBML_HEADER_L2v1(), s)


def wrapSBML_L2v2(s):
    """Embed *s* in a complete SBML L2v2 document string."""
    return _wrap_sbml(SBML_HEADER_L2v2(), s)


def wrapSBML_L2v3(s):
    """Embed *s* in a complete SBML L2v3 document string."""
    return _wrap_sbml(SBML_HEADER_L2v3(), s)


def wrapXML(s):
    """Prepend the XML declaration to *s*."""
    return XML_HEADER() + s
class TestReadSBML(unittest.TestCase):
global M
M = None
global D
D = None
  def setUp(self):
    """Reset the parsed-document handle before each test."""
    self.D = None
    pass
  def tearDown(self):
    """Release the SBML document parsed by the test."""
    # Translation artifact: building and clearing a throwaway list is how the
    # generated code drops the reference to self.D after each test.
    _dummyList = [ self.D ]; _dummyList[:] = []; del _dummyList
    pass
def test_ReadSBML_AlgebraicRule(self):
s = wrapSBML_L1v2("<listOfRules>" +
" <algebraicRule formula='x + 1'/>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
ar = self.M.getRule(0)
self.assert_(( "x + 1" == ar.getFormula() ))
pass
def test_ReadSBML_AlgebraicRule_L2(self):
s = wrapSBML_L2v1("<listOfRules>" +
" <algebraicRule>" +
" <math>" +
" <apply>" +
" <minus/>" +
" <apply>" +
" <plus/>" +
" <ci> S1 </ci>" +
" <ci> S2 </ci>" +
" </apply>" +
" <ci> T </ci>" +
" </apply>" +
" </math>" +
" </algebraicRule>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
ar = self.M.getRule(0)
self.assert_( ar != None )
self.assertEqual( True, ar.isSetMath() )
math = ar.getMath()
formula = ar.getFormula()
self.assert_( formula != None )
self.assert_(( "S1 + S2 - T" == formula ))
pass
def test_ReadSBML_AssignmentRule(self):
s = wrapSBML_L2v1("<listOfRules>" +
" <assignmentRule variable='k'>" +
" <math>" +
" <apply>" +
" <divide/>" +
" <ci> k3 </ci>" +
" <ci> k2 </ci>" +
" </apply>" +
" </math>" +
" </assignmentRule>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
ar = self.M.getRule(0)
self.assert_( ar != None )
self.assertEqual( True, ar.isSetMath() )
math = ar.getMath()
formula = ar.getFormula()
self.assert_( formula != None )
self.assert_(( "k3 / k2" == formula ))
pass
def test_ReadSBML_Compartment(self):
s = wrapSBML_L1v2("<listOfCompartments>" +
" <compartment name='mitochondria' volume='.0001' units='milliliters'" +
" outside='cell'/>" +
"</listOfCompartments>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumCompartments() == 1 )
c = self.M.getCompartment(0)
self.assert_(( "mitochondria" == c.getId() ))
self.assert_(( "milliliters" == c.getUnits() ))
self.assert_(( "cell" == c.getOutside() ))
self.assert_( c.getVolume() == .0001 )
self.assertEqual( True, c.isSetVolume() )
self.assertEqual( True, c.isSetSize() )
pass
def test_ReadSBML_CompartmentVolumeRule(self):
s = wrapSBML_L1v2("<listOfRules>" +
" <compartmentVolumeRule compartment='A' formula='0.10 * t'/>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
cvr = self.M.getRule(0)
self.assertEqual( True, cvr.isCompartmentVolume() )
self.assert_(( "A" == cvr.getVariable() ))
self.assert_(( "0.10 * t" == cvr.getFormula() ))
self.assert_( cvr.getType() == libsbml.RULE_TYPE_SCALAR )
pass
def test_ReadSBML_Compartment_L2(self):
s = wrapSBML_L2v1("<listOfCompartments>" +
" <compartment id='membrane' size='.3' spatialDimensions='2'" +
" units='area' outside='tissue' constant='false'/>" +
"</listOfCompartments>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumCompartments() == 1 )
c = self.M.getCompartment(0)
self.assertEqual( True, c.isSetId() )
self.assertEqual( False, c.isSetName() )
self.assertEqual( True, c.isSetVolume() )
self.assertEqual( True, c.isSetSize() )
self.assertEqual( True, c.isSetUnits() )
self.assertEqual( True, c.isSetOutside() )
self.assert_(( "membrane" == c.getId() ))
self.assert_(( "area" == c.getUnits() ))
self.assert_(( "tissue" == c.getOutside() ))
self.assert_( c.getSpatialDimensions() == 2 )
self.assert_( c.getSize() == .3 )
pass
def test_ReadSBML_Compartment_defaults(self):
s = wrapSBML_L1v2("<listOfCompartments> <compartment name='cell'/> </listOfCompartments>"
)
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumCompartments() == 1 )
c = self.M.getCompartment(0)
self.assertEqual( True, c.isSetId() )
self.assertEqual( True, c.isSetVolume() )
self.assertEqual( False, c.isSetSize() )
self.assertEqual( False, c.isSetUnits() )
self.assertEqual( False, c.isSetOutside() )
self.assert_(( "cell" == c.getId() ))
self.assert_( c.getVolume() == 1.0 )
pass
def test_ReadSBML_Compartment_defaults_L2(self):
s = wrapSBML_L2v1("<listOfCompartments> <compartment id='cell'/> </listOfCompartments>"
)
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumCompartments() == 1 )
c = self.M.getCompartment(0)
self.assertEqual( True, c.isSetId() )
self.assertEqual( False, c.isSetName() )
self.assertEqual( False, c.isSetSize() )
self.assertEqual( False, c.isSetUnits() )
self.assertEqual( False, c.isSetOutside() )
self.assert_(( "cell" == c.getId() ))
self.assert_( c.getSpatialDimensions() == 3 )
self.assert_( c.getConstant() == True )
pass
def test_ReadSBML_Event(self):
s = wrapSBML_L2v2("<listOfEvents>" +
" <event id='e1' name='MyEvent' timeUnits='time'/>" +
"</listOfEvents>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumEvents() == 1 )
e = self.M.getEvent(0)
self.assert_( e != None )
self.assertEqual( True, e.isSetId() )
self.assertEqual( True, e.isSetName() )
self.assertEqual( True, e.isSetTimeUnits() )
self.assertEqual( False, e.isSetTrigger() )
self.assertEqual( False, e.isSetDelay() )
self.assert_(( "e1" == e.getId() ))
self.assert_(( "MyEvent" == e.getName() ))
self.assert_(( "time" == e.getTimeUnits() ))
pass
def test_ReadSBML_EventAssignment(self):
s = wrapSBML_L2v1("<listOfEvents>" +
" <event>" +
" <listOfEventAssignments>" +
" <eventAssignment variable='k2'>" +
" <math> <cn> 0 </cn> </math>" +
" </eventAssignment>" +
" </listOfEventAssignments>" +
" </event>" +
"</listOfEvents>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumEvents() == 1 )
e = self.M.getEvent(0)
self.assert_( e != None )
self.assert_( e.getNumEventAssignments() == 1 )
ea = e.getEventAssignment(0)
self.assert_( ea != None )
self.assertEqual( True, ea.isSetVariable() )
self.assert_(( "k2" == ea.getVariable() ))
self.assertEqual( True, ea.isSetMath() )
math = ea.getMath()
formula = libsbml.formulaToString(math)
self.assert_( formula != None )
self.assert_(( "0" == formula ))
pass
def test_ReadSBML_Event_delay(self):
s = wrapSBML_L2v1("<listOfEvents>" +
" <event> <delay> <math> <cn> 5 </cn> </math> </delay> </event>" +
"</listOfEvents>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumEvents() == 1 )
e = self.M.getEvent(0)
self.assert_( e != None )
self.assertEqual( True, e.isSetDelay() )
self.assertEqual( False, e.isSetTrigger() )
delay = e.getDelay()
formula = libsbml.formulaToString(delay.getMath())
self.assert_( formula != None )
self.assert_(( "5" == formula ))
pass
def test_ReadSBML_Event_trigger(self):
s = wrapSBML_L2v1("<listOfEvents>" +
" <event>" +
" <trigger>" +
" <math>" +
" <apply>" +
" <leq/>" +
" <ci> P1 </ci>" +
" <ci> t </ci>" +
" </apply>" +
" </math>" +
" </trigger>" +
" </event>" +
"</listOfEvents>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumEvents() == 1 )
e = self.M.getEvent(0)
self.assert_( e != None )
self.assertEqual( False, e.isSetDelay() )
self.assertEqual( True, e.isSetTrigger() )
trigger = e.getTrigger()
formula = libsbml.formulaToString(trigger.getMath())
self.assert_( formula != None )
self.assert_(( "leq(P1, t)" == formula ))
pass
def test_ReadSBML_FunctionDefinition(self):
s = wrapSBML_L2v1("<listOfFunctionDefinitions>" +
" <functionDefinition id='pow3' name='cubed'>" +
" <math>" +
" <lambda>" +
" <bvar><ci> x </ci></bvar>" +
" <apply>" +
" <power/>" +
" <ci> x </ci>" +
" <cn> 3 </cn>" +
" </apply>" +
" </lambda>" +
" </math>" +
" </functionDefinition>" +
"</listOfFunctionDefinitions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumFunctionDefinitions() == 1 )
fd = self.M.getFunctionDefinition(0)
self.assert_( fd != None )
self.assertEqual( True, fd.isSetId() )
self.assertEqual( True, fd.isSetName() )
self.assert_(( "pow3" == fd.getId() ))
self.assert_(( "cubed" == fd.getName() ))
self.assertEqual( True, fd.isSetMath() )
math = fd.getMath()
formula = libsbml.formulaToString(math)
self.assert_( formula != None )
self.assert_(( "lambda(x, pow(x, 3))" == formula ))
pass
def test_ReadSBML_KineticLaw(self):
s = wrapSBML_L1v2("<listOfReactions>" +
" <reaction name='J1'>" +
" <kineticLaw formula='k1*X0'/>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
kl = r.getKineticLaw()
self.assert_(( "k1*X0" == kl.getFormula() ))
pass
def test_ReadSBML_KineticLaw_L2(self):
s = wrapSBML_L2v1("<listOfReactions>" +
" <reaction id='J1'>" +
" <kineticLaw>" +
" <math>" +
" <apply>" +
" <times/>" +
" <ci> k </ci>" +
" <ci> S2 </ci>" +
" <ci> X0 </ci>" +
" </apply>" +
" </math>" +
" </kineticLaw>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_( r != None )
kl = r.getKineticLaw()
self.assert_( kl != None )
self.assertEqual( True, kl.isSetMath() )
math = kl.getMath()
formula = kl.getFormula()
self.assert_( formula != None )
self.assert_(( "k * S2 * X0" == formula ))
pass
def test_ReadSBML_KineticLaw_Parameter(self):
s = wrapSBML_L1v2("<listOfReactions>" +
" <reaction name='J1'>" +
" <kineticLaw formula='k1*X0'>" +
" <listOfParameters>" +
" <parameter name='k1' value='0'/>" +
" </listOfParameters>" +
" </kineticLaw>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
kl = r.getKineticLaw()
self.assert_(( "k1*X0" == kl.getFormula() ))
self.assert_( kl.getNumParameters() == 1 )
p = kl.getParameter(0)
self.assert_(( "k1" == p.getId() ))
self.assert_( p.getValue() == 0 )
pass
def test_ReadSBML_Model(self):
s = wrapXML("<sbml level='1' version='1'>" +
" <model name='testModel'></model>" +
"</sbml>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_(( "testModel" == self.M.getId() ))
pass
def test_ReadSBML_Model_L2(self):
s = wrapXML("<sbml level='2' version='1'>" +
" <model id='testModel'> </model>" +
"</sbml>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assertEqual( True, self.M.isSetId() )
self.assertEqual( False, self.M.isSetName() )
self.assert_(( "testModel" == self.M.getId() ))
pass
def test_ReadSBML_Parameter(self):
s = wrapSBML_L1v2("<listOfParameters>" +
" <parameter name='Km1' value='2.3' units='second'/>" +
"</listOfParameters>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumParameters() == 1 )
p = self.M.getParameter(0)
self.assert_(( "Km1" == p.getId() ))
self.assert_(( "second" == p.getUnits() ))
self.assert_( p.getValue() == 2.3 )
self.assert_( p.isSetValue() == True )
pass
def test_ReadSBML_ParameterRule(self):
s = wrapSBML_L1v2("<listOfRules>" +
" <parameterRule name='k' formula='k3/k2'/>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
pr = self.M.getRule(0)
self.assertEqual( True, pr.isParameter() )
self.assert_(( "k" == pr.getVariable() ))
self.assert_(( "k3/k2" == pr.getFormula() ))
self.assert_( pr.getType() == libsbml.RULE_TYPE_SCALAR )
pass
def test_ReadSBML_Parameter_L2(self):
s = wrapSBML_L2v1("<listOfParameters>" +
" <parameter id='T' value='4.6' units='Celsius' constant='false'/>" +
"</listOfParameters>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumParameters() == 1 )
p = self.M.getParameter(0)
self.assertEqual( True, p.isSetId() )
self.assertEqual( False, p.isSetName() )
self.assertEqual( True, p.isSetValue() )
self.assertEqual( True, p.isSetUnits() )
self.assert_(( "T" == p.getId() ))
self.assert_(( "Celsius" == p.getUnits() ))
self.assert_( p.getValue() == 4.6 )
self.assert_( p.getConstant() == False )
pass
def test_ReadSBML_Parameter_L2_defaults(self):
s = wrapSBML_L2v1("<listOfParameters> <parameter id='x'/> </listOfParameters>"
)
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumParameters() == 1 )
p = self.M.getParameter(0)
self.assertEqual( True, p.isSetId() )
self.assertEqual( False, p.isSetName() )
self.assertEqual( False, p.isSetValue() )
self.assertEqual( False, p.isSetUnits() )
self.assert_(( "x" == p.getId() ))
self.assert_( p.getConstant() == True )
pass
def test_ReadSBML_RateRule(self):
s = wrapSBML_L2v1("<listOfRules>" +
" <rateRule variable='x'>" +
" <math>" +
" <apply>" +
" <times/>" +
" <apply>" +
" <minus/>" +
" <cn> 1 </cn>" +
" <ci> x </ci>" +
" </apply>" +
" <apply>" +
" <ln/>" +
" <ci> x </ci>" +
" </apply>" +
" </apply>" +
" </math>" +
" </rateRule>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
rr = self.M.getRule(0)
self.assert_( rr != None )
self.assertEqual( True, rr.isSetMath() )
math = rr.getMath()
formula = rr.getFormula()
self.assert_( formula != None )
self.assert_(( "(1 - x) * log(x)" == formula ))
pass
def test_ReadSBML_Reaction(self):
s = wrapSBML_L1v2("<listOfReactions>" +
" <reaction name='reaction_1' reversible='false'/>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_(( "reaction_1" == r.getId() ))
self.assert_( r.getReversible() == False )
self.assert_( r.getFast() == False )
pass
def test_ReadSBML_Reaction_L2(self):
s = wrapSBML_L2v1("<listOfReactions>" +
" <reaction id='r1' reversible='false' fast='false'/>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assertEqual( True, r.isSetId() )
self.assertEqual( False, r.isSetName() )
self.assertEqual( True, r.isSetFast() )
self.assert_(( "r1" == r.getId() ))
self.assert_( r.getReversible() == False )
self.assert_( r.getFast() == False )
pass
def test_ReadSBML_Reaction_L2_defaults(self):
s = wrapSBML_L2v1("<listOfReactions> <reaction id='r1'/> </listOfReactions>"
)
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assertEqual( True, r.isSetId() )
self.assertEqual( False, r.isSetName() )
self.assertEqual( False, r.isSetFast() )
self.assert_(( "r1" == r.getId() ))
self.assert_( r.getReversible() == True )
pass
def test_ReadSBML_Reaction_defaults(self):
s = wrapSBML_L1v2("<listOfReactions>" +
" <reaction name='reaction_1'/>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_(( "reaction_1" == r.getId() ))
self.assert_( r.getReversible() != False )
self.assert_( r.getFast() == False )
pass
  def test_ReadSBML_SBML(self):
    """The level/version attributes of the <sbml> root must be parsed."""
    s = wrapXML("<sbml level='1' version='1'> </sbml>")
    self.D = libsbml.readSBMLFromString(s)
    self.assert_( self.D.getLevel() == 1 )
    self.assert_( self.D.getVersion() == 1 )
    pass
def test_ReadSBML_Specie(self):
s = wrapSBML_L1v1("<listOfSpecie>" +
" <specie name='Glucose' compartment='cell' initialAmount='4.1'" +
" units='volume' boundaryCondition='false' charge='6'/>" +
"</listOfSpecie>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumSpecies() == 1 )
sp = self.M.getSpecies(0)
self.assert_(( "Glucose" == sp.getId() ))
self.assert_(( "cell" == sp.getCompartment() ))
self.assert_(( "volume" == sp.getUnits() ))
self.assert_( sp.getInitialAmount() == 4.1 )
self.assert_( sp.getBoundaryCondition() == False )
self.assert_( sp.getCharge() == 6 )
self.assert_( sp.isSetInitialAmount() == True )
self.assert_( sp.isSetCharge() == True )
pass
def test_ReadSBML_SpecieConcentrationRule(self):
s = wrapSBML_L1v1("<listOfRules>" +
" <specieConcentrationRule specie='s2' formula='k * t/(1 + k)'/>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
scr = self.M.getRule(0)
self.assertEqual( True, scr.isSpeciesConcentration() )
self.assert_(( "s2" == scr.getVariable() ))
self.assert_(( "k * t/(1 + k)" == scr.getFormula() ))
self.assert_( scr.getType() == libsbml.RULE_TYPE_SCALAR )
pass
def test_ReadSBML_SpecieConcentrationRule_rate(self):
s = wrapSBML_L1v1("<listOfRules>" +
" <specieConcentrationRule specie='s2' formula='k * t/(1 + k)' " +
" type='rate'/>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
scr = self.M.getRule(0)
self.assertEqual( True, scr.isSpeciesConcentration() )
self.assert_(( "s2" == scr.getVariable() ))
self.assert_(( "k * t/(1 + k)" == scr.getFormula() ))
self.assert_( scr.getType() == libsbml.RULE_TYPE_RATE )
pass
def test_ReadSBML_SpecieReference_Product(self):
s = wrapSBML_L1v1("<listOfReactions>" +
" <reaction name='reaction_1' reversible='false'>" +
" <listOfProducts>" +
" <specieReference specie='S1' stoichiometry='1'/>" +
" </listOfProducts>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_(( "reaction_1" == r.getId() ))
self.assert_( r.getReversible() == False )
self.assert_( r.getNumProducts() == 1 )
sr = r.getProduct(0)
self.assert_(( "S1" == sr.getSpecies() ))
self.assert_( sr.getStoichiometry() == 1 )
self.assert_( sr.getDenominator() == 1 )
pass
def test_ReadSBML_SpecieReference_Reactant(self):
s = wrapSBML_L1v1("<listOfReactions>" +
" <reaction name='reaction_1' reversible='false'>" +
" <listOfReactants>" +
" <specieReference specie='X0' stoichiometry='1'/>" +
" </listOfReactants>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_(( "reaction_1" == r.getId() ))
self.assert_( r.getReversible() == False )
self.assert_( r.getNumReactants() == 1 )
sr = r.getReactant(0)
self.assert_(( "X0" == sr.getSpecies() ))
self.assert_( sr.getStoichiometry() == 1 )
self.assert_( sr.getDenominator() == 1 )
pass
def test_ReadSBML_SpecieReference_defaults(self):
s = wrapSBML_L1v1("<listOfReactions>" +
" <reaction name='reaction_1' reversible='false'>" +
" <listOfReactants>" +
" <specieReference specie='X0'/>" +
" </listOfReactants>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_(( "reaction_1" == r.getId() ))
self.assert_( r.getReversible() == False )
self.assert_( r.getNumReactants() == 1 )
sr = r.getReactant(0)
self.assert_(( "X0" == sr.getSpecies() ))
self.assert_( sr.getStoichiometry() == 1 )
self.assert_( sr.getDenominator() == 1 )
pass
def test_ReadSBML_Specie_defaults(self):
s = wrapSBML_L1v1("<listOfSpecie>" +
" <specie name='Glucose' compartment='cell' initialAmount='1.0'/>" +
"</listOfSpecie>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumSpecies() == 1 )
sp = self.M.getSpecies(0)
self.assert_(( "Glucose" == sp.getId() ))
self.assert_(( "cell" == sp.getCompartment() ))
self.assert_( sp.getInitialAmount() == 1.0 )
self.assert_( sp.getBoundaryCondition() == False )
self.assert_( sp.isSetInitialAmount() == True )
self.assert_( sp.isSetCharge() == False )
pass
def test_ReadSBML_Species(self):
s = wrapSBML_L1v2("<listOfSpecies>" +
" <species name='Glucose' compartment='cell' initialAmount='4.1'" +
" units='volume' boundaryCondition='false' charge='6'/>" +
"</listOfSpecies>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumSpecies() == 1 )
sp = self.M.getSpecies(0)
self.assert_(( "Glucose" == sp.getId() ))
self.assert_(( "cell" == sp.getCompartment() ))
self.assert_(( "volume" == sp.getUnits() ))
self.assert_( sp.getInitialAmount() == 4.1 )
self.assert_( sp.getBoundaryCondition() == False )
self.assert_( sp.getCharge() == 6 )
self.assert_( sp.isSetInitialAmount() == True )
self.assert_( sp.isSetCharge() == True )
pass
def test_ReadSBML_SpeciesConcentrationRule(self):
s = wrapSBML_L1v2("<listOfRules>" +
" <speciesConcentrationRule species='s2' formula='k * t/(1 + k)'/>" +
"</listOfRules>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumRules() == 1 )
scr = self.M.getRule(0)
self.assertEqual( True, scr.isSpeciesConcentration() )
self.assert_(( "s2" == scr.getVariable() ))
self.assert_(( "k * t/(1 + k)" == scr.getFormula() ))
self.assert_( scr.getType() == libsbml.RULE_TYPE_SCALAR )
pass
def test_ReadSBML_SpeciesReference_StoichiometryMath_1(self):
s = wrapSBML_L2v1("<listOfReactions>" +
" <reaction name='r1'>" +
" <listOfReactants>" +
" <speciesReference species='X0'>" +
" <stoichiometryMath>" +
" <math> <ci> x </ci> </math>" +
" </stoichiometryMath>" +
" </speciesReference>" +
" </listOfReactants>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_( r != None )
self.assert_( r.getNumReactants() == 1 )
sr = r.getReactant(0)
self.assert_( sr != None )
self.assertEqual( True, sr.isSetStoichiometryMath() )
math = sr.getStoichiometryMath()
formula = libsbml.formulaToString(math.getMath())
self.assert_( formula != None )
self.assert_(( "x" == formula ))
pass
def test_ReadSBML_SpeciesReference_StoichiometryMath_2(self):
s = wrapSBML_L2v1("<listOfReactions>" +
" <reaction name='r1'>" +
" <listOfReactants>" +
" <speciesReference species='X0'>" +
" <stoichiometryMath>" +
" <math> <cn type='rational'> 3 <sep/> 2 </cn> </math>" +
" </stoichiometryMath>" +
" </speciesReference>" +
" </listOfReactants>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_( r != None )
self.assert_( r.getNumReactants() == 1 )
sr = r.getReactant(0)
self.assert_( sr != None )
self.assertEqual( False, sr.isSetStoichiometryMath() )
self.assert_( sr.getStoichiometry() == 3 )
self.assert_( sr.getDenominator() == 2 )
pass
def test_ReadSBML_SpeciesReference_defaults(self):
s = wrapSBML_L1v2("<listOfReactions>" +
" <reaction name='reaction_1' reversible='false'>" +
" <listOfReactants>" +
" <speciesReference species='X0'/>" +
" </listOfReactants>" +
" </reaction>" +
"</listOfReactions>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumReactions() == 1 )
r = self.M.getReaction(0)
self.assert_(( "reaction_1" == r.getId() ))
self.assert_( r.getReversible() == False )
self.assert_( r.getNumReactants() == 1 )
sr = r.getReactant(0)
self.assert_(( "X0" == sr.getSpecies() ))
self.assert_( sr.getStoichiometry() == 1 )
self.assert_( sr.getDenominator() == 1 )
pass
def test_ReadSBML_Species_L2_1(self):
s = wrapSBML_L2v1("<listOfSpecies>" +
" <species id='Glucose' compartment='cell' initialConcentration='4.1'" +
" substanceUnits='item' spatialSizeUnits='volume'" +
" boundaryCondition='true' charge='6' constant='true'/>" +
"</listOfSpecies>")
self.D = libsbml.readSBMLFromString(s)
self.M = self.D.getModel()
self.assert_( self.M.getNumSpecies() == 1 )
sp = self.M.getSpecies(0)
self.assertEqual( True, sp.isSetId() )
self.assertEqual( False, sp.isSetName() )
self.assertEqual( True, sp.isSetCompartment() )
self.assertEqual( False, sp.isSetInitialAmount() )
self.assertEqual( True, sp.isSetInitialConcentration() )
self.assertEqual( True, sp.isSetSubstanceUnits() )
self.assertEqual( True, sp.isSetSpatialSizeUnits() )
self.assertEqual( True, sp.isSetCharge() )
self.assert_(( "Glucose" == sp.getId() ))
self.assert_(( "cell" == sp.getCompartment() ))
self.assert_(( "item" == sp.getSubstanceUnits() ))
self.assert_(( "volume" == sp.getSpatialSizeUnits() ))
self.assert_( sp.getInitialConcentration() == 4.1 )
self.assert_( sp.getHasOnlySubstanceUnits() == False )
self.assert_( sp.getBoundaryCondition() == True )
self.assert_( sp.getCharge() == 6 )
self.assert_( sp.getConstant() == True )
pass
def test_ReadSBML_Species_L2_2(self):
    """hasOnlySubstanceUnits='true' is read; every unspecified optional attribute stays unset."""
    s = wrapSBML_L2v1("<listOfSpecies>" +
                      " <species id='s' compartment='c' hasOnlySubstanceUnits='true'/>" +
                      "</listOfSpecies>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getNumSpecies() == 1 )
    sp = self.M.getSpecies(0)
    self.assertEqual( True, sp.isSetId() )
    self.assertEqual( False, sp.isSetName() )
    self.assertEqual( True, sp.isSetCompartment() )
    self.assertEqual( False, sp.isSetInitialAmount() )
    self.assertEqual( False, sp.isSetInitialConcentration() )
    self.assertEqual( False, sp.isSetSubstanceUnits() )
    self.assertEqual( False, sp.isSetSpatialSizeUnits() )
    self.assertEqual( False, sp.isSetCharge() )
    self.assert_(( "s" == sp.getId() ))
    self.assert_(( "c" == sp.getCompartment() ))
    self.assert_( sp.getHasOnlySubstanceUnits() == True )
    self.assert_( sp.getBoundaryCondition() == False )
    self.assert_( sp.getConstant() == False )
    pass
def test_ReadSBML_Species_L2_defaults(self):
    """An L2v1 species with only id/compartment gets the documented L2 defaults (all flags False)."""
    s = wrapSBML_L2v1("<listOfSpecies>" +
                      " <species id='Glucose_6_P' compartment='cell'/>" +
                      "</listOfSpecies>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getNumSpecies() == 1 )
    sp = self.M.getSpecies(0)
    self.assertEqual( True, sp.isSetId() )
    self.assertEqual( False, sp.isSetName() )
    self.assertEqual( True, sp.isSetCompartment() )
    self.assertEqual( False, sp.isSetInitialAmount() )
    self.assertEqual( False, sp.isSetInitialConcentration() )
    self.assertEqual( False, sp.isSetSubstanceUnits() )
    self.assertEqual( False, sp.isSetSpatialSizeUnits() )
    self.assertEqual( False, sp.isSetCharge() )
    self.assert_(( "Glucose_6_P" == sp.getId() ))
    self.assert_(( "cell" == sp.getCompartment() ))
    self.assert_( sp.getHasOnlySubstanceUnits() == False )
    self.assert_( sp.getBoundaryCondition() == False )
    self.assert_( sp.getConstant() == False )
    pass
def test_ReadSBML_Unit(self):
    """An L1 unit with an explicit scale keeps the default exponent of 1."""
    s = wrapSBML_L1v2("<listOfUnitDefinitions>" +
                      " <unitDefinition name='substance'>" +
                      " <listOfUnits> <unit kind='mole' scale='-3'/> </listOfUnits>" +
                      " </unitDefinition>" +
                      "</listOfUnitDefinitions>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getNumUnitDefinitions() == 1 )
    ud = self.M.getUnitDefinition(0)
    self.assert_(( "substance" == ud.getId() ))
    self.assert_( ud.getNumUnits() == 1 )
    u = ud.getUnit(0)
    self.assert_( u.getKind() == libsbml.UNIT_KIND_MOLE )
    self.assert_( u.getExponent() == 1 )
    self.assert_( u.getScale() == -3 )
    pass
def test_ReadSBML_UnitDefinition(self):
    """An L1 unitDefinition's 'name' attribute is surfaced through getId()."""
    xml = ("<listOfUnitDefinitions>" +
           " <unitDefinition name='mmls'/>" +
           "</listOfUnitDefinitions>")
    self.D = libsbml.readSBMLFromString(wrapSBML_L1v2(xml))
    self.M = self.D.getModel()
    self.assertEqual(1, self.M.getNumUnitDefinitions())
    unit_def = self.M.getUnitDefinition(0)
    self.assertEqual("mmls", unit_def.getId())
def test_ReadSBML_UnitDefinition_L2(self):
    """An L2 unitDefinition carries separate id and name attributes."""
    s = wrapSBML_L2v1("<listOfUnitDefinitions>" +
                      " <unitDefinition id='mmls' name='mmol/ls'/>" +
                      "</listOfUnitDefinitions>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getNumUnitDefinitions() == 1 )
    ud = self.M.getUnitDefinition(0)
    self.assertEqual( True, ud.isSetId() )
    self.assertEqual( True, ud.isSetName() )
    self.assert_(( "mmls" == ud.getId() ))
    self.assert_(( "mmol/ls" == ud.getName() ))
    pass
def test_ReadSBML_Unit_L2(self):
    """An L2v1 Celsius unit reads multiplier/offset and defaults exponent=1, scale=0."""
    s = wrapSBML_L2v1("<listOfUnitDefinitions>" +
                      " <unitDefinition id='Fahrenheit'>" +
                      " <listOfUnits>" +
                      " <unit kind='Celsius' multiplier='1.8' offset='32'/>" +
                      " </listOfUnits>" +
                      " </unitDefinition>" +
                      "</listOfUnitDefinitions>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getNumUnitDefinitions() == 1 )
    ud = self.M.getUnitDefinition(0)
    self.assertEqual( True, ud.isSetId() )
    self.assert_(( "Fahrenheit" == ud.getId() ))
    self.assert_( ud.getNumUnits() == 1 )
    u = ud.getUnit(0)
    self.assert_( u.getKind() == libsbml.UNIT_KIND_CELSIUS )
    self.assert_( u.getExponent() == 1 )
    self.assert_( u.getScale() == 0 )
    self.assert_( u.getMultiplier() == 1.8 )
    self.assert_( u.getOffset() == 32 )
    pass
def test_ReadSBML_Unit_defaults_L1_L2(self):
    """A bare <unit> gets defaults: exponent 1, scale 0, multiplier 1.0, offset 0.0."""
    s = wrapSBML_L1v2("<listOfUnitDefinitions>" +
                      " <unitDefinition name='bogomips'>" +
                      " <listOfUnits> <unit kind='second'/> </listOfUnits>" +
                      " </unitDefinition>" +
                      "</listOfUnitDefinitions>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getNumUnitDefinitions() == 1 )
    ud = self.M.getUnitDefinition(0)
    self.assert_(( "bogomips" == ud.getId() ))
    self.assert_( ud.getNumUnits() == 1 )
    u = ud.getUnit(0)
    self.assert_( u.getKind() == libsbml.UNIT_KIND_SECOND )
    self.assert_( u.getExponent() == 1 )
    self.assert_( u.getScale() == 0 )
    self.assert_( u.getMultiplier() == 1.0 )
    self.assert_( u.getOffset() == 0.0 )
    pass
def test_ReadSBML_annotation(self):
    """A model-level annotation with two child elements is preserved with both children."""
    s = wrapSBML_L2v3("<annotation xmlns:mysim=\"http://www.mysim.org/ns\">" +
                      " <mysim:nodecolors mysim:bgcolor=\"green\" mysim:fgcolor=\"white\">" +
                      " </mysim:nodecolors>" +
                      " <mysim:timestamp>2000-12-18 18:31 PST</mysim:timestamp>" +
                      "</annotation>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getAnnotation() != None )
    ann = self.M.getAnnotation()
    self.assert_( ann.getNumChildren() == 2 )
    pass
def test_ReadSBML_annotation_sbml(self):
    """An annotation directly under <sbml> in L1v1 provokes at least one read error."""
    s = wrapXML("<sbml level=\"1\" version=\"1\">" +
                "  <annotation xmlns:jd = \"http://www.sys-bio.org/sbml\">" +
                "    <jd:header>" +
                "      <VersionHeader SBMLVersion = \"1.0\"/>" +
                "    </jd:header>" +
                "    <jd:display>" +
                "      <SBMLGraphicsHeader BackGroundColor = \"15728639\"/>" +
                "    </jd:display>" +
                "  </annotation>" +
                "</sbml>")
    self.D = libsbml.readSBMLFromString(s)
    self.assert_( self.D.getNumErrors() > 0 )
    pass
def test_ReadSBML_annotation_sbml_L2(self):
    """An annotation directly under <sbml> is legal in L2 and produces no errors."""
    s = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level2\" level=\"2\" version=\"1\"> " +
                "  <annotation>" +
                "    <rdf xmlns=\"http://www.w3.org/1999/anything\">" +
                "    </rdf>" +
                "  </annotation>" +
                "  <model>" +
                "  </model>" +
                "</sbml>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.D.getNumErrors() == 0 )
    pass
def test_ReadSBML_invalid_default_namespace(self):
valid = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level2/version4\" level=\"2\" version=\"4\"> " +
" <model>" +
" <notes>" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">Some text.</p>" +
" </notes>" +
" <annotation>" +
" <example xmlns=\"http://www.example.org/\"/>" +
" </annotation>" +
" <listOfCompartments>" +
" <compartment id=\"compartmentOne\" size=\"1\"/>" +
" </listOfCompartments>" +
" <listOfSpecies>" +
" <species id=\"S1\" initialConcentration=\"1\" compartment=\"compartmentOne\"/>" +
" <species id=\"S2\" initialConcentration=\"0\" compartment=\"compartmentOne\"/>" +
" </listOfSpecies>" +
" <listOfParameters>" +
" <parameter id=\"t\" value = \"1\" units=\"second\"/>" +
" </listOfParameters>" +
" <listOfConstraints>" +
" <constraint sboTerm=\"SBO:0000064\">" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">" +
" <apply>" +
" <leq/>" +
" <ci> S1 </ci>" +
" <ci> t </ci>" +
" </apply>" +
" </math>" +
" <message>" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\"> Species S1 is out of range </p>" +
" </message>" +
" </constraint>" +
" </listOfConstraints>" +
" <listOfReactions>" +
" <reaction id=\"reaction_1\" reversible=\"false\">" +
" <listOfReactants>" +
" <speciesReference species=\"S1\"/>" +
" </listOfReactants>" +
" <listOfProducts>" +
" <speciesReference species=\"S2\">" +
" </speciesReference>" +
" </listOfProducts>" +
" </reaction>" +
" </listOfReactions>" +
" </model>" +
" </sbml>")
invalid = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level2/version4\" level=\"2\" version=\"4\"> " +
" <model xmlns=\"http://invalid/custom/default/uri\">" +
" <notes xmlns=\"http://invalid/custom/default/uri/in/notes\">" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">Some text.</p>" +
" </notes>" +
" <annotation xmlns=\"http://invalid/custom/default/uri/in/annotation\">" +
" <example xmlns=\"http://www.example.org/\"/>" +
" </annotation>" +
" <listOfCompartments>" +
" <compartment id=\"compartmentOne\" size=\"1\"/>" +
" </listOfCompartments>" +
" <listOfSpecies>" +
" <notes xmlns=\"http://invalid/custom/default/uri/in/notes\">" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">Some text.</p>" +
" </notes>" +
" <annotation xmlns=\"http://invalid/custom/default/uri/in/annotation\">" +
" <example xmlns=\"http://www.example.org/\"/>" +
" </annotation>" +
" <species id=\"S1\" initialConcentration=\"1\" compartment=\"compartmentOne\"/>" +
" <species id=\"S2\" initialConcentration=\"0\" compartment=\"compartmentOne\"/>" +
" </listOfSpecies>" +
" <listOfParameters>" +
" <parameter id=\"t\" value = \"1\" units=\"second\"/>" +
" </listOfParameters>" +
" <listOfConstraints>" +
" <constraint sboTerm=\"SBO:0000064\">" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">" +
" <apply>" +
" <leq/>" +
" <ci> S1 </ci>" +
" <ci> t </ci>" +
" </apply>" +
" </math>" +
" <message xmlns=\"http://invalid/custom/default/uri/in/message\">" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\"> Species S1 is out of range </p>" +
" </message>" +
" </constraint>" +
" </listOfConstraints>" +
" <listOfReactions>" +
" <reaction id=\"reaction_1\" reversible=\"false\">" +
" <listOfReactants>" +
" <speciesReference xmlns=\"http://invalid/custom/default/uri\" species=\"S1\"/>" +
" </listOfReactants>" +
" <listOfProducts>" +
" <speciesReference species=\"S2\">" +
" <notes xmlns=\"http://invalid/custom/default/uri/in/notes\">" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">Some text.</p>" +
" </notes>" +
" <annotation xmlns=\"http://invalid/custom/default/uri/in/annotation\">" +
" <example xmlns=\"http://www.example.org/\"/>" +
" </annotation>" +
" </speciesReference>" +
" </listOfProducts>" +
" </reaction>" +
" </listOfReactions>" +
" </model>" +
" </sbml>")
self.D = libsbml.readSBMLFromString(valid)
self.assert_( self.D.getNumErrors() == 0 )
_dummyList = [ self.D ]; _dummyList[:] = []; del _dummyList
self.D = libsbml.readSBMLFromString(invalid)
self.assert_( self.D.getNumErrors() == 9 )
pass
def test_ReadSBML_line_col_numbers(self):
    """Smoke test: parsed elements can be retrieved after reading a multi-line document
    (line/column bookkeeping must not crash the reader)."""
    #setXMLParser()
    s = wrapString("<?xml version='1.0' encoding='UTF-8'?>\n" +
                   "<sbml xmlns='http://www.sbml.org/sbml/level2' level='2' version='1'>\n" +
                   "  <model id='testModel' name='testModel'>\n" +
                   "    <listOfReactions> <reaction/> </listOfReactions>\n" +
                   "  </model>\n" +
                   "</sbml>\n")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M != None )
    sb = self.M
    sb = self.M.getListOfReactions()
    sb = self.M.getReaction(0)
    pass
def test_ReadSBML_metaid(self):
    """The metaid attribute on each kind of top-level model component is read back."""
    s = wrapSBML_L2v1("<listOfFunctionDefinitions>" +
                      " <functionDefinition metaid='fd'/>" +
                      "</listOfFunctionDefinitions>" +
                      "<listOfUnitDefinitions>" +
                      " <unitDefinition metaid='ud'/>" +
                      "</listOfUnitDefinitions>" +
                      "<listOfCompartments>" +
                      " <compartment metaid='c'/>" +
                      "</listOfCompartments>" +
                      "<listOfSpecies>" +
                      " <species metaid='s'/>" +
                      "</listOfSpecies>" +
                      "<listOfParameters>" +
                      " <parameter metaid='p'/>" +
                      "</listOfParameters>" +
                      "<listOfRules>" +
                      " <rateRule metaid='rr'/>" +
                      "</listOfRules>" +
                      "<listOfReactions>" +
                      " <reaction metaid='rx'/>" +
                      "</listOfReactions>" +
                      "<listOfEvents>" +
                      " <event metaid='e'/>" +
                      "</listOfEvents>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M != None )
    sb = self.M.getFunctionDefinition(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "fd" == sb.getMetaId() ))
    sb = self.M.getUnitDefinition(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "ud" == sb.getMetaId() ))
    sb = self.M.getCompartment(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "c" == sb.getMetaId() ))
    sb = self.M.getSpecies(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "s" == sb.getMetaId() ))
    sb = self.M.getParameter(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "p" == sb.getMetaId() ))
    sb = self.M.getRule(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "rr" == sb.getMetaId() ))
    sb = self.M.getReaction(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "rx" == sb.getMetaId() ))
    sb = self.M.getEvent(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "e" == sb.getMetaId() ))
    pass
def test_ReadSBML_metaid_Event(self):
    """metaid is read on an event, its listOfEventAssignments, and an eventAssignment."""
    s = wrapSBML_L2v1("<listOfEvents>" +
                      " <event metaid='e'>" +
                      " <listOfEventAssignments metaid='loea'>" +
                      " <eventAssignment metaid='ea'/>" +
                      " </listOfEventAssignments>" +
                      " </event>" +
                      "</listOfEvents>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M != None )
    e = self.M.getEvent(0)
    sb = e
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "e" == sb.getMetaId() ))
    sb = e.getListOfEventAssignments()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "loea" == sb.getMetaId() ))
    sb = e.getEventAssignment(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "ea" == sb.getMetaId() ))
    pass
def test_ReadSBML_metaid_ListOf(self):
    """metaid attributes on the ListOf* container elements themselves are read."""
    s = wrapSBML_L2v1("<listOfFunctionDefinitions metaid='lofd'/>" +
                      "<listOfUnitDefinitions metaid='loud'/>" +
                      "<listOfCompartments metaid='loc'/>" +
                      "<listOfSpecies metaid='los'/>" +
                      "<listOfParameters metaid='lop'/>" +
                      "<listOfRules metaid='lor'/>" +
                      "<listOfReactions metaid='lorx'/>" +
                      "<listOfEvents metaid='loe'/>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M != None )
    sb = self.M.getListOfFunctionDefinitions()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lofd" == sb.getMetaId() ))
    sb = self.M.getListOfUnitDefinitions()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "loud" == sb.getMetaId() ))
    sb = self.M.getListOfCompartments()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "loc" == sb.getMetaId() ))
    sb = self.M.getListOfSpecies()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "los" == sb.getMetaId() ))
    sb = self.M.getListOfParameters()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lop" == sb.getMetaId() ))
    sb = self.M.getListOfRules()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lor" == sb.getMetaId() ))
    sb = self.M.getListOfReactions()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lorx" == sb.getMetaId() ))
    sb = self.M.getListOfEvents()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "loe" == sb.getMetaId() ))
    pass
def test_ReadSBML_metaid_Reaction(self):
    """metaid is read on a reaction and on every nested reaction sub-element."""
    s = wrapSBML_L2v1("<listOfReactions>" +
                      " <reaction metaid='r'>" +
                      " <listOfReactants metaid='lor'>" +
                      " <speciesReference metaid='sr1'/>" +
                      " </listOfReactants>" +
                      " <listOfProducts metaid='lop'>" +
                      " <speciesReference metaid='sr2'/>" +
                      " </listOfProducts>" +
                      " <listOfModifiers metaid='lom'>" +
                      " <modifierSpeciesReference metaid='msr'/>" +
                      " </listOfModifiers>" +
                      " <kineticLaw metaid='kl'/>" +
                      " </reaction>" +
                      "</listOfReactions>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M != None )
    r = self.M.getReaction(0)
    sb = r
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "r" == sb.getMetaId() ))
    sb = r.getListOfReactants()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lor" == sb.getMetaId() ))
    sb = r.getReactant(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "sr1" == sb.getMetaId() ))
    sb = r.getListOfProducts()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lop" == sb.getMetaId() ))
    sb = r.getProduct(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "sr2" == sb.getMetaId() ))
    sb = r.getListOfModifiers()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lom" == sb.getMetaId() ))
    sb = r.getModifier(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "msr" == sb.getMetaId() ))
    sb = r.getKineticLaw()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "kl" == sb.getMetaId() ))
    pass
def test_ReadSBML_metaid_Unit(self):
    """metaid is read on a unitDefinition, its listOfUnits, and a unit."""
    s = wrapSBML_L2v1("<listOfUnitDefinitions>" +
                      " <unitDefinition metaid='ud'>" +
                      " <listOfUnits metaid='lou'>" +
                      " <unit metaid='u'/>" +
                      " </listOfUnits>" +
                      " </unitDefinition>" +
                      "</listOfUnitDefinitions>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M != None )
    ud = self.M.getUnitDefinition(0)
    sb = ud
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "ud" == sb.getMetaId() ))
    sb = ud.getListOfUnits()
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "lou" == sb.getMetaId() ))
    sb = ud.getUnit(0)
    self.assertEqual( True, sb.isSetMetaId() )
    self.assert_(( "u" == sb.getMetaId() ))
    pass
def test_ReadSBML_notes(self):
    """A <notes> child of a kineticLaw is preserved with its text content intact."""
    s = wrapSBML_L2v3("<listOfReactions>" +
                      "<reaction name='J1'>" +
                      " <kineticLaw formula='k1*X0'>" +
                      " <notes>This is a test note.</notes>" +
                      " <listOfParameters>" +
                      " <parameter name='k1' value='0'/>" +
                      " </listOfParameters>" +
                      " </kineticLaw>" +
                      "</reaction>" +
                      "</listOfReactions>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    r = self.M.getReaction(0)
    kl = r.getKineticLaw()
    self.assert_( kl.getNotes() != None )
    notes = kl.getNotes().getChild(0).getCharacters()
    # ("x" != notes) == False  is the generated-code way of asserting equality
    self.assert_( ( "This is a test note." != notes ) == False )
    pass
def test_ReadSBML_notes_ListOf(self):
    """<notes> children of the ListOf* container elements are read and keep their text."""
    s = wrapSBML_L2v1("<listOfFunctionDefinitions>" +
                      " <notes>My Functions</notes>" +
                      " <functionDefinition/>" +
                      "</listOfFunctionDefinitions>" +
                      "<listOfUnitDefinitions>" +
                      " <notes>My Units</notes>" +
                      " <unitDefinition/>" +
                      "</listOfUnitDefinitions>" +
                      "<listOfCompartments>" +
                      " <notes>My Compartments</notes>" +
                      " <compartment/>" +
                      "</listOfCompartments>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M != None )
    sb = self.M.getListOfFunctionDefinitions()
    self.assertEqual( True, sb.isSetNotes() )
    notes = sb.getNotes().getChild(0).getCharacters()
    self.assert_( ( "My Functions" != notes ) == False )
    sb = self.M.getListOfUnitDefinitions()
    self.assertEqual( True, sb.isSetNotes() )
    notes = sb.getNotes().getChild(0).getCharacters()
    self.assert_( ( "My Units" != notes ) == False )
    sb = self.M.getListOfCompartments()
    self.assertEqual( True, sb.isSetNotes() )
    notes = sb.getNotes().getChild(0).getCharacters()
    self.assert_( ( "My Compartments" != notes ) == False )
    pass
def test_ReadSBML_notes_sbml(self):
    """In L1v1 a <notes> child of <sbml> is still read, but is reported as an error."""
    doc_xml = wrapXML("<sbml level='1' version='1'>" +
                      " <notes>Notes are not allowed as part of the SBML element.</notes>" +
                      "</sbml>")
    self.D = libsbml.readSBMLFromString(doc_xml)
    self.assertTrue(self.D.getNotes() is not None)
    note_text = self.D.getNotes().getChild(0).getCharacters()
    self.assertEqual("Notes are not allowed as part of the SBML element.", note_text)
    self.assertTrue(self.D.getNumErrors() > 0)
def test_ReadSBML_notes_sbml_L2(self):
    """A <notes> child of <sbml> is legal in L2 and produces no errors."""
    s = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level2\" level=\"2\" version=\"1\"> " +
                "  <notes>" +
                "    <html xmlns=\"http://www.w3.org/1999/xhtml\">" +
                "    </html>" +
                "  </notes>" +
                "  <model>" +
                "  </model>" +
                "</sbml>")
    self.D = libsbml.readSBMLFromString(s)
    self.assert_( self.D.getNotes() != None )
    self.assert_( self.D.getNumErrors() == 0 )
    pass
def test_ReadSBML_notes_xmlns(self):
    """A namespace declared on a notes child element is preserved on read."""
    s = wrapSBML_L2v3("<notes>" +
                      " <body xmlns=\"http://www.w3.org/1999/xhtml\">Some text.</body>" +
                      "</notes>")
    self.D = libsbml.readSBMLFromString(s)
    self.M = self.D.getModel()
    self.assert_( self.M.getNotes() != None )
    ns = self.M.getNotes().getChild(0).getNamespaces()
    self.assert_( ns.getLength() == 1 )
    self.assert_(( "http://www.w3.org/1999/xhtml" == ns.getURI(0) ))
    notes = self.M.getNotes().getChild(0).getChild(0).getCharacters()
    self.assert_( ( "Some text." != notes ) == False )
    pass
def suite():
    """Build the unittest suite containing every TestReadSBML case."""
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(TestReadSBML))
    return result
if __name__ == "__main__":
    # run the whole suite; exit status signals overall success/failure to the
    # calling test harness
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
| alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/sbml/TestReadSBML.py | Python | gpl-3.0 | 55,343 | [
"VisIt"
] | 2afea172e5a7226cc0a396267f03ff6cd6ac20d0c0e1a8d694189c76a8375f60 |
#!/usr/bin/env python
from __future__ import division
import utils as utils
import numpy as np
import numpy.random as npr
import theano
import theano.tensor as T
import time
from HelperFuncs import to_fX
def my_batched_dot(A, B):
    """
    Compute: [np.dot(A[i,:,:], B[i,:,:]) for i in range(A.shape[0])]
    """
    # broadcasted outer product, then contract over the shared inner axis
    prods = A.dimshuffle(0, 1, 2, 'x') * B.dimshuffle(0, 'x', 1, 2)
    return prods.sum(axis=-2)
############################################
############################################
## Class for painting objects into images ##
############################################
############################################
class ObjectPainter(object):
    """
    Draws a few simple parametric shapes into images with subpixel
    resolution, via Theano symbolic expressions (see write()).
    """
    def __init__(self, img_height, img_width, obj_type='circle', obj_scale=0.2):
        """
        A class for drawing a few simple objects with subpixel resolution.

        Parameters
        ----------
        img_height : int, image height in pixels.
        img_width : int, image width in pixels.
        obj_type : str, one of the shape names in _construct_obj_coords.
        obj_scale : float, relative shape size in image coordinates.
        """
        self.img_height = img_height
        self.img_width = img_width
        self.obj_type = obj_type
        self.obj_scale = obj_scale
        # make coordinate system for points in the object to render
        obj_x_coords, obj_y_coords = self._construct_obj_coords( \
                obj_type=self.obj_type, obj_scale=self.obj_scale)
        self.obj_x = T.constant(obj_x_coords)
        self.obj_y = T.constant(obj_y_coords)
        self.obj_x_range = [np.min(obj_x_coords), np.max(obj_x_coords)]
        self.obj_y_range = [np.min(obj_y_coords), np.max(obj_y_coords)]
        # make coordinate system for x and y location in the image.
        # -- image coordinates for the smallest dimension range over
        #    [-init_scale....init_scale], and coordinates for the largest
        #    dimension are at the same scale, but over a larger range.
        img_x_coords, img_y_coords = self._construct_img_coords( \
                x_dim=self.img_width, y_dim=self.img_height)
        self.img_x = T.constant(img_x_coords)
        self.img_y = T.constant(img_y_coords)
        self.img_x_range = [np.min(img_x_coords), np.max(img_x_coords)]
        self.img_y_range = [np.min(img_y_coords), np.max(img_y_coords)]
        return

    def _construct_obj_coords(self, obj_type='circle', obj_scale=0.2):
        """
        Construct (x, y) point coordinates for the requested shape, rescaled
        so the largest coordinate magnitude equals obj_scale.

        Known shapes: circle, square, cross, t-up, t-down, t-left, t-right;
        any other name falls through to a small 3x3 ring.
        """
        if obj_type == 'circle':
            coords = [(-1, 3), (0, 3), (1, 3), (2, 2), \
                      (3, 1), (3, 0), (3, -1), (2, -2), \
                      (1, -3), (0, -3), (-1, -3), (-2, -2), \
                      (-3, -1), (-3, 0), (-3, 1), (-2, 2)]
        elif obj_type == 'square':
            coords = [(-2, 2), (-1, 2), (0, 2), (1, 2), \
                      (2, 2), (2, 1), (2, 0), (2, -1), \
                      (2, -2), (1, -2), (0, -2), (-1, -2), \
                      (-2, -2), (-2, -1), (-2, 0), (-2, 1)]
        elif obj_type == 'cross':
            coords = [(0, 3), (0, 2), (0, 1), (0, 0), \
                      (1, 0), (2, 0), (3, 0), (0, -1), \
                      (0, -2), (0, -3), (-1, 0), (-2, 0), \
                      (-3, 0)]
        elif obj_type == 't-up':
            coords = [(-3, 3), (-2, 3), (-1, 3), (0, 3), \
                      (1, 3), (2, 3), (3, 3), (0, 2), \
                      (0, 1), (0, 0), (0, -1), (0, -2), \
                      (0, -3)]
        elif obj_type == 't-down':
            # NOTE(review): the (0, 3) entry below sits apart from the other
            # bar points at y=-3; the union of all listed points still covers
            # a full upside-down T -- confirm this ordering is intended.
            coords = [(-3, -3), (-2, -3), (-1, -3), (0, 3), \
                      (1, -3), (2, -3), (3, -3), (0, 2), \
                      (0, 1), (0, 0), (0, -1), (0, -2), \
                      (0, -3)]
        elif obj_type == 't-left':
            coords = [(-3, 3), (-3, 2), (-3, 1), (-3, 0), \
                      (-3, -1), (-3, -2), (-3, -3), (-2, 0), \
                      (-1, 0), (0, 0), (1, 0), (2, 0), \
                      (3, 0)]
        elif obj_type == 't-right':
            coords = [(3, 3), (3, 2), (3, 1), (-3, 0), \
                      (3, -1), (3, -2), (3, -3), (-2, 0), \
                      (-1, 0), (0, 0), (1, 0), (2, 0), \
                      (3, 0)]
        else:
            # unknown type: fall back to a small ring of 8 points
            coords = [(-1, 1), (0, 1), (1, 1), (1, 0), \
                      (1, -1), (0, -1), (-1, -1), (-1, 0)]
        x_coords = np.asarray([float(pt[0]) for pt in coords])
        y_coords = np.asarray([float(pt[1]) for pt in coords])
        # rescale so max(|coord|) == obj_scale
        rescale = max(np.max(x_coords), np.max(y_coords))
        x_coords = (obj_scale / rescale) * x_coords
        y_coords = (obj_scale / rescale) * y_coords
        x_coords = x_coords.astype(theano.config.floatX)
        y_coords = y_coords.astype(theano.config.floatX)
        return x_coords, y_coords

    def _construct_img_coords(self, x_dim=32, y_dim=32):
        """
        Construct flattened (x, y) coordinates for all pixels in the image.

        The smaller dimension spans [-1, 1]; the larger dimension uses the
        same scale and therefore spans a proportionally larger range.
        """
        min_dim = float( min(x_dim, y_dim) )
        x_scale = x_dim / min_dim
        y_scale = y_dim / min_dim
        xc = x_scale * np.linspace(start=-1., stop=1., num=x_dim)
        yc = y_scale * np.linspace(start=-1., stop=1., num=y_dim)
        coords = []
        for x_idx in range(x_dim):
            for y_idx in range(y_dim):
                coords.append((xc[x_idx], yc[y_idx]))
        x_coords = np.asarray([float(pt[0]) for pt in coords])
        y_coords = np.asarray([float(pt[1]) for pt in coords])
        x_coords = x_coords.astype(theano.config.floatX)
        y_coords = y_coords.astype(theano.config.floatX)
        return x_coords, y_coords

    def filterbank_matrices(self, center_y, center_x, delta, sigma):
        """
        Create a Fy and a Fx

        Parameters
        ----------
        center_y : T.vector (shape: batch_size)
        center_x : T.vector (shape: batch_size)
            Y and X center coordinates for the attention window
        delta : T.vector (shape: batch_size)
            scale applied to the object's template coordinates
        sigma : T.vector (shape: batch_size)
            std. dev. of the Gaussian writing kernel

        Returns
        -------
        FY, FX
        """
        tol = 1e-4
        # construct x and y coordinates for the grid points
        obj_x = center_x.dimshuffle(0, 'x') + \
                (delta.dimshuffle(0, 'x') * self.obj_x)
        obj_y = center_y.dimshuffle(0, 'x') + \
                (delta.dimshuffle(0, 'x') * self.obj_y)
        # construct unnormalized attention weights for each grid point
        FX = T.exp( -(self.img_x - obj_x.dimshuffle(0,1,'x'))**2. / \
                   (2. * sigma.dimshuffle(0,'x','x')**2.) )
        FY = T.exp( -(self.img_y - obj_y.dimshuffle([0,1,'x']))**2. / \
                   (2. * sigma.dimshuffle(0,'x','x')**2.) )
        # normalize the attention weights
        # NOTE: normalization divides by the max of the per-point weight sums
        # (a single scalar), rather than per-row as in the commented-out code.
        #FX = FX / (FX.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
        #FY = FY / (FY.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
        FX = FX / (T.max(FX.sum(axis=-1)) + tol)
        FY = FY / (T.max(FY.sum(axis=-1)) + tol)
        return FY, FX

    def write(self, center_y, center_x, delta, sigma):
        """
        Write a batch of objects into full sized images.

        Parameters
        ----------
        center_y : :class:`~tensor.TensorVariable`
            Center coordinates for the objects.
            Expected shape: (batch_size,)
        center_x : :class:`~tensor.TensorVariable`
            Center coordinates for the objects.
            Expected shape: (batch_size,)
        delta : :class:`~tensor.TensorVariable`
            Scale for the objects.
            Expected shape: (batch_size,)
        sigma : :class:`~tensor.TensorVariable`
            Std. dev. for Gaussian writing kernel.
            Expected shape: (batch_size,)

        Returns
        -------
        images : :class:`~tensor.TensorVariable`
            images of objects: (batch_size x img_height*img_width)
        """
        # Get separable filterbank
        FY, FX = self.filterbank_matrices(center_y, center_x, delta, sigma)
        # combine per-point x and y weights, then sum contributions from all
        # object template points into each pixel
        FI = FX * FY
        I_raw = T.sum(FI, axis=1)
        # rescale so the brightest pixel across the whole batch equals 1
        I = I_raw / T.max(I_raw)
        return I
def get_object_painters(im_dim=None, obj_types=None, obj_scale=0.2):
    """
    Get a dict, keyed by object type, of compiled object-painting functions.

    Parameters
    ----------
    im_dim : int
        Height and width (square) of the images to render into. Required.
    obj_types : iterable of str
        Object type names understood by ObjectPainter (e.g. 'circle').
        Required.
    obj_scale : float
        Relative scale of the rendered objects.

    Returns
    -------
    dict mapping each object type name to a compiled Theano function with
    signature (center_y, center_x, delta, sigma) -> flattened images.
    """
    # fail fast with a clear message instead of an opaque TypeError when the
    # "defaulted" required arguments are omitted
    if (im_dim is None) or (obj_types is None):
        raise ValueError("im_dim and obj_types must both be provided")
    # configure object renderers for the desired object types...
    OPTRS = {}
    for obj in obj_types:
        # configure an object renderer for this type
        optr = ObjectPainter(im_dim, im_dim, obj_type=obj, obj_scale=obj_scale)
        # compile a Theano function for doing the rendering
        _center_x = T.vector()
        _center_y = T.vector()
        _delta = T.vector()
        _sigma = T.vector()
        _W = optr.write(_center_y, _center_x, _delta, _sigma)
        paint_obj = theano.function(inputs=[_center_y, _center_x, _delta, _sigma], \
                                    outputs=_W)
        OPTRS[obj] = paint_obj
    return OPTRS
####################################################################
####################################################################
## Class for generating random trajectories within a bounding box ##
####################################################################
####################################################################
class TrajectoryGenerator(object):
    """
    Generates bouncing point trajectories inside an axis-aligned box.
    """
    def __init__(self, x_range=[-1.,1.], y_range=[-1.,1.], max_speed=0.1):
        """
        A class for generating trajectories in a box with given x/y range.

        Parameters
        ----------
        x_range : [min, max] horizontal extent of the bounding box.
        y_range : [min, max] vertical extent of the bounding box.
        max_speed : maximum allowed l2 norm for any velocity.
        """
        self.x_range = x_range
        self.y_range = y_range
        self.x_min, self.x_max = x_range
        self.y_min, self.y_max = y_range
        self.max_speed = max_speed
        return

    def _rand_pos(self, num_samples, rand_vals=None):
        """
        Generate positions uniformly at random within the bounding box.

        If rand_vals (uniform [0,1) samples, shape (num_samples, 2)) is
        given, it is used as the randomness source; a copy is taken so the
        caller's array is never modified.
        """
        if rand_vals is None:
            samp_pos = npr.rand(num_samples, 2)
        else:
            # copy, so the scaling/shifting below can't clobber the input
            # (the original code mutated rand_vals in place)
            samp_pos = np.array(rand_vals, copy=True)
        # scale x and y coords to the width/height of the box
        samp_pos[:, 0] = samp_pos[:, 0] * (self.x_range[1] - self.x_range[0])
        samp_pos[:, 1] = samp_pos[:, 1] * (self.y_range[1] - self.y_range[0])
        # shift x and y coords so the box starts at (x_min, y_min)
        samp_pos[:, 0] = samp_pos[:, 0] + self.x_min
        samp_pos[:, 1] = samp_pos[:, 1] + self.y_min
        return samp_pos

    def _rand_vel(self, num_samples, randn_vals=None):
        """
        Generate random velocities with l2 norm at most self.max_speed.

        If randn_vals (standard normal samples, shape (num_samples, 2)) is
        given, it is rescaled (without modification) instead of drawing new
        samples.
        """
        if randn_vals is None:
            samp_vel = npr.randn(num_samples, 2)
        else:
            samp_vel = randn_vals
        # rescale any velocity whose norm exceeds max_speed down to max_speed
        vel_norms = np.sqrt(np.sum(samp_vel**2.0, axis=1, keepdims=True))
        samp_vel = samp_vel * np.minimum(1.0, (self.max_speed / vel_norms))
        return samp_vel

    def _initial_pos_and_vel(self, num_samples):
        """
        Generate random initial positions and velocities.
        """
        samp_pos = self._rand_pos(num_samples)
        samp_vel = self._rand_vel(num_samples, randn_vals=None)
        return samp_pos, samp_vel

    def _update_pos_and_vel(self, samp_pos, samp_vel):
        """
        Return positions and velocities advanced by one time step.

        Positions are clipped to the bounding box and the corresponding
        velocity component is flipped whenever a boundary is crossed.
        Neither input array is modified.
        """
        # advance positions
        new_pos = samp_pos + samp_vel
        # find coordinates that left the bounding box, and clip them back
        x_min_clip = new_pos[:, 0] < self.x_min
        x_max_clip = new_pos[:, 0] > self.x_max
        y_min_clip = new_pos[:, 1] < self.y_min
        y_max_clip = new_pos[:, 1] > self.y_max
        new_pos[x_min_clip, 0] = self.x_min
        new_pos[x_max_clip, 0] = self.x_max
        new_pos[y_min_clip, 1] = self.y_min
        new_pos[y_max_clip, 1] = self.y_max
        # flip velocities for coordinates that were clipped.
        # BUGFIX: use a real copy -- `samp_vel[:,:]` is a numpy *view*, so the
        # original code silently mutated the caller's velocity array in place.
        x_clipped = x_min_clip | x_max_clip
        y_clipped = y_min_clip | y_max_clip
        new_vel = samp_vel.copy()
        new_vel[x_clipped, 0] = -samp_vel[x_clipped, 0]
        new_vel[y_clipped, 1] = -samp_vel[y_clipped, 1]
        return new_pos, new_vel

    def generate_trajectories(self, num_samples, traj_len, vel_reset=0.05):
        """
        Generate num_samples trajectories of length traj_len.

        At each step, each trajectory's velocity is re-randomized with
        probability vel_reset. Returns (positions, velocities), each of
        shape (traj_len, num_samples, 2), cast to theano's floatX.
        """
        # initialize container arrays
        traj_pos = np.zeros((traj_len, num_samples, 2))
        traj_vel = np.zeros((traj_len, num_samples, 2))
        randn_vals = npr.randn(traj_len, num_samples, 2)
        vel_switches = npr.rand(traj_len, num_samples) < vel_reset
        # generate and record the trajectories step by step
        step_pos, step_vel = self._initial_pos_and_vel(num_samples)
        for i in range(traj_len):
            rand_vel = self._rand_vel(num_samples, randn_vals[i])
            traj_pos[i, :, :] = step_pos
            traj_vel[i, :, :] = step_vel
            # NOTE(review): re-randomized velocities are recorded in traj_vel
            # but not applied to step_vel, so the simulation continues with
            # the old velocity -- confirm this is intended.
            traj_vel[i, vel_switches[i], :] = rand_vel[vel_switches[i], :]
            step_pos, step_vel = self._update_pos_and_vel(step_pos, step_vel)
        traj_pos = traj_pos.astype(theano.config.floatX)
        traj_vel = traj_vel.astype(theano.config.floatX)
        return traj_pos, traj_vel
if __name__ == "__main__":
    # smoke test / micro-benchmark: render batches of circle trajectories and
    # report rendering throughput, then dump a sample image grid.
    # configure an object renderer
    OPTR = ObjectPainter(32, 32, obj_type='circle', obj_scale=0.4)
    _center_x = T.vector()
    _center_y = T.vector()
    _delta = T.vector()
    _sigma = T.vector()
    _W = OPTR.write(_center_y, _center_x, _delta, _sigma)
    write_func = theano.function(inputs=[_center_y, _center_x, _delta, _sigma], \
                                 outputs=_W)
    # configure a trajectory generator
    num_samples = 100
    traj_len = 64
    x_range = [-0.8,0.8]
    y_range = [-0.8,0.8]
    max_speed = 0.15
    TRAJ = TrajectoryGenerator(x_range=x_range, y_range=y_range, \
                               max_speed=max_speed)
    # test the writer function
    start_time = time.time()
    batch_count = 50
    for i in range(batch_count):
        # generate a minibatch of trajectories
        traj_pos, traj_vel = TRAJ.generate_trajectories(num_samples, traj_len)
        traj_x = traj_pos[:,:,0]
        traj_y = traj_pos[:,:,1]
        # draw the trajectories (every step of every sample as one batch)
        center_x = to_fX( traj_x.T.ravel() )
        center_y = to_fX( traj_y.T.ravel() )
        delta = to_fX( np.ones(center_x.shape) )
        sigma = to_fX( np.ones(center_x.shape) )
        W = write_func(center_y, center_x, delta, 0.2*sigma)
    end_time = time.time()
    render_time = end_time - start_time
    render_bps = batch_count / render_time
    print("RENDER BATCH/SECOND: {0:.2f}".format(render_bps))
    # visualize the first 20 trajectories from the last rendered batch
    W = W[:20*traj_len]
    utils.visualize_samples(W, "AAAAA.png", num_rows=20)
| Philip-Bachman/Sequential-Generation | MotionRenderers.py | Python | mit | 14,522 | [
"Gaussian"
] | acc46b05920952088209602197b702c365980d873c8d7d4380b70a62db8ac8e3 |
import numpy as np
import scipy as sp
from scipy import fftpack as fft
import cv2
import math
import os
import os.path
from scipy.ndimage.filters import gaussian_filter
import cvUtils
import principalColorSpace as pcs
import saliency
def colorSRS(image, weights = None, avgHalfsize = 8, gaussianSigma = 32, maxDim = 500):
    """ Computes a saliency map of a color image using spectral residual saliency on
        each layer individually, then combines the results by taking a (weighted)
        average.

    Args:
        image (array): color (rows x cols x channels) or grayscale image.
        weights (list): optional per-channel weights; defaults to uniform
            weights of 1 for every channel.
        avgHalfsize (int): half size of the averaging window, passed through
            to spectralResidualSaliency.
        gaussianSigma (number): sigma of the final smoothing filter.
        maxDim (int): maximum size of the largest dimension of the output map.
    Returns:
        A saliency map; each layer is normalized to [0;1] so the weighted
        average stays in [0;1].
    """
    # Treat the case of grayscale image specifically.
    if len(image.shape) < 3 or image.shape[2] == 1:
        return spectralResidualSaliency(image, avgHalfsize, gaussianSigma, maxDim)
    rows, cols, channels = image.shape
    # Set the weights if not set. Use "is None": comparing to None with ==
    # would be elementwise if a numpy array is ever passed, and the same bug
    # below ("avgResult == None" on an ndarray) made the truth test raise on
    # modern numpy after the first iteration.
    if weights is None:
        weights = [1] * channels
    avgResult = None
    # Compute per-channel saliency and accumulate the weighted sum.
    # Local renamed from "saliency" to avoid shadowing the imported module.
    for i in range(channels):
        channel = np.array(image[:, :, i], copy=False)
        channelSaliency = spectralResidualSaliency(channel, avgHalfsize, gaussianSigma, maxDim)
        if avgResult is None:
            avgResult = weights[i] * channelSaliency
        else:
            avgResult += weights[i] * channelSaliency
    # Our implementation of SRS happens to normalize each layer to the [0;1] range.
    return avgResult / np.sum(weights)
def spectralResidualSaliency(grayscaleImage, avgHalfsize = 4, gaussianSigma = 16, maxDim = 256):
    """Computes a saliency map of an image using the spectral residual saliency method
    from Hou, 2007.

    Args:
        grayscaleImage (array): grayscale image to compute a saliency map for.
        avgHalfsize (int): half size of the window for the average filter.
        gaussianSigma (number): sigma parameter to the final gaussian filter.
        maxDim (int): maximum size of the largest dimension for the output saliency map.
    Returns:
        A saliency map of the input image, normalized to the [0;1] range.
    """
    # Resize the source image so its largest dimension becomes maxDim while
    # keeping the aspect ratio. Floor division is required: cv2.resize takes
    # integer pixel sizes, and "/" yields floats under Python 3.
    sourceRows, sourceCols = grayscaleImage.shape[0:2]
    if sourceRows > sourceCols:
        newSize = (sourceCols * maxDim // sourceRows, maxDim)
    else:
        newSize = (maxDim, sourceRows * maxDim // sourceCols)
    resizedImage = cv2.resize(grayscaleImage, newSize)
    # Compute its Fourier spectrum.
    spectrum = fft.fft2(resizedImage)
    # Apply log scaling to the magnitude.
    logSpectrum = np.log(np.absolute(spectrum))
    # Get the phase of the spectrum.
    phase = np.angle(spectrum)
    # The spectral residual is the log magnitude minus its local average.
    avgFilterSize = avgHalfsize*2+1
    avgFilterKernel = np.ones([avgFilterSize, avgFilterSize]) / avgFilterSize**2
    avgLogSpectrum = cv2.filter2D(logSpectrum, -1, avgFilterKernel)
    residual = logSpectrum - avgLogSpectrum
    # Back-transform the residual, restoring the original phase.
    # NOTE(review): Hou 2007 uses |ifft(...)|**2; this takes real(ifft(...)**2)
    # instead. Kept as-is to preserve the existing output -- confirm intent.
    saliencyMap = np.real(fft.ifft2(np.exp(residual + phase*1j))**2)
    filteredMap = gaussian_filter(saliencyMap, gaussianSigma)
    # Normalize to [0;1] range; a constant map yields all zeros instead of a
    # division by zero.
    minSaliency = np.amin(filteredMap)
    maxSaliency = np.amax(filteredMap)
    if maxSaliency == minSaliency:
        return np.zeros_like(filteredMap)
    return (filteredMap - minSaliency) / (maxSaliency - minSaliency)
if __name__ == "__main__":
    # Demo: compute and display saliency maps for every background image,
    # blended with a center-bias map. Requires a GUI (cv2.imshow).
    for imageFilename in cvUtils.imagesInFolder('data/background'):
        image = cv2.imread(imageFilename)
        # convert to principal color space, use eigenvalues as layer weights
        # NOTE(review): colorSRS is called without explicit weights here, so
        # uniform weights are actually used -- confirm against the comment.
        pcsImage = pcs.convertToPCS(cv2.cvtColor(image, cv2.COLOR_BGR2LAB), 3)
        saliencyMap = colorSRS(pcsImage)
        rows, cols = saliencyMap.shape[0:2]
        # Blend factor: 0.7 center bias, 0.3 spectral residual saliency.
        w = 0.7
        center = saliency.centerMap(rows, cols)
        centered = center * w + saliencyMap * (1 - w)
        cv2.imshow('original', image)
        cv2.imshow('saliency', saliencyMap)
        cv2.imshow('center map', center)
        cv2.imshow('centered', centered)
        cv2.waitKey(0)
| alexisVallet/anime-bgrm | spectralResidualSaliency.py | Python | gpl-2.0 | 3,853 | [
"Gaussian"
] | 38cabc1318c9863a815ec413a8e4414d47582ddcc75b745b4a907cb9be134dea |
from test import test_support
import unittest
import codecs
import locale
import sys, StringIO
def coding_checker(self, coder):
    """Return a helper asserting that *coder* maps an input to
    (expect, len(input)), i.e. the full input was consumed."""
    def check(input, expect):
        # Codec calls return (converted object, number of units consumed).
        outcome = coder(input)
        self.assertEqual(outcome, (expect, len(input)))
    return check
class Queue(object):
    """
    FIFO character queue: data written at one end is read back, in order,
    from the other end.
    """
    def __init__(self):
        # Characters written but not yet consumed by read().
        self._pending = ""

    def write(self, chars):
        # Append new data at the tail of the queue.
        self._pending = self._pending + chars

    def read(self, size=-1):
        # A negative size drains the whole queue, mirroring file.read().
        if size < 0:
            drained, self._pending = self._pending, ""
            return drained
        head, self._pending = self._pending[:size], self._pending[size:]
        return head
class ReadTest(unittest.TestCase):
    """Shared test machinery for stream readers and incremental decoders.

    Subclasses set a class attribute ``encoding``; every test here exercises
    the codec registered under that name.
    """
    def check_partial(self, input, partialresults):
        # get a StreamReader for the encoding and feed the bytestring version
        # of input to the reader byte by byte. Read everything available from
        # the StreamReader and check that the results equal the appropriate
        # entries from partialresults.
        q = Queue()
        r = codecs.getreader(self.encoding)(q)
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            q.write(c)
            result += r.read()
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(r.read(), u"")
        self.assertEqual(r.bytebuffer, "")
        self.assertEqual(r.charbuffer, u"")
        # do the check again, this time using an incremental decoder
        d = codecs.getincrementaldecoder(self.encoding)()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")
        # Check whether the reset method works properly
        d.reset()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")
        # check iterdecode()
        encoded = input.encode(self.encoding)
        self.assertEqual(
            input,
            u"".join(codecs.iterdecode(encoded, self.encoding))
        )

    def test_readline(self):
        # Exercise StreamReader.readline() with every line-end style,
        # with and without keepends, and with size-limited reads.
        def getreader(input):
            stream = StringIO.StringIO(input.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        def readalllines(input, keepends=True, size=None):
            # Join all lines with "|" so the split points are visible.
            reader = getreader(input)
            lines = []
            while True:
                line = reader.readline(size=size, keepends=keepends)
                if not line:
                    break
                lines.append(line)
            return "|".join(lines)

        s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
        sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
        sexpectednoends = u"foo|bar|baz|spam|eggs"
        self.assertEqual(readalllines(s, True), sexpected)
        self.assertEqual(readalllines(s, False), sexpectednoends)
        self.assertEqual(readalllines(s, True, 10), sexpected)
        self.assertEqual(readalllines(s, False, 10), sexpectednoends)
        lineends = ("\n", "\r\n", "\r", u"\u2028")
        # Test long lines (multiple calls to read() in readline())
        vw = []
        vwo = []
        for (i, lineend) in enumerate(lineends):
            vw.append((i*200+200)*u"\u3042" + lineend)
            vwo.append((i*200+200)*u"\u3042")
        self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
        self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
        # Test lines where the first read might end with \r, so the
        # reader has to look ahead whether this is a lone \r or a \r\n
        for size in xrange(80):
            for lineend in lineends:
                s = 10*(size*u"a" + lineend + u"xxx\n")
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=True),
                        size*u"a" + lineend,
                    )
                    self.assertEqual(
                        reader.readline(keepends=True),
                        "xxx\n",
                    )
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=False),
                        size*u"a",
                    )
                    self.assertEqual(
                        reader.readline(keepends=False),
                        "xxx",
                    )

    def test_mixed_readline_and_read(self):
        # Interleave readline()/read()/readlines() and check the reader's
        # internal char buffer is consistent across the transitions.
        lines = ["Humpty Dumpty sat on a wall,\n",
                 "Humpty Dumpty had a great fall.\r\n",
                 "All the king's horses and all the king's men\r",
                 "Couldn't put Humpty together again."]
        data = ''.join(lines)
        def getreader():
            stream = StringIO.StringIO(data.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)
        # Issue #8260: Test readline() followed by read()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.read(), ''.join(lines[1:]))
        self.assertEqual(f.read(), '')
        # Issue #32110: Test readline() followed by read(n)
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.read(1), lines[1][0])
        self.assertEqual(f.read(0), '')
        self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
        # Issue #16636: Test readline() followed by readlines()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.readlines(), lines[1:])
        self.assertEqual(f.read(), '')
        # Test read(n) followed by read()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.read(), data[5:])
        self.assertEqual(f.read(), '')
        # Issue #32110: Test read(n) followed by read(n)
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.read(1), data[5])
        self.assertEqual(f.read(0), '')
        self.assertEqual(f.read(100), data[6:106])
        # Issue #12446: Test read(n) followed by readlines()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
        self.assertEqual(f.read(), '')

    def test_bug1175396(self):
        # Regression test: iterating a StreamReader over realistic template
        # text must reproduce each source line exactly.
        s = [
            '<%!--===================================================\r\n',
            '    BLOG index page: show recent articles,\r\n',
            '    today\'s articles, or articles of a specific date.\r\n',
            '========================================================--%>\r\n',
            '<%@inputencoding="ISO-8859-1"%>\r\n',
            '<%@pagetemplate=TEMPLATE.y%>\r\n',
            '<%@import=import frog.util, frog%>\r\n',
            '<%@import=import frog.objects%>\r\n',
            '<%@import=from frog.storageerrors import StorageError%>\r\n',
            '<%\r\n',
            '\r\n',
            'import logging\r\n',
            'log=logging.getLogger("Snakelets.logger")\r\n',
            '\r\n',
            '\r\n',
            'user=self.SessionCtx.user\r\n',
            'storageEngine=self.SessionCtx.storageEngine\r\n',
            '\r\n',
            '\r\n',
            'def readArticlesFromDate(date, count=None):\r\n',
            '    entryids=storageEngine.listBlogEntries(date)\r\n',
            '    entryids.reverse() # descending\r\n',
            '    if count:\r\n',
            '        entryids=entryids[:count]\r\n',
            '    try:\r\n',
            '        return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
            '    except StorageError,x:\r\n',
            '        log.error("Error loading articles: "+str(x))\r\n',
            '        self.abort("cannot load articles")\r\n',
            '\r\n',
            'showdate=None\r\n',
            '\r\n',
            'arg=self.Request.getArg()\r\n',
            'if arg=="today":\r\n',
            '    #-------------------- TODAY\'S ARTICLES\r\n',
            '    self.write("<h2>Today\'s articles</h2>")\r\n',
            '    showdate = frog.util.isodatestr() \r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'elif arg=="active":\r\n',
            '    #-------------------- ACTIVE ARTICLES redirect\r\n',
            '    self.Yredirect("active.y")\r\n',
            'elif arg=="login":\r\n',
            '    #-------------------- LOGIN PAGE redirect\r\n',
            '    self.Yredirect("login.y")\r\n',
            'elif arg=="date":\r\n',
            '    #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
            '    showdate = self.Request.getParameter("date")\r\n',
            '    self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'else:\r\n',
            '    #-------------------- RECENT ARTICLES\r\n',
            '    self.write("<h2>Recent articles</h2>")\r\n',
            '    dates=storageEngine.listBlogEntryDates()\r\n',
            '    if dates:\r\n',
            '        entries=[]\r\n',
            '        SHOWAMOUNT=10\r\n',
            '        for showdate in dates:\r\n',
            '            entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
            '            if len(entries)>=SHOWAMOUNT:\r\n',
            '                break\r\n',
            '                \r\n',
        ]
        stream = StringIO.StringIO("".join(s).encode(self.encoding))
        reader = codecs.getreader(self.encoding)(stream)
        for (i, line) in enumerate(reader):
            self.assertEqual(line, s[i])

    def test_readlinequeue(self):
        # Feed data incrementally through a Queue and check readline()
        # handles line ends that are split across writes (notably lone \r).
        q = Queue()
        writer = codecs.getwriter(self.encoding)(q)
        reader = codecs.getreader(self.encoding)(q)
        # No lineends
        writer.write(u"foo\r")
        self.assertEqual(reader.readline(keepends=False), u"foo")
        writer.write(u"\nbar\r")
        self.assertEqual(reader.readline(keepends=False), u"")
        self.assertEqual(reader.readline(keepends=False), u"bar")
        writer.write(u"baz")
        self.assertEqual(reader.readline(keepends=False), u"baz")
        self.assertEqual(reader.readline(keepends=False), u"")
        # Lineends
        writer.write(u"foo\r")
        self.assertEqual(reader.readline(keepends=True), u"foo\r")
        writer.write(u"\nbar\r")
        self.assertEqual(reader.readline(keepends=True), u"\n")
        self.assertEqual(reader.readline(keepends=True), u"bar\r")
        writer.write(u"baz")
        self.assertEqual(reader.readline(keepends=True), u"baz")
        self.assertEqual(reader.readline(keepends=True), u"")
        writer.write(u"foo\r\n")
        self.assertEqual(reader.readline(keepends=True), u"foo\r\n")

    def test_bug1098990_a(self):
        # Regression test: a long line must not confuse the readline
        # buffering into splitting or merging lines.
        s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
        s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
        s3 = u"next line.\r\n"
        s = (s1+s2+s3).encode(self.encoding)
        stream = StringIO.StringIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), u"")

    def test_bug1098990_b(self):
        # Regression test: same buffering issue, different line lengths.
        s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
        s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
        s3 = u"stillokay:bbbbxx\r\n"
        s4 = u"broken!!!!badbad\r\n"
        s5 = u"againokay.\r\n"
        s = (s1+s2+s3+s4+s5).encode(self.encoding)
        stream = StringIO.StringIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), s4)
        self.assertEqual(reader.readline(), s5)
        self.assertEqual(reader.readline(), u"")
class UTF32Test(ReadTest):
    """Tests for the BOM-sniffing "utf-32" codec."""
    encoding = "utf-32"
    # Little/big-endian encodings of u"spamspam" with exactly one BOM.
    spamle = ('\xff\xfe\x00\x00'
              's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = ('\x00\x00\xfe\xff'
              '\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              '\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')

    def test_only_one_bom(self):
        # Two writes must still produce a single leading BOM.
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEqual(f.read(), u"spamspam")

    def test_badbom(self):
        # An invalid BOM must raise, regardless of how much data follows.
        s = StringIO.StringIO(4*"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = StringIO.StringIO(8*"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        # Expected intermediate results after each byte is fed to the
        # incremental decoder (4 bytes of BOM, then 4 bytes per character).
        self.check_partial(
            u"\x00\xff\u0100\uffff\U00010000",
            [
                u"", # first byte of BOM read
                u"", # second byte of BOM read
                u"", # third byte of BOM read
                u"", # fourth byte of BOM read => byteorder known
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        # "replace" yields U+FFFD for a truncated unit; "ignore" drops it.
        self.assertEqual((u'\ufffd', 1),
                         codecs.utf_32_decode('\x01', 'replace', True))
        self.assertEqual((u'', 1),
                         codecs.utf_32_decode('\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          "\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest):
    """Tests for the fixed-endian "utf-32-le" codec (no BOM handling)."""
    encoding = "utf-32-le"

    def test_partial(self):
        # Four bytes per character, no BOM: a character appears once its
        # fourth byte arrives.
        self.check_partial(
            u"\x00\xff\u0100\uffff\U00010000",
            [
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        # Little-endian byte order: least significant byte first.
        self.assertEqual(u"\U00010203".encode(self.encoding), "\x03\x02\x01\x00")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          "\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = '\x00\x00\x01\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest):
    """Tests for the fixed-endian "utf-32-be" codec (no BOM handling)."""
    encoding = "utf-32-be"

    def test_partial(self):
        # Four bytes per character, no BOM: a character appears once its
        # fourth byte arrives.
        self.check_partial(
            u"\x00\xff\u0100\uffff\U00010000",
            [
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        # Big-endian byte order: most significant byte first.
        self.assertEqual(u"\U00010203".encode(self.encoding), "\x00\x01\x02\x03")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          "\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = '\x00\x01\x00\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest):
    """Tests for the BOM-sniffing "utf-16" codec."""
    encoding = "utf-16"
    # Little/big-endian encodings of u"spamspam" with exactly one BOM.
    spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'

    def test_only_one_bom(self):
        # Two writes must still produce a single leading BOM.
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEqual(f.read(), u"spamspam")

    def test_badbom(self):
        # An invalid BOM must raise, regardless of how much data follows.
        s = StringIO.StringIO("\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = StringIO.StringIO("\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        # Expected intermediate results after each byte (2-byte BOM, 2 bytes
        # per BMP character, 4 bytes for the surrogate-pair character).
        self.check_partial(
            u"\x00\xff\u0100\uffff\U00010000",
            [
                u"", # first byte of BOM read
                u"", # second byte of BOM read => byteorder known
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        # "replace" yields U+FFFD for a truncated unit; "ignore" drops it.
        self.assertEqual((u'\ufffd', 1),
                         codecs.utf_16_decode('\x01', 'replace', True))
        self.assertEqual((u'', 1),
                         codecs.utf_16_decode('\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode, "\xff", "strict", True)

    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = u'Hello\r\nworld\r\n'
        s = s1.encode(self.encoding)
        self.addCleanup(test_support.unlink, test_support.TESTFN)
        with open(test_support.TESTFN, 'wb') as fp:
            fp.write(s)
        with codecs.open(test_support.TESTFN, 'U', encoding=self.encoding) as reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest):
    """Tests for the fixed-endian "utf-16-le" codec."""
    encoding = "utf-16-le"

    def test_partial(self):
        # Two bytes per BMP character; the non-BMP character needs 4 bytes.
        self.check_partial(
            u"\x00\xff\u0100\uffff\U00010000",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # Each malformed input must raise under "strict" and produce the
        # given replacement text under "replace" (lone bytes and lone
        # surrogates become U+FFFD).
        tests = [
            (b'\xff', u'\ufffd'),
            (b'A\x00Z', u'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', u'ABCD\ufffd'),
            (b'\x00\xd8', u'\ufffd'),
            (b'\x00\xd8A', u'\ufffd'),
            (b'\x00\xd8A\x00', u'\ufffdA'),
            (b'\x00\xdcA\x00', u'\ufffdA'),
        ]
        for raw, expected in tests:
            try:
                with self.assertRaises(UnicodeDecodeError):
                    codecs.utf_16_le_decode(raw, 'strict', True)
                self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
            except:
                # Report which test vector failed before re-raising.
                print 'raw=%r' % raw
                raise
class UTF16BETest(ReadTest):
    """Tests for the fixed-endian "utf-16-be" codec."""
    encoding = "utf-16-be"

    def test_partial(self):
        # Two bytes per BMP character; the non-BMP character needs 4 bytes.
        self.check_partial(
            u"\x00\xff\u0100\uffff\U00010000",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff",
                u"\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # Each malformed input must raise under "strict" and produce the
        # given replacement text under "replace".
        tests = [
            (b'\xff', u'\ufffd'),
            (b'\x00A\xff', u'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', u'ABCD\ufffd'),
            (b'\xd8\x00', u'\ufffd'),
            (b'\xd8\x00\xdc', u'\ufffd'),
            (b'\xd8\x00\x00A', u'\ufffdA'),
            (b'\xdc\x00\x00A', u'\ufffdA'),
        ]
        for raw, expected in tests:
            try:
                with self.assertRaises(UnicodeDecodeError):
                    codecs.utf_16_be_decode(raw, 'strict', True)
                self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
            except:
                # Report which test vector failed before re-raising.
                print 'raw=%r' % raw
                raise
class UTF8Test(ReadTest):
    """Tests for the "utf-8" codec."""
    encoding = "utf-8"

    def test_partial(self):
        # Variable-length sequences: 1, 2, 2, 3, 3 and 4 bytes respectively;
        # a character appears only once its final byte arrives.
        self.check_partial(
            u"\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800\uffff",
                u"\x00\xff\u07ff\u0800\uffff",
                u"\x00\xff\u07ff\u0800\uffff",
                u"\x00\xff\u07ff\u0800\uffff",
                u"\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )
class UTF7Test(ReadTest):
    """Tests for the "utf-7" codec (RFC 2152)."""
    encoding = "utf-7"

    def test_ascii(self):
        # Set D (directly encoded characters)
        set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz'
                 '0123456789'
                 '\'(),-./:?')
        self.assertEqual(set_d.encode(self.encoding), set_d)
        self.assertEqual(set_d.decode(self.encoding), set_d)
        # Set O (optional direct characters)
        set_o = ' !"#$%&*;<=>@[]^_`{|}'
        self.assertEqual(set_o.encode(self.encoding), set_o)
        self.assertEqual(set_o.decode(self.encoding), set_o)
        # + is escaped as "+-"
        self.assertEqual(u'a+b'.encode(self.encoding), 'a+-b')
        self.assertEqual('a+-b'.decode(self.encoding), u'a+b')
        # White spaces pass through unchanged
        ws = ' \t\n\r'
        self.assertEqual(ws.encode(self.encoding), ws)
        self.assertEqual(ws.decode(self.encoding), ws)
        # Other ASCII characters must be base64-encoded
        other_ascii = ''.join(sorted(set(chr(i) for i in range(0x80)) -
                                     set(set_d + set_o + '+' + ws)))
        self.assertEqual(other_ascii.encode(self.encoding),
                         '+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
                         'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')

    def test_partial(self):
        # Incremental decoding of the escaped "+" sequence.
        self.check_partial(
            u"a+-b",
            [
                u"a",
                u"a",
                u"a+",
                u"a+-",
                u"a+-b",
            ]
        )

    def test_errors(self):
        # Malformed base64 runs and stray high bytes: each must raise under
        # "strict" and decode to the given replacement text under "replace".
        tests = [
            ('\xe1b', u'\ufffdb'),
            ('a\xe1b', u'a\ufffdb'),
            ('a\xe1\xe1b', u'a\ufffd\ufffdb'),
            ('a+IK', u'a\ufffd'),
            ('a+IK-b', u'a\ufffdb'),
            ('a+IK,b', u'a\ufffdb'),
            ('a+IKx', u'a\u20ac\ufffd'),
            ('a+IKx-b', u'a\u20ac\ufffdb'),
            ('a+IKwgr', u'a\u20ac\ufffd'),
            ('a+IKwgr-b', u'a\u20ac\ufffdb'),
            ('a+IKwgr,', u'a\u20ac\ufffd'),
            ('a+IKwgr,-b', u'a\u20ac\ufffd-b'),
            ('a+IKwgrB', u'a\u20ac\u20ac\ufffd'),
            ('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'),
            ('a+/,+IKw-b', u'a\ufffd\u20acb'),
            ('a+//,+IKw-b', u'a\ufffd\u20acb'),
            ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'),
            ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'),
            ('a+IKw-b\xe1', u'a\u20acb\ufffd'),
            ('a+IKw\xe1b', u'a\u20ac\ufffdb'),
        ]
        for raw, expected in tests:
            try:
                with self.assertRaises(UnicodeDecodeError):
                    codecs.utf_7_decode(raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
            except:
                # Report which test vector failed before re-raising.
                print 'raw=%r' % raw
                raise

    def test_nonbmp(self):
        # Characters outside the BMP round-trip through surrogate pairs.
        self.assertEqual(u'\U000104A0'.encode(self.encoding), '+2AHcoA-')
        self.assertEqual(u'\ud801\udca0'.encode(self.encoding), '+2AHcoA-')
        self.assertEqual('+2AHcoA-'.decode(self.encoding), u'\U000104A0')
        self.assertEqual('+2AHcoA'.decode(self.encoding), u'\U000104A0')
        self.assertEqual(u'\u20ac\U000104A0'.encode(self.encoding), '+IKzYAdyg-')
        self.assertEqual('+IKzYAdyg-'.decode(self.encoding), u'\u20ac\U000104A0')
        self.assertEqual('+IKzYAdyg'.decode(self.encoding), u'\u20ac\U000104A0')
        self.assertEqual(u'\u20ac\u20ac\U000104A0'.encode(self.encoding),
                         '+IKwgrNgB3KA-')
        self.assertEqual('+IKwgrNgB3KA-'.decode(self.encoding),
                         u'\u20ac\u20ac\U000104A0')
        self.assertEqual('+IKwgrNgB3KA'.decode(self.encoding),
                         u'\u20ac\u20ac\U000104A0')

    def test_lone_surrogates(self):
        # Unpaired surrogates: some decode to the surrogate itself, broken
        # continuations decode to U+FFFD under "replace".
        tests = [
            ('a+2AE-b', u'a\ud801b'),
            ('a+2AE\xe1b', u'a\ufffdb'),
            ('a+2AE', u'a\ufffd'),
            ('a+2AEA-b', u'a\ufffdb'),
            ('a+2AH-b', u'a\ufffdb'),
            ('a+IKzYAQ-b', u'a\u20ac\ud801b'),
            ('a+IKzYAQ\xe1b', u'a\u20ac\ufffdb'),
            ('a+IKzYAQA-b', u'a\u20ac\ufffdb'),
            ('a+IKzYAd-b', u'a\u20ac\ufffdb'),
            ('a+IKwgrNgB-b', u'a\u20ac\u20ac\ud801b'),
            ('a+IKwgrNgB\xe1b', u'a\u20ac\u20ac\ufffdb'),
            ('a+IKwgrNgB', u'a\u20ac\u20ac\ufffd'),
            ('a+IKwgrNgBA-b', u'a\u20ac\u20ac\ufffdb'),
        ]
        for raw, expected in tests:
            try:
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
            except:
                # Report which test vector failed before re-raising.
                print 'raw=%r' % raw
                raise
class UTF16ExTest(unittest.TestCase):
    """Tests for the low-level codecs.utf_16_ex_decode helper."""
    def test_errors(self):
        # A lone byte cannot be decoded in strict mode.
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, "\xff", "strict", 0, True)

    def test_bad_args(self):
        # Calling without arguments is a TypeError.
        self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
    """Tests for codecs.readbuffer_encode (buffer-protocol input)."""
    def test_array(self):
        # Any read-buffer object is accepted, not only strings.
        import array
        self.assertEqual(
            codecs.readbuffer_encode(array.array("c", "spam")),
            ("spam", 4)
        )

    def test_empty(self):
        self.assertEqual(codecs.readbuffer_encode(""), ("", 0))

    def test_bad_args(self):
        # Missing argument or a non-buffer object is a TypeError.
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class CharBufferTest(unittest.TestCase):
    """Tests for codecs.charbuffer_encode (character-buffer input)."""
    def test_string(self):
        self.assertEqual(codecs.charbuffer_encode("spam"), ("spam", 4))

    def test_empty(self):
        self.assertEqual(codecs.charbuffer_encode(""), ("", 0))

    def test_bad_args(self):
        # Missing argument or a non-buffer object is a TypeError.
        self.assertRaises(TypeError, codecs.charbuffer_encode)
        self.assertRaises(TypeError, codecs.charbuffer_encode, 42)
class UTF8SigTest(ReadTest):
    """Tests for "utf-8-sig": UTF-8 that skips exactly one leading BOM."""
    encoding = "utf-8-sig"

    def test_partial(self):
        # The input deliberately starts with TWO BOM characters: the first
        # (the signature) is skipped, the second is real data.
        self.check_partial(
            u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                u"",
                u"",
                u"", # First BOM has been read and skipped
                u"",
                u"",
                u"\ufeff", # Second BOM has been read and emitted
                u"\ufeff\x00", # "\x00" read and emitted
                u"\ufeff\x00", # First byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800\uffff",
                u"\ufeff\x00\xff\u07ff\u0800\uffff",
                u"\ufeff\x00\xff\u07ff\u0800\uffff",
                u"\ufeff\x00\xff\u07ff\u0800\uffff",
                u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        unicode("\xef\xbb\xbf", "utf-8-sig")

    def test_bom(self):
        # Round trip through the incremental decoder strips the signature.
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = u"spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)

    def test_stream_bom(self):
        # Reading a BOM-prefixed stream in chunks of various sizes must
        # always yield the data without the BOM.
        unistring = u"ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + "ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + range(1, 11) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(StringIO.StringIO(bytestring))
            ostream = StringIO.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)

    def test_stream_bare(self):
        # A stream WITHOUT a BOM must decode unchanged for all chunk sizes.
        unistring = u"ABC\u00A1\u2200XYZ"
        bytestring = "ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + range(1, 11) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(StringIO.StringIO(bytestring))
            ostream = StringIO.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
    """Tests for codecs.escape_decode (string-escape processing)."""
    def test_empty(self):
        self.assertEqual(codecs.escape_decode(""), ("", 0))

    def test_raw(self):
        # Every byte except backslash passes through unchanged.
        decode = codecs.escape_decode
        for b in range(256):
            b = chr(b)
            if b != '\\':
                self.assertEqual(decode(b + '0'), (b + '0', 2))

    def test_escape(self):
        # Each recognized escape maps to its byte; unrecognized escapes are
        # kept literally (backslash included).
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", br"[\]")
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        check(br"[\7]", b"[\x07]")
        check(br"[\8]", br"[\8]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        check(br"[\501]", b"[A]")
        check(br"[\x41]", b"[A]")
        check(br"[\X41]", br"[\X41]")
        check(br"[\x410]", b"[A0]")
        for b in range(256):
            b = chr(b)
            if b not in '\n"\'\\abtnvfr01234567x':
                check('\\' + b, '\\' + b)

    def test_errors(self):
        # Truncated \x escapes raise under "strict" and are dropped or
        # replaced with "?" under "ignore"/"replace".
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
class RecodingTest(unittest.TestCase):
    """Regression test for writing through an EncodedFile wrapper."""
    def test_recoding(self):
        f = StringIO.StringIO()
        f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
        f2.write(u"a")
        f2.close()
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c
# From RFC 3492
# (unicode, punycode) sample pairs taken from the RFC's appendix; used by
# PunycodeTest below.
punycode_testcases = [
    # A Arabic (Egyptian):
    (u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     "egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    (u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     "ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    (u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     "ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    (u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     u"\u0065\u0073\u006B\u0079",
     "Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    (u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     u"\u05D1\u05E8\u05D9\u05EA",
     "4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    (u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     u"\u0939\u0948\u0902",
     "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    (u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    (u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     "psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    (u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     u"\u0438",
     "b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    (u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     u"\u0061\u00F1\u006F\u006C",
     "PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    (u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     u"\u0056\u0069\u1EC7\u0074",
     "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    (u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     "3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    (u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     u"\u004F\u004E\u004B\u0045\u0059\u0053",
     "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    (u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     "Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    (u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     "2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    (u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     u"\u308B\u0035\u79D2\u524D",
     "MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    (u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     "de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    (u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     "d9juau41awczczp"),
    # (S) -> $1.00 <-
    (u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     u"\u003C\u002D",
     "-> $1.00 <--")
    ]

# Sanity check: each entry must be a (unicode, punycode) pair; malformed
# entries are printed so a broken edit is easy to spot.
for i in punycode_testcases:
    if len(i)!=2:
        print repr(i)
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(uni.encode("punycode").lower(), puny.lower())
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
def test_bug1251300(self):
# Decoding with unicode_internal used to not correctly handle "code
# points" above 0x10ffff on UCS-4 builds.
if sys.maxunicode > 0xffff:
ok = [
("\x00\x10\xff\xff", u"\U0010ffff"),
("\x00\x00\x01\x01", u"\U00000101"),
("", u""),
]
not_ok = [
"\x7f\xff\xff\xff",
"\x80\x00\x00\x00",
"\x81\x00\x00\x00",
"\x00",
"\x00\x00\x00\x00\x00",
]
for internal, uni in ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertEqual(uni, internal.decode("unicode_internal"))
for internal in not_ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertRaises(UnicodeDecodeError, internal.decode,
"unicode_internal")
def test_decode_error_attributes(self):
if sys.maxunicode > 0xffff:
try:
"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
except UnicodeDecodeError, ex:
self.assertEqual("unicode_internal", ex.encoding)
self.assertEqual("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
self.assertEqual(4, ex.start)
self.assertEqual(8, ex.end)
else:
self.fail()
def test_decode_callback(self):
if sys.maxunicode > 0xffff:
codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
decoder = codecs.getdecoder("unicode_internal")
ab = u"ab".encode("unicode_internal")
ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
"UnicodeInternalTest")
self.assertEqual((u"ab", 12), ignored)
def test_encode_length(self):
# Issue 3739
encoder = codecs.getencoder("unicode_internal")
self.assertEqual(encoder(u"a")[1], 1)
self.assertEqual(encoder(u"\xe9\u0142")[1], 2)
encoder = codecs.getencoder("string-escape")
self.assertEqual(encoder(r'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
'\xb8\x8f\xef\xbb\xbf',
'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
('CAFE',
'cafe'),
# 3.3 Case folding 8bit U+00DF (german sharp s).
# The original test case is bogus; it says \xc3\xdf
('\xc3\x9f',
'ss'),
# 3.4 Case folding U+0130 (turkish capital I with dot).
('\xc4\xb0',
'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
('\xc5\x83\xcd\xba',
'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
('j\xcc\x8c\xc2\xa0\xc2\xaa',
'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
('\xe1\xbe\xb7',
'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
('\xc7\xb0',
'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
('\xce\x90',
'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
('\xce\xb0',
'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
('\xe1\xba\x96',
'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
('\xe1\xbd\x96',
'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(' ',
' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
('\xc2\xa0',
' '),
# 3.16 Non-ASCII multibyte space character U+1680.
('\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
('\xe2\x80\x80',
' '),
# 3.18 Zero Width Space U+200b.
('\xe2\x80\x8b',
''),
# 3.19 Non-ASCII multibyte space character U+3000.
('\xe3\x80\x80',
' '),
# 3.20 ASCII control characters U+0010 U+007F.
('\x10\x7f',
'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
('\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
('\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
('\xef\xbb\xbf',
''),
# 3.24 Non-ASCII control character U+1D175.
('\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
('\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
('\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
('\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
('\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
('\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
('\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
('\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
('\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
('\xcd\x81',
'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
('\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
('\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
('\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
('\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
('foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
('foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
('foo\xef\xb9\xb6bar',
'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
('\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
('\xd8\xa71\xd8\xa8',
'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
# Skip this test as we allow unassigned
#('\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
'\xaa\xce\xb0\xe2\x80\x80',
'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
'\x80',
'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
class NameprepTest(unittest.TestCase):
def test_nameprep(self):
from encodings.idna import nameprep
for pos, (orig, prepped) in enumerate(nameprep_tests):
if orig is None:
# Skipped
continue
# The Unicode strings are given in UTF-8
orig = unicode(orig, "utf-8")
if prepped is None:
# Input contains prohibited characters
self.assertRaises(UnicodeError, nameprep, orig)
else:
prepped = unicode(prepped, "utf-8")
try:
self.assertEqual(nameprep(orig), prepped)
except Exception,e:
raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
def test_builtin_decode(self):
self.assertEqual(unicode("python.org", "idna"), u"python.org")
self.assertEqual(unicode("python.org.", "idna"), u"python.org.")
self.assertEqual(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
self.assertEqual(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
def test_builtin_encode(self):
self.assertEqual(u"python.org".encode("idna"), "python.org")
self.assertEqual("python.org.".encode("idna"), "python.org.")
self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
def test_stream(self):
import StringIO
r = codecs.getreader("idna")(StringIO.StringIO("abc"))
r.read(3)
self.assertEqual(r.read(), u"")
def test_incremental_decode(self):
self.assertEqual(
"".join(codecs.iterdecode("python.org", "idna")),
u"python.org"
)
self.assertEqual(
"".join(codecs.iterdecode("python.org.", "idna")),
u"python.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
decoder = codecs.getincrementaldecoder("idna")()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode(u"rg"), u"")
self.assertEqual(decoder.decode(u"", True), u"org")
decoder.reset()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode("rg."), u"org.")
self.assertEqual(decoder.decode("", True), u"")
def test_incremental_encode(self):
self.assertEqual(
"".join(codecs.iterencode(u"python.org", "idna")),
"python.org"
)
self.assertEqual(
"".join(codecs.iterencode(u"python.org.", "idna")),
"python.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
encoder = codecs.getincrementalencoder("idna")()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org"), "xn--xample-9ta.")
self.assertEqual(encoder.encode(u"", True), "org")
encoder.reset()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
self.assertEqual(encoder.encode(u"", True), "")
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
u'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode('abc'), u'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')
def test_encode(self):
self.assertEqual(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode(u'abc'), 'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as a dotless "i"
oldlocale = locale.getlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
def test_all(self):
api = (
"encode", "decode",
"register", "CodecInfo", "Codec", "IncrementalEncoder",
"IncrementalDecoder", "StreamReader", "StreamWriter", "lookup",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"register_error", "lookup_error",
"strict_errors", "replace_errors", "ignore_errors",
"xmlcharrefreplace_errors", "backslashreplace_errors",
"open", "EncodedFile",
"iterencode", "iterdecode",
"BOM", "BOM_BE", "BOM_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_BE", "BOM_UTF16_LE",
"BOM_UTF32", "BOM_UTF32_BE", "BOM_UTF32_LE",
"BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", # Undocumented
"StreamReaderWriter", "StreamRecoder",
)
self.assertEqual(sorted(api), sorted(codecs.__all__))
for api in codecs.__all__:
getattr(codecs, api)
class StreamReaderTest(unittest.TestCase):
def setUp(self):
self.reader = codecs.getreader('utf-8')
self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), [u'\ud55c\n', u'\uae00'])
class EncodedFileTest(unittest.TestCase):
def test_basic(self):
f = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
self.assertEqual(ef.read(), '\\\xd5\n\x00\x00\xae')
f = StringIO.StringIO()
ef = codecs.EncodedFile(f, 'utf-8', 'latin1')
ef.write('\xc3\xbc')
self.assertEqual(f.getvalue(), '\xfc')
class Str2StrTest(unittest.TestCase):
def test_read(self):
sin = codecs.encode("\x80", "base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.read()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
def test_readline(self):
sin = codecs.encode("\x80", "base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.readline()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
all_unicode_encodings = [
"ascii",
"base64_codec",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hex_codec",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_u",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"rot_13",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"unicode_internal",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
# The following encodings work only with str, not unicode
all_string_encodings = [
"quopri_codec",
"string_escape",
"uu_codec",
]
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
"base64_codec",
"hex_codec",
"punycode",
"unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams[:]
if sys.flags.py3k_warning:
broken_unicode_with_streams.append("rot_13")
# The following encodings only support "strict" mode
only_strict_mode = [
"idna",
"zlib_codec",
"bz2_codec",
]
try:
import bz2
except ImportError:
pass
else:
all_unicode_encodings.append("bz2_codec")
broken_unicode_with_streams.append("bz2_codec")
try:
import zlib
except ImportError:
pass
else:
all_unicode_encodings.append("zlib_codec")
broken_unicode_with_streams.append("zlib_codec")
class BasicUnicodeTest(unittest.TestCase):
def test_basics(self):
s = u"abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "encoding=%r" % encoding)
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "encoding=%r" % encoding)
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
q = Queue()
writer = codecs.getwriter(encoding)(q)
encodedresult = ""
for c in s:
writer.write(c)
encodedresult += q.read()
q = Queue()
reader = codecs.getreader(encoding)(q)
decodedresult = u""
for c in encodedresult:
q.write(c)
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = ""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode(u"", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = u""
for c in encodedresult:
decodedresult += decoder.decode(c)
decodedresult += decoder.decode("", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
# check iterencode()/iterdecode()
result = u"".join(codecs.iterdecode(
codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "encoding=%r" % encoding)
# check iterencode()/iterdecode() with empty string
result = u"".join(codecs.iterdecode(
codecs.iterencode(u"", encoding), encoding))
self.assertEqual(result, u"")
if encoding not in only_strict_mode:
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = "".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = u"".join(decoder.decode(c)
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
@test_support.cpython_only
def test_basics_capi(self):
from _testcapi import codec_incrementalencoder, codec_incrementaldecoder
s = u"abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder and iterencode()/iterdecode()
try:
cencoder = codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check C API
encodedresult = ""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode(u"", True)
cdecoder = codec_incrementaldecoder(encoding)
decodedresult = u""
for c in encodedresult:
decodedresult += cdecoder.decode(c)
decodedresult += cdecoder.decode("", True)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
if encoding not in only_strict_mode:
# check incremental decoder/encoder with errors argument
try:
cencoder = codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = "".join(cencoder.encode(c) for c in s)
cdecoder = codec_incrementaldecoder(encoding, "ignore")
decodedresult = u"".join(cdecoder.decode(c)
for c in encodedresult)
self.assertEqual(decodedresult, s,
"encoding=%r" % encoding)
def test_seek(self):
# all codecs should be able to encode these
s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_streams:
continue
reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
for t in xrange(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
line = reader.readline()
self.assertEqual(s[:len(line)], line)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
# This used to crash, we are only verifying there's no crash.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
class BasicStrTest(unittest.TestCase):
def test_basics(self):
s = "abc123"
for encoding in all_string_encodings:
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s))
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"),
(u"abc", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", u"ab"
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict", u"ab\ufffe"
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"),
(u"ab", 3)
)
allbytes = "".join(chr(i) for i in xrange(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", u""),
(u"", len(allbytes))
)
def test_decode_with_int2str_map(self):
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u'c'}),
(u"abc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'Aa', 1: u'Bb', 2: u'Cc'}),
(u"AaBbCc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'\U0010FFFF', 1: u'b', 2: u'c'}),
(u"\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u''}),
(u"ab", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b'}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: None}
)
# Issue #14850
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: u'a', 1: u'b', 2: u'\ufffe'}
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b'}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b', 2: None}),
(u"ab\ufffd", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: u'a', 1: u'b', 2: u'\ufffe'}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b'}),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b', 2: None}),
(u"ab", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: u'a', 1: u'b', 2: u'\ufffe'}),
(u"ab", 3)
)
allbytes = "".join(chr(i) for i in xrange(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", {}),
(u"", len(allbytes))
)
def test_decode_with_int2int_map(self):
a = ord(u'a')
b = ord(u'b')
c = ord(u'c')
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: a, 1: b, 2: c}),
(u"abc", 3)
)
# Issue #15379
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict",
{0: 0x10FFFF, 1: b, 2: c}),
(u"\U0010FFFFbc", 3)
)
self.assertRaises(TypeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: 0x110000, 1: b, 2: c}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: a, 1: b},
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, "\x00\x01\x02", "strict",
{0: a, 1: b, 2: 0xFFFE},
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: a, 1: b}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace",
{0: a, 1: b, 2: 0xFFFE}),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: a, 1: b}),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore",
{0: a, 1: b, 2: 0xFFFE}),
(u"ab", 3)
)
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = StringIO.StringIO("\xc3\xbc")
with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
self.assertEqual(ef.read(), "\xfc")
def test_streamreaderwriter(self):
f = StringIO.StringIO("\xc3\xbc")
info = codecs.lookup("utf-8")
with codecs.StreamReaderWriter(f, info.streamreader,
info.streamwriter, 'strict') as srw:
self.assertEqual(srw.read(), u"\xfc")
class UnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.unicode_escape_encode(u""), ("", 0))
self.assertEqual(codecs.unicode_escape_decode(""), (u"", 0))
def test_raw_encode(self):
encode = codecs.unicode_escape_encode
for b in range(32, 127):
if b != ord('\\'):
self.assertEqual(encode(unichr(b)), (chr(b), 1))
def test_raw_decode(self):
decode = codecs.unicode_escape_decode
for b in range(256):
if b != ord('\\'):
self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
def test_escape_encode(self):
encode = codecs.unicode_escape_encode
check = coding_checker(self, encode)
check(u'\t', r'\t')
check(u'\n', r'\n')
check(u'\r', r'\r')
check(u'\\', r'\\')
for b in range(32):
if chr(b) not in '\t\n\r':
check(unichr(b), '\\x%02x' % b)
for b in range(127, 256):
check(unichr(b), '\\x%02x' % b)
check(u'\u20ac', r'\u20ac')
check(u'\U0001d120', r'\U0001d120')
def test_escape_decode(self):
decode = codecs.unicode_escape_decode
check = coding_checker(self, decode)
check("[\\\n]", u"[]")
check(r'[\"]', u'["]')
check(r"[\']", u"[']")
check(r"[\\]", ur"[\]")
check(r"[\a]", u"[\x07]")
check(r"[\b]", u"[\x08]")
check(r"[\t]", u"[\x09]")
check(r"[\n]", u"[\x0a]")
check(r"[\v]", u"[\x0b]")
check(r"[\f]", u"[\x0c]")
check(r"[\r]", u"[\x0d]")
check(r"[\7]", u"[\x07]")
check(r"[\8]", ur"[\8]")
check(r"[\78]", u"[\x078]")
check(r"[\41]", u"[!]")
check(r"[\418]", u"[!8]")
check(r"[\101]", u"[A]")
check(r"[\1010]", u"[A0]")
check(r"[\x41]", u"[A]")
check(r"[\x410]", u"[A0]")
check(r"\u20ac", u"\u20ac")
check(r"\U0001d120", u"\U0001d120")
for b in range(256):
if chr(b) not in '\n"\'\\abtnvfr01234567xuUN':
check('\\' + chr(b), u'\\' + unichr(b))
def test_decode_errors(self):
decode = codecs.unicode_escape_decode
for c, d in ('x', 2), ('u', 4), ('U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
"\\" + c + "0"*i)
self.assertRaises(UnicodeDecodeError, decode,
"[\\" + c + "0"*i + "]")
data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
self.assertEqual(decode(data, "replace"),
(u"[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
class RawUnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.raw_unicode_escape_encode(u""), ("", 0))
self.assertEqual(codecs.raw_unicode_escape_decode(""), (u"", 0))
def test_raw_encode(self):
encode = codecs.raw_unicode_escape_encode
for b in range(256):
self.assertEqual(encode(unichr(b)), (chr(b), 1))
def test_raw_decode(self):
decode = codecs.raw_unicode_escape_decode
for b in range(256):
self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
def test_escape_encode(self):
encode = codecs.raw_unicode_escape_encode
check = coding_checker(self, encode)
for b in range(256):
if chr(b) not in 'uU':
check(u'\\' + unichr(b), '\\' + chr(b))
check(u'\u20ac', r'\u20ac')
check(u'\U0001d120', r'\U0001d120')
def test_escape_decode(self):
decode = codecs.raw_unicode_escape_decode
check = coding_checker(self, decode)
for b in range(256):
if chr(b) not in 'uU':
check('\\' + chr(b), u'\\' + unichr(b))
check(r"\u20ac", u"\u20ac")
check(r"\U0001d120", u"\U0001d120")
def test_decode_errors(self):
decode = codecs.raw_unicode_escape_decode
for c, d in ('u', 4), ('U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
"\\" + c + "0"*i)
self.assertRaises(UnicodeDecodeError, decode,
"[\\" + c + "0"*i + "]")
data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
self.assertEqual(decode(data, "replace"),
(u"[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
class BomTest(unittest.TestCase):
def test_seek0(self):
data = u"1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
self.addCleanup(test_support.unlink, test_support.TESTFN)
for encoding in tests:
# Check if the BOM is written only once
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data[0])
self.assertNotEqual(f.tell(), 0)
f.seek(0)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# (StreamWriter) Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data[0])
self.assertNotEqual(f.writer.tell(), 0)
f.writer.seek(0)
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# Check that the BOM is not written after a seek() at a position
# different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.seek(f.tell())
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# (StreamWriter) Check that the BOM is not written after a seek()
# at a position different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data)
f.writer.seek(f.writer.tell())
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
class TransformCodecTest(unittest.TestCase):
def test_quopri_stateless(self):
# Should encode with quotetabs=True
encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
self.assertEqual(encoded, b"space=20tab=09eol=20\n")
# But should still support unescaped tabs and spaces
unescaped = b"space tab eol\n"
self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)
def test_uu_invalid(self):
# Missing "begin" line
self.assertRaises(ValueError, codecs.decode, "", "uu-codec")
def test_main():
test_support.run_unittest(
UTF32Test,
UTF32LETest,
UTF32BETest,
UTF16Test,
UTF16LETest,
UTF16BETest,
UTF8Test,
UTF8SigTest,
UTF7Test,
UTF16ExTest,
ReadBufferTest,
CharBufferTest,
EscapeDecodeTest,
RecodingTest,
PunycodeTest,
UnicodeInternalTest,
NameprepTest,
IDNACodecTest,
CodecsModuleTest,
StreamReaderTest,
EncodedFileTest,
Str2StrTest,
BasicUnicodeTest,
BasicStrTest,
CharmapTest,
WithStmtTest,
UnicodeEscapeTest,
RawUnicodeEscapeTest,
BomTest,
TransformCodecTest,
)
if __name__ == "__main__":
test_main()
| HiSPARC/station-software | user/python/Lib/test/test_codecs.py | Python | gpl-3.0 | 81,185 | [
"FEFF"
] | aba645de64c417b14f50dff964cf5ae21f8bee0e7346df71cbb8a7f96fa31c5e |
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
import numpy
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
    """Return a single MLP classifier (in a list) with its display name."""
    network = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
    return [network], ['MultiLayer Perceptron']
def get_ensemble_models():
    """Return the ensemble classifier line-up paired with display names."""
    named_models = [
        ('Random Forests',
         RandomForestClassifier(n_estimators=51, min_samples_leaf=5, min_samples_split=3)),
        ('Bagging', BaggingClassifier(n_estimators=71, random_state=42)),
        ('Extra Trees', ExtraTreesClassifier(n_estimators=57, random_state=42)),
        ('AdaBoost', AdaBoostClassifier(n_estimators=51, random_state=42)),
        ('Gradient Boost', GradientBoostingClassifier(n_estimators=101, random_state=42)),
    ]
    models = [model for _, model in named_models]
    names = [name for name, _ in named_models]
    return models, names
def label_encode_frame(dataframe):
    """Integer-encode every string-valued column of *dataframe* in place.

    A column is considered string-valued when its first value is a str; such
    columns are transformed with a (re-fitted) LabelEncoder. Numeric columns
    are left untouched. Returns the (mutated) frame.
    """
    encoder = LabelEncoder()
    for column in dataframe.columns:
        series = dataframe[column]
        # .iloc[0] is positional; the previous dataframe[column][0] was a
        # *label* lookup and raised KeyError whenever the index did not
        # contain the label 0 (e.g. after row filtering). Also guard empty
        # columns, and use isinstance instead of an exact `type(...) is str`.
        if len(series) and isinstance(series.iloc[0], str):
            dataframe[column] = encoder.fit_transform(series.values)
    return dataframe
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    """Print the classification report and accuracy of a fitted model on held-out data.

    NOTE: this file is Python 2 (print statements)."""
    print '--------- For Model : ', trained_model_name, ' ---------------\n'
    predicted_values = trained_model.predict(X_test)
    # Per-class precision / recall / f1.
    print metrics.classification_report(y_test,predicted_values)
    print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
    print "---------------------------------------\n"
# ---- Script entry: train and evaluate ensemble models on the backorder data ----
filename = 'train.csv'
# Columns removed before training -- presumably identifiers / unusable
# features for this baseline; confirm against the dataset description.
columns_to_delete = ['sku','lead_time','perf_6_month_avg','perf_12_month_avg']
target_column = 'went_on_backorder'
train_frame = pd.read_csv(filename)
train_frame.drop(labels=columns_to_delete,axis=1,inplace=True)
# Split the label column off before label-encoding the features.
class_labels = list(train_frame[target_column].values)
del train_frame[target_column]
train_frame = label_encode_frame(train_frame)
# 80/20 hold-out split; fixed seed keeps runs comparable.
X_train,X_test,y_train,y_test = train_test_split(train_frame.values,class_labels,test_size=0.2,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
    classifier.fit(X_train,y_train)
    print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
| rupakc/Kaggle-Compendium | can-you-predict-product-backorders/backorder-baseline.py | Python | mit | 3,036 | [
"Gaussian"
] | b115e68ab3e401cf516ba523e6748ef8e1ebeb045c76c42c246aaab8d473158b |
# Import of the relevant tools
import time
import numpy as np
import theano
import theano.tensor as T
from theano import pp, config
from plotly.tools import FigureFactory as FF
import plotly.graph_objs as go
from ..io.read_vtk import ReadVTK
from ..data_attachment.measures import Measures
from ..data_attachment.varifolds import Varifolds
from ..math_utils.kernels import _squared_distances, _gaussian_kernel
from .theano_hamiltoniancarrier import TheanoHamiltonianCarrier
from .shapes_manifold import ShapesManifold
class TheanoShapesCarrier(ShapesManifold, TheanoHamiltonianCarrier) :
    """
    Combines the control points framework with the data attachment + io methods
    of the ShapesManifold class.
    """
    def __init__(self, S0,
                 kernel = ('gaussian', 1),
                 data_attachment = ('measure-kernel', ('gaussian', 1)),
                 weights = (0.01, 1), # gamma_V, gamma_W
                 dt = 0.1,
                 compatibility_compile = False,
                 plot_interactive = False,
                 plot_file = True,
                 foldername = 'results/'
                 ) :
        """
        Creates a TheanoCurves/Surfaces manifold.
        Compilation takes place here.

        :param S0: source shape handed to ShapesManifold.__init__.
        :param kernel: (name, scale) pair for the deformation kernel.
        :param data_attachment: (type, kernel) pair; the type determines
            self.embedding_type ('measure' or 'varifold') used below.
        :param weights: (gamma_V, gamma_W) pair.
        :param dt: time step of the Hamiltonian integrator.
        :param compatibility_compile: if True, the fused forward+backward
            compilation below is skipped.
        :param plot_interactive, plot_file, foldername: output options
            forwarded to TheanoHamiltonianCarrier.
        """
        # Initialise both parents explicitly; cooperative super() is not used.
        TheanoHamiltonianCarrier.__init__(self, kernel = kernel,
                                          weights = weights,
                                          dt = dt,
                                          plot_interactive = plot_interactive,
                                          plot_file = plot_file,
                                          foldername = foldername)
        ShapesManifold.__init__(self, S0,
                                data_attachment)
        #===============================================================
        # Before compiling, we assign types to the theano variables.
        q0 = T.matrix('q0')        # control points
        p0 = T.matrix('p0')        # momenta
        s0 = T.matrix('s0')        # carried shape points
        xt_x = T.matrix('xt_x')    # target points
        xt_mu = T.vector('xt_mu')  # target weights
        xt_n = T.matrix('xt_n')    # target normals (varifold embedding only)
        # Compilation. Depending on settings specified in the ~/.theanorc file or explicitely given
        # at execution time, this will produce CPU or GPU code.
        if not compatibility_compile : # With theano, it's better to let the compilator handle the whole forward-backward pipeline
            print('Compiling the shooting_cost routine...')
            time1 = time.time()
            if self.embedding_type == 'measure' :
                self.opt_shooting_cost = theano.function([q0, p0, s0, xt_x, xt_mu], # input
                                                         self._opt_shooting_cost(q0, p0, s0, xt_x, xt_mu), # output
                                                         allow_input_downcast=True) # GPU = float32 only, whereas numpy uses
                                                                                    # float64 : we allow silent conversion
            elif self.embedding_type == 'varifold' :
                self.opt_shooting_cost = theano.function([q0, p0, s0, xt_x, xt_mu, xt_n], # input
                                                         self._opt_shooting_cost(q0, p0, s0, xt_x, xt_mu, xt_n), # output
                                                         allow_input_downcast=True) # GPU = float32 only, whereas numpy uses
                                                                                    # float64 : we allow silent conversion
            time2 = time.time()
            print('Compiled in : ', '{0:.2f}'.format(time2 - time1), 's')
            # The hamiltonian_trajectory routine, that shall be used in the visualization.
            # NOTE(review): source indentation was lost in transit; this second
            # compilation is assumed to live inside the
            # `if not compatibility_compile` branch -- confirm upstream.
            print('Compiling the hamiltonian_trajectory visualization routine...')
            time1 = time.time()
            self.hamiltonian_trajectory = theano.function([q0,p0,s0], # input
                                                          self._HamiltonianTrajectoryCarrying(q0, p0, s0), # output
                                                          allow_input_downcast=True) # GPU = float32 only, whereas numpy uses
                                                                                     # float64 : we allow silent conversion
            time2 = time.time()
            print('Compiled in : ', '{0:.2f}'.format(time2 - time1), 's')
| jeanfeydy/lddmm-ot | LDDMM_Python/lddmm_python/modules/manifolds/theano_shapescarrier.py | Python | mit | 3,957 | [
"Gaussian"
] | 687b1970b1a0e53a4d1ad91ecd67ac458996a480d8bbe99f1af8ae309ada2c89 |
import cv2
import scipy
from scipy import signal
import pylab
import math
import numpy as np
from dynamicThreshold import OtsuThresholdMethod
class SimpleEdgeDetect:
    """Unsharp-mask edge detector: edges are the difference between an image
    and a blurred copy of itself."""

    # 5x5 blur kernel (reproduced verbatim from the original; note it is not
    # symmetric); normalised at use time so its weights sum to 1.
    _KERNEL = np.array([[2, 4, 5, 4, 2],
                        [4, 9, 12, 9, 4],
                        [5, 12, 15, 12, 5],
                        [2, 4, 5, 4, 2],
                        [4, 9, 12, 9, 4]], dtype=float)

    def smooth_image(self, im):
        """Return *im* blurred with the normalised 5x5 kernel (same-size output)."""
        kernel = self._KERNEL / self._KERNEL.sum()
        return scipy.signal.convolve(im, kernel, mode='same')

    def find_edges(self, im):
        """Return the edge response of *im*: original minus blurred."""
        return im - self.smooth_image(im)
class CannyEdgeDetect:
    """Canny edge detector implemented from scratch: smoothing, Sobel
    gradients, non-maximum suppression (thinning), Otsu-based double
    thresholding and hysteresis edge tracking."""

    def _apply_filter(self, im, y, x):
        # Convolve *im* with a pair of 3x3 kernels given as flat 9-element
        # lists; returns the (Gy, Gx) gradient responses.
        y = np.reshape(y, (3,3))
        x = np.reshape(x, (3,3))
        Gy = scipy.signal.convolve(im, y)
        Gx = scipy.signal.convolve(im, x)
        return Gy, Gx

    def scharr_filter(self, im):
        # Unused alternative to sobel_filter (deemed poor by the author).
        # NOTE(review): `x` is missing a comma after the first +3, so `+3 -10`
        # collapses to -7 and the list has only 8 elements; np.reshape(x, (3,3))
        # would raise if this method were ever called. Fix before using.
        y = [-3, -10, -3,
              0,   0,  0,
             +3, +10, +3]
        x = [ -3, 0, +3
             -10, 0, +10,
              -3, 0, +3 ]
        return self._apply_filter(im, y, x)

    # because of how gradients are calculated, a gradient in the x direction = a vertical line.
    def sobel_filter(self, im):
        # Standard 3x3 Sobel kernels for the y and x gradients.
        y = [-1, -2, -1,
              0,  0,  0,
             +1, +2, +1]
        x = [-1, 0, +1,
             -2, 0, +2,
             -1, 0, +1]
        return self._apply_filter(im, y, x)

    def get_gradient_magnitude_and_angle(self, im):
        """Return (magnitude, angle) of the Sobel gradient of *im*."""
        gy, gx = self.sobel_filter(im)
        mag = self.get_magnitude(gy, gx)
        phi = self.get_angle(gy, gx)
        return mag, phi

    def get_magnitude(self, gy, gx):
        """ calculate gradient magnitude from Gx and Gy, the gradients in x and y, respectively """
        return np.hypot(gy, gx)  # == np.sqrt(sobelX**2 + sobelY**2)

    def get_angle(self, gy, gx):
        """ calculate gradient angle. For each pixel determine direction of gradient in radians. 0 - 2pi """
        phi = np.arctan2(gy, gx)
        phi += 2 * math.pi   # shift out of arctan2's [-pi, pi] range
        phi %= (2 * math.pi) # ensure that angle values are only between 0 and 2pi
        return phi

    def get_4_thinned_bidirectional_edges(self, mag, phi):
        """Non-maximum suppression: keep a pixel only if it is the strongest
        of its two neighbours along its own gradient direction.

        1. Classify each pixel into one of 4 directions: horizontal,
           vertical, DiagB (backward diagonal) and DiagF (forward diagonal).
        2. Keep pixels strictly stronger than one neighbour and >= the other.
        """
        shape = mag.shape
        # Shifted copies of mag: each holds the neighbour's value at a pixel.
        higher, lower = np.zeros(shape), np.zeros(shape)
        toLeft, toRight = np.zeros(shape), np.zeros(shape)
        downLeft, upRight = np.zeros(shape), np.zeros(shape)
        upLeft, downRight = np.zeros(shape), np.zeros(shape)
        # ------ vertical ------- #
        higher[:-1, :] = mag[1:, :]  # shift rows up
        lower[1:, :] = mag[:-1, :]   # shift rows down
        # ------ horizontal ------- #
        toLeft[:, :-1] = mag[:, 1:]  # shift rows left
        toRight[:, 1:] = mag[:, :-1] # shift rows right
        # ------ diagForward ------- # /
        downLeft[1:, :-1] = mag[:-1, 1:]
        upRight[:-1, 1:] = mag[1:, :-1]
        # ------ diagBackward ------- # \
        downRight[1:, 1:] = mag[:-1, :-1]
        upLeft[:-1, :-1] = mag[1:, 1:]
        # -------------------------------
        diagFphi, diagBphi, horizPhi, vertPhi = self.get_4_bidirectional_matrices(phi)
        thinVert = vertPhi & (mag > higher) & (mag >= lower)
        thinHoriz = horizPhi & (mag > toLeft) & (mag >= toRight)
        thinDiagF = diagFphi & (mag > downRight) & (mag >= upLeft)  # why is the diagonal logic switched?
        thinDiagB = diagBphi & (mag > downLeft) & (mag >= upRight)
        return [thinDiagF, thinDiagB, thinHoriz, thinVert]

    def get_4_bidirectional_matrices(self, phi):
        """determine which of the bidirectional groups to which a pixel belongs.
        note that I use the rare & , | symbols, which do boolean logic element-wise (bitwise)
        """
        phi = phi % math.pi  # take advantage of symmetry. You only need to analyze 0-pi
        pi = math.pi
        diagForward = (phi > 2 * pi / 16) & (phi < 6 * pi / 16)     # /
        diagBackward = (phi > 10 * pi / 16) & (phi < 14 * pi / 16)  # \
        horizontal = (phi <= 2 * pi / 16) | (phi >= 14 * pi / 16)   # _ horizontal is only one using the | operator because it's
                                                                    # got two relevant portions (near 0 and near pi)
        vertical= (phi >= 6 * pi / 16) & (phi <= 10 * pi / 16)      # |
        return [diagForward, diagBackward, horizontal, vertical]

    def get_2d_gaussian_filter(self, k):
        # Build a normalised k x k Gaussian kernel as the outer product of a
        # 1-D general-Gaussian window with itself.
        # NOTE(review): unused -- smooth_image uses a hard-coded 5x5 kernel.
        horizontalG = scipy.signal.general_gaussian(k, 1, 0.8)
        verticalG = np.reshape(horizontalG, (k, 1))
        gaussian2d = horizontalG * verticalG
        normalized = gaussian2d / gaussian2d.sum()  # so the net sum will equal 1
        return normalized

    def smooth_image(self, im):
        # Blur with a fixed 5x5 kernel normalised so its weights sum to 1.
        # NOTE(review): the kernel is not symmetric (rows 4-5 look swapped
        # relative to a symmetric Gaussian) and is duplicated verbatim in
        # SimpleEdgeDetect -- confirm intended.
        gaussian = [2, 4, 5, 4, 2,
                    4, 9, 12, 9, 4,
                    5, 12, 15, 12, 5,
                    2, 4, 5, 4, 2,
                    4, 9, 12, 9, 4]
        gaussian = 1.0 / sum(gaussian) * np.reshape(gaussian, (5,5))
        return scipy.signal.convolve(im, gaussian, mode='same')

    def normalize_magnitude(self, mag):
        """ scales magnitude matrix back to 0 - 255 values """
        offset = mag - mag.min()  # offset mag so that minimum value is always 0
        if offset.dtype == np.uint8:
            # NOTE(review): bare `raise` outside an except block raises
            # RuntimeError; an explicit TypeError with a message would be clearer.
            raise
        # NOTE(review): `normalized` is computed but unused and the division is
        # repeated in the return statement below.
        normalized = offset * 255 / offset.max()  # now.. if this image isn't float, you're screwed
        return offset * 255 / offset.max()

    def get_combined_thinned_image(self, mag, phi):
        # NOTE(review): unpack order here (F, B, Vert, Horiz) does not match the
        # producer's return order (F, B, Horiz, Vert); harmless because the four
        # masks are only summed, but the local names are misleading.
        thinDiagF, thinDiagB, thinVert, thinHoriz = self.get_4_thinned_bidirectional_edges(mag, phi)
        normalMag = self.normalize_magnitude(mag)
        thinNormalMag = np.array(normalMag * (thinDiagF + thinDiagB + thinVert + thinHoriz), dtype=np.uint8)  # convert to uint8 image format.
        return thinNormalMag

    def edge_tracking(self, weak, strong):
        """ hysteresis edge tracking: keeps weak pixels that are direct neighbors to strong pixels. Improves line detection.
        :param weak: an image thresholded by the lower threshold, such that it includes all weak and strong pixels
        :param strong: an image thresholded by the higher threshold, such that it includes only strong pixels
        """
        weakOnly = weak - strong  # NOTE(review): unused
        blurKernel = np.ones((3,3)) / 9
        # Smearing `strong` marks every pixel having at least one strong neighbour.
        strongSmeared = scipy.signal.convolve(strong, blurKernel, mode='same') > 0
        strongWithWeakNeighbors = weak & strongSmeared  # this is your normal result. trying for more will be expensive
        return strongWithWeakNeighbors
        # NOTE(review): everything below is unreachable (dead code deliberately
        # kept behind the early return): a breadth-first search following weak
        # pixels connected to strong ones.
        weakNeighbors = strongWithWeakNeighbors ^ strong  # exclusive or
        # now here's where we track along the current valid pixel
        h, w = weak.shape[:2]
        pts = np.transpose(np.nonzero(weakNeighbors))  # coordinates of front of lines to begin tracking: y, x
        frontier = set([(y, x) for y, x in pts])
        frontierWave = set()  # searching in waves makes our search a breadth-first search
        explored = list()
        directions = [-1, 0, 1]
        jitter = []  # jitter = moving around center
        for dy in directions:
            for dx in directions:
                if dx == dy == 0:  # don't add center point
                    continue
                jitter.append((dy, dx))
        depth = 0
        while frontier or frontierWave:
            if not frontier:  # frontier is exhausted
                frontier = frontierWave
                frontierWave = set()  # start next wave
                depth += 1
                if depth >= min(h, w) / 2:  # if our line is bigger than a dimension in our image, then it's probably a runaway.
                    print('depth', depth)
                    break
            fy, fx = frontier.pop()
            explored.append((fy, fx))
            # explore around point, add neighbor
            for dy, dx in jitter:
                y, x = dy + fy, dx + fx
                if y == h or y == -1 or x == w or x == -1:  # skip pixels outside boundary
                    continue
                if weak[y, x] and (y, x) not in explored and (y, x) not in frontier:  # found an unexplored, connected weak pixel
                    frontierWave.add((y, x))  # nothing will change if this point already existed in frontier
        # now we've explored all the connected-to-strong lines. Next up, mark them on strongWithWeakNeighbors
        print(len(explored))
        ys = [y for y, x in explored]
        xs = [x for y, x in explored]
        strongWithWeakNeighbors[ys, xs] = True
        # keep nearby weak pixels
        return strongWithWeakNeighbors  # keeps all the original strong

    def double_threshold(self, im):
        """ obtain two thresholds for determining weak and strong pixels. return two images, weak and strong,
        where strong contains only strong pixels, and weak contains both weak and strong
        """
        otsu = OtsuThresholdMethod(im, 4)  # speedup of 4 keeps things pretty accurate but much faster
        _, lowThresh, highThresh, tooHigh = otsu.calculate_n_thresholds(4)
        weakLines = im > lowThresh
        strongLines = im > highThresh
        return weakLines, strongLines

    def find_edges(self, im):
        """ returns boolean array representing lines. to convert to image just use edges * 255 """
        if im.ndim > 2 and im.shape[-1] > 1:  # aka if we have a full color picture
            im = im[:, :, 0]  # sorry, we can only deal with one channel. I hope you loaded it as greyscale!
        smoothed = self.smooth_image(im)
        mag, phi = self.get_gradient_magnitude_and_angle(smoothed)
        thinNormalMag = self.get_combined_thinned_image(mag, phi)
        weak, strong = self.double_threshold(thinNormalMag)
        cannyEdges = self.edge_tracking(weak, strong)
        return cannyEdges, weak * 255
# ---- Batch driver: run the Canny detector over every .jpg in images/input ----
if __name__ == '__main__':
    import os
    canny = CannyEdgeDetect()
    cwd = os.getcwd()
    inputDir ='images/input'
    outputDir = 'images/output'
    # List the input images, then chdir into the output folder so the
    # relative cv2.imwrite() paths below land in images/output.
    os.chdir(inputDir)
    images = os.listdir('.')
    os.chdir(cwd)
    os.chdir(outputDir)
    for f in images:
        if '.jpg' not in f:
            continue
        print('reading', f)
        filepath = os.path.join(cwd, inputDir, f)
        im4canny = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
        edgesFinal, uncanny = canny.find_edges(im4canny)
        # Boolean edge mask -> 0/255 image; `uncanny` is already scaled.
        cv2.imwrite(f.replace('.jpg', '_strong_only.jpg'), edgesFinal * 255)
        cv2.imwrite(f.replace('.jpg', '_weak_included.jpg'), uncanny)
| lancekindle/canny-edge | Cartoonizer.py | Python | gpl-3.0 | 10,855 | [
"Gaussian"
] | bde17a94ed06dafac86befd1e29584357d7f82350f1f18c9134c37cae1b701ae |
#!/bin/env python
""" create and put 'ReplicateAndRegister' request """
__RCSID__ = "$Id: $"
import os
from DIRAC.Core.Base import Script
# Register the command-line usage/help text with the DIRAC Script machinery.
Script.setUsageMessage( '\n'.join( [ __doc__,
                                     'Usage:',
                                     ' %s [option|cfgfile] requestName LFNs targetSE1 [targetSE2 ...]' % Script.scriptName,
                                     'Arguments:',
                                     ' requestName: a request name',
                                     ' LFNs: single LFN or file with LFNs',
                                     ' targetSE: target SE' ] ) )
def getLFNList( arg ):
  """ Build a deduplicated list of LFNs from *arg*.

  If *arg* is a path to an existing file, the first whitespace-separated
  token of each non-empty line is taken as an LFN; otherwise *arg* itself
  is treated as a single LFN.

  :param str arg: LFN or path to a file listing LFNs
  :return: list of unique LFNs (order unspecified)
  """
  if os.path.exists( arg ):
    # `with` closes the file (the original leaked the handle), and skipping
    # blank lines avoids the IndexError raised by ''.split()[0].
    with open( arg ) as lfnFile:
      lfnList = [ line.split()[0] for line in lfnFile if line.strip() ]
  else:
    lfnList = [ arg ]
  return list( set( lfnList ) )
# # execution
if __name__ == "__main__":
  from DIRAC.Core.Base.Script import parseCommandLine
  parseCommandLine()
  import DIRAC
  from DIRAC import gLogger
  # ---- argument parsing: requestName, LFN (or file of LFNs), target SEs ----
  args = Script.getPositionalArgs()
  requestName = None
  targetSEs = None
  if len( args ) < 3:
    Script.showHelp()
    DIRAC.exit( 1 )
  requestName = args[0]
  lfnList = getLFNList( args[1] )
  # Each remaining argument may itself be a comma-separated list of SEs.
  targetSEs = list( set( [ se for targetSE in args[2:] for se in targetSE.split( ',' ) ] ) )
  gLogger.info( "Will create request '%s' with 'ReplicateAndRegister' "\
                "operation using %s lfns and %s target SEs" % ( requestName, len( lfnList ), len( targetSEs ) ) )
  from DIRAC.RequestManagementSystem.Client.Request import Request
  from DIRAC.RequestManagementSystem.Client.Operation import Operation
  from DIRAC.RequestManagementSystem.Client.File import File
  from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
  from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
  from DIRAC.Core.Utilities.List import breakListIntoChunks
  # One request is created per chunk of at most 100 LFNs.
  lfnChunks = breakListIntoChunks( lfnList, 100 )
  multiRequests = len( lfnChunks ) > 1
  error = 0
  count = 0
  reqClient = ReqClient()
  fc = FileCatalog()
  for lfnChunk in lfnChunks:
    # Fetch sizes/checksums from the catalog; skip LFNs that cannot be resolved.
    metaDatas = fc.getFileMetadata( lfnChunk )
    if not metaDatas["OK"]:
      gLogger.error( "unable to read metadata for lfns: %s" % metaDatas["Message"] )
      error = -1
      continue
    metaDatas = metaDatas["Value"]
    for failedLFN, reason in metaDatas["Failed"].items():
      gLogger.error( "skipping %s: %s" % ( failedLFN, reason ) )
    lfnChunk = set( metaDatas["Successful"] )
    if not lfnChunk:
      gLogger.error( "LFN list is empty!!!" )
      error = -1
      continue
    if len( lfnChunk ) > Operation.MAX_FILES:
      gLogger.error( "too many LFNs, max number of files per operation is %s" % Operation.MAX_FILES )
      error = -1
      continue
    count += 1
    # Build the request: one ReplicateAndRegister operation holding all files.
    request = Request()
    request.RequestName = requestName if not multiRequests else '%s_%d' % ( requestName, count )
    replicateAndRegister = Operation()
    replicateAndRegister.Type = "ReplicateAndRegister"
    replicateAndRegister.TargetSE = ",".join( targetSEs )
    for lfn in lfnChunk:
      metaDict = metaDatas["Successful"][lfn]
      opFile = File()
      opFile.LFN = lfn
      opFile.Size = metaDict["Size"]
      if "Checksum" in metaDict:
        # # should check checksum type, now assuming Adler32 (metaDict["ChecksumType"] = 'AD'
        opFile.Checksum = metaDict["Checksum"]
        opFile.ChecksumType = "ADLER32"
      replicateAndRegister.addFile( opFile )
    request.addOperation( replicateAndRegister )
    # Submit to the Request Management System; keep going on failure so the
    # remaining chunks still get a chance.
    putRequest = reqClient.putRequest( request )
    if not putRequest["OK"]:
      gLogger.error( "unable to put request '%s': %s" % ( request.RequestName, putRequest["Message"] ) )
      error = -1
      continue
    if not multiRequests:
      gLogger.always( "Request '%s' has been put to ReqDB for execution." % request.RequestName )
  if multiRequests:
    gLogger.always( "%d requests have been put to ReqDB for execution, with name %s_<num>" % ( count, requestName ) )
  gLogger.always( "You can monitor requests' status using command: 'dirac-rms-show-request <requestName>'" )
  DIRAC.exit( error )
| vmendez/DIRAC | DataManagementSystem/scripts/dirac-dms-replicate-and-register-request.py | Python | gpl-3.0 | 4,138 | [
"DIRAC"
] | 943f6b3322be9cb46d739ebbc7b2724bbaa5196e7238f515be6ffc04a609d7fe |
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
import os
import json
import numpy as np
import shutil
from monty.tempfile import ScratchDir
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import StructureEnvironments
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import LightStructureEnvironments
from pymatgen.core.periodic_table import Specie
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import MultiWeightsChemenvStrategy
se_files_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..",
'test_files', "chemenv", "structure_environments_files")
class StructureEnvironmentsTest(PymatgenTest):
    """Tests for (Light)StructureEnvironments, driven by the reference
    se_mp-7000.json structure-environments file."""

    def test_structure_environments(self):
        # ScratchDir: the test writes image.png; keep it out of the tree.
        with ScratchDir("."):
            f = open("{}/{}".format(se_files_dir, 'se_mp-7000.json'), 'r')
            dd = json.load(f)
            f.close()
            se = StructureEnvironments.from_dict(dd)
            isite = 6
            # Check the csm-and-maps plot data for site 6.
            csm_and_maps_fig, csm_and_maps_subplot = se.get_csm_and_maps(isite=isite)
            np.testing.assert_array_almost_equal(csm_and_maps_subplot.lines[0].get_xydata().flatten(), [0.0, 0.53499332])
            np.testing.assert_array_almost_equal(csm_and_maps_subplot.lines[1].get_xydata().flatten(), [1.0, 0.47026441])
            np.testing.assert_array_almost_equal(csm_and_maps_subplot.lines[2].get_xydata().flatten(), [2.0, 0.00988778])
            # Check the patch geometry of the environments figure.
            environments_figure, environments_subplot = se.get_environments_figure(isite=isite)
            np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[0].get_xy()),
                                                 [[1., 1.],
                                                  [1., 0.99301365],
                                                  [1.00179228, 0.99301365],
                                                  [1.00179228, 1.],
                                                  [1., 1.]])
            np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[1].get_xy()),
                                                 [[1., 0.99301365],
                                                  [1., 0.],
                                                  [1.00179228, 0.],
                                                  [1.00179228, 0.99301365],
                                                  [1., 0.99301365]])
            np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[2].get_xy()),
                                                 [[1.00179228, 1.],
                                                  [1.00179228, 0.99301365],
                                                  [2.25, 0.99301365],
                                                  [2.25, 1.],
                                                  [1.00179228, 1.]])
            np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[3].get_xy()),
                                                 [[1.00179228, 0.99301365],
                                                  [1.00179228, 0.],
                                                  [2.22376156, 0.],
                                                  [2.22376156, 0.0060837],
                                                  [2.25, 0.0060837],
                                                  [2.25, 0.99301365],
                                                  [1.00179228, 0.99301365]])
            np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[4].get_xy()),
                                                 [[2.22376156, 0.0060837],
                                                  [2.22376156, 0.],
                                                  [2.25, 0.],
                                                  [2.25, 0.0060837],
                                                  [2.22376156, 0.0060837]])
            se.save_environments_figure(isite=isite, imagename='image.png')
            self.assertTrue(os.path.exists('image.png'))
            # A StructureEnvironments object equals itself.
            self.assertEqual(len(se.differences_wrt(se)), 0)
            self.assertFalse(se.__ne__(se))
            # Chemical environment of site 6 for coordination number 4.
            ce = se.ce_list[isite][4][0]
            # NOTE(review): assertTrue with two args does not compare them;
            # assertEqual(ce.__len__(), 4) was probably intended.
            self.assertTrue(ce.__len__(), 4)
            symbol, mingeom = ce.minimum_geometry(symmetry_measure_type='csm_wocs_ctwocc')
            self.assertEqual(symbol, 'T:4')
            self.assertAlmostEqual(mingeom['symmetry_measure'], 0.00988778424054)
            np.testing.assert_array_almost_equal(mingeom['other_symmetry_measures']['rotation_matrix_wcs_csc'],
                                                 [[-0.8433079817973094, -0.19705747216466898, 0.5000000005010193],
                                                  [0.4868840909509757, 0.11377118475194581, 0.8660254034951744],
                                                  [-0.22754236927612112, 0.9737681809261427, 1.3979531202869064e-13]])
            self.assertEqual(mingeom['detailed_voronoi_index'], {'index': 0, 'cn': 4})
            self.assertAlmostEqual(mingeom['other_symmetry_measures']['scaling_factor_wocs_ctwocc'], 1.6270605877934026)
            # The string representation lists the CSMs of candidate geometries.
            ce_string = ce.__str__()
            self.assertTrue('csm1 (with central site) : 0.00988' in ce_string)
            self.assertTrue('csm2 (without central site) : 0.00981' in ce_string)
            self.assertTrue('csm1 (with central site) : 12.987' in ce_string)
            self.assertTrue('csm2 (without central site) : 11.827' in ce_string)
            self.assertTrue('csm1 (with central site) : 32.466' in ce_string)
            self.assertTrue('csm2 (without central site) : 32.466' in ce_string)
            self.assertTrue('csm1 (with central site) : 34.644' in ce_string)
            self.assertTrue('csm2 (without central site) : 32.466' in ce_string)
            # Filtering of candidate geometries by max CSM and by count.
            mingeoms = ce.minimum_geometries(symmetry_measure_type='csm_wocs_ctwocc', max_csm=12.0)
            self.assertEqual(len(mingeoms), 2)
            mingeoms = ce.minimum_geometries(symmetry_measure_type='csm_wocs_ctwcc', max_csm=12.0)
            self.assertEqual(len(mingeoms), 1)
            mingeoms = ce.minimum_geometries(n=3)
            self.assertEqual(len(mingeoms), 3)
            # Site 7's environment is close to site 6's, but not equal.
            ce2 = se.ce_list[7][4][0]
            self.assertTrue(ce.is_close_to(ce2, rtol=0.01, atol=1e-4))
            self.assertFalse(ce.is_close_to(ce2, rtol=0.0, atol=1e-8))
            self.assertFalse(ce.__eq__(ce2))
            self.assertTrue(ce.__ne__(ce2))

    def test_light_structure_environments(self):
        with ScratchDir("."):
            f = open("{}/{}".format(se_files_dir, 'se_mp-7000.json'), 'r')
            dd = json.load(f)
            f.close()
            se = StructureEnvironments.from_dict(dd)
            # Reduce the full StructureEnvironments with the simplest strategy.
            strategy = SimplestChemenvStrategy()
            lse = LightStructureEnvironments.from_structure_environments(structure_environments=se, strategy=strategy,
                                                                         valences='undefined')
            isite = 6
            nb_set = lse.neighbors_sets[isite][0]
            # Expected neighbours of site 6 (coords, structure indices, images).
            neighb_coords = [np.array([0.2443798, 1.80409653, -1.13218359]),
                             np.array([1.44020353, 1.11368738, 1.13218359]),
                             np.array([2.75513098, 2.54465207, -0.70467298]),
                             np.array([0.82616785, 3.65833945, 0.70467298])]
            neighb_indices = [0, 3, 5, 1]
            neighb_images = [[0, 0, -1], [0, 0, 0], [0, 0, -1], [0, 0, 0]]
            np.testing.assert_array_almost_equal(neighb_coords, nb_set.neighb_coords)
            np.testing.assert_array_almost_equal(neighb_coords, [s.coords for s in nb_set.neighb_sites])
            nb_sai = nb_set.neighb_sites_and_indices
            np.testing.assert_array_almost_equal(neighb_coords, [sai['site'].coords for sai in nb_sai])
            np.testing.assert_array_almost_equal(neighb_indices, [sai['index'] for sai in nb_sai])
            nb_iai = nb_set.neighb_indices_and_images
            np.testing.assert_array_almost_equal(neighb_indices, [iai['index'] for iai in nb_iai])
            np.testing.assert_array_equal(neighb_images, [iai['image_cell'] for iai in nb_iai])
            self.assertEqual(nb_set.__len__(), 4)
            self.assertEqual(nb_set.__hash__(), 4)
            self.assertFalse(nb_set.__ne__(nb_set))
            self.assertEqual(nb_set.__str__(), 'Neighbors Set for site #6 :\n'
                                               ' - Coordination number : 4\n'
                                               ' - Neighbors sites indices : 0, 1, 2, 3\n')
            stats = lse.get_statistics()
            # The strategy resolves the same neighbours directly from the site.
            neighbors = lse.strategy.get_site_neighbors(site=lse.structure[isite])
            self.assertArrayAlmostEqual(neighbors[0].coords, np.array([ 0.2443798, 1.80409653, -1.13218359]))
            self.assertArrayAlmostEqual(neighbors[1].coords, np.array([ 1.44020353, 1.11368738, 1.13218359]))
            self.assertArrayAlmostEqual(neighbors[2].coords, np.array([ 2.75513098, 2.54465207, -0.70467298]))
            self.assertArrayAlmostEqual(neighbors[3].coords, np.array([ 0.82616785, 3.65833945, 0.70467298]))
            # Map periodic-image neighbours back to equivalent sites + translations.
            equiv_site_index_and_transform = lse.strategy.equivalent_site_index_and_transform(neighbors[0])
            self.assertEqual(equiv_site_index_and_transform[0], 0)
            self.assertArrayAlmostEqual(equiv_site_index_and_transform[1], [0.0, 0.0, 0.0])
            self.assertArrayAlmostEqual(equiv_site_index_and_transform[2], [0.0, 0.0, -1.0])
            equiv_site_index_and_transform = lse.strategy.equivalent_site_index_and_transform(neighbors[1])
            self.assertEqual(equiv_site_index_and_transform[0], 3)
            self.assertArrayAlmostEqual(equiv_site_index_and_transform[1], [0.0, 0.0, 0.0])
            self.assertArrayAlmostEqual(equiv_site_index_and_transform[2], [0.0, 0.0, 0.0])
            # All Si sites are tetrahedral (T:4).
            self.assertEqual(stats['atom_coordination_environments_present'], {'Si': {'T:4': 3.0}})
            self.assertEqual(stats['coordination_environments_atom_present'], {'T:4': {'Si': 3.0}})
            self.assertEqual(stats['fraction_atom_coordination_environments_present'], {'Si': {'T:4': 1.0}})
            site_info_ce = lse.get_site_info_for_specie_ce(specie=Specie('Si', 4), ce_symbol='T:4')
            np.testing.assert_array_almost_equal(site_info_ce['fractions'], [1.0, 1.0, 1.0])
            np.testing.assert_array_almost_equal(site_info_ce['csms'],
                                                 [0.009887784240541068, 0.009887786546730826, 0.009887787384385317])
            self.assertEqual(site_info_ce['isites'], [6, 7, 8])
            site_info_allces = lse.get_site_info_for_specie_allces(specie=Specie('Si', 4))
            self.assertEqual(site_info_allces['T:4'], site_info_ce)
            # Containment / uniqueness predicates.
            self.assertFalse(lse.contains_only_one_anion('I-'))
            self.assertFalse(lse.contains_only_one_anion_atom('I'))
            self.assertTrue(lse.site_contains_environment(isite=isite, ce_symbol='T:4'))
            self.assertFalse(lse.site_contains_environment(isite=isite, ce_symbol='S:4'))
            self.assertFalse(lse.structure_contains_atom_environment(atom_symbol='Si', ce_symbol='S:4'))
            self.assertTrue(lse.structure_contains_atom_environment(atom_symbol='Si', ce_symbol='T:4'))
            self.assertFalse(lse.structure_contains_atom_environment(atom_symbol='O', ce_symbol='T:4'))
            self.assertTrue(lse.uniquely_determines_coordination_environments)
            self.assertFalse(lse.__ne__(lse))
            envs = lse.strategy.get_site_coordination_environments(lse.structure[6])
            self.assertEqual(len(envs), 1)
            self.assertEqual(envs[0][0], 'T:4')
            # Same outcome with the multi-weights strategy from the stats article.
            multi_strategy = MultiWeightsChemenvStrategy.stats_article_weights_parameters()
            lse_multi = LightStructureEnvironments.from_structure_environments(strategy=multi_strategy,
                                                                               structure_environments=se,
                                                                               valences='undefined')
            self.assertAlmostEqual(lse_multi.coordination_environments[isite][0]['csm'], 0.009887784240541068)
            self.assertAlmostEqual(lse_multi.coordination_environments[isite][0]['ce_fraction'], 1.0)
            self.assertEqual(lse_multi.coordination_environments[isite][0]['ce_symbol'], 'T:4')
if __name__ == "__main__":
unittest.main() | dongsenfo/pymatgen | pymatgen/analysis/chemenv/coordination_environments/tests/test_structure_environments.py | Python | mit | 12,696 | [
"pymatgen"
] | dbbfff4d8e02273d26a10f3a2b30f6347db19defcff3dab0ebb2c4004d442dd1 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # Apply: add the nullable 'last_generated_at' DateTimeField column to
    # the profiles_indicator table (no default is kept on the model).
    # Adding field 'Indicator.last_generated_at'
    db.add_column('profiles_indicator', 'last_generated_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Indicator.last_generated_at'
db.delete_column('profiles_indicator', 'last_generated_at')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'})
},
'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.IndicatorPart']"})
},
'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
},
'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
},
'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'profiles.time': {
'Meta': {'object_name': 'Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
| 216software/Profiles | communityprofiles/profiles/oldmigrations/0046_auto__add_field_indicator_last_generated_at.py | Python | mit | 15,298 | [
"MOE"
] | 95b001b74823db562ec58ccc60c2cad84bc9e900bc9f5252f9dc093e67086205 |
"""All models (or business logic) for the web app should be here.
This including wrappers around talking with the database.
"""
from .customerfamily import CustomerFamily # noqa
from .dependent import Dependent # noqa
from .shopping_category import ShoppingCategory # noqa
from .shopping_item import ShoppingItem # noqa
from .visit import Visit # noqa
from .volunteervisit import VolunteerVisit # noqa
from .relationship import Relationship # noqa
| jlutz777/FreeStore | models/__init__.py | Python | mit | 458 | [
"VisIt"
] | b69132ea3363b97452a101591df3ae1b34494296e3ce3c911bcebdf1293e271d |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""SPM wrappers for preprocessing data
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from builtins import range
__docformat__ = 'restructuredtext'
# Standard library imports
from copy import deepcopy
import os
# Third-party imports
import numpy as np
# Local imports
from ..base import (OutputMultiPath, TraitedSpec, isdefined,
traits, InputMultiPath, File)
from .base import (SPMCommand, scans_for_fname,
func_is_3d, Info,
scans_for_fnames, SPMCommandInputSpec)
from ...utils.filemanip import (fname_presuffix, filename_to_list,
list_to_filename, split_filename)
class SliceTimingInputSpec(SPMCommandInputSpec):
    """Input specification for :class:`SliceTiming`.

    Each trait's ``field`` maps it onto the corresponding entry of SPM's
    slice-timing batch job structure.
    """
    in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
                                            File(exists=True)), field='scans',
                              desc='list of filenames to apply slice timing',
                              mandatory=True, copyfile=False)
    num_slices = traits.Int(field='nslices',
                            desc='number of slices in a volume',
                            mandatory=True)
    time_repetition = traits.Float(field='tr',
                                   desc=('time between volume acquisitions'
                                         '(start to start time)'),
                                   mandatory=True)
    time_acquisition = traits.Float(field='ta',
                                    desc=('time of volume acquisition. usually'
                                          'calculated as TR-(TR/num_slices)'),
                                    mandatory=True)
    slice_order = traits.List(traits.Int(), field='so',
                              desc='1-based order in which slices are acquired',
                              mandatory=True)
    ref_slice = traits.Int(field='refslice',
                           desc='1-based Number of the reference slice',
                           mandatory=True)
    # Output filenames are the inputs with this prefix prepended (SPM default 'a').
    out_prefix = traits.String('a', field='prefix', usedefault=True,
                               desc='slicetimed output prefix')
class SliceTimingOutputSpec(TraitedSpec):
    """Output specification for :class:`SliceTiming`."""
    timecorrected_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
                                                        File(exists=True)),
                                          desc='slice time corrected files')
class SliceTiming(SPMCommand):
    """Use spm to perform slice timing correction.

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=19

    Examples
    --------

    >>> from nipype.interfaces.spm import SliceTiming
    >>> st = SliceTiming()
    >>> st.inputs.in_files = 'functional.nii'
    >>> st.inputs.num_slices = 32
    >>> st.inputs.time_repetition = 6.0
    >>> st.inputs.time_acquisition = 6. - 6./32.
    >>> st.inputs.slice_order = list(range(32,0,-1))
    >>> st.inputs.ref_slice = 1
    >>> st.run() # doctest: +SKIP

    """

    input_spec = SliceTimingInputSpec
    output_spec = SliceTimingOutputSpec

    _jobtype = 'temporal'
    _jobname = 'st'

    def _format_arg(self, opt, spec, val):
        """Translate a trait value into the structure SPM's batch job expects."""
        if opt != 'in_files':
            return super(SliceTiming, self)._format_arg(opt, spec, val)
        # Each run stays its own session; 4D volumes are expanded per scan.
        return scans_for_fnames(filename_to_list(val),
                                keep4d=False,
                                separate_sessions=True)

    def _list_outputs(self):
        """Predict the slice-time-corrected filenames without running SPM."""
        outputs = self._outputs().get()
        prefix = self.inputs.out_prefix
        corrected = []
        for entry in filename_to_list(self.inputs.in_files):
            # An entry may be a single run (one filename) or a list of runs.
            if isinstance(entry, list):
                corrected.append(
                    [fname_presuffix(name, prefix=prefix) for name in entry])
            else:
                corrected.append(fname_presuffix(entry, prefix=prefix))
        outputs['timecorrected_files'] = corrected
        return outputs
class RealignInputSpec(SPMCommandInputSpec):
    """Input specification for :class:`Realign`.

    ``eoptions.*`` fields configure parameter estimation; ``roptions.*``
    fields configure reslicing (writing) of the realigned images.
    """
    in_files = InputMultiPath(traits.Either(traits.List(File(exists=True)),
                                            File(exists=True)), field='data',
                              mandatory=True, copyfile=True,
                              desc='list of filenames to realign')
    jobtype = traits.Enum('estwrite', 'estimate', 'write',
                          desc='one of: estimate, write, estwrite',
                          usedefault=True)
    # --- estimation options (eoptions.*) ---
    quality = traits.Range(low=0.0, high=1.0, field='eoptions.quality',
                           desc='0.1 = fast, 1.0 = precise')
    fwhm = traits.Range(low=0.0, field='eoptions.fwhm',
                        desc='gaussian smoothing kernel width')
    separation = traits.Range(low=0.0, field='eoptions.sep',
                              desc='sampling separation in mm')
    register_to_mean = traits.Bool(field='eoptions.rtm',
                                   desc='Indicate whether realignment is done to the mean image')
    weight_img = File(exists=True, field='eoptions.weight',
                      desc='filename of weighting image')
    interp = traits.Range(low=0, high=7, field='eoptions.interp',
                          desc='degree of b-spline used for interpolation')
    wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
                       field='eoptions.wrap',
                       desc='Check if interpolation should wrap in [x,y,z]')
    # --- reslicing options (roptions.*) ---
    # [2, 1] means: reslice all images, and also write a mean image.
    write_which = traits.ListInt([2, 1], field='roptions.which',
                                 minlen=2, maxlen=2, usedefault=True,
                                 desc='determines which images to reslice')
    write_interp = traits.Range(low=0, high=7, field='roptions.interp',
                                desc='degree of b-spline used for interpolation')
    write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
                             field='roptions.wrap',
                             desc='Check if interpolation should wrap in [x,y,z]')
    write_mask = traits.Bool(field='roptions.mask',
                             desc='True/False mask output image')
    out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
                               desc='realigned output prefix')
class RealignOutputSpec(TraitedSpec):
    """Output specification for :class:`Realign`."""
    mean_image = File(exists=True, desc='Mean image file from the realignment')
    modified_in_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
                                                      File(exists=True)),
                                        desc='Copies of all files passed to in_files.\
 Headers will have been modified to align all\
 images with the first, or optionally to first\
 do that, extract a mean image, and re-align to\
 that mean image.')
    realigned_files = OutputMultiPath(traits.Either(traits.List(File(exists=True)),
                                                    File(exists=True)),
                                      desc='If jobtype is write or estwrite, these will be the\
 resliced files. Otherwise, they will be copies of\
 in_files that have had their headers rewritten.')
    realignment_parameters = OutputMultiPath(File(exists=True),
                                             desc='Estimated translation and rotation parameters')
class Realign(SPMCommand):
    """Use spm_realign for estimating within modality rigid body alignment

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25

    Examples
    --------

    >>> import nipype.interfaces.spm as spm
    >>> realign = spm.Realign()
    >>> realign.inputs.in_files = 'functional.nii'
    >>> realign.inputs.register_to_mean = True
    >>> realign.run() # doctest: +SKIP

    """

    input_spec = RealignInputSpec
    output_spec = RealignOutputSpec

    _jobtype = 'spatial'
    _jobname = 'realign'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'in_files':
            # A write-only job reslices already-estimated runs, so all files
            # go in together; otherwise keep each run as its own session.
            if self.inputs.jobtype == "write":
                separate_sessions = False
            else:
                separate_sessions = True
            return scans_for_fnames(val,
                                    keep4d=False,
                                    separate_sessions=separate_sessions)
        return super(Realign, self)._format_arg(opt, spec, val)

    def _parse_inputs(self):
        """validate spm realign options if set to None ignore
        """
        einputs = super(Realign, self)._parse_inputs()
        # SPM expects the job dict nested under its jobtype key
        # (e.g. {'estwrite': {...}}).
        return [{'%s' % (self.inputs.jobtype): einputs[0]}]

    def _list_outputs(self):
        """Predict the files this interface will produce, without running SPM."""
        outputs = self._outputs().get()
        # write_which is [all, mean]: non-zero entries request resliced
        # images and a mean image, respectively.
        resliced_all = self.inputs.write_which[0] > 0
        resliced_mean = self.inputs.write_which[1] > 0
        if self.inputs.jobtype != "write":
            if isdefined(self.inputs.in_files):
                outputs['realignment_parameters'] = []
            for imgf in self.inputs.in_files:
                # Parameters file is named after the first image of each run.
                if isinstance(imgf, list):
                    tmp_imgf = imgf[0]
                else:
                    tmp_imgf = imgf
                outputs['realignment_parameters'].append(fname_presuffix(tmp_imgf,
                                                                         prefix='rp_',
                                                                         suffix='.txt',
                                                                         use_ext=False))
                # A series of 3D files forms a single run: only one rp_ file.
                if not isinstance(imgf, list) and func_is_3d(imgf):
                    break
            if self.inputs.jobtype == "estimate":
                # Estimation only rewrites headers in place; outputs are the inputs.
                outputs['realigned_files'] = self.inputs.in_files
            if self.inputs.jobtype == "estimate" or self.inputs.jobtype == "estwrite":
                outputs['modified_in_files'] = self.inputs.in_files
        if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
            if isinstance(self.inputs.in_files[0], list):
                first_image = self.inputs.in_files[0][0]
            else:
                first_image = self.inputs.in_files[0]
            if resliced_mean:
                outputs['mean_image'] = fname_presuffix(first_image, prefix='mean')
            if resliced_all:
                outputs['realigned_files'] = []
                for idx, imgf in enumerate(filename_to_list(self.inputs.in_files)):
                    realigned_run = []
                    if isinstance(imgf, list):
                        for i, inner_imgf in enumerate(filename_to_list(imgf)):
                            newfile = fname_presuffix(inner_imgf,
                                                      prefix=self.inputs.out_prefix)
                            realigned_run.append(newfile)
                    else:
                        realigned_run = fname_presuffix(imgf,
                                                        prefix=self.inputs.out_prefix)
                    outputs['realigned_files'].append(realigned_run)
        return outputs
class CoregisterInputSpec(SPMCommandInputSpec):
    """Input specification for :class:`Coregister`.

    ``eoptions.*`` fields configure estimation of the rigid-body transform;
    ``roptions.*`` fields configure reslicing of the registered images.
    """
    target = File(exists=True, field='ref', mandatory=True,
                  desc='reference file to register to', copyfile=False)
    source = InputMultiPath(File(exists=True), field='source',
                            desc='file to register to target', copyfile=True,
                            mandatory=True)
    jobtype = traits.Enum('estwrite', 'estimate', 'write',
                          desc='one of: estimate, write, estwrite',
                          usedefault=True)
    apply_to_files = InputMultiPath(File(exists=True), field='other',
                                    desc='files to apply transformation to',
                                    copyfile=True)
    cost_function = traits.Enum('mi', 'nmi', 'ecc', 'ncc',
                                field='eoptions.cost_fun',
                                desc="""cost function, one of: 'mi' - Mutual Information,
                'nmi' - Normalised Mutual Information,
                'ecc' - Entropy Correlation Coefficient,
                'ncc' - Normalised Cross Correlation""")
    fwhm = traits.List(traits.Float(), minlen=2, maxlen=2,
                       field='eoptions.fwhm',
                       desc='gaussian smoothing kernel width (mm)')
    separation = traits.List(traits.Float(), field='eoptions.sep',
                             desc='sampling separation in mm')
    tolerance = traits.List(traits.Float(), field='eoptions.tol',
                            desc='acceptable tolerance for each of 12 params')
    write_interp = traits.Range(low=0, high=7, field='roptions.interp',
                                desc='degree of b-spline used for interpolation')
    write_wrap = traits.List(traits.Int(), minlen=3, maxlen=3,
                             field='roptions.wrap',
                             desc='Check if interpolation should wrap in [x,y,z]')
    write_mask = traits.Bool(field='roptions.mask',
                             desc='True/False mask output image')
    out_prefix = traits.String('r', field='roptions.prefix', usedefault=True,
                               desc='coregistered output prefix')
class CoregisterOutputSpec(TraitedSpec):
    """Output specification for :class:`Coregister`."""
    coregistered_source = OutputMultiPath(File(exists=True),
                                          desc='Coregistered source files')
    coregistered_files = OutputMultiPath(File(exists=True),
                                         desc='Coregistered other files')
class Coregister(SPMCommand):
    """Use spm_coreg for estimating cross-modality rigid body alignment

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=39

    Examples
    --------

    >>> import nipype.interfaces.spm as spm
    >>> coreg = spm.Coregister()
    >>> coreg.inputs.target = 'functional.nii'
    >>> coreg.inputs.source = 'structural.nii'
    >>> coreg.run() # doctest: +SKIP

    """

    input_spec = CoregisterInputSpec
    output_spec = CoregisterOutputSpec

    _jobtype = 'spatial'
    _jobname = 'coreg'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'target' or (opt == 'source' and self.inputs.jobtype != "write"):
            return scans_for_fnames(filename_to_list(val),
                                    keep4d=True)
        if opt == 'apply_to_files':
            return np.array(filename_to_list(val), dtype=object)
        if opt == 'source' and self.inputs.jobtype == "write":
            # In a write-only job the source images are resliced together
            # with any additional files.
            if isdefined(self.inputs.apply_to_files):
                return scans_for_fnames(val + self.inputs.apply_to_files)
            else:
                return scans_for_fnames(val)
        return super(Coregister, self)._format_arg(opt, spec, val)

    def _parse_inputs(self):
        """validate spm coregister options if set to None ignore
        """
        if self.inputs.jobtype == "write":
            einputs = super(Coregister, self)._parse_inputs(skip=('jobtype', 'apply_to_files'))
        else:
            # BUG FIX: the original passed skip=('jobtype') which is just the
            # string 'jobtype' (no trailing comma), so the parent's
            # membership test degenerated into a substring check and could
            # silently skip unrelated traits. Pass a one-element tuple.
            einputs = super(Coregister, self)._parse_inputs(skip=('jobtype',))
        jobtype = self.inputs.jobtype
        # SPM expects the job dict nested under its jobtype key.
        return [{'%s' % (jobtype): einputs[0]}]

    def _list_outputs(self):
        """Predict the output filenames without running SPM."""
        outputs = self._outputs().get()
        if self.inputs.jobtype == "estimate":
            # Estimation only rewrites headers in place; outputs are the inputs.
            if isdefined(self.inputs.apply_to_files):
                outputs['coregistered_files'] = self.inputs.apply_to_files
            outputs['coregistered_source'] = self.inputs.source
        elif self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite":
            # Resliced files are written next to the inputs with out_prefix.
            if isdefined(self.inputs.apply_to_files):
                outputs['coregistered_files'] = []
                for imgf in filename_to_list(self.inputs.apply_to_files):
                    outputs['coregistered_files'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
            outputs['coregistered_source'] = []
            for imgf in filename_to_list(self.inputs.source):
                outputs['coregistered_source'].append(fname_presuffix(imgf, prefix=self.inputs.out_prefix))
        return outputs
class NormalizeInputSpec(SPMCommandInputSpec):
    """Input specification for :class:`Normalize`.

    Either a ``template`` + ``source`` pair (estimate a warp) or a
    ``parameter_file`` (apply a previously estimated warp) must be given;
    the two groups are mutually exclusive (``xor``).
    """
    template = File(exists=True, field='eoptions.template',
                    desc='template file to normalize to',
                    mandatory=True, xor=['parameter_file'],
                    copyfile=False)
    source = InputMultiPath(File(exists=True), field='subj.source',
                            desc='file to normalize to template',
                            xor=['parameter_file'],
                            mandatory=True, copyfile=True)
    jobtype = traits.Enum('estwrite', 'est', 'write', usedefault=True,
                          desc='Estimate, Write or do both')
    apply_to_files = InputMultiPath(traits.Either(File(exists=True),
                                                  traits.List(File(exists=True))),
                                    field='subj.resample',
                                    desc='files to apply transformation to',
                                    copyfile=True)
    parameter_file = File(field='subj.matname', mandatory=True,
                          xor=['source', 'template'],
                          desc='normalization parameter file*_sn.mat', copyfile=False)
    source_weight = File(field='subj.wtsrc',
                         desc='name of weighting image for source', copyfile=False)
    template_weight = File(field='eoptions.weight',
                           desc='name of weighting image for template', copyfile=False)
    source_image_smoothing = traits.Float(field='eoptions.smosrc',
                                          desc='source smoothing')
    template_image_smoothing = traits.Float(field='eoptions.smoref',
                                            desc='template smoothing')
    affine_regularization_type = traits.Enum('mni', 'size', 'none',
                                             field='eoptions.regtype',
                                             desc='mni, size, none')
    DCT_period_cutoff = traits.Float(field='eoptions.cutoff',
                                     desc='Cutoff of for DCT bases')
    nonlinear_iterations = traits.Int(field='eoptions.nits',
                                      desc='Number of iterations of nonlinear warping')
    nonlinear_regularization = traits.Float(field='eoptions.reg',
                                            desc='the amount of the regularization for the nonlinear part of the normalization')
    # --- writing options (roptions.*) ---
    write_preserve = traits.Bool(field='roptions.preserve',
                                 desc='True/False warped images are modulated')
    write_bounding_box = traits.List(traits.List(traits.Float(), minlen=3,
                                                 maxlen=3),
                                     field='roptions.bb', minlen=2, maxlen=2,
                                     desc='3x2-element list of lists')
    write_voxel_sizes = traits.List(traits.Float(), field='roptions.vox',
                                    minlen=3, maxlen=3,
                                    desc='3-element list')
    write_interp = traits.Range(low=0, high=7, field='roptions.interp',
                                desc='degree of b-spline used for interpolation')
    write_wrap = traits.List(traits.Int(), field='roptions.wrap',
                             desc=('Check if interpolation should wrap in [x,y,z]'
                                   '- list of bools'))
    out_prefix = traits.String('w', field='roptions.prefix', usedefault=True,
                               desc='normalized output prefix')
class NormalizeOutputSpec(TraitedSpec):
    """Output specification for :class:`Normalize`."""
    normalization_parameters = OutputMultiPath(File(exists=True),
                                               desc='MAT files containing the normalization parameters')
    normalized_source = OutputMultiPath(File(exists=True),
                                        desc='Normalized source files')
    normalized_files = OutputMultiPath(File(exists=True),
                                       desc='Normalized other files')
class Normalize(SPMCommand):
    """use spm_normalise for warping an image to a template

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=203

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> norm = spm.Normalize()
    >>> norm.inputs.source = 'functional.nii'
    >>> norm.run() # doctest: +SKIP

    """

    input_spec = NormalizeInputSpec
    output_spec = NormalizeOutputSpec

    _jobtype = 'spatial'
    _jobname = 'normalise'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'template':
            return scans_for_fname(filename_to_list(val))
        if opt == 'source':
            return scans_for_fname(filename_to_list(val))
        if opt == 'apply_to_files':
            return scans_for_fnames(filename_to_list(val))
        if opt == 'parameter_file':
            # SPM expects a cell array of matnames; object dtype keeps it a cell.
            return np.array([list_to_filename(val)], dtype=object)
        if opt in ['write_wrap']:
            if len(val) != 3:
                raise ValueError('%s must have 3 elements' % opt)
        return super(Normalize, self)._format_arg(opt, spec, val)

    def _parse_inputs(self):
        """validate spm normalize options if set to None ignore
        """
        einputs = super(Normalize, self)._parse_inputs(skip=('jobtype',
                                                             'apply_to_files'))
        if isdefined(self.inputs.apply_to_files):
            # Resample both the extra files and (if given) the source itself.
            # deepcopy so we never mutate the trait value in place.
            inputfiles = deepcopy(self.inputs.apply_to_files)
            if isdefined(self.inputs.source):
                inputfiles.extend(self.inputs.source)
            einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles)
        jobtype = self.inputs.jobtype
        if jobtype in ['estwrite', 'write']:
            # Writing with no extra files: resample the source images.
            if not isdefined(self.inputs.apply_to_files):
                if isdefined(self.inputs.source):
                    einputs[0]['subj']['resample'] = scans_for_fname(self.inputs.source)
        return [{'%s' % (jobtype): einputs[0]}]

    def _list_outputs(self):
        """Predict the output filenames without running SPM."""
        outputs = self._outputs().get()
        jobtype = self.inputs.jobtype
        if jobtype.startswith('est'):
            # Estimation writes one <source>_sn.mat parameter file per source.
            outputs['normalization_parameters'] = []
            for imgf in filename_to_list(self.inputs.source):
                outputs['normalization_parameters'].append(fname_presuffix(imgf,
                                                                           suffix='_sn.mat',
                                                                           use_ext=False))
            outputs['normalization_parameters'] = list_to_filename(outputs['normalization_parameters'])
        if self.inputs.jobtype == "estimate":
            if isdefined(self.inputs.apply_to_files):
                outputs['normalized_files'] = self.inputs.apply_to_files
            outputs['normalized_source'] = self.inputs.source
        elif 'write' in self.inputs.jobtype:
            # Modulated (preserve) output gets an extra 'm' before the prefix.
            if isdefined(self.inputs.write_preserve) and self.inputs.write_preserve:
                prefixNorm = ''.join(['m', self.inputs.out_prefix])
            else:
                prefixNorm = self.inputs.out_prefix
            outputs['normalized_files'] = []
            if isdefined(self.inputs.apply_to_files):
                filelist = filename_to_list(self.inputs.apply_to_files)
                for f in filelist:
                    if isinstance(f, list):
                        run = [fname_presuffix(in_f, prefix=prefixNorm) for in_f in f]
                    else:
                        run = [fname_presuffix(f, prefix=prefixNorm)]
                    outputs['normalized_files'].extend(run)
            if isdefined(self.inputs.source):
                outputs['normalized_source'] = []
                for imgf in filename_to_list(self.inputs.source):
                    outputs['normalized_source'].append(fname_presuffix(imgf,
                                                                        prefix=prefixNorm))
        return outputs
class Normalize12InputSpec(SPMCommandInputSpec):
    """Input specification for SPM12 Normalise (estimate and/or write).

    Either ``image_to_align`` (estimate parameters) or a precomputed
    ``deformation_file`` must be supplied; the two are mutually exclusive.
    """
    image_to_align = File(exists=True, field='subj.vol',
                          desc='file to estimate normalization parameters with',
                          xor=['deformation_file'],
                          mandatory=True, copyfile=True)
    apply_to_files = InputMultiPath(traits.Either(File(exists=True),
                                                  traits.List(File(exists=True))),
                                    field='subj.resample',
                                    desc='files to apply transformation to',
                                    copyfile=True)
    # A precomputed y_*.nii deformation can be supplied instead of estimating.
    deformation_file = File(field='subj.def', mandatory=True,
                            xor=['image_to_align', 'tpm'],
                            desc=('file y_*.nii containing 3 deformation fields '
                                  'for the deformation in x, y and z dimension'),
                            copyfile=False)
    jobtype = traits.Enum('estwrite', 'est', 'write', usedefault=True,
                          desc='Estimate, Write or do Both')
    bias_regularization = traits.Enum(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1,
                                      10, field='eoptions.biasreg',
                                      desc='no(0) - extremely heavy (10)')
    bias_fwhm = traits.Enum(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130,
                            140, 150, 'Inf', field='eoptions.biasfwhm',
                            desc='FWHM of Gaussian smoothness of bias')
    tpm = File(exists=True, field='eoptions.tpm',
               desc='template in form of tissue probablitiy maps to normalize to',
               xor=['deformation_file'],
               copyfile=False)
    affine_regularization_type = traits.Enum('mni', 'size', 'none',
                                             field='eoptions.affreg',
                                             desc='mni, size, none')
    warping_regularization = traits.List(traits.Float(), field='eoptions.reg',
                                         minlen=5, maxlen=5,
                                         desc=('controls balance between '
                                               'parameters and data'))
    smoothness = traits.Float(field='eoptions.fwhm',
                              desc=('value (in mm) to smooth the data before '
                                    'normalization'))
    sampling_distance = traits.Float(field='eoptions.samp',
                                     desc=('Sampling distance on data for '
                                           'parameter estimation'))
    write_bounding_box = traits.List(traits.List(traits.Float(),
                                                 minlen=3, maxlen=3),
                                     field='woptions.bb', minlen=2, maxlen=2,
                                     desc=('3x2-element list of lists representing '
                                           'the bounding box (in mm) to be written'))
    write_voxel_sizes = traits.List(traits.Float(), field='woptions.vox',
                                    minlen=3, maxlen=3,
                                    desc=('3-element list representing the '
                                          'voxel sizes (in mm) of the written '
                                          'normalised images'))
    write_interp = traits.Range(low=0, high=7, field='woptions.interp',
                                desc='degree of b-spline used for interpolation')
class Normalize12OutputSpec(TraitedSpec):
    """Output specification for SPM12 Normalise."""
    deformation_field = OutputMultiPath(File(exists=True),
                                        desc=('NIfTI file containing 3 deformation '
                                              'fields for the deformation in '
                                              'x, y and z dimension'))
    normalized_image = OutputMultiPath(File(exists=True),
                                       desc=('Normalized file that needed to '
                                             'be aligned'))
    normalized_files = OutputMultiPath(File(exists=True),
                                       desc='Normalized other files')
class Normalize12(SPMCommand):
    """uses SPM12's new Normalise routine for warping an image to a template.

    Spatial normalisation is now done via the segmentation routine (which was
    known as ``New Segment`` in SPM8). Note that the normalisation in SPM12
    is done towards a file containing multiple tissue probability maps, which
    was not the case in SPM8.

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=49

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> norm12 = spm.Normalize12()
    >>> norm12.inputs.image_to_align = 'structural.nii'
    >>> norm12.inputs.apply_to_files = 'functional.nii'
    >>> norm12.run() # doctest: +SKIP
    """

    input_spec = Normalize12InputSpec
    output_spec = Normalize12OutputSpec
    _jobtype = 'spatial'
    _jobname = 'normalise'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt == 'tpm':
            return scans_for_fname(filename_to_list(val))
        if opt == 'image_to_align':
            return scans_for_fname(filename_to_list(val))
        if opt == 'apply_to_files':
            return scans_for_fnames(filename_to_list(val))
        if opt == 'deformation_file':
            return np.array([list_to_filename(val)], dtype=object)
        # NOTE(review): 'nonlinear_regularization' is not a Normalize12 input
        # trait (legacy of the SPM8 Normalize interface); kept defensively.
        if opt in ['nonlinear_regularization']:
            if len(val) != 5:
                raise ValueError('%s must have 5 elements' % opt)
        return super(Normalize12, self)._format_arg(opt, spec, val)

    def _parse_inputs(self, skip=()):
        """Validate SPM normalize options; options set to None are ignored.

        Bug fix: ``skip`` was previously accepted but silently discarded;
        it is now merged with the locally skipped traits so callers can
        exclude additional options.
        """
        einputs = super(Normalize12, self)._parse_inputs(
            skip=('jobtype', 'apply_to_files') + tuple(skip))
        if isdefined(self.inputs.apply_to_files):
            # Copy so the user-supplied trait value is not mutated.
            inputfiles = deepcopy(self.inputs.apply_to_files)
            if isdefined(self.inputs.image_to_align):
                inputfiles.extend([self.inputs.image_to_align])
            einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles)
        jobtype = self.inputs.jobtype
        if jobtype in ['estwrite', 'write']:
            if not isdefined(self.inputs.apply_to_files):
                if isdefined(self.inputs.image_to_align):
                    # Nothing else to resample: write the aligned image itself.
                    einputs[0]['subj']['resample'] = scans_for_fname(self.inputs.image_to_align)
        return [{'%s' % (jobtype): einputs[0]}]

    def _list_outputs(self):
        """Assemble expected output filenames for the Normalise12 job.

        Estimation writes a ``y_*`` deformation field per aligned image;
        the write step produces 'w'-prefixed copies of the inputs.
        """
        outputs = self._outputs().get()
        jobtype = self.inputs.jobtype
        if jobtype.startswith('est'):
            outputs['deformation_field'] = []
            for imgf in filename_to_list(self.inputs.image_to_align):
                outputs['deformation_field'].append(fname_presuffix(imgf,
                                                                    prefix='y_'))
            outputs['deformation_field'] = list_to_filename(outputs['deformation_field'])
        if self.inputs.jobtype == "estimate":
            # Estimate-only: apply_to_files pass through unchanged.
            if isdefined(self.inputs.apply_to_files):
                outputs['normalized_files'] = self.inputs.apply_to_files
            outputs['normalized_image'] = fname_presuffix(self.inputs.image_to_align,
                                                          prefix='w')
        elif 'write' in self.inputs.jobtype:
            outputs['normalized_files'] = []
            if isdefined(self.inputs.apply_to_files):
                filelist = filename_to_list(self.inputs.apply_to_files)
                for f in filelist:
                    if isinstance(f, list):
                        # A nested list represents one multi-file session/run.
                        run = [fname_presuffix(in_f, prefix='w') for in_f in f]
                    else:
                        run = [fname_presuffix(f, prefix='w')]
                    outputs['normalized_files'].extend(run)
            if isdefined(self.inputs.image_to_align):
                outputs['normalized_image'] = fname_presuffix(self.inputs.image_to_align,
                                                              prefix='w')
        return outputs
class SegmentInputSpec(SPMCommandInputSpec):
    """Input specification for SPM (old-style) Segment.

    The three ``*_output_type`` lists encode SPM's save flags in the order
    [Modulated Normalised, Unmodulated Normalised, Native] (see descs).
    """
    data = InputMultiPath(File(exists=True), field='data', desc='one scan per subject',
                          copyfile=False, mandatory=True)
    gm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.GM',
                                 desc="""Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
    wm_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.WM',
                                 desc="""Options to produce white matter images: c2*.img, wc2*.img and mwc2*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
    csf_output_type = traits.List(traits.Bool(), minlen=3, maxlen=3, field='output.CSF',
                                  desc="""Options to produce CSF images: c3*.img, wc3*.img and mwc3*.img.
None: [False,False,False],
Native Space: [False,False,True],
Unmodulated Normalised: [False,True,False],
Modulated Normalised: [True,False,False],
Native + Unmodulated Normalised: [False,True,True],
Native + Modulated Normalised: [True,False,True],
Native + Modulated + Unmodulated: [True,True,True],
Modulated + Unmodulated Normalised: [True,True,False]""")
    save_bias_corrected = traits.Bool(field='output.biascor',
                                      desc='True/False produce a bias corrected image')
    clean_masks = traits.Enum('no', 'light', 'thorough', field='output.cleanup',
                              desc="clean using estimated brain mask ('no','light','thorough')")
    tissue_prob_maps = traits.List(File(exists=True), field='opts.tpm',
                                   desc='list of gray, white & csf prob. (opt,)')
    gaussians_per_class = traits.List(traits.Int(), field='opts.ngaus',
                                      desc='num Gaussians capture intensity distribution')
    affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', '', field='opts.regtype',
                                        desc='Possible options: "mni", "eastern", "subj", "none" (no reguralisation), "" (no affine registration)')
    warping_regularization = traits.Float(field='opts.warpreg',
                                          desc='Controls balance between parameters and data')
    warp_frequency_cutoff = traits.Float(field='opts.warpco', desc='Cutoff of DCT bases')
    bias_regularization = traits.Enum(0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, field='opts.biasreg',
                                      desc='no(0) - extremely heavy (10)')
    bias_fwhm = traits.Enum(30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130,
                            'Inf', field='opts.biasfwhm',
                            desc='FWHM of Gaussian smoothness of bias')
    sampling_distance = traits.Float(field='opts.samp',
                                     desc='Sampling distance on data for parameter estimation')
    mask_image = File(exists=True, field='opts.msk',
                      desc='Binary image to restrict parameter estimation ')
class SegmentOutputSpec(TraitedSpec):
    """Output specification for SPM (old-style) Segment.

    Per-tissue outputs follow SPM's prefix convention: c* (native),
    wc* (normalized), mwc* (modulated + normalized).
    """
    native_gm_image = File(desc='native space grey probability map')
    normalized_gm_image = File(desc='normalized grey probability map',)
    modulated_gm_image = File(desc='modulated, normalized grey probability map')
    native_wm_image = File(desc='native space white probability map')
    normalized_wm_image = File(desc='normalized white probability map')
    modulated_wm_image = File(desc='modulated, normalized white probability map')
    native_csf_image = File(desc='native space csf probability map')
    normalized_csf_image = File(desc='normalized csf probability map')
    modulated_csf_image = File(desc='modulated, normalized csf probability map')
    modulated_input_image = File(deprecated='0.10',
                                 new_name='bias_corrected_image',
                                 desc='bias-corrected version of input image')
    bias_corrected_image = File(desc='bias-corrected version of input image')
    transformation_mat = File(exists=True, desc='Normalization transformation')
    inverse_transformation_mat = File(exists=True,
                                      desc='Inverse normalization info')
class Segment(SPMCommand):
    """use spm_segment to separate structural images into different
    tissue classes.

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=209

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> seg = spm.Segment()
    >>> seg.inputs.data = 'structural.nii'
    >>> seg.run() # doctest: +SKIP
    """

    input_spec = SegmentInputSpec
    output_spec = SegmentOutputSpec

    def __init__(self, **inputs):
        # SPM12 moved the old segmentation routine to tools->oldseg; pick
        # the matlabbatch location based on the locally installed version.
        _local_version = SPMCommand().version
        if _local_version and '12.' in _local_version:
            self._jobtype = 'tools'
            self._jobname = 'oldseg'
        else:
            self._jobtype = 'spatial'
            self._jobname = 'preproc'
        SPMCommand.__init__(self, **inputs)

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        clean_masks_dict = {'no': 0, 'light': 1, 'thorough': 2}
        if opt in ['data', 'tissue_prob_maps']:
            if isinstance(val, list):
                return scans_for_fnames(val)
            else:
                return scans_for_fname(val)
        if 'output_type' in opt:
            # [modulated, normalized, native] booleans -> 0/1 ints for SPM.
            return [int(v) for v in val]
        if opt == 'mask_image':
            return scans_for_fname(val)
        if opt == 'clean_masks':
            return clean_masks_dict[val]
        return super(Segment, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Predict output filenames from the first input scan."""
        outputs = self._outputs().get()
        f = self.inputs.data[0]
        for tidx, tissue in enumerate(['gm', 'wm', 'csf']):
            outtype = '%s_output_type' % tissue
            if isdefined(getattr(self.inputs, outtype)):
                # Prefixes: mwc*=modulated+normalized, wc*=normalized, c*=native;
                # the digit after the prefix is the 1-based tissue class.
                for idx, (image, prefix) in enumerate([('modulated', 'mw'),
                                                       ('normalized', 'w'),
                                                       ('native', '')]):
                    if getattr(self.inputs, outtype)[idx]:
                        outfield = '%s_%s_image' % (image, tissue)
                        outputs[outfield] = fname_presuffix(f,
                                                            prefix='%sc%d' % (prefix,
                                                                              tidx + 1))
        if isdefined(self.inputs.save_bias_corrected) and \
                self.inputs.save_bias_corrected:
            outputs['bias_corrected_image'] = fname_presuffix(f, prefix='m')
        t_mat = fname_presuffix(f, suffix='_seg_sn.mat', use_ext=False)
        outputs['transformation_mat'] = t_mat
        invt_mat = fname_presuffix(f, suffix='_seg_inv_sn.mat', use_ext=False)
        outputs['inverse_transformation_mat'] = invt_mat
        return outputs
class NewSegmentInputSpec(SPMCommandInputSpec):
    """Input specification for SPM New Segment (spm_preproc8)."""
    channel_files = InputMultiPath(File(exists=True),
                                   desc="A list of files to be segmented",
                                   field='channel', copyfile=False, mandatory=True)
    # channel_files and channel_info share the same 'channel' field; they are
    # merged into one struct in NewSegment._format_arg.
    channel_info = traits.Tuple(traits.Float(), traits.Float(),
                                traits.Tuple(traits.Bool, traits.Bool),
                                desc="""A tuple with the following fields:
- bias reguralisation (0-10)
- FWHM of Gaussian smoothness of bias
- which maps to save (Corrected, Field) - a tuple of two boolean values""",
                                field='channel')
    tissues = traits.List(traits.Tuple(traits.Tuple(File(exists=True), traits.Int()), traits.Int(),
                                       traits.Tuple(traits.Bool, traits.Bool), traits.Tuple(traits.Bool, traits.Bool)),
                          desc="""A list of tuples (one per tissue) with the following fields:
- tissue probability map (4D), 1-based index to frame
- number of gaussians
- which maps to save [Native, DARTEL] - a tuple of two boolean values
- which maps to save [Unmodulated, Modulated] - a tuple of two boolean values""",
                          field='tissue')
    affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', field='warp.affreg',
                                        desc='mni, eastern, subj, none ')
    warping_regularization = traits.Float(field='warp.reg',
                                          desc='Aproximate distance between sampling points.')
    sampling_distance = traits.Float(field='warp.samp',
                                     desc='Sampling distance on data for parameter estimation')
    write_deformation_fields = traits.List(traits.Bool(), minlen=2, maxlen=2, field='warp.write',
                                           desc="Which deformation fields to write:[Inverse, Forward]")
class NewSegmentOutputSpec(TraitedSpec):
    """Output specification for SPM New Segment.

    The per-class outputs are nested lists: one inner list per tissue
    class, each holding one path per input channel file.
    """
    native_class_images = traits.List(traits.List(File(exists=True)), desc='native space probability maps')
    dartel_input_images = traits.List(traits.List(File(exists=True)), desc='dartel imported class images')
    normalized_class_images = traits.List(traits.List(File(exists=True)), desc='normalized class images')
    modulated_class_images = traits.List(traits.List(File(exists=True)), desc='modulated+normalized class images')
    transformation_mat = OutputMultiPath(File(exists=True), desc='Normalization transformation')
    bias_corrected_images = OutputMultiPath(File(exists=True), desc='bias corrected images')
    bias_field_images = OutputMultiPath(File(exists=True), desc='bias field images')
    forward_deformation_field = OutputMultiPath(File(exists=True))
    inverse_deformation_field = OutputMultiPath(File(exists=True))
class NewSegment(SPMCommand):
    """Use spm_preproc8 (New Segment) to separate structural images into different
    tissue classes. Supports multiple modalities.

    NOTE: This interface currently supports single channel input only

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=43

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> seg = spm.NewSegment()
    >>> seg.inputs.channel_files = 'structural.nii'
    >>> seg.inputs.channel_info = (0.0001, 60, (True, True))
    >>> seg.run() # doctest: +SKIP

    For VBM pre-processing [http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf],
    TPM.nii should be replaced by /path/to/spm8/toolbox/Seg/TPM.nii

    >>> seg = NewSegment()
    >>> seg.inputs.channel_files = 'structural.nii'
    >>> tissue1 = (('TPM.nii', 1), 2, (True,True), (False, False))
    >>> tissue2 = (('TPM.nii', 2), 2, (True,True), (False, False))
    >>> tissue3 = (('TPM.nii', 3), 2, (True,False), (False, False))
    >>> tissue4 = (('TPM.nii', 4), 2, (False,False), (False, False))
    >>> tissue5 = (('TPM.nii', 5), 2, (False,False), (False, False))
    >>> seg.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5]
    >>> seg.run() # doctest: +SKIP
    """

    input_spec = NewSegmentInputSpec
    output_spec = NewSegmentOutputSpec

    def __init__(self, **inputs):
        # In SPM12 New Segment became the default spatial->preproc routine;
        # on SPM8 it lives under tools->preproc8.
        _local_version = SPMCommand().version
        if _local_version and '12.' in _local_version:
            self._jobtype = 'spatial'
            self._jobname = 'preproc'
        else:
            self._jobtype = 'tools'
            self._jobname = 'preproc8'
        SPMCommand.__init__(self, **inputs)

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt in ['channel_files', 'channel_info']:
            # structure have to be recreated, because of some weird traits error
            new_channel = {}
            new_channel['vols'] = scans_for_fnames(self.inputs.channel_files)
            if isdefined(self.inputs.channel_info):
                info = self.inputs.channel_info
                new_channel['biasreg'] = info[0]
                new_channel['biasfwhm'] = info[1]
                new_channel['write'] = [int(info[2][0]), int(info[2][1])]
            return [new_channel]
        elif opt == 'tissues':
            new_tissues = []
            for tissue in val:
                new_tissue = {}
                # tpm is encoded as 'path,frame' (1-based frame of the 4D TPM).
                new_tissue['tpm'] = np.array([','.join([tissue[0][0], str(tissue[0][1])])], dtype=object)
                new_tissue['ngaus'] = tissue[1]
                new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])]
                new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])]
                new_tissues.append(new_tissue)
            return new_tissues
        elif opt == 'write_deformation_fields':
            return super(NewSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
        else:
            return super(NewSegment, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Predict output filenames per tissue class and channel file."""
        outputs = self._outputs().get()
        outputs['native_class_images'] = []
        outputs['dartel_input_images'] = []
        outputs['normalized_class_images'] = []
        outputs['modulated_class_images'] = []
        outputs['transformation_mat'] = []
        outputs['bias_corrected_images'] = []
        outputs['bias_field_images'] = []
        outputs['inverse_deformation_field'] = []
        outputs['forward_deformation_field'] = []
        # Default to SPM's five tissue classes unless `tissues` was given.
        n_classes = 5
        if isdefined(self.inputs.tissues):
            n_classes = len(self.inputs.tissues)
        for i in range(n_classes):
            outputs['native_class_images'].append([])
            outputs['dartel_input_images'].append([])
            outputs['normalized_class_images'].append([])
            outputs['modulated_class_images'].append([])
        for filename in self.inputs.channel_files:
            pth, base, ext = split_filename(filename)
            if isdefined(self.inputs.tissues):
                # Save flags: tissue[2]=(native, dartel),
                # tissue[3]=(unmodulated-normalized, modulated-normalized).
                for i, tissue in enumerate(self.inputs.tissues):
                    if tissue[2][0]:
                        outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i + 1, base)))
                    if tissue[2][1]:
                        outputs['dartel_input_images'][i].append(os.path.join(pth, "rc%d%s.nii" % (i + 1, base)))
                    if tissue[3][0]:
                        outputs['normalized_class_images'][i].append(os.path.join(pth, "wc%d%s.nii" % (i + 1, base)))
                    if tissue[3][1]:
                        outputs['modulated_class_images'][i].append(os.path.join(pth, "mwc%d%s.nii" % (i + 1, base)))
            else:
                # Without explicit tissue settings, expect native maps only.
                for i in range(n_classes):
                    outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i + 1, base)))
            outputs['transformation_mat'].append(os.path.join(pth, "%s_seg8.mat" % base))
            if isdefined(self.inputs.write_deformation_fields):
                if self.inputs.write_deformation_fields[0]:
                    outputs['inverse_deformation_field'].append(os.path.join(pth, "iy_%s.nii" % base))
                if self.inputs.write_deformation_fields[1]:
                    outputs['forward_deformation_field'].append(os.path.join(pth, "y_%s.nii" % base))
            if isdefined(self.inputs.channel_info):
                if self.inputs.channel_info[2][0]:
                    outputs['bias_corrected_images'].append(os.path.join(pth, "m%s.nii" % (base)))
                if self.inputs.channel_info[2][1]:
                    outputs['bias_field_images'].append(os.path.join(pth, "BiasField_%s.nii" % (base)))
        return outputs
class SmoothInputSpec(SPMCommandInputSpec):
    """Input specification for SPM spatial smoothing."""
    in_files = InputMultiPath(File(exists=True), field='data',
                              desc='list of files to smooth',
                              mandatory=True, copyfile=False)
    # Either a single isotropic value or a per-dimension 3-list (mm).
    fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
                         traits.Float(), field='fwhm',
                         desc='3-list of fwhm for each dimension')
    data_type = traits.Int(field='dtype',
                           desc='Data type of the output images')
    implicit_masking = traits.Bool(field='im',
                                   desc=('A mask implied by a particular'
                                         'voxel value'))
    out_prefix = traits.String('s', field='prefix', usedefault=True,
                               desc='smoothed output prefix')
class SmoothOutputSpec(TraitedSpec):
    """Output specification for SPM spatial smoothing."""
    smoothed_files = OutputMultiPath(File(exists=True), desc='smoothed files')
class Smooth(SPMCommand):
    """Use spm_smooth for 3D Gaussian smoothing of image volumes.

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=55

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> smooth = spm.Smooth()
    >>> smooth.inputs.in_files = 'functional.nii'
    >>> smooth.inputs.fwhm = [4, 4, 4]
    >>> smooth.run() # doctest: +SKIP
    """

    input_spec = SmoothInputSpec
    output_spec = SmoothOutputSpec
    _jobtype = 'spatial'
    _jobname = 'smooth'

    def _format_arg(self, opt, spec, val):
        """Convert an input trait value into its matlabbatch representation."""
        if opt == 'in_files':
            return scans_for_fnames(filename_to_list(val))
        if opt == 'fwhm':
            # SPM expects a 3-element kernel; expand scalars and
            # single-element lists to an isotropic triple.
            if not isinstance(val, list):
                return [val] * 3
            if len(val) == 1:
                return [val[0]] * 3
            return val
        return super(Smooth, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Predict smoothed filenames: out_prefix prepended to each input."""
        outputs = self._outputs().get()
        prefix = self.inputs.out_prefix
        outputs['smoothed_files'] = [
            fname_presuffix(in_file, prefix=prefix)
            for in_file in filename_to_list(self.inputs.in_files)
        ]
        return outputs
class DARTELInputSpec(SPMCommandInputSpec):
    """Input specification for DARTEL template creation.

    ``image_files`` is a list per tissue class, each inner list holding one
    (e.g. rc1*) image per subject.
    """
    image_files = traits.List(traits.List(File(exists=True)),
                              desc="A list of files to be segmented",
                              field='warp.images', copyfile=False, mandatory=True)
    template_prefix = traits.Str('Template', usedefault=True,
                                 field='warp.settings.template',
                                 desc='Prefix for template')
    regularization_form = traits.Enum('Linear', 'Membrane', 'Bending',
                                      field='warp.settings.rform',
                                      desc='Form of regularization energy term')
    # One tuple per outer iteration (3-12 allowed); converted to SPM's
    # struct form in DARTEL._format_arg.
    iteration_parameters = traits.List(traits.Tuple(traits.Range(1, 10),
                                                    traits.Tuple(traits.Float,
                                                                 traits.Float,
                                                                 traits.Float),
                                                    traits.Enum(1, 2, 4, 8, 16,
                                                                32, 64, 128,
                                                                256, 512),
                                                    traits.Enum(0, 0.5, 1, 2, 4,
                                                                8, 16, 32)),
                                       minlen=3,
                                       maxlen=12,
                                       field='warp.settings.param',
                                       desc="""List of tuples for each iteration
- Inner iterations
- Regularization parameters
- Time points for deformation model
- smoothing parameter
""")
    optimization_parameters = traits.Tuple(traits.Float, traits.Range(1, 8),
                                           traits.Range(1, 8),
                                           field='warp.settings.optim',
                                           desc="""Optimization settings a tuple
- LM regularization
- cycles of multigrid solver
- relaxation iterations
""")
class DARTELOutputSpec(TraitedSpec):
    """Output specification for DARTEL template creation."""
    final_template_file = File(exists=True, desc='final DARTEL template')
    template_files = traits.List(File(exists=True), desc='Templates from different stages of iteration')
    dartel_flow_fields = traits.List(File(exists=True), desc='DARTEL flow fields')
class DARTEL(SPMCommand):
    """Use spm DARTEL to create a template and flow fields

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=185

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> dartel = spm.DARTEL()
    >>> dartel.inputs.image_files = [['rc1s1.nii','rc1s2.nii'],['rc2s1.nii', 'rc2s2.nii']]
    >>> dartel.run() # doctest: +SKIP
    """

    input_spec = DARTELInputSpec
    output_spec = DARTELOutputSpec
    _jobtype = 'tools'
    _jobname = 'dartel'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt in ['image_files']:
            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
        elif opt == 'regularization_form':
            mapper = {'Linear': 0, 'Membrane': 1, 'Bending': 2}
            return mapper[val]
        elif opt == 'iteration_parameters':
            # Translate each (its, rparam, K, slam) tuple into the struct
            # that SPM's warp.settings.param field expects.
            params = []
            for param in val:
                new_param = {}
                new_param['its'] = param[0]
                new_param['rparam'] = list(param[1])
                new_param['K'] = param[2]
                new_param['slam'] = param[3]
                params.append(new_param)
            return params
        elif opt == 'optimization_parameters':
            new_param = {}
            new_param['lmreg'] = val[0]
            new_param['cyc'] = val[1]
            new_param['its'] = val[2]
            return [new_param]
        else:
            return super(DARTEL, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Predict template and flow-field filenames (written to cwd)."""
        outputs = self._outputs().get()
        outputs['template_files'] = []
        # DARTEL writes Template_1..Template_6; the 6th is the final one.
        for i in range(6):
            outputs['template_files'].append(os.path.realpath('%s_%d.nii' % (self.inputs.template_prefix, i + 1)))
        outputs['final_template_file'] = os.path.realpath('%s_6.nii' % self.inputs.template_prefix)
        outputs['dartel_flow_fields'] = []
        # One u_* flow field per subject, named from the first tissue class.
        for filename in self.inputs.image_files[0]:
            pth, base, ext = split_filename(filename)
            outputs['dartel_flow_fields'].append(os.path.realpath('u_%s_%s%s' % (base,
                                                                                 self.inputs.template_prefix,
                                                                                 ext)))
        return outputs
class DARTELNorm2MNIInputSpec(SPMCommandInputSpec):
    """Input specification for DARTEL normalization to MNI space."""
    template_file = File(exists=True,
                         desc="DARTEL template",
                         field='mni_norm.template', copyfile=False, mandatory=True)
    flowfield_files = InputMultiPath(File(exists=True),
                                     desc="DARTEL flow fields u_rc1*",
                                     field='mni_norm.data.subjs.flowfields',
                                     mandatory=True)
    apply_to_files = InputMultiPath(File(exists=True),
                                    desc="Files to apply the transform to",
                                    field='mni_norm.data.subjs.images',
                                    mandatory=True, copyfile=False)
    voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
                              desc="Voxel sizes for output file",
                              field='mni_norm.vox')
    # Bug fix: desc previously read "Voxel sizes for output file",
    # copy-pasted from `voxel_size` above.
    bounding_box = traits.Tuple(traits.Float, traits.Float, traits.Float,
                                traits.Float, traits.Float, traits.Float,
                                desc="Bounding box (in mm) for the output file",
                                field='mni_norm.bb')
    modulate = traits.Bool(field='mni_norm.preserve',
                           desc="Modulate out images - no modulation preserves concentrations")
    # Either a single isotropic value or a per-dimension 3-list (mm).
    fwhm = traits.Either(traits.List(traits.Float(), minlen=3, maxlen=3),
                         traits.Float(), field='mni_norm.fwhm',
                         desc='3-list of fwhm for each dimension')
class DARTELNorm2MNIOutputSpec(TraitedSpec):
    """Output specification for DARTEL normalization to MNI space."""
    normalized_files = OutputMultiPath(File(exists=True), desc='Normalized files in MNI space')
    normalization_parameter_file = File(exists=True, desc='Transform parameters to MNI space')
class DARTELNorm2MNI(SPMCommand):
    """Use spm DARTEL to normalize data to MNI space

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=188

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> nm = spm.DARTELNorm2MNI()
    >>> nm.inputs.template_file = 'Template_6.nii'
    >>> nm.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s3_Template.nii']
    >>> nm.inputs.apply_to_files = ['c1s1.nii', 'c1s3.nii']
    >>> nm.inputs.modulate = True
    >>> nm.run() # doctest: +SKIP
    """

    input_spec = DARTELNorm2MNIInputSpec
    output_spec = DARTELNorm2MNIOutputSpec
    _jobtype = 'tools'
    _jobname = 'dartel'

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt in ['template_file']:
            return np.array([val], dtype=object)
        elif opt in ['flowfield_files']:
            return scans_for_fnames(val, keep4d=True)
        elif opt in ['apply_to_files']:
            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
        elif opt == 'voxel_size':
            return list(val)
        elif opt == 'bounding_box':
            return list(val)
        elif opt == 'fwhm':
            if isinstance(val, list):
                return val
            else:
                return [val, val, val]
        else:
            return super(DARTELNorm2MNI, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Predict normalized filenames; prefix encodes smoothing/modulation."""
        outputs = self._outputs().get()
        pth, base, ext = split_filename(self.inputs.template_file)
        outputs['normalization_parameter_file'] = os.path.realpath(base + '_2mni.mat')
        outputs['normalized_files'] = []
        prefix = "w"
        if isdefined(self.inputs.modulate) and self.inputs.modulate:
            prefix = 'm' + prefix
        # NOTE(review): when fwhm is undefined the 's' (smoothed) prefix is
        # still assumed, and `self.inputs.fwhm > 0` compares a list against
        # an int when a 3-element fwhm was given (TypeError on Python 3) —
        # verify the intended condition.
        if not isdefined(self.inputs.fwhm) or self.inputs.fwhm > 0:
            prefix = 's' + prefix
        for filename in self.inputs.apply_to_files:
            pth, base, ext = split_filename(filename)
            outputs['normalized_files'].append(os.path.realpath('%s%s%s' % (prefix,
                                                                            base,
                                                                            ext)))
        return outputs
class CreateWarpedInputSpec(SPMCommandInputSpec):
    """Input specification for warping images with DARTEL flow fields."""
    image_files = InputMultiPath(File(exists=True),
                                 desc="A list of files to be warped",
                                 field='crt_warped.images', copyfile=False,
                                 mandatory=True)
    flowfield_files = InputMultiPath(File(exists=True),
                                     desc="DARTEL flow fields u_rc1*",
                                     field='crt_warped.flowfields',
                                     copyfile=False,
                                     mandatory=True)
    iterations = traits.Range(low=0, high=9,
                              desc=("The number of iterations: log2(number of "
                                    "time steps)"),
                              field='crt_warped.K')
    interp = traits.Range(low=0, high=7, field='crt_warped.interp',
                          desc='degree of b-spline used for interpolation')
    modulate = traits.Bool(field='crt_warped.jactransf',
                           desc="Modulate images")
class CreateWarpedOutputSpec(TraitedSpec):
    """Output specification for CreateWarped."""
    # NOTE(review): desc is nested inside File() rather than on the List —
    # presumably intended for the list itself; verify against other specs.
    warped_files = traits.List(File(exists=True, desc='final warped files'))
class CreateWarped(SPMCommand):
    """Apply a flow field estimated by DARTEL to create warped images

    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=190

    Examples
    --------
    >>> import nipype.interfaces.spm as spm
    >>> create_warped = spm.CreateWarped()
    >>> create_warped.inputs.image_files = ['rc1s1.nii', 'rc1s2.nii']
    >>> create_warped.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s2_Template.nii']
    >>> create_warped.run() # doctest: +SKIP
    """

    input_spec = CreateWarpedInputSpec
    output_spec = CreateWarpedOutputSpec
    _jobtype = 'tools'
    _jobname = 'dartel'

    def _format_arg(self, opt, spec, val):
        """Convert an input trait value into its matlabbatch representation."""
        if opt == 'image_files':
            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
        if opt == 'flowfield_files':
            return scans_for_fnames(val, keep4d=True)
        return super(CreateWarped, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Predict warped filenames: 'mw' prefix when modulating, else 'w'."""
        outputs = self._outputs().get()
        modulated = isdefined(self.inputs.modulate) and self.inputs.modulate
        prefix = 'mw' if modulated else 'w'
        warped = []
        for image in self.inputs.image_files:
            _, stem, ext = split_filename(image)
            warped.append(os.path.realpath('%s%s%s' % (prefix, stem, ext)))
        outputs['warped_files'] = warped
        return outputs
class ApplyDeformationFieldInputSpec(SPMCommandInputSpec):
    """Input specification for applying an SPM deformation field."""
    in_files = InputMultiPath(File(exists=True), mandatory=True, field='fnames')
    deformation_field = File(exists=True, mandatory=True, field='comp{1}.def')
    # Defines the voxel grid/space of the resampled output images.
    reference_volume = File(exists=True, mandatory=True,
                            field='comp{2}.id.space')
    interp = traits.Range(low=0, high=7, field='interp',
                          desc='degree of b-spline used for interpolation')
class ApplyDeformationFieldOutputSpec(TraitedSpec):
    """Output specification for ApplyDeformations."""
    out_files = OutputMultiPath(File(exists=True))
class ApplyDeformations(SPMCommand):
    """Apply an SPM deformation field (util->defs) to a set of images."""

    input_spec = ApplyDeformationFieldInputSpec
    output_spec = ApplyDeformationFieldOutputSpec
    _jobtype = 'util'
    _jobname = 'defs'

    def _format_arg(self, opt, spec, val):
        """Convert an input trait value into its matlabbatch representation."""
        # Single-file traits are wrapped in a list before conversion.
        if opt == 'deformation_field':
            return scans_for_fnames([val], keep4d=True, separate_sessions=False)
        if opt == 'reference_volume':
            return scans_for_fnames([val], keep4d=False, separate_sessions=False)
        if opt == 'in_files':
            return scans_for_fnames(val, keep4d=False, separate_sessions=False)
        return super(ApplyDeformations, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Warped copies are written to the cwd with a 'w' prefix."""
        outputs = self._outputs().get()
        warped = []
        for in_file in self.inputs.in_files:
            fname = os.path.split(in_file)[1]
            warped.append(os.path.realpath('w%s' % fname))
        outputs['out_files'] = warped
        return outputs
class VBMSegmentInputSpec(SPMCommandInputSpec):
    """Input specification for the VBM8 toolbox segmentation (estwrite)."""
    in_files = InputMultiPath(
        File(exists=True),
        desc="A list of files to be segmented",
        field='estwrite.data', copyfile=False, mandatory=True)
    tissues = File(
        exists=True, field='estwrite.tpm',
        desc='tissue probability map')
    gaussians_per_class = traits.Tuple(
        (2, 2, 2, 3, 4, 2), *([traits.Int()] * 6),
        usedefault=True,
        desc='number of gaussians for each tissue class')
    bias_regularization = traits.Enum(
        0.0001,
        (0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10),
        field='estwrite.opts.biasreg', usedefault=True,
        desc='no(0) - extremely heavy (10)')
    bias_fwhm = traits.Enum(
        60,
        (30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf'),
        field='estwrite.opts.biasfwhm',
        usedefault=True,
        desc='FWHM of Gaussian smoothness of bias')
    sampling_distance = traits.Float(
        3, usedefault=True, field='estwrite.opts.samp',
        desc='Sampling distance on data for parameter estimation')
    warping_regularization = traits.Float(
        4, usedefault=True, field='estwrite.opts.warpreg',
        desc='Controls balance between parameters and data')
    # 'high' routes normalization through DARTEL (see dartel_template).
    spatial_normalization = traits.Enum(
        'high', 'low', usedefault=True,)
    dartel_template = File(
        exists=True,
        field='estwrite.extopts.dartelwarp.normhigh.darteltpm')
    use_sanlm_denoising_filter = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.extopts.sanlm',
        desc="0=No denoising, 1=denoising,2=denoising multi-threaded")
    mrf_weighting = traits.Float(
        0.15, usedefault=True, field='estwrite.extopts.mrf')
    cleanup_partitions = traits.Int(
        1, usedefault=True, field='estwrite.extopts.cleanup',
        desc="0=None,1=light,2=thorough")
    display_results = traits.Bool(
        True, usedefault=True, field='estwrite.extopts.print')
    # Per-tissue output switches (native / warped / modulated / dartel).
    gm_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.GM.native',)
    gm_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.GM.warped',)
    gm_modulated_normalized = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.output.GM.modulated',
        desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
    gm_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.GM.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    wm_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.WM.native',)
    wm_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.WM.warped',)
    wm_modulated_normalized = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.output.WM.modulated',
        desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
    wm_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.WM.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    csf_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.CSF.native',)
    csf_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.CSF.warped',)
    csf_modulated_normalized = traits.Range(
        0, 2, 2, usedefault=True, field='estwrite.output.CSF.modulated',
        desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only')
    csf_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.CSF.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    bias_corrected_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.bias.native',)
    bias_corrected_normalized = traits.Bool(
        True, usedefault=True, field='estwrite.output.bias.warped',)
    bias_corrected_affine = traits.Bool(
        False, usedefault=True, field='estwrite.output.bias.affine',)
    pve_label_native = traits.Bool(
        False, usedefault=True, field='estwrite.output.label.native')
    pve_label_normalized = traits.Bool(
        False, usedefault=True, field='estwrite.output.label.warped')
    pve_label_dartel = traits.Range(
        0, 2, 0, usedefault=True, field='estwrite.output.label.dartel',
        desc="0=None,1=rigid(SPM8 default),2=affine")
    jacobian_determinant = traits.Bool(
        False, usedefault=True, field='estwrite.jacobian.warped')
    deformation_field = traits.Tuple(
        (0, 0), traits.Bool, traits.Bool, usedefault=True,
        field='estwrite.output.warps',
        desc='forward and inverse field')
class VBMSegmentOuputSpec(TraitedSpec):
    """Outputs of the VBM8 segmentation interface.

    NOTE(review): the class name keeps the historical 'Ouput' (sic)
    spelling because it is referenced by name as ``output_spec``.
    """
    # Per-tissue outputs: one inner list per class (GM, WM, CSF),
    # filled by VBMSegment._list_outputs.
    native_class_images = traits.List(traits.List(File(exists=True)),
                                      desc='native space probability maps')
    dartel_input_images = traits.List(traits.List(File(exists=True)),
                                      desc='dartel imported class images')
    normalized_class_images = traits.List(traits.List(File(exists=True)),
                                          desc='normalized class images')
    modulated_class_images = traits.List(traits.List(File(exists=True)),
                                         desc='modulated+normalized class images')
    # Flat lists: one entry per input file.
    transformation_mat = OutputMultiPath(File(exists=True),
                                         desc='Normalization transformation')
    bias_corrected_images = OutputMultiPath(
        File(exists=True),
        desc='bias corrected images')
    normalized_bias_corrected_images = OutputMultiPath(
        File(exists=True),
        desc='bias corrected images')
    pve_label_native_images = OutputMultiPath(File(exists=True))
    pve_label_normalized_images = OutputMultiPath(File(exists=True))
    pve_label_registered_images = OutputMultiPath(File(exists=True))
    forward_deformation_field = OutputMultiPath(File(exists=True))
    inverse_deformation_field = OutputMultiPath(File(exists=True))
    jacobian_determinant_images = OutputMultiPath(File(exists=True))
class VBMSegment(SPMCommand):
    """Use VBM8 toolbox to separate structural images into different
    tissue classes.

    Output filenames are predicted from the input basenames plus the
    fixed prefixes VBM8 uses (``p<i>``, ``rp<i>``, ``w``/``mw``/``m0w``,
    ``p0``, ``m``, ``y_``/``iy_``); see :meth:`_list_outputs`.

    Example
    -------
    >>> import nipype.interfaces.spm as spm
    >>> seg = spm.VBMSegment()
    >>> seg.inputs.tissues = 'TPM.nii'
    >>> seg.inputs.dartel_template = 'Template_1_IXI550_MNI152.nii'
    >>> seg.inputs.bias_corrected_native = True
    >>> seg.inputs.gm_native = True
    >>> seg.inputs.wm_native = True
    >>> seg.inputs.csf_native = True
    >>> seg.inputs.pve_label_native = True
    >>> seg.inputs.deformation_field = (True, False)
    >>> seg.run() # doctest: +SKIP
    """
    input_spec = VBMSegmentInputSpec
    output_spec = VBMSegmentOuputSpec
    _jobtype = 'tools'
    _jobname = 'vbm8'

    def _list_outputs(self):
        """Predict the files VBM8 writes next to each input image."""
        outputs = self._outputs().get()
        do_dartel = self.inputs.spatial_normalization
        # Outputs written via DARTEL normalization carry an extra 'r'
        # inside their prefix (e.g. 'wrp1...' instead of 'wp1...').
        dartel_px = ''
        if do_dartel:
            dartel_px = 'r'
        # Per-tissue containers: one sub-list per class (GM, WM, CSF).
        outputs['native_class_images'] = [[], [], []]
        outputs['dartel_input_images'] = [[], [], []]
        outputs['normalized_class_images'] = [[], [], []]
        outputs['modulated_class_images'] = [[], [], []]
        outputs['transformation_mat'] = []
        outputs['bias_corrected_images'] = []
        outputs['normalized_bias_corrected_images'] = []
        outputs['inverse_deformation_field'] = []
        outputs['forward_deformation_field'] = []
        outputs['jacobian_determinant_images'] = []
        outputs['pve_label_native_images'] = []
        outputs['pve_label_normalized_images'] = []
        outputs['pve_label_registered_images'] = []
        for filename in self.inputs.in_files:
            pth, base, ext = split_filename(filename)
            outputs['transformation_mat'].append(
                os.path.join(pth, "%s_seg8.mat" % base))
            for i, tis in enumerate(['gm', 'wm', 'csf']):
                # native space
                if getattr(self.inputs, '%s_native' % tis):
                    outputs['native_class_images'][i].append(
                        os.path.join(pth, "p%d%s.nii" % (i + 1, base)))
                if getattr(self.inputs, '%s_dartel' % tis) == 1:
                    outputs['dartel_input_images'][i].append(
                        os.path.join(pth, "rp%d%s.nii" % (i + 1, base)))
                elif getattr(self.inputs, '%s_dartel' % tis) == 2:
                    outputs['dartel_input_images'][i].append(
                        os.path.join(pth, "rp%d%s_affine.nii" % (i + 1, base)))
                # normalized space
                if getattr(self.inputs, '%s_normalized' % tis):
                    outputs['normalized_class_images'][i].append(
                        os.path.join(pth, "w%sp%d%s.nii" % (dartel_px, i + 1, base)))
                if getattr(self.inputs, '%s_modulated_normalized' % tis) == 1:
                    outputs['modulated_class_images'][i].append(os.path.join(
                        pth, "mw%sp%d%s.nii" % (dartel_px, i + 1, base)))
                elif getattr(self.inputs, '%s_modulated_normalized' % tis) == 2:
                    # BUGFIX: 'm0w*' files are the non-linear-only *modulated*
                    # maps (see the trait desc), so they belong in
                    # 'modulated_class_images'; previously they were appended
                    # to 'normalized_class_images'.
                    outputs['modulated_class_images'][i].append(os.path.join(
                        pth, "m0w%sp%d%s.nii" % (dartel_px, i + 1, base)))
            if self.inputs.pve_label_native:
                outputs['pve_label_native_images'].append(
                    os.path.join(pth, "p0%s.nii" % (base)))
            if self.inputs.pve_label_normalized:
                outputs['pve_label_normalized_images'].append(
                    os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base)))
            if self.inputs.pve_label_dartel == 1:
                outputs['pve_label_registered_images'].append(
                    os.path.join(pth, "rp0%s.nii" % (base)))
            elif self.inputs.pve_label_dartel == 2:
                outputs['pve_label_registered_images'].append(
                    os.path.join(pth, "rp0%s_affine.nii" % (base)))
            if self.inputs.bias_corrected_native:
                outputs['bias_corrected_images'].append(
                    os.path.join(pth, "m%s.nii" % (base)))
            if self.inputs.bias_corrected_normalized:
                outputs['normalized_bias_corrected_images'].append(
                    os.path.join(pth, "wm%s%s.nii" % (dartel_px, base)))
            if self.inputs.deformation_field[0]:
                outputs['forward_deformation_field'].append(
                    os.path.join(pth, "y_%s%s.nii" % (dartel_px, base)))
            if self.inputs.deformation_field[1]:
                outputs['inverse_deformation_field'].append(
                    os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base)))
            # Jacobian determinants are only produced for DARTEL warps.
            if self.inputs.jacobian_determinant and do_dartel:
                outputs['jacobian_determinant_images'].append(
                    os.path.join(pth, "jac_wrp1%s.nii" % (base)))
        return outputs

    def _format_arg(self, opt, spec, val):
        """Convert input to appropriate format for spm
        """
        if opt in ['in_files']:
            return scans_for_fnames(val, keep4d=True)
        elif opt in ['spatial_normalization']:
            # NOTE(review): only the 'low' value is translated here; any
            # other value falls through and returns None -- confirm this
            # matches the VBM8 batch specification.
            if val == 'low':
                return {'normlow': []}
        elif opt in ['dartel_template']:
            return np.array([val], dtype=object)
        elif opt in ['deformation_field']:
            # SPM expects the (forward, inverse) booleans as 0/1 integers.
            return super(VBMSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
        else:
            return super(VBMSegment, self)._format_arg(opt, spec, val)

    def _parse_inputs(self):
        if self.inputs.spatial_normalization == 'low':
            einputs = super(VBMSegment, self)._parse_inputs(
                skip=('spatial_normalization', 'dartel_template'))
            einputs[0]['estwrite']['extopts']['dartelwarp'] = {'normlow': 1}
            return einputs
        else:
            # BUGFIX: ('spatial_normalization') was a plain string, not a
            # tuple; the skip only worked by accident through substring
            # containment. Make it an explicit one-element tuple.
            return super(VBMSegment, self)._parse_inputs(
                skip=('spatial_normalization',))
| sgiavasis/nipype | nipype/interfaces/spm/preprocess.py | Python | bsd-3-clause | 77,455 | [
"Gaussian"
] | 50e6f1c119c36c272ac9fb13263280b2a7a7bbbb81f212e0cf485eb25ca260c2 |
import os,sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from rdkit import Chem
from rdkit.Chem import MACCSkeys
from utils import (ohe_label,
PAD_ID,
remove_salts)
# Molecules whose canonical SMILES length falls outside
# [min_char_len, max_char_len] are discarded.
max_char_len = 100
min_char_len = 20
data_dir = 'data/compounds'
# 166-bit MACCS fingerprints (bit 0 of the 167-bit key set is unused).
out_file = 'data/features_fp166.npz'
sdf_list = [os.path.join(data_dir,f) for f in os.listdir(data_dir) if f.endswith('.sdf')]
fps = []               # MACCS fingerprint bit vectors (model inputs)
encoded_smiles = []    # one-hot encoded SMILES strings (model labels)
lengths = []           # per-position 0/1 weights masking out padding
for sdf in sdf_list:
    print "Processing file %s"%sdf
    suppl = Chem.SDMolSupplier(sdf)
    for mol in tqdm(suppl):
        # SDMolSupplier yields None for unparsable records; MolToSmiles
        # then raises, so skip those molecules.
        try:
            smiles = Chem.MolToSmiles(mol, isomericSmiles=True, kekuleSmiles=True)
        except:
            continue
        smiles = remove_salts(smiles)
        smiles_length = len(smiles)
        if smiles_length < min_char_len or smiles_length > max_char_len:
            continue
        fp = MACCSkeys.GenMACCSKeys(mol)
        # Drop the unused leading bit -> 166 features per molecule.
        fp_list = [int(bit) for bit in list(fp.ToBitString())[1:]]
        fps.append(fp_list)
        ohe = ohe_label(smiles)
        # Build a 0/1 weight per character position: 0 where the one-hot
        # row encodes padding (PAD_ID), 1 otherwise.
        len_list = []
        for array in ohe:
            idx = np.argmax(array)
            weight_val = 0. if idx == PAD_ID else 1.
            len_list.append(weight_val)
        lengths.append(len_list)
        encoded_smiles.append(ohe)
samples = np.array(fps)
print "Samples shape {}".format(samples.shape)
labels = np.array(encoded_smiles)
print "Labels shape {}".format(labels.shape)
weights = np.array(lengths)
print "Weights shape {}".format(weights.shape)
np.savez(out_file, samples=samples, labels=labels, weights=weights)
| kdmarshall/FingerprintDecoder | scripts/featurize_maccs.py | Python | mit | 1,424 | [
"RDKit"
] | e044bdbec1a8a32e833809f1918cbe690208f08119e8455efc63fa538b2774c5 |
#!/usr/bin/env python
"""
Kill or delete DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from COMDIRAC.Interfaces import DSession
class Params:
  """Mutable container for the dkill command line switches.

  The attribute names ``delete``, ``selectAll`` and ``verbose`` are read
  directly by the script body, so they are part of the public interface.
  The ``set*`` methods double as ``Script.registerSwitch`` callbacks, which
  is why each accepts an optional (ignored) switch argument.
  """

  def __init__( self ):
    # Every switch starts out disabled.
    self.delete = self.selectAll = self.verbose = False

  # --- switch callbacks -------------------------------------------------

  def setDelete( self, arg = None ):
    self.delete = True

  def setSelectAll( self, arg = None ):
    self.selectAll = True

  def setVerbose( self, arg = None ):
    self.verbose = True

  # --- accessors --------------------------------------------------------

  def getDelete( self ):
    return self.delete

  def getSelectAll( self ):
    return self.selectAll

  def getVerbose( self ):
    return self.verbose
# Shared switch state, mutated by the callbacks registered below.
params = Params()
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] JobID ...' % Script.scriptName,
                                     'Arguments:',
                                     '  JobID:    a DIRAC job identifier', ] ) )
Script.registerSwitch( "D", "delete", "delete job", params.setDelete )
Script.registerSwitch( "a", "all", "select all jobs", params.setSelectAll )
Script.registerSwitch( "v", "verbose", "verbose output", params.setVerbose )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
exitCode = 0
# Imported after parseCommandLine so the DIRAC configuration is in place.
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
from DIRAC.Core.DISET.RPCClient import RPCClient
wmsClient = WMSClient()
# Start from the JobIDs given on the command line (strings); with --all,
# the current user's job IDs (ints) are appended below.
jobs = args
if params.getSelectAll():
  session = DSession()
  Script.enableCS()
  result = session.getUserName()
  if result["OK"]:
    userName = result["Value"]
    monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
    result = monitoring.getJobs( {"Owner" : userName} )
    if not result['OK']:
      print "ERROR:", result['Message']
    else:
      jobs += map ( int, result['Value'] )
  else:
    print "ERROR:", result["Message"]
errors = []
for job in jobs:
  result = None
  # --delete removes the job entirely; the default action only kills it.
  if params.delete:
    result = wmsClient.deleteJob( job )
  else:
    result = wmsClient.killJob( job )
  if not result['OK']:
    errors.append( result['Message'] )
    # Any failed job makes the whole script exit with code 2.
    exitCode = 2
  elif params.getVerbose():
    action = "killed"
    if params.getDelete(): action = "deleted"
    print "%s job %s" % ( action, job )
for error in errors:
  print "ERROR: %s" % error
DIRAC.exit( exitCode )
| pigay/COMDIRAC | Interfaces/scripts/dkill.py | Python | gpl-3.0 | 2,337 | [
"DIRAC"
] | ddf53f9c01b5db1f76257af25f6ee1dcbd7c1e78e8edb488ee10f3eaa998e764 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
Provides an implementation of a constant node speed. In this case a speed of a
node is constant at a given value.
"""
from math import fabs
from sim2net.speed._speed import Speed
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
class Constant(Speed):
    """
    This class implements a node speed that never changes: both
    :meth:`current` and :meth:`get_new` always report the same fixed
    value.
    """
    def __init__(self, speed):
        """
        *Parameters*:
            - **speed** (`float`): a value of the node speed; its
              absolute value is stored and reported.

        *Example*:

        .. testsetup::

            from sim2net.speed.constant import Constant

        .. doctest::

            >>> speed = Constant(5.0)
            >>> speed.current
            5.0
            >>> speed.get_new()
            5.0
            >>> speed = Constant(-5.0)
            >>> speed.current
            5.0
            >>> speed.get_new()
            5.0
        """
        super(Constant, self).__init__(Constant.__name__)
        check_argument_type(Constant.__name__, 'speed', float, speed,
                            self.logger)
        # Negative inputs are accepted but stored as their magnitude.
        self.__fixed_speed = fabs(float(speed))

    @property
    def current(self):
        """
        (*Property*) The absolute value of the current speed of type
        `float`.
        """
        return self.__fixed_speed

    def get_new(self):
        """
        Returns the absolute value of the given node speed of type
        `float`.
        """
        return self.__fixed_speed
| mkalewski/sim2net | sim2net/speed/constant.py | Python | mit | 2,039 | [
"VisIt"
] | dabcc65dc555604ea1863a79363efd917878b1128758ba4502b44d3a49a2f20d |
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# License: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater, assert_true, assert_raises
def f(x):
    """Toy 1-D target used by the interpolation tests: x * sin(x)."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    return x * np.sin(x)


# Training inputs (column vectors), held-out query points, and targets.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a one-dimensional Gaussian Process model.
    # Check random start optimization.
    # Test the interpolating property.
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_start=random_start, verbose=False).fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
    # An interpolating GP reproduces the training targets exactly with zero
    # predictive variance there; off-sample variance is only loosely bounded.
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
                and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the interpolating property.
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = g(X).ravel()
    # One anisotropic length-scale bound per input dimension.
    thetaL = [1e-4] * 2
    thetaU = [1e-1] * 2
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=thetaL,
                         thetaU=thetaU,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
    # The optimized hyperparameters must respect the box constraints
    # (up to floating-point epsilon).
    eps = np.finfo(gp.theta_.dtype).eps
    assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
    assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
               random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the GP interpolation for 2D output
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    # Two identical output columns exercise multi-output prediction.
    f = lambda x: np.vstack((g(x), g(x))).T
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = f(X)
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
                         thetaU=[1e-1] * 2,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
def test_wrong_number_of_outputs():
    # Fitting with mismatched sample counts (2 input rows vs. 3 targets)
    # must raise a ValueError.
    bad_X = [[1, 2, 3], [4, 5, 6]]
    bad_y = [1, 2, 3]
    model = GaussianProcess()
    assert_raises(ValueError, model.fit, bad_X, bad_y)
def test_more_builtin_correlation_models(random_start=1):
    # Repeat the 1D/2D interpolation checks for several built-in
    # correlation models referenced by their string names.
    for corr_name in ('absolute_exponential', 'squared_exponential',
                      'cubic', 'linear'):
        test_1d(regr='constant', corr=corr_name, random_start=random_start)
        test_2d(regr='constant', corr=corr_name, random_start=random_start)
        test_2d_2d(regr='constant', corr=corr_name, random_start=random_start)
def test_ordinary_kriging():
    # Repeat test_1d and test_2d with given regression weights (beta0) for
    # different regression models (Ordinary Kriging).
    # The length of beta0 matches the size of the regression basis for the
    # given model ('linear'/'quadratic') and input dimension.
    test_1d(regr='linear', beta0=[0., 0.5])
    test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
    test_2d(regr='linear', beta0=[0., 0.5, 0.5])
    test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
    test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
    test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
    # With input/output normalization disabled the GP must still
    # interpolate the training data exactly.
    model = GaussianProcess(normalize=False)
    model.fit(X, y)
    assert_true(np.allclose(model.predict(X), y))
def test_batch_size():
    # TypeError when using batch_size on Python 3, see
    # https://github.com/scikit-learn/scikit-learn/issues/7329 for more
    # details
    # Non-regression check: these predict() calls must simply not raise.
    gp = GaussianProcess()
    gp.fit(X, y)
    gp.predict(X, batch_size=1)
    gp.predict(X, batch_size=1, eval_MSE=True)
def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the reduced likelihood function of the optimal theta.
    n_samples, n_features = 50, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
    best_likelihood = -np.inf
    for random_start in range(1, 5):
        gp = GaussianProcess(regr="constant", corr="squared_exponential",
                             theta0=[1e-0] * n_features,
                             thetaL=[1e-4] * n_features,
                             thetaU=[1e+1] * n_features,
                             random_start=random_start, random_state=0,
                             verbose=False).fit(X, y)
        rlf = gp.reduced_likelihood_function()[0]
        # More restarts can only improve (or match) the best likelihood.
        assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
        best_likelihood = rlf
def test_mse_solving():
    # test the MSE estimate to be sane.
    # non-regression test for ignoring off-diagonals of feature covariance,
    # testing with nugget that renders covariance useless, only
    # using the mean function, with low effective rank of data
    gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
                         thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
                         optimizer='Welch', regr="linear", random_state=0)
    X, y = make_regression(n_informative=3, n_features=60, noise=50,
                           random_state=0, effective_rank=1)
    gp.fit(X, y)
    # The mean predictive variance must stay bounded (sane), not blow up.
    assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| herilalaina/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | Python | bsd-3-clause | 7,049 | [
"Gaussian"
] | a2031c5b08d5d24395c548a957b053e6e50dea5a914192464a5b5d3bf65359f0 |
# Copyright 2013 by Michiel de Hoon. All rights reserved.
# Copyright 2016 modified by Kevin Ha
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
from Bio.motifs import matrix
from Bio.Alphabet import NucleotideAlphabet
import platform
class ExtendedPositionSpecificScoringMatrix(
        matrix.PositionSpecificScoringMatrix):
    """This new class inherits Bio.motifs.matrix.PositionSpecificScoringMatrix.
    It has been modified to support any kind of Alphabet. This allows us to
    perform motif scans on RNA sequence as well as RNA secondary structure.
    The main change is the fact that the 'ACGT' hard-coding has been replaced
    with whatever letters are in the Alphabet of the matrix. This seems to be
    sufficient enough for our purposes.
    """
    def _py_calculate(self, sequence, m, n):
        """Handles the default calcuate() method in Python.
        Moved from _calculate in the except clause below.

        ``m`` is the motif length and ``n`` the sequence length; each
        length-``m`` window is scored, and a window containing a letter
        absent from the matrix scores NaN.
        """
        # The C code handles mixed case so Python version must too:
        sequence = sequence.upper()
        scores = []
        for i in range(n - m + 1):
            score = 0.0
            for position in range(m):
                letter = sequence[i + position]
                try:
                    score += self[letter][position]
                except KeyError:
                    # Unknown letter: the whole window scores NaN.
                    score = float("nan")
                    break
            scores.append(score)
        return scores
    # Make sure that we use C-accelerated PWM calculations if running under CPython.
    # Fall back to the slower Python implementation if Jython or IronPython.
    try:
        from . import _pwm
        # NOTE: because the import above happens inside the class body,
        # ``_pwm`` becomes a *class attribute*, which is why it is reached
        # via ``self._pwm`` in the method below.
        def _calculate(self, sequence, m, n):
            # Only RNA and DNA is supported right now. If sequence is
            # secondary structure, then use Python implementation.
            if not isinstance(self.alphabet, NucleotideAlphabet):
                return self._py_calculate(sequence, m, n)
            letters = ''.join(sorted(self.alphabet.letters))
            logodds = [[self[letter][i] for letter in letters]
                       for i in range(m)]
            return self._pwm.calculate(sequence, logodds)
    except ImportError:
        if platform.python_implementation() == 'CPython':
            # The accelerated extension is expected to build on CPython;
            # a missing module there is a packaging error.
            raise
        else:
            def _calculate(self, sequence, m, n):
                return self._py_calculate(sequence, m, n)
    def calculate(self, sequence):
        """Score every position of ``sequence`` against this matrix.

        Returns a single float when exactly one window fits, otherwise a
        list of floats (one per window).
        """
        # TODO - Force uppercase here and optimise switch statement in C
        # by assuming upper case?
        sequence = str(sequence)
        m = self.length
        n = len(sequence)
        scores = self._calculate(sequence, m, n)
        if len(scores) == 1:
            return scores[0]
        else:
            return scores
| morrislab/rnascan | rnascan/BioAddons/motifs/matrix.py | Python | agpl-3.0 | 2,895 | [
"Biopython"
] | 21c81620a95e71c59b70bd656b4143b95a72d6dea4153cf6b9abba6e7788dfdd |
#!/usr/bin/env python
r"""
Example of solving Laplace's equation on a block domain refined with level 1
hanging nodes.
The domain is progressively refined towards the edge/face of the block, where
Dirichlet boundary conditions are prescribed by an oscillating function.
Find :math:`u` such that:
.. math::
\int_{\Omega} \nabla v \cdot \nabla u = 0
\;, \quad \forall s \;.
Notes
-----
The implementation of the mesh refinement with level 1 hanging nodes is a
proof-of-concept code with many unresolved issues. The main problem is the fact
that a user needs to input the cells to refine at each level, while taking care
of the following constraints:
- the level 1 hanging nodes constraint: a cell that has a less-refined
neighbour cannot be refined;
- the implementation constraint: a cell with a refined neighbour cannot be
refined.
The hanging nodes are treated by a basis transformation/DOF substitution, which
has to be applied explicitly by the user:
- call ``field.substitute_dofs(subs)`` before assembling and solving;
- then call ``field.restore_dofs()`` before saving results.
Usage Examples
--------------
Default options, 2D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py output
$ python postproc.py output/hanging.vtk --wireframe -b -d'u,plot_warp_scalar'
Default options, 3D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py -3 output
$ python postproc.py output/hanging.vtk --wireframe -b --3d
Finer initial domain, 2D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py --shape=11,11 output
$ python postproc.py output/hanging.vtk --wireframe -b -d'u,plot_warp_scalar'
Bi-quadratic approximation, 2D, storing results in 'output' directory::
$ python examples/diffusion/laplace_refine_interactive.py --order=2 output
# View solution with higher order DOFs removed.
$ python postproc.py output/hanging.vtk --wireframe -b -d'u,plot_warp_scalar'
# View full solution on a mesh adapted for visualization.
$ python postproc.py output/hanging_u.vtk --wireframe -b -d'u,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete import (FieldVariable, Integral, Equation, Equations,
Function, Problem)
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.conditions import (Conditions, EssentialBC)
import sfepy.discrete.fem.refine_hanging as rh
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.terms import Term
def refine_towards_facet(domain0, grading, axis):
    """Successively refine ``domain0`` near the coordinates in ``grading``.

    For each coordinate, the cells matched by the region selector
    ``'vertices in (<axis> <coor>)'`` are refined by one level; the
    hanging-node DOF substitutions are threaded through the steps.

    Returns the refined domain and the final substitutions (or ``None``
    when ``grading`` is empty).
    """
    refined = domain0
    subs = None
    for coor in grading:
        flags = nm.zeros(refined.mesh.n_el, dtype=nm.uint8)
        aux = refined.create_region('aux',
                                    'vertices in (%s %.10f)' % (axis, coor),
                                    add_to_regions=False)
        flags[aux.cells] = 1
        refined, subs = rh.refine(refined, flags, subs=subs)
    return refined, subs
# Help strings for the command line options defined in main().
helps = {
    'output_dir' :
    'output directory',
    'dims' :
    'dimensions of the block [default: %(default)s]',
    'shape' :
    'shape (counts of nodes in x, y[, z]) of the block [default: %(default)s]',
    'centre' :
    'centre of the block [default: %(default)s]',
    '3d' :
    'generate a 3D block',
    'order' :
    'field approximation order',
}
def main():
    """Solve Laplace's equation on a block mesh refined with level-1
    hanging nodes towards the x-minimum facet (see module docstring)."""
    parser = ArgumentParser(description=__doc__.rstrip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('output_dir', help=helps['output_dir'])
    parser.add_argument('--dims', metavar='dims',
                        action='store', dest='dims',
                        default='1.0,1.0,1.0', help=helps['dims'])
    parser.add_argument('--shape', metavar='shape',
                        action='store', dest='shape',
                        default='7,7,7', help=helps['shape'])
    parser.add_argument('--centre', metavar='centre',
                        action='store', dest='centre',
                        default='0.0,0.0,0.0', help=helps['centre'])
    parser.add_argument('-3', '--3d',
                        action='store_true', dest='is_3d',
                        default=False, help=helps['3d'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=1, help=helps['order'])
    options = parser.parse_args()
    dim = 3 if options.is_3d else 2
    # NOTE: eval() on the option strings is acceptable for an example
    # script, but must not be exposed to untrusted input.
    dims = nm.array(eval(options.dims), dtype=nm.float64)[:dim]
    shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim]
    centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim]
    output('dimensions:', dims)
    output('shape:     ', shape)
    output('centre:    ', centre)
    mesh0 = gen_block_mesh(dims, shape, centre, name='block-fem',
                           verbose=True)
    domain0 = FEDomain('d', mesh0)
    bbox = domain0.get_mesh_bounding_box()
    min_x, max_x = bbox[:, 0]
    eps = 1e-8 * (max_x - min_x)
    # Geometric grading of refinement coordinates towards the x-minimum
    # facet: each successive level halves the refined strip's width.
    cnt = (shape[0] - 1) // 2
    g0 = 0.5 * dims[0]
    grading = nm.array([g0 / 2**ii for ii in range(cnt)]) + eps + centre[0] - g0
    domain, subs = refine_towards_facet(domain0, grading, 'x <')
    omega = domain.create_region('Omega', 'all')
    gamma1 = domain.create_region('Gamma1',
                                  'vertices in (x < %.10f)' % (min_x + eps),
                                  'facet')
    gamma2 = domain.create_region('Gamma2',
                                  'vertices in (x > %.10f)' % (max_x - eps),
                                  'facet')
    field = Field.from_args('fu', nm.float64, 1, omega,
                            approx_order=options.order)
    if subs is not None:
        # Apply the hanging-node constraints via DOF substitution before
        # assembling and solving (see module docstring).
        field.substitute_dofs(subs)
    u = FieldVariable('u', 'unknown', field)
    v = FieldVariable('v', 'test', field, primary_var_name='u')
    integral = Integral('i', order=2*options.order)
    t1 = Term.new('dw_laplace(v, u)',
                  integral, omega, v=v, u=u)
    eq = Equation('eq', t1)
    eqs = Equations([eq])
    def u_fun(ts, coors, bc=None, problem=None):
        """
        Define a displacement depending on the y coordinate.
        """
        if coors.shape[1] == 2:
            min_y, max_y = bbox[:, 1]
            y = (coors[:, 1] - min_y) / (max_y - min_y)
            val = (max_y - min_y) * nm.cos(3 * nm.pi * y)
        else:
            min_y, max_y = bbox[:, 1]
            min_z, max_z = bbox[:, 2]
            y = (coors[:, 1] - min_y) / (max_y - min_y)
            z = (coors[:, 2] - min_z) / (max_z - min_z)
            val = ((max_y - min_y) * (max_z - min_z)
                   * nm.cos(3 * nm.pi * y) * (1.0 + 3.0 * (z - 0.5)**2))
        return val
    bc_fun = Function('u_fun', u_fun)
    # Oscillating Dirichlet BC on the refined facet; homogeneous BC on
    # the opposite facet.
    fix1 = EssentialBC('shift_u', gamma1, {'u.0' : bc_fun})
    fix2 = EssentialBC('fix2', gamma2, {'u.all' : 0.0})
    ls = ScipyDirect({})
    nls = Newton({}, lin_solver=ls)
    pb = Problem('heat', equations=eqs)
    pb.set_bcs(ebcs=Conditions([fix1, fix2]))
    pb.set_solver(nls)
    state = pb.solve(save_results=False)
    if subs is not None:
        # Expand the solution back to the full DOF set before saving.
        field.restore_dofs()
    filename = os.path.join(options.output_dir, 'hanging.vtk')
    ensure_path(filename)
    pb.save_state(filename, state)
    if options.order > 1:
        # Higher-order fields are also saved adaptively linearized, which
        # produces a mesh suitable for visualization of the full solution.
        pb.save_state(filename, state, linearization=Struct(kind='adaptive',
                                                            min_level=0,
                                                            max_level=8,
                                                            eps=1e-3))
if __name__ == '__main__':
    main()
| sfepy/sfepy | examples/diffusion/laplace_refine_interactive.py | Python | bsd-3-clause | 8,056 | [
"VTK"
] | 802f9adb21cfcbeb2727bfff8618aae651d9cbf393ad6a2b9a663b63a10a0080 |
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import TestFailed, verify, vereq, check_syntax
import sys
print '1. Parser'
print '1.1 Tokens'
print '1.1.1 Backslashes'
# Backslash means line continuation:
x = 1 \
+ 1
if x != 2: raise TestFailed, 'backslash for line continuation'
# Backslash does not means continuation in comments :\
x = 0
if x != 0: raise TestFailed, 'backslash ending comment'
print '1.1.2 Numeric literals'
print '1.1.2.1 Plain integers'
if 0xff != 255: raise TestFailed, 'hex int'
if 0377 != 255: raise TestFailed, 'octal int'
if 2147483647 != 017777777777: raise TestFailed, 'large positive int'
try:
from sys import maxint
except ImportError:
maxint = 2147483647
if maxint == 2147483647:
# The following test will start to fail in Python 2.4;
# change the 020000000000 to -020000000000
if -2147483647-1 != -020000000000: raise TestFailed, 'max negative int'
# XXX -2147483648
if 037777777777 < 0: raise TestFailed, 'large oct'
if 0xffffffff < 0: raise TestFailed, 'large hex'
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
print "OverflowError on huge integer literal " + repr(s)
elif eval('maxint == 9223372036854775807'):
if eval('-9223372036854775807-1 != -01000000000000000000000'):
raise TestFailed, 'max negative int'
if eval('01777777777777777777777') < 0: raise TestFailed, 'large oct'
if eval('0xffffffffffffffff') < 0: raise TestFailed, 'large hex'
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
print "OverflowError on huge integer literal " + repr(s)
else:
print 'Weird maxint value', maxint
print '1.1.2.2 Long integers'
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
print '1.1.2.3 Floating point'
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
print '1.1.3 String literals'
x = ''; y = ""; verify(len(x) == 0 and x == y)
x = '\''; y = "'"; verify(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; verify(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
verify(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
verify(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
verify(x == y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''; verify(x == y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"; verify(x == y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'; verify(x == y)
print '1.2 Grammar'
print 'single_input' # NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
print 'file_input' # (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
print 'expr_input' # testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
print 'eval_input' # testlist ENDMARKER
x = eval('1, 0 or 1')
print 'funcdef'
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | *' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
vereq(f2.func_code.co_varnames, ('one_argument',))
vereq(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
vereq(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
vereq(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
vereq(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
vereq(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
verify(v3.func_code.co_varnames == ('a', '(b, c)', 'rest', 'b', 'c'))
else:
vereq(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
verify(v3(1, (2, 3), 4) == (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
d31v(1)
def d32v((x,)): pass
d32v((1,))
# Check ast errors in *args and *kwargs
check_syntax("f(*g(1=2))")
check_syntax("f(**g(1=2))")
### lambdef: 'lambda' [varargslist] ':' test
print 'lambdef'
l1 = lambda : 0
verify(l1() == 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
verify(l3() == [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
verify(l4() == 1)
l5 = lambda x, y, z=2: x + y + z
verify(l5(1, 2) == 5)
verify(l5(1, 2, 3) == 6)
check_syntax("lambda x: x = 2")
check_syntax("lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
### simple_stmt: small_stmt (';' small_stmt)* [';']
print 'simple_stmt'
x = 1; pass; del x
def foo():
# verify statments that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
print 'expr_stmt' # (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
# NB these variables are deleted below
check_syntax("x + 1 = 1")
check_syntax("a + 1 = b + 2")
print 'print_stmt' # 'print' (test ',')* [test]
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
print 'extended print_stmt' # 'print' '>>' test ','
import sys
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
# syntax errors
check_syntax('print ,')
check_syntax('print >> x,')
print 'del_stmt' # 'del' exprlist
del abc
del x, y, (z, xyz)
print 'pass_stmt' # 'pass'
pass
print 'flow_stmt' # break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
print 'break_stmt' # 'break'
while 1: break
print 'continue_stmt' # 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "continue + try/except ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
print msg
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "continue + try/finally ok"
print msg
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
print "testing continue and break in try/except in loop"
def test_break_continue_loop(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
print "continue then break in try/except in loop broken!"
test_break_continue_loop()
print 'return_stmt' # 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax("class foo:return 1")
print 'yield_stmt'
check_syntax("class foo:yield 1")
print 'raise_stmt' # 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
print 'import_name' # 'import' dotted_as_names
import sys
import time, sys
print 'import_from' # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
print 'global_stmt' # 'global' NAME (',' NAME)*
def f():
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
print 'exec_stmt' # 'exec' expr ['in' expr [',' expr]]
def f():
z = None
del z
exec 'z=1+1\n'
if z != 2: raise TestFailed, 'exec \'z=1+1\'\\n'
del z
exec 'z=1+1'
if z != 2: raise TestFailed, 'exec \'z=1+1\''
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: raise TestFailed, 'exec u\'z=1+1\'\\n'
del z
exec u'z=1+1'
if z != 2: raise TestFailed, 'exec u\'z=1+1\''
"""
f()
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: raise TestFailed, 'exec \'z = 1\' in g'
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}): raise TestFailed, 'exec ... in g (%s), l (%s)' %(g,l)
print "assert_stmt" # assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
print 'if_stmt' # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
print 'while_stmt' # 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
assert x == 2
print 'for_stmt' # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285: raise TestFailed, 'for over growing sequence'
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
vereq(result, [1, 2, 3])
print 'try_stmt'
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [',' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError, msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
print 'suite' # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
print 'test'
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
print 'comparison'
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
print 'binary mask ops'
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
print 'shift ops'
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
print 'additive ops'
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
print 'multiplicative ops'
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
print 'unary ops'
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
print 'selectors'
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
print
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
print L
print 'atoms'
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
x = `1,2`
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
print 'classdef' # 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
print [s.strip() for s in spcs]
print [3 * x for x in nums]
print [x for x in nums if x > 2]
print [(i, s) for i in nums for s in strs]
print [(i, s) for i in nums for s in [f for f in strs if "n" in f]]
print [(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)]
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
print test_in_func(nums)
def test_nested_front():
print [[y for y in [x, x + 1]] for x in [1,3,5]]
test_nested_front()
check_syntax("[i, s for i in nums for s in strs]")
check_syntax("[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
print [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
verify(g.next() == [x for x in range(10)])
try:
g.next()
raise TestFailed, 'should produce StopIteration exception'
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
raise TestFailed, 'should produce TypeError'
except TypeError:
pass
verify(list((x, y) for x in 'abcd' for y in 'abcd') == [(x, y) for x in 'abcd' for y in 'abcd'])
verify(list((x, y) for x in 'ab' for y in 'xy') == [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
verify(sum(b) == sum([x for x in range(10)]))
verify(sum(x**2 for x in range(10)) == sum([x**2 for x in range(10)]))
verify(sum(x*x for x in range(10) if x%2) == sum([x*x for x in range(10) if x%2]))
verify(sum(x for x in (y for y in range(10))) == sum([x for x in range(10)]))
verify(sum(x for x in (y for y in (z for z in range(10)))) == sum([x for x in range(10)]))
verify(sum(x for x in [y for y in (z for z in range(10))]) == sum([x for x in range(10)]))
verify(sum(x for x in (y for y in (z for z in range(10) if True)) if True) == sum([x for x in range(10)]))
verify(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True) == 0)
check_syntax("foo(x for x in range(10), 100)")
check_syntax("foo(100, x for x in range(10))")
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
verify(len(list(g)) == 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
verify([(i,j) for i in range(10) for j in range(5)] == list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
verify([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
verify((x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# Verify unpacking single element tuples in listcomp/genexp.
vereq([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
vereq(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
verify([ x() for x in lambda: True, lambda: False if x() ] == [True])
verify([ x() for x in (lambda: True, lambda: False) if x() ] == [True])
verify([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ] == [True])
verify((5 if 1 else _checkeval("check 1", 0)) == 5)
verify((_checkeval("check 2", 0) if 0 else 5) == 5)
verify((5 and 6 if 0 else 1) == 1)
verify(((5 and 6) if 0 else 1) == 1)
verify((5 and (6 if 1 else 1)) == 6)
verify((0 or _checkeval("check 3", 2) if 0 else 3) == 3)
verify((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)) == 1)
verify((0 or 5 if 1 else _checkeval("check 6", 3)) == 5)
verify((not 5 if 1 else 1) == False)
verify((not 5 if 0 else 1) == 1)
verify((6 + 1 if 1 else 2) == 7)
verify((6 - 1 if 1 else 2) == 5)
verify((6 * 2 if 1 else 4) == 12)
verify((6 / 2 if 1 else 3) == 3)
verify((6 < 4 if 0 else 2) == 2)
| zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/test/test_grammar.py | Python | epl-1.0 | 21,632 | [
"GULP"
] | 1aea9c77c161a1e4bafc069df2679fc95036f34eb31dbb7003f97d9e9b5bcc66 |
from ase import Atoms
from gpaw import GPAW, ConvergenceError
from gpaw.mixer import Mixer, MixerSum
# C2 singlet from http://toc.uni-muenster.de/GMTKN/GMTKN30/W4-08.html
m = Atoms(symbols='C2',
positions=[
[ 0. , 0. , -0.62000006],
[ 0. , 0. , 0.62000006]],
)
m.center(vacuum=4.0)
calc = GPAW(h=0.18,
xc='PBE',
basis='dzp',
maxiter=550,
width=0.0,
txt='C2_default.txt',
)
m.set_calculator(calc)
try:
m.get_potential_energy()
except ConvergenceError:
pass
assert not calc.scf.converged
del calc
# converges (fortuitously)
# C2 really needs broken spin-symmetry to converge
calc = GPAW(h=0.18,
xc='PBE',
basis='dzp',
maxiter=550,
width=0.0,
)
calc.set(
mixer=MixerSum(0.02, 3),
eigensolver='cg',
spinpol=True,
txt='C2_conv1.txt',
)
m.set_calculator(calc)
try:
e1 = m.get_potential_energy()
except ConvergenceError:
e1 = None
pass
del calc
# or with broken symmetry magnetic moments, to a different solution
m.set_initial_magnetic_moments([-0.5,0.5])
calc = GPAW(h=0.18,
xc='PBE',
basis='dzp',
maxiter=550,
width=0.0,
)
calc.set(
txt='C2_conv2.txt',
)
m.set_calculator(calc)
e2 = m.get_potential_energy()
if e1 is not None:
# Note that spin-symmetry broken solution gives a different energy!
# Standard DFT is unable to treat such systems, similarly to the
# famous H2 dissociation: dx.doi.org/10.1103/PhysRevLett.87.133004
assert e2 - e1 > 0.15
| robwarm/gpaw-symm | gpaw/test/big/scf/C2/C2.py | Python | gpl-3.0 | 1,670 | [
"ASE",
"GPAW"
] | 8f0cead4a6ecfaa607f9b90a7801521ec8e4d86c7031c6c684fa55e1686e5c7b |
#!/usr/bin/env python
"""
Demos of MIMO time encoding and decoding algorithms that use IAF
neurons with delays.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.iaf as iaf
# For determining output plot file names:
output_name = 'iaf_delay_demo_'
output_count = 0
output_ext = '.png'
# Define input signal generation parameters:
T = 0.05
dur = 2*T
dt = 1e-6
f = 100
bw = 2*np.pi*f
np.random.seed(0)
noise_power = None
comps = 8
if noise_power == None:
fig_title = 'IAF Input Signal with No Noise'
else:
fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power
M = 3 # number of input signals
N = 9 # number of neurons
# Starting and ending points of interval that is encoded:
t_start = 0.02
t_end = t_start+T
if t_end > dur:
raise ValueError('t_start is too large')
k_start = int(np.round(t_start/dt))
k_end = int(np.round(t_end/dt))
t_enc = np.arange(k_start, k_end, dtype=np.float)*dt
u_list = []
for i in xrange(M):
fig_title_in = fig_title + ' (Signal #' + str(i+1) + ')'
print fig_title_in
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power, comps)
u /= max(u)
u *= 1.5
pl.plot_signal(t_enc, u[k_start:k_end], fig_title_in,
output_name + str(output_count) + output_ext)
u_list.append(u)
output_count += 1
t = np.arange(len(u_list[0]), dtype=np.float)*dt
# Define neuron parameters:
def randu(a, b, *d):
"""Create an array of the given shape and propagate it with random
samples from a uniform distribution over ``[a, b)``."""
if a >= b:
raise ValueError('b must exceed a')
return a+(b-a)*np.random.rand(*d)
b_list = list(randu(2.3, 3.3, N))
d_list = list(randu(0.15, 0.25, N))
k_list = list(0.01*np.ones(N))
a_list = map(list, np.reshape(np.random.exponential(0.003, N*M), (N, M)))
w_list = map(list, np.reshape(randu(0.5, 1.0, N*M), (N, M)))
fig_title = 'Signal Encoded Using Delayed IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_delay)(u_list, t_start, dt, b_list, d_list,
k_list, a_list, w_list)
for i in xrange(M):
for j in xrange(N):
fig_title_out = fig_title + '\n(Signal #' + str(i+1) + \
', Neuron #' + str(j+1) + ')'
pl.plot_encoded(t_enc, u_list[i][k_start:k_end],
s_list[j][np.cumsum(s_list[j])<T],
fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Delayed IAF Decoder'
print fig_title
u_rec_list = func_timer(iaf.iaf_decode_delay)(s_list, T, dt,
b_list, d_list, k_list,
a_list, w_list)
for i in xrange(M):
fig_title_out = fig_title + ' (Signal #' + str(i+1) + ')'
pl.plot_compare(t_enc, u_list[i][k_start:k_end],
u_rec_list[i][0:k_end-k_start], fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
| bionet/ted.python | demos/iaf_delay_demo.py | Python | bsd-3-clause | 3,443 | [
"NEURON"
] | 7ea3fc6eb6950c5ba79c3e97182b446fd880d01d2571e65f7a4ab0fc95e1e4ca |
#!/usr/bin/env python
"""Module cma implements the CMA-ES, Covariance Matrix Adaptation Evolution
Strategy, a stochastic optimizer for robust non-linear non-convex
derivative-free function minimization for Python versions 2.6, 2.7, 3.x
(for Python 2.5 class SolutionDict would need to be re-implemented, because
it depends on collections.MutableMapping, since version 0.91.01).
CMA-ES searches for a minimizer (a solution x in R**n) of an
objective function f (cost function), such that f(x) is
minimal. Regarding f, only function values for candidate solutions
need to be available, gradients are not necessary. Even less
restrictive, only a passably reliable ranking of the candidate
solutions in each iteration is necessary, the function values
itself do not matter. Some termination criteria however depend
on actual f-values.
Two interfaces are provided:
- function `fmin(func, x0, sigma0,...)`
runs a complete minimization
of the objective function func with CMA-ES.
- class `CMAEvolutionStrategy`
allows for minimization such that the
control of the iteration loop remains with the user.
Used packages:
- unavoidable: `numpy` (see `barecmaes2.py` if `numpy` is not
available),
- avoidable with small changes: `time`, `sys`
- optional: `matplotlib.pylab` (for `plot` etc., highly
recommended), `pprint` (pretty print), `pickle` (in class
`Sections`), `doctest`, `inspect`, `pygsl` (never by default)
Testing
-------
The code can be tested on a given system. Typing::
python cma.py --test
or in the Python shell ``ipython -pylab``::
run cma.py --test
runs ``doctest.testmod(cma)`` showing only exceptions (and not the
tests that fail due to small differences in the output) and should
run without complaints in about under two minutes. On some systems,
the pop up windows must be closed manually to continue and finish
the test.
Install
-------
The code only needs to be visible in the python path, but can also be installed by::
python cma.py --install
which solely calls the ``setup`` function from the standard ``distutils.core``
package for installation.
Example
-------
::
import cma
help(cma) # "this" help message, use cma? in ipython
help(cma.fmin)
help(cma.CMAEvolutionStrategy)
help(cma.Options)
cma.Options('tol') # display 'tolerance' termination options
cma.Options('verb') # display verbosity options
res = cma.fmin(cma.Fcts.tablet, 15 * [1], 1)
res[0] # best evaluated solution
res[5] # mean solution, presumably better with noise
:See: `fmin()`, `Options`, `CMAEvolutionStrategy`
:Author: Nikolaus Hansen, 2008-2012
:License: GPL 2 and 3
"""
from __future__ import division # future is >= 3.0, this code has mainly been used with 2.6 & 2.7
from __future__ import with_statement # only necessary for python 2.5 and not in heavy use
# from __future__ import collections.MutableMapping # does not exist in future, otherwise 2.5 would work
from __future__ import print_function # for cross-checking, available from python 2.6
import sys
if sys.version.startswith('3'): # in python 3.x
xrange = range
raw_input = input
__version__ = "0.92.06 branched from $Revision: 3322 $ $Date: 2012-11-22 18:05:10 +0100 (Thu, 22 Nov 2012) $"
# bash: svn propset svn:keywords 'Date Revision' cma.py
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 or 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# for testing:
# pyflakes cma.py # finds bugs by static analysis
# pychecker --limit 60 cma.py # also executes, gives 60 warnings (all checked)
# python cma.py -t -quiet # executes implemented tests based on doctest
# to create a html documentation file:
# pydoc -w cma # edit the header (remove local pointers)
# epydoc cma.py # comes close to javadoc but does not find the
# # links of function references etc
# doxygen needs @package cma as first line in the module docstring
# some things like class attributes are not interpreted correctly
# sphinx: doc style of doc.python.org, could not make it work
# TODO: make those options that are only used in fmin an error in init of CMA, but still Options() should
# work as input to CMA.
# TODO: add a default logger in CMAEvolutionStrategy, see fmin() and optimize() first
# tell() should probably not add data, but optimize() should handle even an after_iteration_handler.
# TODO: CMAEvolutionStrategy(ones(10), 1).optimize(cma.fcts.elli) # should work like fmin
# one problem: the data logger is not default and seemingly cannot be attached in one line
# TODO: check combination of boundary handling and transformation: penalty must be computed
# on gp.pheno(x_geno, bounds=None), but without bounds, check/remove usage of .geno everywhere
# TODO: check whether all new solutions are put into self.sent_solutions
# TODO: separate initialize==reset_state from __init__
# TODO: introduce Zpos == diffC which makes the code more consistent and the active update "exact"
# TODO: split tell into a variable transformation part and the "pure" functionality
# usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X])
# genotypic repair is not part of tell_geno
# TODO: read settable "options" from a (properties) file, see myproperties.py
#
# typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun, callback=None
# maxfev, diag (A sequency of N positive entries that serve as
# scale factors for the variables.)
# full_output -- non-zero to return all optional outputs.
# If xtol < 0.0, xtol is set to sqrt(machine_precision)
# 'infot -- a dictionary of optional outputs with the keys:
# 'nfev': the number of function calls...
#
# see eg fmin_powell
# typical returns
# x, f, dictionary d
# (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>)
#
# TODO: keep best ten solutions
# TODO: implement constraints handling
# TODO: option full_output -- non-zero to return all optional outputs.
# TODO: extend function unitdoctest, or use unittest?
# TODO: implement equal-fitness termination, covered by stagnation?
# TODO: apply style guide: no capitalizations!?
# TODO: check and test dispdata()
# TODO: eigh(): thorough testing would not hurt
#
# TODO (later): implement readSignals from a file like properties file (to be called after tell())
import time # not really essential
import collections, numpy as np # arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh, sort, argsort, random, ones,...
from numpy import inf, array, dot, exp, log, sqrt, sum # to access the built-in sum fct: __builtins__.sum or del sum removes the imported sum and recovers the shadowed
# Added by Glen Berseth to support multi-threading iterations
try:
from pathos.multiprocessing import ProcessingPool as Pool
except ImportError:
print( '\n\n Multi-Porcessing will NOT work in CMA.\n Please install pathos for multi-processing to work properly. \n\n ')
# raise ImportError(msg)
import math
try:
import matplotlib.pylab as pylab # also: use ipython -pylab
show = pylab.show
savefig = pylab.savefig # we would like to be able to use cma.savefig() etc
closefig = pylab.close
except:
pylab = None
print(' Could not import matplotlib.pylab, therefore ``cma.plot()`` etc. is not available')
def show():
pass
__docformat__ = "reStructuredText" # this hides some comments entirely?
sys.py3kwarning = True # TODO: out-comment from version 2.6
# why not package math?
# TODO: check scitools.easyviz and how big the adaptation would be
# changes:
# 14/02/27: bug fix when penalty boundary handling is applied with fixed variables:
# penalty weight vector gamma is now expanded by phenotypic fixed variables.
# 12/10/25: removed useless check_points from fmin interface
# 12/10/17: bug fix printing number of infeasible samples, moved not-in-use methods
# timesCroot and divCroot to the right class
# 12/10/16 (0.92.00): various changes commit: bug bound[0] -> bounds[0], more_to_write fixed,
# sigma_vec introduced, restart from elitist, trace normalization, max(mu,popsize/2)
# is used for weight calculation.
# 12/07/23: (bug:) BoundPenalty.update respects now genotype-phenotype transformation
# 12/07/21: convert value True for noisehandling into 1 making the output compatible
# 12/01/30: class Solution and more old stuff removed r3101
# 12/01/29: class Solution is depreciated, GenoPheno and SolutionDict do the job (v0.91.00, r3100)
# 12/01/06: CMA_eigenmethod option now takes a function (integer still works)
# 11/09/30: flat fitness termination checks also history length
# 11/09/30: elitist option (using method clip_or_fit_solutions)
# 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of
# injected or modified solutions and even reliable adaptive encoding
# 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1]
# 11/07/25: fixed: fmin wrote first and last line even with verb_log==0
# fixed: method settableOptionsList, also renamed to versatileOptions
# default seed depends on time now
# 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling
# fixed: output argument ordering in fmin, print now only used as function
# removed: parallel option in fmin
# 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:]
# 11/07/01: major clean-up and reworking of abstract base classes and of the documentation,
# also the return value of fmin changed and attribute stop is now a method.
# 11/04/22: bug-fix: option fixed_variables in combination with scaling
# 11/04/21: stopdict is not a copy anymore
# 11/04/15: option fixed_variables implemented
# 11/03/23: bug-fix boundary update was computed even without boundaries
# 11/03/12: bug-fix of variable annotation in plots
# 11/02/05: work around a memory leak in numpy
# 11/02/05: plotting routines improved
# 10/10/17: cleaning up, now version 0.9.30
# 10/10/17: bug-fix: return values of fmin now use phenotyp (relevant
# if input scaling_of_variables is given)
# 08/10/01: option evalparallel introduced,
# bug-fix for scaling being a vector
# 08/09/26: option CMAseparable becomes CMA_diagonal
# 08/10/18: some names change, test functions go into a class
# 08/10/24: more refactorizing
# 10/03/09: upper bound exp(min(1,...)) for step-size control
# TODO: this would define the visible interface
# __all__ = ['fmin', 'CMAEvolutionStrategy', 'plot', ...]
#
# emptysets = ('', (), [], {}) # array([]) does not work but also np.size(.) == 0
# "x in emptysets" cannot be well replaced by "not x"
# which is also True for array([]) and None, but also for 0 and False, and False for NaN
use_sent_solutions = True # 5-30% CPU slower, particularly for large lambda, will be mandatory soon
#____________________________________________________________
#____________________________________________________________
#
def unitdoctest():
    """is used to describe test cases and might in future become helpful
    as an experimental tutorial as well. The main testing feature at the
    moment is by doctest with ``cma._test()`` or conveniently by
    ``python cma.py --test``. With the ``--verbose`` option added, the
    results will always slightly differ and many "failed" test cases
    might be reported.
    A simple first overall test:
    >>> import cma
    >>> res = cma.fmin(cma.fcts.elli, 3*[1], 1, CMA_diagonal=2, seed=1, verb_time=0)
    (3_w,7)-CMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=1)
    Covariance matrix is diagonal for 2 iterations (1/ccov=7.0)
    Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
    1 7 1.453161670768570e+04 1.2e+00 1.08e+00 1e+00 1e+00
    2 14 3.281197961927601e+04 1.3e+00 1.22e+00 1e+00 2e+00
    3 21 1.082851071704020e+04 1.3e+00 1.24e+00 1e+00 2e+00
    100 700 8.544042012075362e+00 1.4e+02 3.18e-01 1e-03 2e-01
    200 1400 5.691152415221861e-12 1.0e+03 3.82e-05 1e-09 1e-06
    220 1540 3.890107746209078e-15 9.5e+02 4.56e-06 8e-11 7e-08
    termination on tolfun : 1e-11
    final/bestever f-value = 3.89010774621e-15 2.52273602735e-15
    mean solution: [ -4.63614606e-08 -3.42761465e-10 1.59957987e-11]
    std deviation: [ 6.96066282e-08 2.28704425e-09 7.63875911e-11]
    Test on the Rosenbrock function with 3 restarts. The first trial only
    finds the local optimum, which happens in about 20% of the cases.
    >>> import cma
    >>> res = cma.fmin(cma.fcts.rosen, 4*[-1],1, ftarget=1e-6, restarts=3, verb_time=0, verb_disp=500, seed=3)
    (4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=3)
    Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
    1 8 4.875315645656848e+01 1.0e+00 8.43e-01 8e-01 8e-01
    2 16 1.662319948123120e+02 1.1e+00 7.67e-01 7e-01 8e-01
    3 24 6.747063604799602e+01 1.2e+00 7.08e-01 6e-01 7e-01
    184 1472 3.701428610430019e+00 4.3e+01 9.41e-07 3e-08 5e-08
    termination on tolfun : 1e-11
    final/bestever f-value = 3.70142861043 3.70142861043
    mean solution: [-0.77565922 0.61309336 0.38206284 0.14597202]
    std deviation: [ 2.54211502e-08 3.88803698e-08 4.74481641e-08 3.64398108e-08]
    (8_w,16)-CMA-ES (mu_w=4.8,w_1=32%) in dimension 4 (seed=4)
    Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
    1 1489 2.011376859371495e+02 1.0e+00 8.90e-01 8e-01 9e-01
    2 1505 4.157106647905128e+01 1.1e+00 8.02e-01 7e-01 7e-01
    3 1521 3.548184889359060e+01 1.1e+00 1.02e+00 8e-01 1e+00
    111 3249 6.831867555502181e-07 5.1e+01 2.62e-02 2e-04 2e-03
    termination on ftarget : 1e-06
    final/bestever f-value = 6.8318675555e-07 1.18576673231e-07
    mean solution: [ 0.99997004 0.99993938 0.99984868 0.99969505]
    std deviation: [ 0.00018973 0.00038006 0.00076479 0.00151402]
    >>> assert res[1] <= 1e-6
    Notice the different termination conditions. Termination on the target
    function value ftarget prevents further restarts.
    Test of scaling_of_variables option
    >>> import cma
    >>> opts = cma.Options()
    >>> opts['seed'] = 456
    >>> opts['verb_disp'] = 0
    >>> opts['CMA_active'] = 1
    >>> # rescaling of third variable: for searching in roughly
    >>> # x0 plus/minus 1e3*sigma0 (instead of plus/minus sigma0)
    >>> opts.scaling_of_variables = [1, 1, 1e3, 1]
    >>> res = cma.fmin(cma.fcts.rosen, 4 * [0.1], 0.1, **opts)
    termination on tolfun : 1e-11
    final/bestever f-value = 2.68096173031e-14 1.09714829146e-14
    mean solution: [ 1.00000001 1.00000002 1.00000004 1.00000007]
    std deviation: [ 3.00466854e-08 5.88400826e-08 1.18482371e-07 2.34837383e-07]
    The printed std deviations reflect the actual true value (not the one
    in the internal representation which would be different).
    >>> import cma
    >>> r = cma.fmin(cma.fcts.diffpow, 15 * [1], 1, CMA_dampsvec_fac=0.5, ftarget=1e-9)
    >>> assert(r[1] < 1e-9)
    >>> assert(r[2] < 13000) # only passed with CMA_dampsvec_fac
    :See: cma.main(), cma._test()
    """
    # intentionally empty: the docstring above carries the doctests
    pass
#____________________________________________________________
#____________________________________________________________
#
class BlancClass(object):
    """empty container object; callers attach arbitrary attributes freely"""
#_____________________________________________________________________
#_____________________________________________________________________
#
try:
    # Python 3.3+ moved the container ABCs to collections.abc; the plain
    # ``collections.MutableMapping`` alias was removed in Python 3.10,
    # so resolve the base class in a version-independent way
    _MutableMapping = collections.abc.MutableMapping
except AttributeError:  # Python 2
    _MutableMapping = collections.MutableMapping

class DerivedDictBase(_MutableMapping):
    """for conveniently adding features to a dictionary. The actual
    dictionary is in ``self.data``. Copy-paste
    and modify setitem, getitem, and delitem, if necessary"""
    def __init__(self, *args, **kwargs):
        """forward all positional/keyword arguments to the backing dict"""
        super(DerivedDictBase, self).__init__()
        self.data = dict(*args, **kwargs)  # the actual storage
    def __len__(self):
        return len(self.data)
    def __contains__(self, key):
        return key in self.data
    def __iter__(self):
        return iter(self.data)
    def __setitem__(self, key, value):
        """defines self[key] = value"""
        self.data[key] = value
    def __getitem__(self, key):
        """defines self[key]"""
        return self.data[key]
    def __delitem__(self, key):
        del self.data[key]
class SolutionDict(DerivedDictBase):
    """dictionary that hashes inserted solutions (see method `key`) and
    keeps, per key, a stack of entries that were inserted earlier under
    the same solution.
    Each entry is meant to store additional information related to the solution.
    >>> import cma, numpy as np
    >>> d = cma.SolutionDict()
    >>> x = np.array([1,2,4])
    >>> d[x] = {'x': x, 'iteration': 1}
    >>> d.get(x) == (d[x] if d.key(x) in d.keys() else None)
    The last line is always true.
    TODO: data_with_same_key behaves like a stack (see setitem and delitem), but rather should behave like a queue?!
    A queue is less consistent with the operation self[key] = ..., if self.data_with_same_key[key] is not empty.
    """
    def __init__(self, *args, **kwargs):
        DerivedDictBase.__init__(self, *args, **kwargs)
        # key -> list of values previously stored under the same key
        self.data_with_same_key = {}
    def key(self, x):
        """hashable key of ``x``: its tuple form, or ``x`` itself when
        it is not iterable"""
        try:
            return tuple(x)
        except TypeError:
            return x
    def __setitem__(self, key, value):
        """defines self[key] = value; a previously stored value is
        pushed onto the same-key stack instead of being discarded"""
        k = self.key(key)
        if k in self.data_with_same_key:
            self.data_with_same_key[k].append(self.data[k])
        elif k in self.data:
            self.data_with_same_key[k] = [self.data[k]]
        self.data[k] = value
    def __getitem__(self, key):
        """defines self[key]"""
        return self.data[self.key(key)]
    def __delitem__(self, key):
        """remove only the most current entry for ``key``, restoring
        the previous one from the same-key stack if there is any"""
        k = self.key(key)
        stack = self.data_with_same_key.get(k)
        if stack is None:
            del self.data[k]
        elif len(stack) == 1:
            self.data[k] = self.data_with_same_key.pop(k)[0]
        else:
            self.data[k] = stack.pop()
    def truncate(self, max_len, min_iter):
        """drop entries older than ``min_iter``, but only while more
        than ``max_len`` entries are present"""
        if len(self) > max_len:
            for k in list(self.keys()):
                if self[k]['iteration'] < min_iter:
                    del self[k]  # only deletes one item with k as key, should delete all?
class SolutionDictOld(dict):
    """depreciated, SolutionDict should do, to be removed after SolutionDict
    has been successfully applied.
    A dict that hashes inserted solutions (see `key`) and keeps a stack of
    previously inserted solutions with the same key in ``self.more``.
    Each entry is meant to store additional information related to the solution.
    Methods ``pop`` and ``get`` are replaced on the instance by derived
    variants that accept the solution itself rather than its key.
    d = SolutionDictOld()
    x = array([1,2,4])
    d.insert(x, {'x': x, 'iteration': 1})
    d.get(x) == d[d.key(x)] if d.key(x) in d.keys() else d.get(x) is None
    TODO: not yet tested
    TODO: behaves like a stack (see _pop_derived), but rather should behave like a queue?!
    A queue is less consistent with the operation self[key] = ..., if self.more[key] is not empty.
    """
    def __init__(self):
        # key -> list of earlier entries inserted under the same key
        self.more = {}
        # swap in the derived pop/get on the instance while keeping the
        # plain dict methods reachable through the ``_..._base`` aliases
        self._pop_base = self.pop
        self.pop = self._pop_derived
        self._get_base = self.get
        self.get = self._get_derived
    def key(self, x):
        """compute the hash key of ``x``"""
        return tuple(x)
    def insert(self, x, datadict):
        """store ``datadict`` under ``key(x)``, pushing any previous
        entry for the same solution onto the ``self.more`` stack"""
        k = self.key(x)
        if k in self.more:
            self.more[k].append(self[k])
        elif k in self:
            self.more[k] = [self[k]]
        self[k] = datadict
    def _get_derived(self, x, default=None):
        """``get`` variant that accepts the solution ``x`` itself"""
        return self._get_base(self.key(x), default)
    def _pop_derived(self, x):
        """return the most recent entry for ``x``, restoring the
        previous same-key entry from the stack if there is one"""
        k = self.key(x)
        res = self[k]
        stack = self.more.get(k)
        if stack is not None:
            self[k] = self.more.pop(k)[0] if len(stack) == 1 else stack.pop()
        return res
class BestSolution(object):
    """container to keep track of the best solution seen"""
    def __init__(self, x=None, f=np.inf, evals=None):
        """initialize the best solution with `x`, `f`, and `evals`.
        Better solutions have smaller `f`-values.
        """
        self.x = x  # best phenotype seen so far
        self.x_geno = None  # genotype of the best solution, if available
        # bug-fix: ``f is not np.nan`` was an identity test that misses
        # computed NaNs; ``f != f`` is the reliable NaN check
        self.f = np.inf if f is None or f != f else f
        self.evals = evals  # evaluation count when the best was found
        self.evalsall = evals  # largest evaluation count seen in updates
        self.last = BlancClass()  # bookkeeping of the most recent update
        self.last.x = x
        self.last.f = f
    def update(self, arx, xarchive=None, arf=None, evals=None):
        """checks for better solutions in list `arx`, based on the smallest
        corresponding value in `arf`, alternatively, `update` may be called
        with a `BestSolution` instance like ``update(another_best_solution)``
        in which case the better solution becomes the current best.
        `xarchive` is used to retrieve the genotype of a solution.
        """
        if isinstance(arx, BestSolution):
            if self.evalsall is None:
                self.evalsall = arx.evalsall
            elif arx.evalsall is not None:
                self.evalsall = max((self.evalsall, arx.evalsall))
            if arx.f is not None and arx.f < np.inf:
                self.update([arx.x], xarchive, [arx.f], arx.evals)
            return self
        if arf is None:
            # bug-fix: the original fell through to ``arx[minidx]`` with an
            # undefined ``minidx`` in this case
            if evals:
                self.evalsall = evals
            return
        # find failsafe minimum
        try:
            minidx = np.nanargmin(arf)
        except ValueError:  # newer numpy raises when arf is empty or all-NaN
            return
        if minidx != minidx:  # older numpy returned NaN instead of raising
            return
        minarf = arf[minidx]
        if minarf < np.inf and (minarf < self.f or self.f is None):
            self.x, self.f = arx[minidx], arf[minidx]
            self.x_geno = xarchive[self.x]['geno'] if xarchive is not None else None
            self.evals = None if not evals else evals - len(arf) + minidx + 1
            self.evalsall = evals
        elif evals:
            self.evalsall = evals
        self.last.x = arx[minidx]
        self.last.f = minarf
    def get(self):
        """return ``(x, f, evals, x_geno)``"""
        return self.x, self.f, self.evals, self.x_geno
#____________________________________________________________
#____________________________________________________________
#
class BoundPenalty(object):
    """Computes the boundary penalty. Must be updated each iteration,
    using the `update` method.
    Details
    -------
    The penalty computes like ``sum(w[i] * (x[i]-xfeas[i])**2)``,
    where `xfeas` is the closest feasible (in-bounds) solution from `x`.
    The weight `w[i]` should be updated during each iteration using
    the update method.
    This class uses `GenoPheno.into_bounds` in method `update` to access
    domain boundary values and repair. This inconsistency is going to be
    removed in future.
    """
    def __init__(self, bounds=None):
        """Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
        are lower and upper domain boundaries, each is either `None` or
        a scalar or a list or array of appropriate size.
        """
        ##
        # bounds attribute reminds the domain boundary values
        self.bounds = bounds
        self.gamma = 1  # a very crude assumption
        self.weights_initialized = False  # gamma becomes a vector after initialization
        self.hist = []  # delta-f history
    def has_bounds(self):
        """return True, if any variable is bounded"""
        bounds = self.bounds
        if bounds is None or (bounds[0] is None and bounds[1] is None):
            return False
        # bug-fix: the original iterated ``xrange(bounds[k])`` instead of
        # over the entries of ``bounds[k]``, raising a TypeError for any
        # non-integer bound specification; scalars are wrapped in a list
        lower = bounds[0]
        if lower is not None:
            for b in ([lower] if np.isscalar(lower) else lower):
                if b is not None and b > -np.inf:
                    return True
        upper = bounds[1]
        if upper is not None:
            for b in ([upper] if np.isscalar(upper) else upper):
                if b is not None and b < np.inf:
                    return True
        return False
    def repair(self, x, bounds=None, copy=False, copy_always=False):
        """sets out-of-bounds components of ``x`` on the bounds.
        Arguments
        ---------
        `bounds`
            can be `None`, in which case the "default" bounds are used,
            or ``[lb, ub]``, where `lb` and `ub`
            represent lower and upper domain bounds respectively that
            can be `None` or a scalar or a list or array of length ``len(self)``
        code is more or less copy-paste from Solution.repair, but never tested
        """
        # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
        # TODO: test whether np.max([bounds[0], x], axis=0) etc is speed relevant
        if bounds is None:
            bounds = self.bounds
        x_out = np.array(x, copy=True) if copy_always else x
        if bounds is not None and (bounds[0] is not None or bounds[1] is not None):
            # bug-fix: with ``copy_always`` the fresh copy was previously
            # overwritten by ``x`` itself inside this branch
            if copy and not copy_always:
                x_out = np.array(x, copy=True)
            lb, ub = bounds[0], bounds[1]
            if lb is not None:
                if np.isscalar(lb):
                    for i in range(len(x)):
                        x_out[i] = max(lb, x[i])
                else:
                    for i in range(len(x)):
                        if lb[i] is not None:
                            x_out[i] = max(lb[i], x[i])
            if ub is not None:
                if np.isscalar(ub):
                    for i in range(len(x)):
                        x_out[i] = min(ub, x[i])
                else:
                    for i in range(len(x)):
                        if ub[i] is not None:
                            x_out[i] = min(ub[i], x[i])
        return x_out  # convenience return
    #____________________________________________________________
    #
    def __call__(self, x, archive, gp):
        """returns the boundary violation penalty for `x`, where `x` is a
        single solution or a list or array of solutions.
        If `bounds` is not `None`, the values in `bounds` are used, see `__init__`"""
        if x is None or len(x) == 0:
            return x
        if gp.bounds is None or (gp.bounds[0] is None and gp.bounds[1] is None):
            return 0.0 if np.isscalar(x[0]) else [0.0] * len(x)  # no penalty
        x_is_single_vector = np.isscalar(x[0])
        x = [x] if x_is_single_vector else x
        # add zero weights for fixed variables so that indices align
        try:
            gamma = list(self.gamma)  # fails if self.gamma is a scalar
            for i in sorted(gp.fixed_values):  # fails if fixed_values is None
                gamma.insert(i, 0.0)
            gamma = np.array(gamma, copy=False)
        except TypeError:
            gamma = self.gamma
        pen = []
        for xi in x:
            # CAVE: this does not work with already repaired values!!
            # CPU(N,lam,iter=20,200,100)?: 3s of 10s, array(xi): 1s (check again)
            # remark: one deep copy can be prevented by xold = xi first
            xpheno = gp.pheno(archive[xi]['geno'])
            xinbounds = gp.into_bounds(xpheno)
            fac = 1  # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
            pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
        return pen[0] if x_is_single_vector else pen
    #____________________________________________________________
    #
    def feasible_ratio(self, solutions):
        """counts for each coordinate the number of feasible values in
        ``solutions`` and returns an array of length ``len(solutions[0])``
        with the ratios.
        `solutions` is a list or array of repaired `Solution` instances
        """
        count = np.zeros(len(solutions[0]))
        for x in solutions:
            count += x.unrepaired == x
        return count / float(len(solutions))
    #____________________________________________________________
    #
    def update(self, function_values, es, bounds=None):
        """updates the weights for computing a boundary penalty.
        Arguments
        ---------
        `function_values`
            all function values of recent population of solutions
        `es`
            `CMAEvolutionStrategy` object instance, in particular the
            method `into_bounds` of the attribute `gp` of type `GenoPheno`
            is used.
        `bounds`
            not (yet) in use other than for ``bounds == [None, None]`` nothing
            is updated.
        Reference: Hansen et al 2009, A Method for Handling Uncertainty...
        IEEE TEC, with addendum at http://www.lri.fr/~hansen/TEC2009online.pdf
        """
        if bounds is None:
            bounds = self.bounds
        if bounds is None or (bounds[0] is None and bounds[1] is None):  # no bounds ==> no penalty
            return self  # len(function_values) * [0.0] # case without violations
        N = es.N
        ### prepare
        # compute varis = sigma**2 * C_ii
        varis = es.sigma**2 * np.array(N * [es.C] if np.isscalar(es.C) else (  # scalar case
                                es.C if np.isscalar(es.C[0]) else  # diagonal matrix case
                                [es.C[i][i] for i in range(N)]))  # full matrix case
        # dmean = (es.mean - es.gp.into_bounds(es.mean)) / varis**0.5
        dmean = (es.mean - es.gp.geno(es.gp.into_bounds(es.gp.pheno(es.mean)))) / varis**0.5
        ### Store/update a history of delta fitness value
        fvals = sorted(function_values)
        l = 1 + len(fvals)
        val = fvals[3*l // 4] - fvals[l // 4]  # exact interquartile range apart interpolation
        val = val / np.mean(varis)  # new: val is normalized with sigma of the same iteration
        # insert val in history
        if np.isfinite(val) and val > 0:
            self.hist.insert(0, val)
        elif val == np.inf and len(self.hist) > 1:
            self.hist.insert(0, max(self.hist))
        else:
            pass  # ignore 0 or nan values
        if len(self.hist) > 20 + (3*N) / es.popsize:
            self.hist.pop()
        ### prepare
        dfit = np.median(self.hist)  # median interquartile range
        damp = min(1, es.sp.mueff/10./N)
        ### set/update weights
        # Throw initialization error
        if len(self.hist) == 0:
            raise _Error('wrongful initialization, no feasible solution sampled. ' +
                'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +
                'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ')
        # initialize weights
        if (dmean.any() and (not self.weights_initialized or es.countiter == 2)):  # TODO
            self.gamma = np.array(N * [2*dfit])
            self.weights_initialized = True
        # update weights gamma
        if self.weights_initialized:
            edist = np.array(abs(dmean) - 3 * max(1, N**0.5/es.sp.mueff))
            if 1 < 3:  # this is better, around a factor of two
                # increase single weights possibly with a faster rate than they can decrease
                # value unit of edst is std dev, 3==random walk of 9 steps
                self.gamma *= np.exp((edist>0) * np.tanh(edist/3) / 2.)**damp
                # decrease all weights up to the same level to avoid single extremely small weights
                # use a constant factor for pseudo-keeping invariance
                self.gamma[self.gamma > 5 * dfit] *= np.exp(-1./3)**damp
                # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)
            elif 1 < 3 and (edist>0).any():  # previous method (kept for reference, unreachable)
                # CAVE: min was max in TEC 2009
                self.gamma[edist>0] *= 1.1**min(1, es.sp.mueff/10./N)
                # max fails on cigtab(N=12,bounds=[0.1,None]):
                # self.gamma[edist>0] *= 1.1**max(1, es.sp.mueff/10./N) # this was a bug!?
                # self.gamma *= exp((edist>0) * np.tanh(edist))**min(1, es.sp.mueff/10./N)
            else:  # alternative version, but not better
                solutions = es.pop  # this has not been checked
                r = self.feasible_ratio(solutions)  # has to be the averaged over N iterations
                self.gamma *= np.exp(np.max([N*[0], 0.3 - r], axis=0))**min(1, es.sp.mueff/10/N)
        es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]
        ### return penalty
        # es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]
        return self  # bound penalty values
#____________________________________________________________
#____________________________________________________________
#
class GenoPhenoBase(object):
    """depreciated, abstract base class for genotyp-phenotype transformation,
    to be implemented.
    See (and rather use) option ``transformation`` of ``fmin`` or ``CMAEvolutionStrategy``.
    Example
    -------
    ::
        import cma
        class Mygpt(cma.GenoPhenoBase):
            def pheno(self, x):
                return x  # identity for the time being
        gpt = Mygpt()
        optim = cma.CMAEvolutionStrategy(...)
        while not optim.stop():
            X = optim.ask()
            f = [func(gpt.pheno(x)) for x in X]
            optim.tell(X, f)
    In case of a repair, we might pass the repaired solution into `tell()`
    (with check_points being True).
    TODO: check usecases in `CMAEvolutionStrategy` and implement option GenoPhenoBase
    """
    def pheno(self, x):
        """abstract method: map genotype `x` to its phenotype;
        derived classes must override this"""
        # clean-up: removed the unreachable ``return x`` after the raise
        raise NotImplementedError()
#____________________________________________________________
#____________________________________________________________
#
class GenoPheno(object):
    """Genotype-phenotype transformation.
    Method `pheno` provides the transformation from geno- to phenotype,
    that is from the internal representation to the representation used
    in the objective function. Method `geno` provides the "inverse" pheno-
    to genotype transformation. The geno-phenotype transformation comprises,
    in this order:
    - insert fixed variables (with the phenotypic and therefore quite
      possibly "wrong" values)
    - affine linear transformation (scaling and shift)
    - user-defined transformation
    - projection into feasible domain (boundaries)
    - assign fixed variables their original phenotypic value
    By default all transformations are the identity. The boundary
    transformation is only applied, if the boundaries are given as argument to
    the method `pheno` or `geno` respectively.
    ``geno`` is not really necessary and might disappear in future.
    """
    def __init__(self, dim, scaling=None, typical_x=None, bounds=None, fixed_values=None, tf=None):
        """return `GenoPheno` instance with fixed dimension `dim`.
        Keyword Arguments
        -----------------
        `scaling`
            the diagonal of a scaling transformation matrix, multipliers
            in the genotyp-phenotyp transformation, see `typical_x`
        `typical_x`
            ``pheno = scaling*geno + typical_x``
        `bounds` (obsolete, might disappear)
            list with two elements,
            lower and upper bounds both can be a scalar or a "vector"
            of length dim or `None`. Without effect, as `bounds` must
            be given as argument to `pheno()`.
        `fixed_values`
            a dictionary of variable indices and values, like ``{0:2.0, 2:1.1}``,
            that are not subject to change, negative indices are ignored
            (they act like incommenting the index), values are phenotypic
            values.
        `tf`
            list of two user-defined transformation functions, or `None`.
            ``tf[0]`` is a function that transforms the internal representation
            as used by the optimizer into a solution as used by the
            objective function. ``tf[1]`` does the back-transformation.
            For example ::
                tf_0 = lambda x: [xi**2 for xi in x]
                tf_1 = lambda x: [abs(xi)**0.5 for xi in x]
            or "equivalently" without the `lambda` construct ::
                def tf_0(x):
                    return [xi**2 for xi in x]
                def tf_1(x):
                    return [abs(xi)**0.5 for xi in x]
            ``tf=[tf_0, tf_1]`` is a reasonable way to guarantee that only positive
            values are used in the objective function.
        Details
        -------
        If ``tf_1`` is omitted, the initial x-value must be given as genotype (as the
        phenotype-genotype transformation is unknown) and injection of solutions
        might lead to unexpected results.
        """
        self.N = dim
        self.bounds = bounds
        self.fixed_values = fixed_values
        if tf is not None:
            self.tf_pheno = tf[0]
            self.tf_geno = tf[1]  # TODO: should not necessarily be needed
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r < 1e-7)
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r > -1e-7)
            print("WARNING in class GenoPheno: user defined transformations have not been tested thoroughly")
        else:
            self.tf_geno = None
            self.tf_pheno = None
        if fixed_values:
            if type(fixed_values) is not dict:
                raise _Error("fixed_values must be a dictionary {index:value,...}")
            if max(fixed_values.keys()) >= dim:
                raise _Error("max(fixed_values.keys()) = " + str(max(fixed_values.keys())) +
                    " >= dim=N=" + str(dim) + " is not a feasible index")
            # convenience commenting functionality: drop negative keys
            for k in list(fixed_values.keys()):
                if k < 0:
                    fixed_values.pop(k)
        if bounds:
            if len(bounds) != 2:
                raise _Error('len(bounds) must be 2 for lower and upper bounds')
            for i in (0, 1):
                if bounds[i] is not None:
                    # normalize scalar bounds to per-coordinate arrays
                    bounds[i] = np.array(dim * [bounds[i]] if np.isscalar(bounds[i]) else
                                         [b for b in bounds[i]])
        def vec_is_default(vec, default_val=0):
            """return True if `vec` has the value `default_val`,
            None or [None] are also recognized as default"""
            try:
                if len(vec) == 1:
                    vec = vec[0]  # [None] becomes None and is always default
                else:
                    return False
            except TypeError:
                pass  # vec is a scalar
            if vec is None or vec == np.array(None) or vec == default_val:
                return True
            return False
        self.scales = np.array(scaling)
        if vec_is_default(self.scales, 1):
            self.scales = 1  # CAVE: 1 is not array(1)
        # bug-fix: ``shape is not ()`` compared tuple identity; use equality
        elif self.scales.shape != () and len(self.scales) != self.N:
            raise _Error('len(scales) == ' + str(len(self.scales)) +
                ' does not match dimension N == ' + str(self.N))
        self.typical_x = np.array(typical_x)
        if vec_is_default(self.typical_x, 0):
            self.typical_x = 0
        elif self.typical_x.shape != () and len(self.typical_x) != self.N:
            raise _Error('len(typical_x) == ' + str(len(self.typical_x)) +
                ' does not match dimension N == ' + str(self.N))
        # the transformation is the identity iff every component is default;
        # scales/typical_x are plain scalars (1 resp. 0) exactly in that case
        self.isidentity = (np.isscalar(self.scales)
                           and np.isscalar(self.typical_x)
                           and (self.bounds is None
                                or (self.bounds[0] is None and self.bounds[1] is None))
                           and self.fixed_values is None
                           and self.tf_pheno is None)
    def into_bounds(self, y, bounds=None, copy_never=False, copy_always=False):
        """Argument `y` is a phenotypic vector,
        return `y` put into boundaries, as a copy iff ``y != into_bounds(y)``.
        Note: this code is duplicated in `Solution.repair` and might
        disappear in future.
        """
        bounds = bounds if bounds is not None else self.bounds
        if bounds is None or (bounds[0] is None and bounds[1] is None):
            return y if not copy_always else np.array(y, copy=True)
        if bounds[0] is not None:
            if len(bounds[0]) not in (1, len(y)):
                raise ValueError('len(bounds[0]) = ' + str(len(bounds[0])) +
                    ' and len of initial solution (' + str(len(y)) + ') disagree')
            if copy_never:  # is rather slower
                for i in range(len(y)):
                    y[i] = max(bounds[0][i], y[i])
            else:
                y = np.max([bounds[0], y], axis=0)
        if bounds[1] is not None:
            if len(bounds[1]) not in (1, len(y)):
                raise ValueError('len(bounds[1]) = ' + str(len(bounds[1])) +
                    ' and initial solution (' + str(len(y)) + ') disagree')
            if copy_never:
                for i in range(len(y)):
                    y[i] = min(bounds[1][i], y[i])
            else:
                y = np.min([bounds[1], y], axis=0)
        return y
    def pheno(self, x, bounds=None, copy=True, copy_always=False):
        """maps the genotypic input argument into the phenotypic space,
        boundaries are only applied if argument ``bounds is not None``, see
        help for class `GenoPheno`
        """
        if copy_always and not copy:
            raise ValueError('arguments copy_always=' + str(copy_always) +
                ' and copy=' + str(copy) + ' have inconsistent values')
        if self.isidentity and (bounds is None or
                                (bounds[0] is None and bounds[1] is None)):
            return x if not copy_always else np.array(x, copy=copy_always)
        if self.fixed_values is None:
            y = np.array(x, copy=copy)  # make a copy, in case
        else:  # expand with fixed values
            y = list(x)  # is a copy
            for i in sorted(self.fixed_values.keys()):
                y.insert(i, self.fixed_values[i])
            y = np.array(y, copy=False)
        # scales/typical_x are either the scalar defaults 1/0 or arrays;
        # the scalar test replaces the fragile ``is not 1``/``is not 0``
        if not np.isscalar(self.scales):  # just for efficiency
            y *= self.scales
        if not np.isscalar(self.typical_x):
            y += self.typical_x
        if self.tf_pheno is not None:
            y = np.array(self.tf_pheno(y), copy=False)
        if bounds is not None:
            y = self.into_bounds(y, bounds)
        if self.fixed_values is not None:
            for i, k in list(self.fixed_values.items()):
                y[i] = k
        return y
    def geno(self, y, bounds=None, copy=True, copy_always=False, archive=None):
        """maps the phenotypic input argument into the genotypic space.
        If `bounds` are given, first `y` is projected into the feasible
        domain. In this case ``copy==False`` leads to a copy.
        By default a copy is made only to prevent to modify ``y``.
        method geno is only needed if external solutions are injected
        (geno(initial_solution) is depreciated and will disappear)
        TODO: arg copy=True should become copy_never=False
        """
        if archive is not None and bounds is not None:
            try:
                return archive[y]['geno']
            except (KeyError, TypeError):  # not archived / unhashable key
                pass
        x = np.array(y, copy=(copy and not self.isidentity) or copy_always)
        if bounds is not None:  # map phenotyp into bounds first
            x = self.into_bounds(x, bounds)
        if self.isidentity:
            return x
        # user-defined transformation
        if self.tf_geno is not None:
            x = np.array(self.tf_geno(x), copy=False)
        elif self.tf_pheno is not None:
            # bug-fix: the error object was created but never raised, and
            # it must only fire when a forward transformation exists
            raise _Error('t1 of options transformation was not defined but is needed as being the inverse of t0')
        # affine-linear transformation: shift and scaling
        if not np.isscalar(self.typical_x):
            x -= self.typical_x
        if not np.isscalar(self.scales):  # just for efficiency
            x /= self.scales
        # kick out fixed_values; keeping the transformed values does not
        # help much, therefore it is omitted
        if self.fixed_values is not None:
            keys = sorted(self.fixed_values.keys())
            x = np.array([x[i] for i in range(len(x)) if i not in keys], copy=False)
        return x
#____________________________________________________________
#____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
class OOOptimizer(object):
""""abstract" base class for an OO optimizer interface with methods
`__init__`, `ask`, `tell`, `stop`, `result`, and `optimize`. Only
`optimize` is fully implemented in this base class.
Examples
--------
All examples minimize the function `elli`, the output is not shown.
(A preferred environment to execute all examples is ``ipython -pylab``.)
First we need ::
from cma import CMAEvolutionStrategy, CMADataLogger # CMAEvolutionStrategy derives from the OOOptimizer class
elli = lambda x: sum(1e3**((i-1.)/(len(x)-1.)*x[i])**2 for i in range(len(x)))
The shortest example uses the inherited method `OOOptimizer.optimize()`::
res = CMAEvolutionStrategy(8 * [0.1], 0.5).optimize(elli)
The input parameters to `CMAEvolutionStrategy` are specific to this
inherited class. The remaining functionality is based on interface
defined by `OOOptimizer`. We might have a look at the result::
print(res[0]) # best solution and
print(res[1]) # its function value
`res` is the return value from method
`CMAEvolutionStrategy.result()` appended with `None` (no logger).
In order to display more exciting output we rather do ::
logger = CMADataLogger() # derives from the abstract BaseDataLogger class
res = CMAEvolutionStrategy(9 * [0.5], 0.3).optimize(elli, logger)
logger.plot() # if matplotlib is available, logger == res[-1]
or even shorter ::
res = CMAEvolutionStrategy(9 * [0.5], 0.3).optimize(elli, CMADataLogger())
res[-1].plot() # if matplotlib is available
Virtually the same example can be written with an explicit loop
instead of using `optimize()`. This gives the necessary insight into
the `OOOptimizer` class interface and gives entire control over the
iteration loop::
optim = CMAEvolutionStrategy(9 * [0.5], 0.3) # a new CMAEvolutionStrategy instance calling CMAEvolutionStrategy.__init__()
logger = CMADataLogger(optim) # get a logger instance
# this loop resembles optimize()
while not optim.stop(): # iterate
X = optim.ask() # get candidate solutions
f = [elli(x) for x in X] # evaluate solutions
# maybe do something else that needs to be done
optim.tell(X, f) # do all the real work: prepare for next iteration
optim.disp(20) # display info every 20th iteration
logger.add() # log another "data line"
# final output
print('termination by', optim.stop())
print('best f-value =', optim.result()[1])
print('best solution =', optim.result()[0])
logger.plot() # if matplotlib is available
raw_input('press enter to continue') # prevents exiting and closing figures
Details
-------
Most of the work is done in the method `tell(...)`. The method `result()` returns
more useful output.
"""
def __init__(self, xstart, **more_args):
"""``xstart`` is a mandatory argument"""
self.xstart = xstart
self.more_args = more_args
self.initialize()
def initialize(self):
"""(re-)set to the initial state"""
self.countiter = 0
self.xcurrent = self.xstart[:]
raise NotImplementedError('method initialize() must be implemented in derived class')
def ask(self):
"""abstract method, AKA "get" or "sample_distribution", deliver new candidate solution(s), a list of "vectors"
"""
raise NotImplementedError('method ask() must be implemented in derived class')
def tell(self, solutions, function_values):
"""abstract method, AKA "update", prepare for next iteration"""
self.countiter += 1
raise NotImplementedError('method tell() must be implemented in derived class')
def stop(self):
"""abstract method, return satisfied termination conditions in a dictionary like
``{'termination reason': value, ...}``, for example ``{'tolfun': 1e-12}``, or the empty
dictionary ``{}``. The implementation of `stop()` should prevent an infinite loop.
"""
raise NotImplementedError('method stop() is not implemented')
def disp(self, modulo=None):
"""abstract method, display some iteration infos if ``self.iteration_counter % modulo == 0``"""
raise NotImplementedError('method disp() is not implemented')
def result(self):
"""abstract method, return ``(x, f(x), ...)``, that is, the minimizer, its function value, ..."""
raise NotImplementedError('method result() is not implemented')
def optimize(self, objectivefct, logger=None, verb_disp=20, iterations=None):
    """find minimizer of `objectivefct` by iterating over `OOOptimizer` `self`
    with verbosity `verb_disp`, using `BaseDataLogger` `logger` with at
    most `iterations` iterations. ::

        return self.result() + (self.stop(), self, logger)

    Arguments
    ---------
    `objectivefct` -- function to be minimized, called as ``objectivefct(x)``
        for each candidate solution ``x``
    `logger` -- data logger with an ``add(self, modulo=...)`` method and a
        ``modulo`` attribute; if `None`, ``self.logger`` is used when present
    `verb_disp` -- verbosity passed to `disp`; 1 or `True` additionally
        prints a final summary
    `iterations` -- optional upper bound on the number of iterations;
        when reached, ``self.result()`` is returned early

    Example::

        import cma
        res = cma.CMAEvolutionStrategy(7 * [0.1], 0.5).optimize(cma.fcts.rosen)
    """
    if logger is None:
        if hasattr(self, 'logger'):
            logger = self.logger
    citer = 0
    while not self.stop():
        if iterations is not None and citer >= iterations:
            return self.result()
        citer += 1
        X = self.ask()  # deliver candidate solutions
        fitvals = [objectivefct(x) for x in X]
        self.tell(X, fitvals)  # all the real work is done here
        self.disp(verb_disp)
        # BUGFIX: the logger was previously invoked twice per iteration
        # (a plain `logger.add(self)` followed by the modulo variant),
        # duplicating every logged data line; keep only the modulo call
        if logger:
            logger.add(self, modulo=bool(logger.modulo))
    if verb_disp:
        self.disp(1)
    if verb_disp in (1, True):
        print('termination by', self.stop())
        print('best f-value =', self.result()[1])
        print('solution =', self.result()[0])
    return self.result() + (self.stop(), self, logger)
#____________________________________________________________
#____________________________________________________________
#
class CMAEvolutionStrategy(OOOptimizer):
"""CMA-ES stochastic optimizer class with ask-and-tell interface.
See `fmin` for the one-line-call functional interface.
Calling sequence
================
``optim = CMAEvolutionStrategy(x0, sigma0, opts)``
returns a class instance.
Arguments
---------
`x0`
initial solution, starting point (phenotype).
`sigma0`
initial standard deviation. The problem
variables should have been scaled, such that a single
standard deviation on all variables is useful and the
optimum is expected to lie within about `x0` +- ``3*sigma0``.
See also options `scaling_of_variables`.
Often one wants to check for solutions close to the initial
point. This allows for an easier check for consistency of
the objective function and its interfacing with the optimizer.
In this case, a much smaller `sigma0` is advisable.
`opts`
options, a dictionary with optional settings,
see class `Options`.
Main interface / usage
======================
The ask-and-tell interface is inherited from the generic `OOOptimizer`
interface for iterative optimization algorithms (see there). With ::
optim = CMAEvolutionStrategy(8 * [0.5], 0.2)
an object instance is generated. In each iteration ::
solutions = optim.ask()
is used to ask for new candidate solutions (possibly several times) and ::
optim.tell(solutions, func_values)
passes the respective function values to `optim`. Instead of `ask()`,
the class `CMAEvolutionStrategy` also provides ::
(solutions, func_values) = optim.ask_and_eval(objective_func)
Therefore, after initialization, an entire optimization can be written
in two lines like ::
while not optim.stop():
optim.tell(*optim.ask_and_eval(objective_func))
Without the freedom of executing additional lines within the iteration,
the same reads in a single line as ::
optim.optimize(objective_func)
Besides for termination criteria, in CMA-ES only
the ranks of the `func_values` are relevant.
Attributes and Properties
=========================
- `inputargs` -- passed input arguments
- `inopts` -- passed options
- `opts` -- actually used options, some of them can be changed any
time, see class `Options`
- `popsize` -- population size lambda, number of candidate solutions
returned by `ask()`
Details
=======
The following two enhancements are turned off by default.
**Active CMA** is implemented with option ``CMA_active`` and conducts
an update of the covariance matrix with negative weights. The
exponential update is implemented, where from a mathematical
viewpoint positive definiteness is guaranteed. The update is applied
after the default update and only before the covariance matrix is
decomposed, which limits the additional computational burden to be
at most a factor of three (typically smaller). A typical speed up
factor (number of f-evaluations) is between 1.1 and two.
References: Jastrebski and Arnold, CEC 2006, Glasmachers et al, GECCO 2010.
**Selective mirroring** is implemented with option ``CMA_mirrors`` in
the method ``get_mirror()``. Only the method `ask_and_eval()` will
then sample selectively mirrored vectors. In selective mirroring, only
the worst solutions are mirrored. With the default small number of mirrors,
*pairwise selection* (where at most one of the two mirrors contribute to the
update of the distribution mean) is implicitly guaranteed under selective
mirroring and therefore not explicitly implemented.
References: Brockhoff et al, PPSN 2010, Auger et al, GECCO 2011.
Examples
========
Super-short example, with output shown:
>>> import cma
>>> # construct an object instance in 4-D, sigma0=1
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
(4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234)
>>>
>>> # iterate until termination
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.fcts.elli(x) for x in X])
... es.disp() # by default sparse, see option verb_disp
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 8 2.093015112685775e+04 1.0e+00 9.27e-01 9e-01 9e-01 0:0.0
2 16 4.964814235917688e+04 1.1e+00 9.54e-01 9e-01 1e+00 0:0.0
3 24 2.876682459926845e+05 1.2e+00 1.02e+00 9e-01 1e+00 0:0.0
100 800 6.809045875281943e-01 1.3e+02 1.41e-02 1e-04 1e-02 0:0.2
200 1600 2.473662150861846e-10 8.0e+02 3.08e-05 1e-08 8e-06 0:0.5
233 1864 2.766344961865341e-14 8.6e+02 7.99e-07 8e-11 7e-08 0:0.6
>>>
>>> cma.pprint(es.result())
(Solution([ -1.98546755e-09, -1.10214235e-09, 6.43822409e-11,
-1.68621326e-11]),
4.5119610261406537e-16,
1666,
1672,
209,
array([ -9.13545269e-09, -1.45520541e-09, -6.47755631e-11,
-1.00643523e-11]),
array([ 3.20258681e-08, 3.15614974e-09, 2.75282215e-10,
3.27482983e-11]))
>>>
>>> # help(es.result) shows
result(self) method of cma.CMAEvolutionStrategy instance
return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)``
Using the multiprocessing module, we can evaluate the function in parallel with a simple
modification of the example ::
import multiprocessing
# prepare es = ...
pool = multiprocessing.Pool(es.popsize)
while not es.stop():
X = es.ask()
es.tell(X, pool.map_async(cma.felli, X).get()) # use chunksize parameter as popsize/len(pool)?
Example with a data logger, lower bounds (at zero) and handling infeasible solutions:
>>> import cma
>>> import numpy as np
>>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5, {'bounds': [0, np.inf]})
>>> logger = cma.CMADataLogger().register(es)
>>> while not es.stop():
... fit, X = [], []
... while len(X) < es.popsize:
... curr_fit = np.NaN
... while curr_fit is np.NaN:
... x = es.ask(1)[0]
... curr_fit = cma.fcts.somenan(x, cma.fcts.elli) # might return np.NaN
... X.append(x)
... fit.append(curr_fit)
... es.tell(X, fit)
... logger.add()
... es.disp()
<output omitted>
>>>
>>> assert es.result()[1] < 1e-9
>>> assert es.result()[2] < 9000 # by internal termination
>>> logger.plot() # plot data
>>> cma.show()
>>> print(' *** if execution stalls close the figure window to continue (and check out ipython --pylab) ***')
Example implementing restarts with increasing popsize (IPOP), output is not displayed:
>>> import cma, numpy as np
>>>
>>> # restart with increasing population size (IPOP)
>>> bestever = cma.BestSolution()
>>> for lam in 10 * 2**np.arange(7): # 10, 20, 40, 80, ..., 10 * 2**6
... es = cma.CMAEvolutionStrategy('6 - 8 * np.random.rand(9)', # 9-D
... 5, # initial std sigma0
... {'popsize': lam,
... 'verb_append': bestever.evalsall}) # pass options
... logger = cma.CMADataLogger().register(es, append=bestever.evalsall)
... while not es.stop():
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rastrigin(x) for x in X] # evaluate each solution
... es.tell(X, fit) # besides for termination only the ranking in fit is used
...
... # display some output
... logger.add() # add a "data point" to the log, writing in files
... es.disp() # uses option verb_disp with default 100
...
... print('termination:', es.stop())
... cma.pprint(es.best.__dict__)
...
... bestever.update(es.best)
...
... # show a plot
... logger.plot();
... if bestever.f < 1e-8: # global optimum was hit
... break
<output omitted>
>>> assert es.result()[1] < 1e-8
On the Rastrigin function, usually after five restarts the global optimum
is located.
The final example shows how to resume:
>>> import cma, pickle
>>>
>>> es = cma.CMAEvolutionStrategy(12 * [0.1], # a new instance, 12-D
... 0.5) # initial std sigma0
>>> logger = cma.CMADataLogger().register(es)
>>> es.optimize(cma.fcts.rosen, logger, iterations=100)
>>> logger.plot()
>>> pickle.dump(es, open('saved-cma-object.pkl', 'wb'))
>>> print('saved')
>>> del es, logger # let's start fresh
>>>
>>> es = pickle.load(open('saved-cma-object.pkl', 'rb'))
>>> print('resumed')
>>> logger = cma.CMADataLogger(es.opts['verb_filenameprefix'] # use same name
... ).register(es, True) # True: append to old log data
>>> es.optimize(cma.fcts.rosen, logger, verb_disp=200)
>>> assert es.result()[2] < 15000
>>> cma.pprint(es.result())
>>> logger.plot()
Missing Features
================
Option ``randn`` to pass a random number generator.
:See: `fmin()`, `Options`, `plot()`, `ask()`, `tell()`, `ask_and_eval()`
"""
# __all__ = () # TODO this would be the interface
#____________________________________________________________
@property  # read-only attribute decorator for a method
def popsize(self):
    """number of samples returned by default from `ask()`
    """
    return self.sp.popsize  # delegated to the strategy parameters object
# this is not compatible with python2.5:
# @popsize.setter
# def popsize(self, p):
# """popsize cannot be set (this might change in future)
# """
# raise _Error("popsize cannot be changed (this might change in future)")
#____________________________________________________________
#____________________________________________________________
def stop(self, check=True):
    """return a dictionary with the satisfied termination conditions.

    With ``check==False`` the termination conditions are not
    (re-)evaluated and the returned status might not reflect the
    current situation.
    """
    if check and self.countiter > 0:
        cb = self.opts['termination_callback']
        # a string-valued option means "no callback"; call only real callables
        if cb and cb != str(cb):
            self.callbackstop = cb(self)
    # update the stop-dictionary (only when checking) and return it
    return self.stopdict(self if check else None)
#____________________________________________________________
#____________________________________________________________
def __init__(self, x0, sigma0, inopts=None):
    """see class `CMAEvolutionStrategy`

    Arguments
    ---------
    `x0` -- initial solution (1-D sequence or an expression string)
    `sigma0` -- initial standard deviation (scalar, or an expression
        string like ``'1./N'``)
    `inopts` -- options dictionary, see class `Options`
    """
    # BUGFIX: the mutable default argument ``inopts={}`` is replaced by a
    # None sentinel; a shared default dict could leak state across instances
    if inopts is None:
        inopts = {}
    self.inputargs = dict(locals())  # for the record
    del self.inputargs['self']  # otherwise the instance self has a cyclic reference
    self.inopts = inopts
    opts = Options(inopts).complement()  # Options() == fmin([],[]) == defaultOptions()
    if opts['noise_handling'] and eval(opts['noise_handling']):
        raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin')
    if opts['restarts'] and eval(opts['restarts']):
        raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin')
    if x0 == str(x0):  # x0 may be given as an expression string
        x0 = eval(x0)
    self.mean = array(x0)  # should not have column or row, is just 1-D
    if self.mean.ndim == 2:
        print('WARNING: input x0 should be a list or 1-D array, trying to flatten ' +
              str(self.mean.shape) + '-array')
        if self.mean.shape[0] == 1:
            self.mean = self.mean[0]
        elif self.mean.shape[1] == 1:
            self.mean = array([x[0] for x in self.mean])
    if self.mean.ndim != 1:
        raise _Error('x0 must be 1-D array')
    if len(self.mean) <= 1:
        raise _Error('optimization in 1-D is not supported (code was never tested)')
    self.N = self.mean.shape[0]
    N = self.N
    self.mean.resize(N)  # 1-D array, not really necessary?!
    self.x0 = self.mean
    self.mean = self.x0.copy()  # goes to initialize
    self.sigma0 = sigma0
    if isinstance(sigma0, str):  # TODO: no real need here (do rather in fmin)
        self.sigma0 = eval(sigma0)  # like '1./N' or 'np.random.rand(1)[0]+1e-2'
    if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
        raise _Error('input argument sigma0 must be (or evaluate to) a scalar')
    self.sigma = self.sigma0  # goes to initialize

    # extract/expand options
    opts.evalall(locals())  # using only N
    self.opts = opts
    self.randn = opts['randn']
    self.gp = GenoPheno(N, opts['scaling_of_variables'], opts['typical_x'],
                        opts['bounds'], opts['fixed_variables'], opts['transformation'])
    self.boundPenalty = BoundPenalty(self.gp.bounds)
    s = self.gp.geno(self.mean)
    self.mean = self.gp.geno(self.mean, bounds=self.gp.bounds)
    self.N = len(self.mean)
    N = self.N
    if (self.mean != s).any():
        print('WARNING: initial solution is out of the domain boundaries:')
        print(' x0 = ' + str(self.inputargs['x0']))
        print(' ldom = ' + str(self.gp.bounds[0]))
        print(' udom = ' + str(self.gp.bounds[1]))
    # np.NaN was removed in NumPy 2.0; np.nan is the same value
    self.fmean = np.nan  # TODO name should change? prints nan (OK with matlab&octave)
    self.fmean_noise_free = 0.  # for output only

    self.sp = CMAParameters(N, opts)
    self.sp0 = self.sp  # looks useless, as it is not a copy

    # initialization of state variables
    self.countiter = 0
    self.countevals = max((0, opts['verb_append'])) if type(opts['verb_append']) is not bool else 0
    self.ps = np.zeros(N)
    self.pc = np.zeros(N)
    stds = np.ones(N)
    self.sigma_vec = np.ones(N) if np.isfinite(self.sp.dampsvec) else 1
    if np.all(self.opts['CMA_teststds']):  # also 0 would not make sense
        stds = self.opts['CMA_teststds']
        if np.size(stds) != N:
            raise _Error('CMA_teststds option must have dimension = ' + str(N))
    if self.opts['CMA_diagonal']:  # is True or > 0
        # linear time and space complexity
        self.B = array(1)  # works fine with np.dot(self.B, anything) and self.B.T
        self.C = stds**2  # TODO: remove this!?
        self.dC = self.C
    else:
        self.B = np.eye(N)  # identity(N)
        # prevent equal eigenvals, a hack for np.linalg:
        self.C = np.diag(stds**2 * exp(1e-6 * (np.random.rand(N) - 0.5)))
        self.dC = np.diag(self.C)
        self.Zneg = np.zeros((N, N))
    self.D = stds
    self.flgtelldone = True
    self.itereigenupdated = self.countiter
    self.noiseS = 0  # noise "signal"
    self.hsiglist = []

    if not opts['seed']:
        np.random.seed()
        six_decimals = (time.time() - 1e6 * (time.time() // 1e6))
        opts['seed'] = 1e5 * np.random.rand() + six_decimals + 1e5 * (time.time() % 1)
    opts['seed'] = int(opts['seed'])
    np.random.seed(opts['seed'])

    self.sent_solutions = SolutionDict()
    self.best = BestSolution()

    out = {}  # TODO: obsolete, replaced by method results()?
    out['best'] = self.best
    # out['hsigcount'] = 0
    out['termination'] = {}
    self.out = out

    self.const = BlancClass()
    self.const.chiN = N**0.5 * (1 - 1. / (4. * N) + 1. / (21. * N**2))  # expectation of norm(randn(N,1))

    # attribute for stopping criteria in function stop
    self.stopdict = CMAStopDict()
    self.callbackstop = 0
    self.fit = BlancClass()
    self.fit.fit = []  # not really necessary
    self.fit.hist = []  # short history of best
    self.fit.histbest = []  # long history of best
    self.fit.histmedian = []  # long history of median
    self.more_to_write = []  # needed when writing takes place before setting

    # say hello
    if opts['verb_disp'] > 0:
        sweighted = '_w' if self.sp.mu > 1 else ''
        smirr = 'mirr%d' % (self.sp.lam_mirr) if self.sp.lam_mirr else ''
        print('(%d' % (self.sp.mu) + sweighted + ',%d' % (self.sp.popsize) + smirr + ')-CMA-ES' +
              ' (mu_w=%2.1f,w_1=%d%%)' % (self.sp.mueff, int(100 * self.sp.weights[0])) +
              ' in dimension %d (seed=%d, %s)' % (N, opts['seed'], time.asctime()))
        if opts['CMA_diagonal'] and self.sp.CMA_on:
            s = ''
            if opts['CMA_diagonal'] is not True:
                s = ' for '
                if opts['CMA_diagonal'] < np.inf:
                    s += str(int(opts['CMA_diagonal']))
                else:
                    s += str(np.floor(opts['CMA_diagonal']))
                s += ' iterations'
                s += ' (1/ccov=' + str(round(1. / (self.sp.c1 + self.sp.cmu))) + ')'
            print(' Covariance matrix is diagonal' + s)
#____________________________________________________________
#____________________________________________________________
def ask(self, number=None, xmean=None, sigma_fac=1):
    """get new candidate solutions, sampled from a multi-variate
    normal distribution and transformed to f-representation
    (phenotype) to be evaluated.

    Arguments
    ---------
    `number` -- number of returned solutions, by default the
        population size ``popsize`` (AKA ``lambda``)
    `xmean` -- distribution mean
    `sigma_fac` -- multiplier for the internal sample width
        (standard deviation)

    Return
    ------
    A list of N-dimensional candidate solutions to be evaluated.

    :See: `ask_and_eval`, `ask_geno`, `tell`
    """
    pop_geno = self.ask_geno(number, xmean, sigma_fac)
    # transform genotypes to phenotypes (f-representation);
    # costs roughly 20% CPU overhead with N,lambda=20,200
    pop_pheno = [self.gp.pheno(x, copy=True, bounds=self.gp.bounds) for x in pop_geno]
    if not self.gp.isidentity or use_sent_solutions:  # costs 25% in CPU performance with N,lambda=20,200
        # archive returned solutions, first clean up the archive
        if self.countiter % 30 / self.popsize**0.5 < 1:
            self.sent_solutions.truncate(0, self.countiter - 1 - 3 * self.N / self.popsize**0.5)
        # insert solutions, keyed by their phenotype
        # BUGFIX (py3 compatibility): was `xrange`, which is Python-2-only
        for i in range(len(pop_geno)):
            self.sent_solutions[pop_pheno[i]] = {'geno': pop_geno[i],
                                                 'pheno': pop_pheno[i],
                                                 'iteration': self.countiter}
    return pop_pheno
#____________________________________________________________
#____________________________________________________________
def ask_geno(self, number=None, xmean=None, sigma_fac=1):
    """get new candidate solutions in genotype, sampled from a
    multi-variate normal distribution.

    Arguments
    ---------
    `number` -- number of returned solutions, by default the
        population size `popsize` (AKA lambda)
    `xmean` -- distribution mean
    `sigma_fac` -- multiplier for internal sample width (standard
        deviation)

    `ask_geno` returns a list of N-dimensional candidate solutions
    in genotype representation and is called by `ask`.

    :See: `ask`, `ask_and_eval`
    """
    if number is None or number < 1:
        number = self.sp.popsize
    if xmean is None:
        xmean = self.mean
    if self.countiter == 0:
        # BUGFIX (py3 compatibility): time.clock() was removed in
        # Python 3.8; process_time() is the closest replacement
        # (CPU time on POSIX, as time.clock used to report there)
        self.tic = time.process_time()
        self.elapsed_time = ElapsedTime()
    if self.opts['CMA_AII']:
        if self.countiter == 0:
            self.aii = AII(self.x0, self.sigma0)
        self.flgtelldone = False
        pop = self.aii.ask(number)
        return pop
    sigma = sigma_fac * self.sigma
    # lazily update the eigendecomposition of C used for sampling;
    # the wait time amortizes the O(N^3) decomposition cost
    if self.sp.CMA_on and (
            (self.opts['updatecovwait'] is None and
             self.countiter >=
             self.itereigenupdated + 1. / (self.sp.c1 + self.sp.cmu) / self.N / 10
             ) or
            (self.opts['updatecovwait'] is not None and
             self.countiter > self.itereigenupdated + self.opts['updatecovwait']
             )):
        self.updateBD()
    # sample distribution
    if self.flgtelldone:  # could be done in tell()!?
        self.flgtelldone = False
        self.ary = []
    # each row of arz is one standard-normal sample
    arz = self.randn((number, self.N))
    # (two permanently disabled `if 11 < 3:` experiments were removed
    #  here: principal-axis-only mutation and sample-length
    #  normalization to chiN; both were dead code)
    if number == self.sp.popsize:
        self.arz = arz  # is never used
    # transform: scale by D, rotate by B, apply coordinate-wise sigma_vec
    self.ary = self.sigma_vec * np.dot(self.B, (self.D * arz).T).T
    pop = xmean + sigma * self.ary
    self.evaluations_per_f_value = 1
    return pop
def get_mirror(self, x):
    """return ``pheno(self.mean - (geno(x) - self.mean))``.

    TODO: this implementation is yet experimental.

    Selectively mirrored sampling improves to a moderate extend but
    overadditively with active CMA for quite understandable reasons.

    Optimal numbers of mirrors are surprisingly small: 1,2,3 for
    maxlam=7,13,20, while 3,6,10 are the respective maximal possible
    mirrors that must be clearly suboptimal.
    """
    try:
        # retrieve the genotype from the archive of sent solutions;
        # repair or boundary handling is not taken into account
        dx = self.sent_solutions[x]['geno'] - self.mean
    # BUGFIX: was a bare `except:`, which also swallows
    # KeyboardInterrupt and SystemExit
    except Exception:
        print('WARNING: use of geno is deprecated')
        dx = self.gp.geno(x, copy=True) - self.mean
    # rescale dx to the length of a fresh N-dimensional Gaussian sample
    dx *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobisNorm(dx)
    x = self.mean - dx
    y = self.gp.pheno(x, bounds=self.gp.bounds)
    if not self.gp.isidentity or use_sent_solutions:  # costs 25% in CPU performance with N,lambda=20,200
        self.sent_solutions[y] = {'geno': x,
                                  'pheno': y,
                                  'iteration': self.countiter}
    return y
def mirror_penalized(self, f_values, idx):
    """obsolete and subject to removal (TODO),
    return modified f-values such that for each mirror one becomes worst.

    This function is useless when selective mirroring is applied with no
    more than (lambda-mu)/2 solutions.

    Mirrors are leading and trailing values in ``f_values``: index
    ``idx[i]`` is assumed to mirror index ``-1-i``; the worse of each
    pair is penalized in place by the largest absolute f-value.
    """
    assert len(f_values) >= 2 * len(idx)
    m = np.max(np.abs(f_values))
    # BUGFIX: was `for i in len(idx)` which raises
    # ``TypeError: 'int' object is not iterable``
    for i in range(len(idx)):
        if f_values[idx[i]] > f_values[-1 - i]:
            f_values[idx[i]] += m
        else:
            f_values[-1 - i] += m
    return f_values
def mirror_idx_cov(self, f_values, idx1):  # will most likely be removed
    """obsolete and subject to removal (TODO),
    return indices for negative ("active") update of the covariance matrix,
    assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
    the corresponding mirrored values.

    Computes the index of the worse solution sorted by the f-value of the
    better solution.

    TODO: when the actual mirror was rejected, it is better
    to return idx1 instead of idx2.

    Remark: this function might not be necessary at all: if the worst
    solution is the best mirrored, the covariance matrix updates cancel
    (cave: weights and learning rates), which seems desirable. If the
    mirror is bad, a strong negative update is made, again desirable.
    The fitness--step-length correlation is in part addressed by using
    flat weights.
    """
    idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
    f = []
    # BUGFIX (py3 compatibility): was `xrange`, which is Python-2-only
    for i in range(len(idx1)):
        f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
        # idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
    return idx2[np.argsort(f)][-1::-1]
#____________________________________________________________
#____________________________________________________________
#
def ask_and_eval(self, func, args=(), number=None, xmean=None, sigma_fac=1,
                 evaluations=1, aggregation=np.median):
    """sample `number` solutions and evaluate them on `func`, where
    each solution ``s`` is resampled until its f-value is valid, that
    is, neither `None` nor NaN.

    Arguments
    ---------
    `func` -- objective function
    `args` -- additional parameters for `func`
    `number` -- number of solutions to be sampled, by default
        population size ``popsize`` (AKA lambda)
    `xmean` -- mean for sampling the solutions, by default ``self.mean``
    `sigma_fac` -- multiplier for sampling width (standard deviation),
        for example to get a small perturbation of solution `xmean`
    `evaluations` -- number of evaluations for each sampled solution
    `aggregation` -- function that aggregates `evaluations` values to
        a single value

    Return
    ------
    ``(X, fit)``, where
        X -- list of solutions
        fit -- list of respective function values

    Details
    -------
    While ``func(x)`` returns `NaN` or `None`, a new solution is
    sampled. The argument to `func` can be freely modified within
    `func`. Depending on the ``CMA_mirrors`` option, some solutions
    are not sampled independently but as mirrors of other bad
    solutions.

    Example
    -------
    >>> import cma
    >>> x0, sigma0 = 8*[10], 1  # 8-D
    >>> es = cma.CMAEvolutionStrategy(x0, sigma0)
    >>> while not es.stop():
    ...     X, fit = es.ask_and_eval(cma.fcts.elli)  # handles NaN with resampling
    ...     es.tell(X, fit)  # pass on fitness values
    ...     es.disp(20)  # print every 20-th iteration
    """
    def invalid(f):
        # BUGFIX/robustness: the former test ``f in (np.NaN, None)``
        # relies on object identity for NaN and therefore misses NaN
        # values computed by `func`; ``f != f`` is the canonical NaN test
        return f is None or f != f

    # initialize
    popsize = self.sp.popsize
    if number is not None:
        popsize = number
    selective_mirroring = True
    nmirrors = self.sp.lam_mirr
    if popsize != self.sp.popsize:
        nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
        # TODO: now selective mirroring might be impaired
    assert nmirrors <= popsize // 2
    self.mirrors_idx = np.arange(nmirrors)  # might never be used
    self.mirrors_rejected_idx = []  # might never be used
    if xmean is None:
        xmean = self.mean
    # do the work
    fit = []
    X_first = self.ask(popsize)
    X = []
    for k in range(int(popsize)):  # BUGFIX (py3 compatibility): was `xrange`
        nreject = -1
        f = np.nan  # np.NaN was removed in NumPy 2.0
        while invalid(f):  # rejection sampling
            nreject += 1
            if k < popsize - nmirrors or nreject:
                if nreject:
                    x = self.ask(1, xmean, sigma_fac)[0]
                else:
                    x = X_first.pop(0)
            else:  # mirrored sample
                if k == popsize - nmirrors and selective_mirroring:
                    # mirror only the worst solutions sampled so far
                    self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1]
                x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
            if nreject == 1 and k >= popsize - nmirrors:
                self.mirrors_rejected_idx.append(k)
            # (a permanently disabled `if 11 < 3:` constraints-handling
            #  experiment, dead code, was removed here)
            f = func(x, *args)
            if not invalid(f) and evaluations > 1:
                f = aggregation([f] + [func(x, *args) for _i in range(int(evaluations - 1))])
            # BUGFIX: was `nreject + 1 % 1000 == 0`, which parses as
            # `nreject + (1 % 1000) == 0` and can never be true here
            if (nreject + 1) % 1000 == 0:
                print(' %d solutions rejected (f-value NaN or None) at iteration %d' %
                      (nreject, self.countiter))
        fit.append(f)
        X.append(x)
    self.evaluations_per_f_value = int(evaluations)
    return X, fit
def ask_and_eval3(self, func, args=(), number=None, xmean=None, sigma_fac=1,
                  evaluations=1, aggregation=np.median, processes=1):
    """sample `number` solutions and evaluate them on `func` in
    parallel with a ``multiprocessing`` pool of `processes` workers.

    Variant of `ask_and_eval`: all solutions are generated first and
    evaluated with ``pool.map``; solutions whose f-value is NaN or
    `None` are then resampled and re-evaluated in batches, for at most
    100 rounds.

    Arguments
    ---------
    `func` -- objective function; must be picklable for multiprocessing
        NOTE(review): `args` is ignored in the parallel path, since
        ``pool.map(func, X)`` calls ``func(x)`` only -- confirm intended
    `number` -- number of solutions, by default ``popsize`` (AKA lambda)
    `xmean` -- mean for sampling the solutions, by default ``self.mean``
    `sigma_fac` -- multiplier for sampling width (standard deviation)
    `evaluations`, `aggregation` -- accepted for interface compatibility
        with `ask_and_eval`; re-evaluation/aggregation is NOT performed
        here (see the disabled code near the end)
    `processes` -- number of worker processes in the pool

    Return
    ------
    ``(X, fit)`` -- list of solutions and list of respective f-values

    NOTE(review): the Pool is created on every call and never closed or
    joined -- a resource leak; ``with Pool(processes) as pool:`` would
    fix it. Also, `xrange` below is Python-2-only.
    """
    # initialize (same bookkeeping as in ask_and_eval)
    popsize = self.sp.popsize
    if number is not None:
        popsize = number
    selective_mirroring = True
    nmirrors = self.sp.lam_mirr
    if popsize != self.sp.popsize:
        nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
        # TODO: now selective mirroring might be impaired
    assert nmirrors <= popsize // 2
    self.mirrors_idx = np.arange(nmirrors)  # might never be used
    self.mirrors_rejected_idx = []  # might never be used
    if xmean is None:
        xmean = self.mean
    # do the work
    fit = []  # or np.NaN * np.empty(number)
    X_first = self.ask(popsize)
    X = []
    for k in xrange(int(popsize)):
        nreject = -1
        # nreject is always 0 here: this variant has no per-solution
        # rejection loop (invalid values are handled after pool.map)
        nreject += 1
        if k < popsize - nmirrors or nreject:
            if nreject:
                x = self.ask(1, xmean, sigma_fac)[0]
            else:
                x = X_first.pop(0)
        else:  # mirrored sample
            if k == popsize - nmirrors and selective_mirroring:
                self.mirrors_idx = np.argsort(fit)[-1:-1-nmirrors:-1]
            x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
        if nreject == 1 and k >= popsize - nmirrors:
            self.mirrors_rejected_idx.append(k)
        # constraints handling test hardwired (disabled: `11 < 3` is False)
        if 11 < 3 and self.opts['vv'] and nreject < 2:  # trying out negative C-update as constraints handling
            if not hasattr(self, 'constraints_paths'):
                k = 1
                self.constraints_paths = [np.zeros(self.N) for _i in xrange(k)]
            Izero = np.zeros([self.N, self.N])
            for i in xrange(self.N):
                print("Other stuff")
                if x[i] < 0:
                    Izero[i][i] = 1
                    self.C -= self.opts['vv'] * Izero
                    Izero[i][i] = 0
            if 1 < 3 and sum([ (9 + i + 1) * x[i] for i in xrange(self.N)]) > 50e3:
                self.constraints_paths[0] = 0.9 * self.constraints_paths[0] + 0.1 * (x - self.mean) / self.sigma
                self.C -= (self.opts['vv'] / self.N) * np.outer(self.constraints_paths[0], self.constraints_paths[0])
        X.append(x)
    # NOTE(review): pool leaks -- never closed/joined (see docstring)
    pool = Pool(processes)
    # print('********* about to map CMA')
    fit = pool.map(func, X)
    # print('********* done mapping CMA, got %d evaluations out of %d' % (len(fit), popsize))
    # print(fit)
    X_tmp=[]
    attempts=0
    # resample and re-evaluate any NaN/None f-values, at most 100 rounds
    while (len([i for i in range(len(fit)) if fit[i] in (np.NaN, None) or math.isnan(fit[i])]) > 0) and attempts < 100:  # some evaluations are wonky
        attempts=attempts+1
        indecies = [i for i in range(len(fit)) if fit[i] in (np.NaN, None) or math.isnan(fit[i])]
        for x in indecies:  # generate completely new samples
            X_tmp.append(self.ask(1, xmean, sigma_fac)[0])
        new_fit = pool.map(func, X_tmp)
        # append good evaluations from old stuff and new evaluations
        X = [X[i] for i in [n for n in range(len(X)) if fit[n] not in (np.NaN, None) and not math.isnan(fit[n])]] + X_tmp
        fit = [fit[i] for i in [n for n in range(len(X)) if fit[n] not in (np.NaN, None) and not math.isnan(fit[n])]] + new_fit
        X_tmp=[]
    if attempts > 99:
        print(' %d solutions rejected (f-value NaN or None) at iteration %d' %
              (attempts, self.countiter))
    """
    # If bad things happen it is not supported for the moment
    if f not in (np.NaN, None) and evaluations > 1:
        f = aggregation([f] + [func(x, *args) for _i in xrange(int(evaluations-1))])
    if nreject + 1 % 1000 == 0:
        print(' %d solutions rejected (f-value NaN or None) at iteration %d' %
              (nreject, self.countiter))
    """
    self.evaluations_per_f_value = int(evaluations)
    return X, fit
#____________________________________________________________
#____________________________________________________________
#
def ask_and_eval2(self, func, args=(), number=None, xmean=None, sigma_fac=1,
evaluations=1, aggregation=np.median, threads=1):
"""samples `number` solutions and evaluates them on `func`, where
each solution `s` is resampled until ``func(s) not in (numpy.NaN, None)``.
Arguments
---------
`func`
objective function
`args`
additional parameters for `func`
`number`
number of solutions to be sampled, by default
population size ``popsize`` (AKA lambda)
`xmean`
mean for sampling the solutions, by default ``self.mean``.
`sigma_fac`
multiplier for sampling width, standard deviation, for example
to get a small perturbation of solution `xmean`
`evaluations`
number of evaluations for each sampled solution
`aggregation`
function that aggregates `evaluations` values to
as single value.
Return
------
``(X, fit)``, where
X -- list of solutions
fit -- list of respective function values
Details
-------
When ``func(x)`` returns `NaN` or `None` a new solution is sampled until
``func(x) not in (numpy.NaN, None)``. The argument to `func` can be
freely modified within `func`.
Depending on the ``CMA_mirrors`` option, some solutions are not sampled
independently but as mirrors of other bad solutions. This is a simple
derandomization that can save 10-30% of the evaluations in particular
with small populations, for example on the cigar function.
Example
-------
>>> import cma
>>> x0, sigma0 = 8*[10], 1 # 8-D
>>> es = cma.CMAEvolutionStrategy(x0, sigma0)
>>> while not es.stop():
... X, fit = es.ask_and_eval(cma.fcts.elli) # handles NaN with resampling
... es.tell(X, fit) # pass on fitness values
... es.disp(20) # print every 20-th iteration
>>> print('terminated on ' + str(es.stop()))
<output omitted>
A single iteration step can be expressed in one line, such that
an entire optimization after initialization becomes
::
while not es.stop():
es.tell(*es.ask_and_eval(cma.fcts.elli))
"""
# initialize
popsize = self.sp.popsize
if number is not None:
popsize = number
selective_mirroring = True
nmirrors = self.sp.lam_mirr
if popsize != self.sp.popsize:
nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
# TODO: now selective mirroring might be impaired
assert nmirrors <= popsize // 2
self.mirrors_idx = np.arange(nmirrors) # might never be used
self.mirrors_rejected_idx = [] # might never be used
if xmean is None:
xmean = self.mean
# do the work
fit = [] # or np.NaN * np.empty(number)
X = []
popsize = int(popsize)
pool = Pool(1)
print('********* about to map CMA')
X, fit = pool.map(self._eval, range(int(popsize)), [nmirrors]*popsize, [evaluations]*popsize, [args]*popsize)
"""for k in range(int(popsize)):
x, f = self._eval(func, k, nmirrors, evaluations, args)
fit.append(f)
X.append(x)"""
self.evaluations_per_f_value = int(evaluations)
return X, fit
    def _eval(self, func, k, nmirrors, evaluations, args=()):
        """Sample (with rejection) and evaluate the ``k``-th solution of the
        current population; returns the tuple ``(x, f)``.

        NOTE(review): this helper appears to be extracted from the body of
        an ``ask_and_eval``-style method, but it still references several
        names that are neither parameters nor assigned here (``xmean``,
        ``sigma_fac``, ``selective_mirroring``, ``fit``, ``X``,
        ``aggregation``).  The resampling, mirroring and aggregation paths
        therefore raise NameError if reached -- TODO confirm the intended
        source of these values before relying on those paths.
        """
        print("made it inside _eval")
        popsize = self.sp.popsize
        X_first = self.ask(popsize)
        nreject = -1
        f = np.NaN
        # NOTE(review): ``f in (np.NaN, None)`` relies on identity with the
        # ``np.NaN`` singleton; a NaN *computed* by ``func`` is a different
        # object and compares unequal, which exits the loop -- presumably
        # intended, as the same idiom is used elsewhere in this file.
        while f in (np.NaN, None):  # rejection sampling
            nreject += 1
            if k < popsize - nmirrors or nreject:
                if nreject:
                    # NOTE(review): ``xmean`` and ``sigma_fac`` are unbound
                    # here (see docstring) -- NameError on first rejection.
                    x = self.ask(1, xmean, sigma_fac)[0]
                else:
                    x = X_first.pop(0)
            else:  # mirrored sample
                # NOTE(review): ``selective_mirroring``, ``fit`` and ``X``
                # are unbound here (see docstring).
                if k == popsize - nmirrors and selective_mirroring:
                    self.mirrors_idx = np.argsort(fit)[-1:-1-nmirrors:-1]
                x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
            if nreject == 1 and k >= popsize - nmirrors:
                self.mirrors_rejected_idx.append(k)
            # contraints handling test hardwired ccccccccccc
            # ``11 < 3`` is always False: deliberately disabled experimental code
            if 11 < 3 and self.opts['vv'] and nreject < 2:  # trying out negative C-update as constraints handling
                if not hasattr(self, 'constraints_paths'):
                    k = 1
                    self.constraints_paths = [np.zeros(self.N) for _i in xrange(k)]
                Izero = np.zeros([self.N, self.N])
                for i in xrange(self.N):
                    if x[i] < 0:
                        Izero[i][i] = 1
                        self.C -= self.opts['vv'] * Izero
                        Izero[i][i] = 0
                if 1 < 3 and sum([ (9 + i + 1) * x[i] for i in xrange(self.N)]) > 50e3:
                    self.constraints_paths[0] = 0.9 * self.constraints_paths[0] + 0.1 * (x - self.mean) / self.sigma
                    self.C -= (self.opts['vv'] / self.N) * np.outer(self.constraints_paths[0], self.constraints_paths[0])
            print("running a function eval")
            f = func(x, *args)
            if f not in (np.NaN, None) and evaluations > 1:
                # ``aggregation`` is unbound here (see docstring note)
                f = aggregation([f] + [func(x, *args) for _i in xrange(int(evaluations-1))])
            # NOTE(review): ``%`` binds tighter than ``+``, so this condition
            # reads ``nreject + (1 % 1000) == 0``, i.e. ``nreject == -1``, and
            # the message is never printed after a rejection; most likely
            # ``(nreject + 1) % 1000 == 0`` was meant.
            if nreject + 1 % 1000 == 0:
                print(' %d solutions rejected (f-value NaN or None) at iteration %d' %
                      (nreject, self.countiter))
        return x, f
#____________________________________________________________
    def tell(self, solutions, function_values, check_points=None, copy=False):
        """pass objective function values to prepare for next
        iteration. This core procedure of the CMA-ES algorithm updates
        all state variables, in particular the two evolution paths, the
        distribution mean, the covariance matrix and a step-size.

        Arguments
        ---------
        `solutions`
            list or array of candidate solution points (of
            type `numpy.ndarray`), most presumably before
            delivered by method `ask()` or `ask_and_eval()`.
        `function_values`
            list or array of objective function values
            corresponding to the respective points. Beside for termination
            decisions, only the ranking of values in `function_values`
            is used.
        `check_points`
            If ``check_points is None``, only solutions that are not generated
            by `ask()` are possibly clipped (recommended). ``False`` does not clip
            any solution (not recommended).
            If ``True``, clips solutions that realize long steps (i.e. also
            those that are unlikely to be generated with `ask()`). `check_points`
            can be a list of indices to be checked in solutions.
        `copy`
            ``solutions`` can be modified in this routine, if ``copy is False``

        Details
        -------
        `tell()` updates the parameters of the multivariate
        normal search distribution, namely covariance matrix and
        step-size and updates also the attributes `countiter` and
        `countevals`. To check the points for consistency is quadratic
        in the dimension (like sampling points).

        Bugs
        ----
        The effect of changing the solutions delivered by `ask()` depends on whether
        boundary handling is applied. With boundary handling, modifications are
        disregarded. This is necessary to apply the default boundary handling that
        uses unrepaired solutions but might change in future.

        Example
        -------
        ::

            import cma
            func = cma.fcts.elli  # choose objective function
            es = cma.CMAEvolutionStrategy(cma.np.random.rand(10), 1)
            while not es.stop():
                X = es.ask()
                es.tell(X, [func(x) for x in X])
            es.result()  # where the result can be found

        :See: class `CMAEvolutionStrategy`, `ask()`, `ask_and_eval()`, `fmin()`
        """
        #____________________________________________________________
        # TODO: consider an input argument that flags injected trust-worthy solutions (which means
        #       that they can be treated "absolut" rather than "relative")
        # NOTE(review): ``array``, ``dot``, ``sqrt``, ``exp``, ``xrange`` and
        # the flag ``use_sent_solutions`` are free names expected to be
        # provided at module level (e.g. ``from numpy import array, dot,
        # sqrt, exp``) -- not visible in this chunk, TODO confirm.
        # NOTE: ``11 < 3`` is always False -- the idiom marks deliberately
        # disabled experimental branches throughout this method.
        if self.flgtelldone:
            raise _Error('tell should only be called once per iteration')
        lam = len(solutions)
        if lam != array(function_values).shape[0]:
            raise _Error('for each candidate solution '
                        + 'a function value must be provided')
        if lam + self.sp.lam_mirr < 3:
            raise _Error('population size ' + str(lam) + ' is too small when option CMA_mirrors * popsize < 0.5')
        if not np.isscalar(function_values[0]):
            if np.isscalar(function_values[0][0]):
                if self.countiter <= 1:
                    print('WARNING: function values are not a list of scalars (further warnings are suppressed)')
                function_values = [val[0] for val in function_values]
            else:
                raise _Error('objective function values must be a list of scalars')
        ### prepare
        N = self.N
        sp = self.sp
        if 11 < 3 and lam != sp.popsize:  # turned off, because mu should stay constant, still not desastrous
            print('WARNING: population size has changed, recomputing parameters')
            self.sp.set(self.opts, lam)  # not really tested
        if lam < sp.mu:  # rather decrease cmean instead of having mu > lambda//2
            raise _Error('not enough solutions passed to function tell (mu>lambda)')
        self.countiter += 1  # >= 1 now
        self.countevals += sp.popsize * self.evaluations_per_f_value
        self.best.update(solutions, self.sent_solutions, function_values, self.countevals)
        flgseparable = self.opts['CMA_diagonal'] is True \
                       or self.countiter <= self.opts['CMA_diagonal']
        if not flgseparable and len(self.C.shape) == 1:  # C was diagonal ie 1-D
            # enter non-separable phase (no easy return from here)
            self.B = np.eye(N)  # identity(N)
            self.C = np.diag(self.C)
            idx = np.argsort(self.D)
            self.D = self.D[idx]
            self.B = self.B[:,idx]
            self.Zneg = np.zeros((N, N))
        ### manage fitness
        fit = self.fit  # make short cut
        # CPU for N,lam=20,200: this takes 10s vs 7s
        fit.bndpen = self.boundPenalty.update(function_values, self)(solutions, self.sent_solutions, self.gp)
        # for testing:
        # fit.bndpen = self.boundPenalty.update(function_values, self)([s.unrepaired for s in solutions])
        fit.idx = np.argsort(array(fit.bndpen) + array(function_values))
        fit.fit = array(function_values, copy=False)[fit.idx]
        # update output data TODO: this is obsolete!? However: need communicate current best x-value?
        # old: out['recent_x'] = self.gp.pheno(pop[0])
        self.out['recent_x'] = array(solutions[fit.idx[0]])  # TODO: change in a data structure(?) and use current as identify
        self.out['recent_f'] = fit.fit[0]
        # fitness histories
        fit.hist.insert(0, fit.fit[0])
        # if len(self.fit.histbest) < 120+30*N/sp.popsize or  # does not help, as tablet in the beginning is the critical counter-case
        if ((self.countiter % 5) == 0):  # 20 percent of 1e5 gen.
            fit.histbest.insert(0, fit.fit[0])
            fit.histmedian.insert(0, np.median(fit.fit) if len(fit.fit) < 21
                                    else fit.fit[self.popsize // 2])
        if len(fit.histbest) > 2e4:  # 10 + 30*N/sp.popsize:
            fit.histbest.pop()
            fit.histmedian.pop()
        if len(fit.hist) > 10 + 30*N/sp.popsize:
            fit.hist.pop()
        # delegate the whole update to the AII variant, if enabled
        if self.opts['CMA_AII']:
            self.aii.tell(solutions, function_values)
            self.flgtelldone = True
            # for output:
            self.mean = self.aii.mean
            self.dC = self.aii.sigmai**2
            self.sigma = self.aii.sigma
            self.D = 1e-11 + (self.aii.r**2)**0.5
            self.more_to_write += [self.aii.sigma_r]
            return
        # TODO: clean up inconsistency when an unrepaired solution is available and used
        pop = []  # create pop from input argument solutions
        for s in solutions:  # use phenotype before Solution.repair()
            # NOTE(review): ``use_sent_solutions`` is a module-level flag
            # defined outside this chunk -- TODO confirm.
            if use_sent_solutions:
                x = self.sent_solutions.pop(s, None)  # 12.7s vs 11.3s with N,lambda=20,200
                if x is not None:
                    pop.append(x['geno'])
                    # TODO: keep additional infos or don't pop s from sent_solutions in the first place
                else:
                    # print 'WARNING: solution not found in ``self.sent_solutions`` (is expected for injected solutions)'
                    pop.append(self.gp.geno(s, copy=copy))  # cannot recover the original genotype with boundary handling
                    if check_points in (None, True, 1):
                        self.repair_genotype(pop[-1])  # necessary if pop[-1] was changed or injected by the user.
            else:  # TODO: to be removed?
                # print 'WARNING: ``geno`` mapping depreciated'
                pop.append(self.gp.geno(s, copy=copy))
                if check_points in (None, True, 1):
                    self.repair_genotype(pop[-1])  # necessary or not?
                # print 'repaired'
        mold = self.mean
        sigma_fac = 1
        # check and normalize each x - m
        # check_points is a flag (None is default: check non-known solutions) or an index list
        # should also a number possible (first check_points points)?
        if check_points not in (None, False, 0, [], ()):  # useful in case of injected solutions and/or adaptive encoding, however is automatic with use_sent_solutions
            try:
                if len(check_points):
                    idx = check_points
            except:
                idx = xrange(sp.popsize)
            for k in idx:
                self.repair_genotype(pop[k])
        # sort pop
        if type(pop) is not array:  # only arrays can be multiple indexed
            pop = array(pop, copy=False)
        pop = pop[fit.idx]
        if self.opts['CMA_elitist'] and self.best.f < fit.fit[0]:
            if self.best.x_geno is not None:
                xp = [self.best.x_geno]
                # xp = [self.best.xdict['geno']]
                # xp = [self.gp.geno(self.best.x[:])]  # TODO: remove
                # print self.mahalanobisNorm(xp[0]-self.mean)
                self.clip_or_fit_solutions(xp, [0])
                pop = array([xp[0]] + list(pop))
            else:
                print('genotype for elitist not found')
        # compute new mean
        self.mean = mold + self.sp.cmean * \
                    (sum(sp.weights * pop[0:sp.mu].T, 1) - mold)
        # check Delta m (this is not default, but could become at some point)
        # CAVE: upper_length=sqrt(2)+2 is too restrictive, test upper_length = sqrt(2*N) thoroughly.
        # simple test case injecting self.mean:
        # self.mean = 1e-4 * self.sigma * np.random.randn(N)
        if 11 < 3 and self.opts['vv'] and check_points:  # TODO: check_points might be an index-list
            cmean = self.sp.cmean / min(1, (sqrt(self.opts['vv']*N)+2) / ( # abuse of cmean
                (sqrt(self.sp.mueff) / self.sp.cmean) *
                self.mahalanobisNorm(self.mean - mold)))
        else:
            cmean = self.sp.cmean
        if 11 < 3:  # plot length of mean - mold
            self.more_to_write += [sqrt(sp.mueff) *
                sum(((1./self.D) * dot(self.B.T, self.mean - mold))**2)**0.5 /
                self.sigma / sqrt(N) / cmean]
        # get learning rate constants
        cc, c1, cmu = sp.cc, sp.c1, sp.cmu
        if flgseparable:
            cc, c1, cmu = sp.cc_sep, sp.c1_sep, sp.cmu_sep
        # now the real work can start
        # evolution paths
        self.ps = (1-sp.cs) * self.ps + \
                  (sqrt(sp.cs*(2-sp.cs)*sp.mueff) / self.sigma / cmean) * \
                  dot(self.B, (1./self.D) * dot(self.B.T, (self.mean - mold) / self.sigma_vec))
        # "hsig", correction with self.countiter seems not necessary, also pc starts with zero
        # hsig is a boolean: it gates the rank-one update of pc below and,
        # via c1a, compensates the variance loss when the update is stalled
        hsig = sum(self.ps**2) / (1-(1-sp.cs)**(2*self.countiter)) / self.N < 2 + 4./(N+1)
        if 11 < 3:
            # hsig = 1
            # sp.cc = 4 / (N + 4)
            # sp.cs = 4 / (N + 4)
            # sp.cc = 1
            # sp.damps = 2  #
            # sp.CMA_on = False
            # c1 = 0  # 2 / ((N + 1.3)**2 + 0 * sp.mu)  # 1 / N**2
            # cmu = min([1 - c1, cmu])
            if self.countiter == 1:
                print('parameters modified')
        # hsig = sum(self.ps**2) / self.N < 2 + 4./(N+1)
        # adjust missing variance due to hsig, in 4-D with damps=1e99 and sig0 small
        #       hsig leads to premature convergence of C otherwise
        # hsiga = (1-hsig**2) * c1 * cc * (2-cc)  # to be removed in future
        c1a = c1 - (1-hsig**2) * c1 * cc * (2-cc)  # adjust for variance loss
        if 11 < 3:  # diagnostic data
            self.out['hsigcount'] += 1 - hsig
            if not hsig:
                self.hsiglist.append(self.countiter)
        if 11 < 3:  # diagnostic message
            if not hsig:
                print(str(self.countiter) + ': hsig-stall')
        if 11 < 3:  # for testing purpose
            hsig = 1  # TODO:
            #       put correction term, but how?
            if self.countiter == 1:
                print('hsig=1')
        self.pc = (1-cc) * self.pc + \
                  hsig * (sqrt(cc*(2-cc)*sp.mueff) / self.sigma / cmean) * \
                  (self.mean - mold) / self.sigma_vec
        # covariance matrix adaptation/udpate
        if sp.CMA_on:
            # assert sp.c1 + sp.cmu < sp.mueff / N  # ??
            assert c1 + cmu <= 1
            # default full matrix case
            if not flgseparable:
                Z = (pop[0:sp.mu] - mold) / (self.sigma * self.sigma_vec)
                Z = dot((cmu * sp.weights) * Z.T, Z)  # learning rate integrated
                if self.sp.neg.cmuexp:
                    tmp = (pop[-sp.neg.mu:] - mold) / (self.sigma * self.sigma_vec)
                    self.Zneg *= 1 - self.sp.neg.cmuexp  # for some reason necessary?
                    self.Zneg += dot(sp.neg.weights * tmp.T, tmp) - self.C
                    # self.update_exponential(dot(sp.neg.weights * tmp.T, tmp) - 1 * self.C, -1*self.sp.neg.cmuexp)
                if 11 < 3:  # ?3 to 5 times slower??
                    Z = np.zeros((N,N))
                    for k in xrange(sp.mu):
                        z = (pop[k]-mold)
                        Z += np.outer((cmu * sp.weights[k] / (self.sigma * self.sigma_vec)**2) * z, z)
                self.C *= 1 - c1a - cmu
                self.C += np.outer(c1 * self.pc, self.pc) + Z
                self.dC = np.diag(self.C)  # for output and termination checking
            else:  # separable/diagonal linear case
                assert(c1+cmu <= 1)
                Z = np.zeros(N)
                for k in xrange(sp.mu):
                    z = (pop[k]-mold) / (self.sigma * self.sigma_vec)  # TODO see above
                    Z += sp.weights[k] * z * z  # is 1-D
                self.C = (1-c1a-cmu) * self.C + c1 * self.pc * self.pc + cmu * Z
                # TODO: self.C *= exp(cmuneg * (N - dot(sp.neg.weights, **2)
                self.dC = self.C
                self.D = sqrt(self.C)  # C is a 1-D array
                self.itereigenupdated = self.countiter
        # idx = self.mirror_idx_cov()  # take half of mirrored vectors for negative update
        # qqqqqqqqqqq
        if 1 < 3 and np.isfinite(sp.dampsvec):
            if self.countiter == 1:
                print("WARNING: CMA_dampsvec option is experimental")
            sp.dampsvec *= np.exp(sp.dampsvec_fading/self.N)
            # TODO: rank-lambda update: *= (1 + sum(z[z>1]**2-1) * exp(sum(z[z<1]**2-1))
            self.sigma_vec *= np.exp((sp.cs/sp.dampsvec/2) * (self.ps**2 - 1))
            # self.sigma_vec *= np.exp((sp.cs/sp.dampsvec) * (abs(self.ps) - (2/np.pi)**0.5))
            self.more_to_write += [exp(np.mean((self.ps**2 - 1)**2))]
            # TODO: rank-mu update
        # step-size adaptation, adapt sigma
        # (CSA; the exponent is capped at 1 by the min(...))
        if 1 < 3:  #
            self.sigma *= sigma_fac * \
                np.exp((min((1, (sp.cs/sp.damps) *
                    (sqrt(sum(self.ps**2))/self.const.chiN - 1)))))
        else:
            self.sigma *= sigma_fac * \
                np.exp((min((1000, (sp.cs/sp.damps/2) *
                    (sum(self.ps**2)/N - 1)))))
        if 11 < 3:
            # derandomized MSR = natural gradient descent using mean(z**2) instead of mu*mean(z)**2
            lengths = array([sum(z**2)**0.5 for z in self.arz[fit.idx[:self.sp.mu]]])
            # print lengths[0::int(self.sp.mu/5)]
            self.sigma *= np.exp(self.sp.mueff**0.5 * dot(self.sp.weights, lengths / self.const.chiN - 1))**(2/(N+1))
        if 11 < 3 and self.opts['vv']:
            if self.countiter < 2:
                print('constant sigma applied')
                print(self.opts['vv'])  # N=10,lam=10: 0.8 is optimal
            self.sigma = self.opts['vv'] * self.sp.mueff * sum(self.mean**2)**0.5 / N
        if self.sigma * min(self.dC)**0.5 < self.opts['minstd']:
            self.sigma = self.opts['minstd'] / min(self.dC)**0.5
        # g = self.countiter
        # N = self.N
        # NOTE(review): eval() of the 'mindx' option string trusts the
        # options source -- option values are documented (see Options) to be
        # strings evaluated with N and popsize in scope.
        mindx = eval(self.opts['mindx']) if type(self.opts['mindx']) == type('') else self.opts['mindx']
        if self.sigma * min(self.D) < mindx:  # TODO: sigma_vec is missing here
            self.sigma = mindx / min(self.D)
        if self.sigma > 1e9 * self.sigma0:
            alpha = self.sigma / max(self.D)
            self.multiplyC(alpha)
            self.sigma /= alpha**0.5
            self.opts['tolupsigma'] /= alpha**0.5  # to be compared with sigma
        # TODO increase sigma in case of a plateau?
        # Uncertainty noise measurement is done on an upper level
        # output, has moved up, e.g. as part of fmin, TODO to be removed
        if 11 < 3 and self.opts['verb_log'] > 0 and (self.countiter < 4 or
                self.countiter % self.opts['verb_log'] == 0):
            # this assumes that two logger with the same name access the same data!
            CMADataLogger(self.opts['verb_filenameprefix']).register(self, append=True).add()
            # self.writeOutput(solutions[fit.idx[0]])
        self.flgtelldone = True
    # end tell()
def result(self):
"""return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)``"""
# TODO: how about xcurrent?
return self.best.get() + (
self.countevals, self.countiter, self.gp.pheno(self.mean), self.gp.scales * self.sigma * self.sigma_vec * self.dC**0.5)
def clip_or_fit_solutions(self, pop, idx):
"""make sure that solutions fit to sample distribution, this interface will probably change.
In particular the frequency of long vectors appearing in pop[idx] - self.mean is limited.
"""
for k in idx:
self.repair_genotype(pop[k])
def repair_genotype(self, x):
"""make sure that solutions fit to sample distribution, this interface will probably change.
In particular the frequency of x - self.mean being long is limited.
"""
mold = self.mean
if 1 < 3: # hard clip at upper_length
upper_length = self.N**0.5 + 2 * self.N / (self.N+2) # should become an Option, but how? e.g. [0, 2, 2]
fac = self.mahalanobisNorm(x - mold) / upper_length
if fac > 1:
x = (x - mold) / fac + mold
# print self.countiter, k, fac, self.mahalanobisNorm(pop[k] - mold)
# adapt also sigma: which are the trust-worthy/injected solutions?
elif 11 < 3:
return exp(np.tanh(((upper_length*fac)**2/self.N-1)/2) / 2)
else:
if 'checktail' not in self.__dict__: # hasattr(self, 'checktail')
raise NotImplementedError
# from check_tail_smooth import CheckTail # for the time being
# self.checktail = CheckTail()
# print('untested feature checktail is on')
fac = self.checktail.addchin(self.mahalanobisNorm(x - mold))
if fac < 1:
x = fac * (x - mold) + mold
return 1.0 # sigma_fac, not in use
#____________________________________________________________
#____________________________________________________________
#
    def updateBD(self):
        """update internal variables for sampling the distribution with the
        current covariance matrix C. This method is O(N^3), if C is not diagonal.

        Refreshes the eigendecomposition ``C = B * diag(D**2) * B'``
        (after this call ``self.D`` holds the square roots of the
        eigenvalues, see the final ``self.D **= 0.5``).
        """
        # itereigenupdated is always up-to-date in the diagonal case
        # just double check here
        if self.itereigenupdated == self.countiter:
            return
        # fold the accumulated negative update into C first (exponential
        # update keeps C positive definite)
        if self.sp.neg.cmuexp:  # cave:
            self.update_exponential(self.Zneg, -self.sp.neg.cmuexp)
            # self.C += self.Zpos  # pos update after Zneg would be the correct update, overall:
            # self.C = self.Zpos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Zneg*Csi) * Cs
            self.Zneg = np.zeros((self.N, self.N))
        # NOTE(review): ``is not 1`` is an identity test against the int
        # literal 1; it only short-circuits when sigma_vec was assigned that
        # exact int -- the second clause covers the general (array) case.
        if self.sigma_vec is not 1 and not np.all(self.sigma_vec == 1):
            # absorb the coordinate-wise scaling into C
            self.C = dot(dot(np.diag(self.sigma_vec), self.C), np.diag(self.sigma_vec))
            self.sigma_vec[:] = 1
        if self.opts['CMA_const_trace'] in (True, 1, 2):  # normalize trace of C
            if self.opts['CMA_const_trace'] == 2:
                s = np.exp(np.mean(np.log(self.dC)))  # geometric mean
            else:
                s = np.mean(self.dC)  # arithmetic mean
            self.C /= s
            self.dC /= s
        # enforce symmetry before the eigendecomposition
        self.C = (self.C + self.C.T) / 2
        # self.C = np.triu(self.C) + np.triu(self.C,1).T  # should work as well
        # self.D, self.B = eigh(self.C)  # hermitian, ie symmetric C is assumed
        if type(self.opts['CMA_eigenmethod']) == type(1):
            print('WARNING: option CMA_eigenmethod should be a function, not an integer')
            if self.opts['CMA_eigenmethod'] == -1:
                # pygsl
                # easy to install (well, in Windows install gsl binaries first,
                # set system path to respective libgsl-0.dll (or cp the dll to
                # python\DLLS ?), in unzipped pygsl edit
                # gsl_dist/gsl_site_example.py into gsl_dist/gsl_site.py
                # and run "python setup.py build" and "python setup.py install"
                # in MINGW32)
                if 1 < 3:  # import pygsl on the fly
                    try:
                        import pygsl.eigen.eigenvectors  # TODO efficient enough?
                    except ImportError:
                        print('WARNING: could not find pygsl.eigen module, either install pygsl \n' +
                              '  or set option CMA_eigenmethod=1 (is much slower), option set to 1')
                        self.opts['CMA_eigenmethod'] = 0  # use 0 if 1 is too slow
                    # NOTE(review): when the import fails, ``pygsl`` is
                    # unbound and the next line raises NameError instead of
                    # falling through to the method just selected above --
                    # TODO confirm intended fallback.
                    self.D, self.B = pygsl.eigen.eigenvectors(self.C)
            elif self.opts['CMA_eigenmethod'] == 0:
                # TODO: thoroughly test np.linalg.eigh
                #       numpy.linalg.eig crashes in 200-D
                #       and EVecs with same EVals are not orthogonal
                self.D, self.B = np.linalg.eigh(self.C)  # self.B[i] is a row and not an eigenvector
            else:  # is overall two;ten times slower in 10;20-D
                self.D, self.B = Misc.eig(self.C)  # def eig, see below
        else:
            self.D, self.B = self.opts['CMA_eigenmethod'](self.C)
        # assert(sum(self.D-DD) < 1e-6)
        # assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6)
        # assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6)
        # sort eigenvalues ascending and reorder eigenvectors to match
        idx = np.argsort(self.D)
        self.D = self.D[idx]
        self.B = self.B[:,idx]  # self.B[i] is a row, columns self.B[:,i] are eigenvectors
        # assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:]))
        # qqqqqqqqqq
        if 11 < 3:  # limit condition number to 1e13 (disabled: 11 < 3 is always False)
            climit = 1e13  # cave: conditioncov termination is 1e14
            if self.D[-1] / self.D[0] > climit:
                self.D += self.D[-1] / climit
                for i in xrange(self.N):
                    self.C[i][i] += self.D[-1] / climit
        if 11 < 3 and any(abs(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0)) > 1e-6):
            print('B is not orthogonal')
            print(self.D)
            print(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0))
        else:
            # is O(N^3)
            # assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11)
            pass
        self.D **= 0.5
        self.itereigenupdated = self.countiter
def multiplyC(self, alpha):
"""multiply C with a scalar and update all related internal variables (dC, D,...)"""
self.C *= alpha
if self.dC is not self.C:
self.dC *= alpha
self.D *= alpha**0.5
def update_exponential(self, Z, eta, BDpair=None):
"""exponential update of C that guarantees positive definiteness, that is,
instead of the assignment ``C = C + eta * Z``,
C gets C**.5 * exp(eta * C**-.5 * Z * C**-.5) * C**.5.
Parameter Z should have expectation zero, e.g. sum(w[i] * z[i] * z[i].T) - C
if E z z.T = C.
This function conducts two eigendecompositions, assuming that
B and D are not up to date, unless `BDpair` is given. Given BDpair,
B is the eigensystem and D is the vector of sqrt(eigenvalues), one
eigendecomposition is omitted.
Reference: Glasmachers et al 2010, Exponential Natural Evolution Strategies
"""
if eta == 0:
return
if BDpair:
B, D = BDpair
else:
D, B = self.opts['CMA_eigenmethod'](self.C)
D **= 0.5
Csi = dot(B, (B / D).T)
Cs = dot(B, (B * D).T)
self.C = dot(Cs, dot(Mh.expms(eta * dot(Csi, dot(Z, Csi)), self.opts['CMA_eigenmethod']), Cs))
#____________________________________________________________
#____________________________________________________________
#
def _updateCholesky(self, A, Ainv, p, alpha, beta):
"""not yet implemented"""
# BD is A, p is A*Normal(0,I) distributed
# input is assumed to be numpy arrays
# Ainv is needed to compute the evolution path
# this is a stump and is not tested
raise _Error("not yet implemented")
# prepare
alpha = float(alpha)
beta = float(beta)
y = np.dot(Ainv, p)
y_sum = sum(y**2)
# compute scalars
tmp = sqrt(1 + beta * y_sum / alpha)
fac = (sqrt(alpha) / sum(y**2)) * (tmp - 1)
facinv = (1. / (sqrt(alpha) * sum(y**2))) * (1 - 1. / tmp)
# update matrices
A *= sqrt(alpha)
A += np.outer(fac * p, y)
Ainv /= sqrt(alpha)
Ainv -= np.outer(facinv * y, np.dot(y.T, Ainv))
#____________________________________________________________
#____________________________________________________________
def feedForResume(self, X, function_values):
"""Given all "previous" candidate solutions and their respective
function values, the state of a `CMAEvolutionStrategy` object
can be reconstructed from this history. This is the purpose of
function `feedForResume`.
Arguments
---------
`X`
(all) solution points in chronological order, phenotypic
representation. The number of points must be a multiple
of popsize.
`function_values`
respective objective function values
Details
-------
`feedForResume` can be called repeatedly with only parts of
the history. The part must have the length of a multiple
of the population size.
`feedForResume` feeds the history in popsize-chunks into `tell`.
The state of the random number generator might not be
reconstructed, but this would be only relevant for the future.
Example
-------
::
import cma
# prepare
(x0, sigma0) = ... # initial values from previous trial
X = ... # list of generated solutions from a previous trial
f = ... # respective list of f-values
# resume
es = cma.CMAEvolutionStrategy(x0, sigma0)
es.feedForResume(X, f)
# continue with func as objective function
while not es.stop():
X = es.ask()
es.tell(X, [func(x) for x in X])
Credits to Dirk Bueche and Fabrice Marchal for the feeding idea.
:See: class `CMAEvolutionStrategy` for a simple dump/load to resume
"""
if self.countiter > 0:
print('WARNING: feed should generally be used with a new object instance')
if len(X) != len(function_values):
raise _Error('number of solutions ' + str(len(X)) +
' and number function values ' +
str(len(function_values))+' must not differ')
popsize = self.sp.popsize
if (len(X) % popsize) != 0:
raise _Error('number of solutions ' + str(len(X)) +
' must be a multiple of popsize (lambda) ' +
str(popsize))
for i in xrange(len(X) / popsize):
# feed in chunks of size popsize
self.ask() # a fake ask, mainly for a conditioned calling of updateBD
# and secondary to get possibly the same random state
self.tell(X[i*popsize:(i+1)*popsize], function_values[i*popsize:(i+1)*popsize])
#____________________________________________________________
#____________________________________________________________
def readProperties(self):
"""reads dynamic parameters from property file (not implemented)
"""
print('not yet implemented')
#____________________________________________________________
#____________________________________________________________
def mahalanobisNorm(self, dx):
"""
compute the Mahalanobis norm that is induced by the adapted covariance
matrix C times sigma**2.
Argument
--------
A *genotype* difference `dx`.
Example
-------
>>> import cma, numpy
>>> es = cma.CMAEvolutionStrategy(numpy.ones(10), 1)
>>> xx = numpy.random.randn(2, 10)
>>> d = es.mahalanobisNorm(es.gp.geno(xx[0]-xx[1]))
`d` is the distance "in" the true sample distribution,
sampled points have a typical distance of ``sqrt(2*es.N)``,
where `N` is the dimension. In the example, `d` is the
Euclidean distance, because C = I and sigma = 1.
"""
return sqrt(sum((self.D**-1 * np.dot(self.B.T, dx))**2)) / self.sigma
#____________________________________________________________
#____________________________________________________________
#
def timesCroot(self, mat):
"""return C**0.5 times mat, where mat can be a vector or matrix.
Not functional, because _Croot=C**0.5 is never computed (should be in updateBD)
"""
print("WARNING: timesCroot is not yet tested")
if self.opts['CMA_diagonal'] is True \
or self.countiter <= self.opts['CMA_diagonal']:
res = (self._Croot * mat.T).T
else:
res = np.dot(self._Croot, mat)
return res
def divCroot(self, mat):
"""return C**-1/2 times mat, where mat can be a vector or matrix"""
print("WARNING: divCroot is not yet tested")
if self.opts['CMA_diagonal'] is True \
or self.countiter <= self.opts['CMA_diagonal']:
res = (self._Crootinv * mat.T).T
else:
res = np.dot(self._Crootinv, mat)
return res
#____________________________________________________________
#____________________________________________________________
def disp_annotation(self):
"""print annotation for `disp()`"""
print('Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec')
sys.stdout.flush()
#____________________________________________________________
#____________________________________________________________
def disp(self, modulo=None): # TODO: rather assign opt['verb_disp'] as default?
"""prints some infos according to `disp_annotation()`, if
``iteration_counter % modulo == 0``
"""
if modulo is None:
modulo = self.opts['verb_disp']
# console display
if modulo:
if (self.countiter-1) % (10 * modulo) < 1:
self.disp_annotation()
if self.countiter > 0 and (self.stop() or self.countiter < 4
or self.countiter % modulo < 1):
if self.opts['verb_time']:
toc = self.elapsed_time()
stime = str(int(toc//60))+':'+str(round(toc%60,1))
else:
stime = ''
print(' '.join((repr(self.countiter).rjust(5),
repr(self.countevals).rjust(7),
'%.15e' % (min(self.fit.fit)),
'%4.1e' % (self.D.max()/self.D.min()),
'%6.2e' % self.sigma,
'%6.0e' % (self.sigma * sqrt(min(self.dC))),
'%6.0e' % (self.sigma * sqrt(max(self.dC))),
stime)))
# if self.countiter < 4:
sys.stdout.flush()
class Options(dict):
"""``Options()`` returns a dictionary with the available options and their
default values for function fmin and for class CMAEvolutionStrategy.
``Options(opts)`` returns the subset of recognized options in dict(opts).
``Options('pop')`` returns a subset of recognized options that contain
'pop' in there keyword name, value or description.
Option values can be "written" in a string and, when passed to fmin
or CMAEvolutionStrategy, are evaluated using "N" and "popsize" as
known values for dimension and population size (sample size, number
of new solutions per iteration). All default option values are such
a string.
Details
-------
All Options are originally defined via the input arguments of
`fmin()`.
Options starting with ``tol`` are termination "tolerances".
For `tolstagnation`, the median over the first and the second half
of at least `tolstagnation` iterations are compared for both, the
per-iteration best and per-iteration median function value.
Some options are, as mentioned (`restarts`,...), only used with `fmin`.
Example
-------
::
import cma
cma.Options('tol')
is a shortcut for cma.Options().match('tol') that returns all options
that contain 'tol' in their name or description.
:See: `fmin`(), `CMAEvolutionStrategy`, `CMAParameters`
"""
# @classmethod # self is the class, not the instance
# @property
# def default(self):
# """returns all options with defaults"""
# return fmin([],[])
    @staticmethod
    def defaults():
        """return a dictionary with default option values and description,
        calls `fmin([], [])`"""
        # `fmin` is defined elsewhere in this module; called with empty
        # arguments it returns the canonical option dictionary
        return fmin([], [])
@staticmethod
def versatileOptions():
"""return list of options that can be changed at any time (not only be
initialized), however the list might not be entirely up to date. The
string ' #v ' in the default value indicates a 'versatile' option
that can be changed any time.
"""
return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))
def __init__(self, s=None, unchecked=False):
"""return an `Options` instance, either with the default options,
if ``s is None``, or with all options whose name or description
contains `s`, if `s` is a string (case is disregarded),
or with entries from dictionary `s` as options, not complemented
with default options or settings
Returns: see above.
"""
# if not Options.defaults: # this is different from self.defaults!!!
# Options.defaults = fmin([],[])
if s is None:
super(Options, self).__init__(Options.defaults())
# self = Options.defaults()
elif type(s) is str:
super(Options, self).__init__(Options().match(s))
# we could return here
else:
super(Options, self).__init__(s)
if not unchecked:
for key in list(self.keys()):
if key not in Options.defaults():
print('Warning in cma.Options.__init__(): invalid key ``' + str(key) + '`` popped')
self.pop(key)
# self.evaluated = False # would become an option entry
def init(self, dict_or_str, val=None, warn=True):
"""initialize one or several options.
Arguments
---------
`dict_or_str`
a dictionary if ``val is None``, otherwise a key.
If `val` is provided `dict_or_str` must be a valid key.
`val`
value for key
Details
-------
Only known keys are accepted. Known keys are in `Options.defaults()`
"""
#dic = dict_or_key if val is None else {dict_or_key:val}
dic = dict_or_str
if val is not None:
dic = {dict_or_str:val}
for key, val in list(dic.items()):
if key not in Options.defaults():
# TODO: find a better solution?
if warn:
print('Warning in cma.Options.init(): key ' +
str(key) + ' ignored')
else:
self[key] = val
return self
def set(self, dic, val=None, warn=True):
"""set can assign versatile options from `Options.versatileOptions()`
with a new value, use `init()` for the others.
Arguments
---------
`dic`
either a dictionary or a key. In the latter
case, val must be provided
`val`
value for key
`warn`
bool, print a warning if the option cannot be changed
and is therefore omitted
This method will be most probably used with the ``opts`` attribute of
a `CMAEvolutionStrategy` instance.
"""
if val is not None: # dic is a key in this case
dic = {dic:val} # compose a dictionary
for key, val in list(dic.items()):
if key in Options.versatileOptions():
self[key] = val
elif warn:
print('Warning in cma.Options.set(): key ' + str(key) + ' ignored')
return self # to allow o = Options(o).set(new)
def complement(self):
"""add all missing options with their default values"""
for key in Options.defaults():
if key not in self:
self[key] = Options.defaults()[key]
return self
def settable(self):
"""return the subset of those options that are settable at any
time.
Settable options are in `versatileOptions()`, but the
list might be incomlete.
"""
return Options([i for i in list(self.items())
if i[0] in Options.versatileOptions()])
def __call__(self, key, default=None, loc=None):
"""evaluate and return the value of option `key` on the fly, or
returns those options whose name or description contains `key`,
case disregarded.
Details
-------
Keys that contain `filename` are not evaluated.
For ``loc==None``, `self` is used as environment
but this does not define `N`.
:See: `eval()`, `evalall()`
"""
try:
val = self[key]
except:
return self.match(key)
if loc is None:
loc = self # TODO: this hack is not so useful: popsize could be there, but N is missing
try:
if type(val) is str:
val = val.split('#')[0].strip() # remove comments
if type(val) == type('') and key.find('filename') < 0 and key.find('mindx') < 0:
val = eval(val, globals(), loc)
# invoke default
# TODO: val in ... fails with array type, because it is applied element wise!
# elif val in (None,(),[],{}) and default is not None:
elif val is None and default is not None:
val = eval(str(default), globals(), loc)
except:
pass # slighly optimistic: the previous is bug-free
return val
def eval(self, key, default=None, loc=None):
"""Evaluates and sets the specified option value in
environment `loc`. Many options need `N` to be defined in
`loc`, some need `popsize`.
Details
-------
Keys that contain 'filename' are not evaluated.
For `loc` is None, the self-dict is used as environment
:See: `evalall()`, `__call__`
"""
self[key] = self(key, default, loc)
return self[key]
def evalall(self, loc=None):
"""Evaluates all option values in environment `loc`.
:See: `eval()`
"""
# TODO: this needs rather the parameter N instead of loc
if 'N' in list(loc.keys()): # TODO: __init__ of CMA can be simplified
popsize = self('popsize', Options.defaults()['popsize'], loc)
for k in list(self.keys()):
self.eval(k, Options.defaults()[k],
{'N':loc['N'], 'popsize':popsize})
return self
def match(self, s=''):
"""return all options that match, in the name or the description,
with string `s`, case is disregarded.
Example: ``cma.Options().match('verb')`` returns the verbosity options.
"""
match = s.lower()
res = {}
for k in sorted(self):
s = str(k) + '=\'' + str(self[k]) + '\''
if match in s.lower():
res[k] = self[k]
return Options(res)
def pp(self):
pprint(self)
def printme(self, linebreak=80):
for i in sorted(Options.defaults().items()):
s = str(i[0]) + "='" + str(i[1]) + "'"
a = s.split(' ')
# print s in chunks
l = '' # start entire to the left
while a:
while a and len(l) + len(a[0]) < linebreak:
l += ' ' + a.pop(0)
print(l)
l = ' ' # tab for subsequent lines
#____________________________________________________________
#____________________________________________________________
class CMAParameters(object):
    """strategy parameters like population size and learning rates.

    Note:
    contrary to `Options`, `CMAParameters` is not (yet) part of the
    "user-interface" and subject to future changes (it might become
    a `collections.namedtuple`)

    Example
    -------
    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(20 * [0.1], 1)
    (6_w,12)-CMA-ES (mu_w=3.7,w_1=40%) in dimension 20 (seed=504519190) # the seed is "random" by default
    >>>
    >>> type(es.sp)  # sp contains the strategy parameters
    <class 'cma.CMAParameters'>
    >>>
    >>> es.sp.disp()
    {'CMA_on': True,
     'N': 20,
     'c1': 0.004181139918745593,
     'c1_sep': 0.034327992810300939,
     'cc': 0.17176721127681213,
     'cc_sep': 0.25259494835857677,
     'cmean': 1.0,
     'cmu': 0.0085149624979034746,
     'cmu_sep': 0.057796356229390715,
     'cs': 0.21434997799189287,
     'damps': 1.2143499779918929,
     'mu': 6,
     'mu_f': 6.0,
     'mueff': 3.7294589343030671,
     'popsize': 12,
     'rankmualpha': 0.3,
     'weights': array([ 0.40240294,  0.25338908,  0.16622156,  0.10437523,  0.05640348,
            0.01720771])}
    >>>
    >> es.sp == cma.CMAParameters(20, 12, cma.Options().evalall({'N': 20}))
    True

    :See: `Options`, `CMAEvolutionStrategy`
    """
    def __init__(self, N, opts, ccovfac=1, verbose=True):
        """Compute strategy parameters, mainly depending on
        dimension and population size, by calling `set`
        """
        self.N = N  # problem dimension
        if ccovfac == 1:
            ccovfac = opts['CMA_on']  # that's a hack
        self.set(opts, ccovfac=ccovfac, verbose=verbose)

    def set(self, opts, popsize=None, ccovfac=1, verbose=True):
        """Compute strategy parameters as a function
        of dimension and population size """
        alpha_cc = 1.0  # cc-correction for mueff, was zero before

        def cone(df, mu, N, alphacov=2.0):
            """rank one update learning rate, ``df`` is disregarded and obsolete, reduce alphacov on noisy problems, say to 0.5"""
            return alphacov / ((N + 1.3)**2 + mu)

        def cmu(df, mu, alphamu=0.0, alphacov=2.0):
            """rank mu learning rate, disregarding the constraint cmu <= 1 - cone"""
            # NOTE: `N` is read from the enclosing scope; it is assigned
            # below (N = sp.N) before this closure is called
            c = alphacov * (alphamu + mu - 2 + 1/mu) / ((N + 2)**2 + alphacov * mu / 2)
            # c = alphacov * (alphamu + mu - 2 + 1/mu) / (2 * (N + 2)**1.5 + alphacov * mu / 2)
            # print 'cmu =', c
            return c

        def conedf(df, mu, N):
            """used for computing separable learning rate"""
            return 1. / (df + 2.*sqrt(df) + float(mu)/N)

        def cmudf(df, mu, alphamu):
            """used for computing separable learning rate"""
            return (alphamu + mu - 2. + 1./mu) / (df + 4.*sqrt(df) + mu/2.)

        sp = self
        N = sp.N
        if popsize:
            opts.evalall({'N':N, 'popsize':popsize})
        else:
            popsize = opts.evalall({'N':N})['popsize']  # the default popsize is computed in Options()
        sp.popsize = popsize
        # lam_mirr: number of mirrored (antithetic) samples per iteration
        if opts['CMA_mirrors'] < 0.5:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)
        elif opts['CMA_mirrors'] > 1:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])
        else:
            sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29)  # 0.158650... * popsize is optimal
            # lam = arange(2,22)
            # mirr = 0.16 + 0.29/lam
            # print(lam); print([int(0.5 + l) for l in mirr*lam])
            # [ 2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21]
            # [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]
        sp.mu_f = sp.popsize / 2.0  # float value of mu
        if opts['CMA_mu'] is not None:
            sp.mu_f = opts['CMA_mu']
        sp.mu = int(sp.mu_f + 0.499999)  # round down for x.5
        # in principle we have mu_opt = popsize/2 + lam_mirr/2,
        # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
        if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:
            print("WARNING: pairwise selection is not implemented, therefore " +
                  " mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % (
                      sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))
        if sp.lam_mirr > sp.popsize // 2:
            raise _Error("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, " +
                         "theoretically optimal is 0.159")
        # log-linearly decreasing recombination weights for the mu best
        sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))
        if 11 < 3:  # equal recombination weights (disabled debug branch)
            sp.mu = sp.popsize // 4
            sp.weights = np.ones(sp.mu)
            print(sp.weights[:10])
        sp.weights /= sum(sp.weights)
        sp.mueff = 1 / sum(sp.weights**2)
        sp.cs = (sp.mueff + 2) / (N + sp.mueff + 3)
        # TODO: clean up (here the cumulation constant is shorter if sigma_vec is used)
        sp.dampsvec = opts['CMA_dampsvec_fac'] * (N + 2) if opts['CMA_dampsvec_fac'] else np.Inf
        sp.dampsvec_fading = opts['CMA_dampsvec_fade']
        if np.isfinite(sp.dampsvec):
            sp.cs = ((sp.mueff + 2) / (N + sp.mueff + 3))**0.5
        # sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)
        sp.cc = (4 + alpha_cc * sp.mueff / N) / (N + 4 + alpha_cc * 2 * sp.mueff / N)
        sp.cc_sep = (1 + 1/N + alpha_cc * sp.mueff / N) / (N**0.5 + 1/N + alpha_cc * 2 * sp.mueff / N)  # \not\gg\cc
        sp.rankmualpha = opts['CMA_rankmualpha']
        # sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)
        sp.c1 = ccovfac * min(1, sp.popsize/6) * cone((N**2 + N) / 2, sp.mueff, N)  # 2. / ((N+1.3)**2 + sp.mucov)
        sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)
        if 11 < 3:  # disabled debug branch
            sp.c1 = 0.
            print('c1 is zero')
        if opts['CMA_rankmu'] != 0:  # also empty
            sp.cmu = min(1 - sp.c1, ccovfac * cmu((N**2+N)/2, sp.mueff, sp.rankmualpha))
            sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))
        else:
            sp.cmu = sp.cmu_sep = 0
        sp.neg = BlancClass()  # parameters for active (negative) covariance update
        if opts['CMA_active']:
            # in principle we have mu_opt = popsize/2 + lam_mirr/2,
            # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
            sp.neg.mu_f = popsize - (popsize + sp.lam_mirr) / 2 if popsize > 2 else 1
            sp.neg.weights = log(sp.mu_f + 0.5) - log(1 + np.arange(sp.popsize - int(sp.neg.mu_f), sp.popsize))
            sp.neg.mu = len(sp.neg.weights)  # maybe never useful?
            sp.neg.weights /= sum(sp.neg.weights)
            sp.neg.mueff = 1 / sum(sp.neg.weights**2)
            sp.neg.cmuexp = opts['CMA_activefac'] * 0.25 * sp.neg.mueff / ((N+2)**1.5 + 2 * sp.neg.mueff)
            assert sp.neg.mu >= sp.lam_mirr  # not really necessary
            # sp.neg.minresidualvariance = 0.66 # not it use, keep at least 0.66 in all directions, small popsize is most critical
        else:
            sp.neg.cmuexp = 0
        sp.CMA_on = sp.c1 + sp.cmu > 0
        # print(sp.c1_sep / sp.cc_sep)
        if not opts['CMA_on'] and opts['CMA_on'] not in (None,[],(),''):
            sp.CMA_on = False
            # sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0
        # step-size damping, increased for large mirror fractions and mueff
        sp.damps = opts['CMA_dampfac'] * (0.5 +
                                          0.5 * min([1, (sp.lam_mirr/(0.159*sp.popsize) - 1)**2])**1 +
                                          2 * max([0, ((sp.mueff-1) / (N+1))**0.5 - 1]) + sp.cs
                                          )
        if 11 < 3:  # disabled debug branch
            # this is worse than damps = 1 + sp.cs for the (1,10000)-ES on 40D parabolic ridge
            sp.damps = 0.3 + 2 * max([sp.mueff/sp.popsize, ((sp.mueff-1)/(N+1))**0.5 - 1]) + sp.cs
        if 11 < 3:  # disabled debug branch
            # this does not work for lambda = 4*N^2 on the parabolic ridge
            sp.damps = opts['CMA_dampfac'] * (2 - 0*sp.lam_mirr/sp.popsize) * sp.mueff/sp.popsize + 0.3 + sp.cs  # nicer future setting
            print('damps =', sp.damps)
        if 11 < 3:  # disabled debug branch
            sp.damps = 10 * sp.damps  # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;
            # sp.damps = 20 # 1. + 20 * sp.cs**-1 # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;
            print('damps is %f' % (sp.damps))
        sp.cmean = float(opts['CMA_cmean'])
        # sp.kappa = 1 # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate
        # in larger dim it does, 15-D with defaults, kappa=8 factor 2
        if sp.cmean != 1:
            print('  cmean = %f' % (sp.cmean))
        if verbose:
            if not sp.CMA_on:
                print('covariance matrix adaptation turned off')
            if opts['CMA_mu'] != None:
                print('mu = %f' % (sp.mu_f))
        # return self # the constructor returns itself

    def disp(self):
        """pretty-print all computed strategy parameters"""
        pprint(self.__dict__)
#____________________________________________________________
#____________________________________________________________
class CMAStopDict(dict):
    """keep and update a termination condition dictionary, which is
    "usually" empty and returned by `CMAEvolutionStrategy.stop()`.

    Details
    -------
    This could be a nested class, but nested classes cannot be serialized.

    :See: `OOOptimizer.stop()`, `CMAEvolutionStrategy.stop()`
    """
    def __init__(self, d={}):
        # NOTE: mutable default `d={}` is only read here, never mutated
        update = (type(d) == CMAEvolutionStrategy)
        inherit = (type(d) == CMAStopDict)
        super(CMAStopDict, self).__init__({} if update else d)
        # NOTE(review): `_stoplist` is assigned here but `_addstop`/`_update`
        # below use the attribute `stoplist` (no underscore) — the two never
        # interact; confirm which one external code relies on
        self._stoplist = d._stoplist if inherit else []  # multiple entries
        self.lastiter = d.lastiter if inherit else 0  # probably not necessary
        if update:
            self._update(d)

    def __call__(self, es):
        """update the dictionary"""
        return self._update(es)

    def _addstop(self, key, cond, val=None):
        # record termination criterion `key` if `cond` is satisfied;
        # the stored value is the option's value when `key` is an option name
        if cond:
            self.stoplist.append(key)  # can have the same key twice
            if key in list(self.opts.keys()):
                val = self.opts[key]
            self[key] = val

    def _update(self, es):
        """Test termination criteria and update dictionary.
        """
        if es.countiter == self.lastiter:
            # already updated for this iteration; avoid recomputation
            if es.countiter == 0:
                self.__init__()
                return self
            try:
                if es == self.es:
                    return self
            except: # self.es not yet assigned
                pass
        self.lastiter = es.countiter
        self.es = es
        self.stoplist = []  # reset; filled by the _addstop calls below
        N = es.N
        opts = es.opts
        self.opts = opts  # a hack to get _addstop going
        # fitness: generic criterion, user defined w/o default
        self._addstop('ftarget',
                      es.best.f < opts['ftarget'])
        # maxiter, maxfevals: generic criteria
        self._addstop('maxfevals',
                      es.countevals - 1 >= opts['maxfevals'])
        self._addstop('maxiter',
                      es.countiter >= opts['maxiter'])
        # tolx, tolfacupx: generic criteria
        # tolfun, tolfunhist (CEC:tolfun includes hist)
        self._addstop('tolx',
                      all([es.sigma*xi < opts['tolx'] for xi in es.pc]) and \
                      all([es.sigma*xi < opts['tolx'] for xi in sqrt(es.dC)]))
        self._addstop('tolfacupx',
                      any([es.sigma * sig > es.sigma0 * opts['tolfacupx']
                           for sig in sqrt(es.dC)]))
        self._addstop('tolfun',
                      es.fit.fit[-1] - es.fit.fit[0] < opts['tolfun'] and \
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfun'])
        self._addstop('tolfunhist',
                      len(es.fit.hist) > 9 and \
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfunhist'])
        # worst seen false positive: table N=80,lam=80, getting worse for fevals=35e3 \approx 50 * N**1.5
        # but the median is not so much getting worse
        # / 5 reflects the sparsity of histbest/median
        # / 2 reflects the left and right part to be compared
        l = int(max(opts['tolstagnation'] / 5. / 2, len(es.fit.histbest) / 10));
        # TODO: why max(..., len(histbest)/10) ???
        # TODO: the problem in the beginning is only with best ==> ???
        if 11 < 3:  # disabled debug output
            print(es.countiter, (opts['tolstagnation'], es.countiter > N * (5 + 100 / es.popsize),
                                 len(es.fit.histbest) > 100,
                                 np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2*l]),
                                 np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2*l])))
        # equality should handle flat fitness
        self._addstop('tolstagnation', # leads sometimes early stop on ftablet, fcigtab, N>=50?
                      1 < 3 and opts['tolstagnation'] and es.countiter > N * (5 + 100 / es.popsize) and
                      len(es.fit.histbest) > 100 and 2*l < len(es.fit.histbest) and
                      np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2*l]) and
                      np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2*l]))
        # iiinteger: stagnation termination can prevent to find the optimum
        self._addstop('tolupsigma', opts['tolupsigma'] and
                      es.sigma / es.sigma0 / np.max(es.D) > opts['tolupsigma'])
        if 11 < 3 and 2*l < len(es.fit.histbest):  # TODO: this might go wrong, because the nb of written columns changes
            tmp = np.array((-np.median(es.fit.histmedian[:l]) + np.median(es.fit.histmedian[l:2*l]),
                            -np.median(es.fit.histbest[:l]) + np.median(es.fit.histbest[l:2*l])))
            es.more_to_write += [(10**t if t < 0 else t + 1) for t in tmp]  # the latter to get monotonicy
        if 1 < 3:
            # non-user defined, method specific
            # noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC:0.2sigma), conditioncov
            self._addstop('noeffectcoord',
                          any([es.mean[i] == es.mean[i] + 0.2*es.sigma*sqrt(es.dC[i])
                               for i in xrange(N)]))
            if opts['CMA_diagonal'] is not True and es.countiter > opts['CMA_diagonal']:
                # check one axis per iteration, cycling through all N axes
                i = es.countiter % N
                self._addstop('noeffectaxis',
                              sum(es.mean == es.mean + 0.1 * es.sigma * es.D[i] * es.B[:, i]) == N)
            self._addstop('conditioncov',
                          es.D[-1] > 1e7 * es.D[0], 1e14)  # TODO
            self._addstop('callback', es.callbackstop)  # termination_callback
        if len(self):
            self._addstop('flat fitness: please (re)consider how to compute the fitness more elaborate',
                          len(es.fit.hist) > 9 and \
                          max(es.fit.hist) == min(es.fit.hist))
        if 11 < 3 and opts['vv'] == 321:  # disabled experimental criterion
            self._addstop('||xmean||^2<ftarget', sum(es.mean**2) <= opts['ftarget'])
        return self
#_____________________________________________________________________
#_____________________________________________________________________
#
class BaseDataLogger2(DerivedDictBase):
    """"abstract" base class for a data logger that can be used with an `OOOptimizer`"""
    def register(self, optim):
        """abstract method, remember the optimizer `optim`; only needed when
        `add()` will be called without an `optim` argument"""
        self.optim = optim
    def add(self, optim=None, more_data=[]):
        """abstract method, record one "data point" taken from the state of
        `optim`; the `optim` argument may be omitted if it was
        `register()`-ed beforehand, so `add` can act like an event handler"""
        raise NotImplementedError()
    def disp(self):
        """display some data trace (not implemented)"""
        print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self)))
    def plot(self):
        """plot data (not implemented)"""
        print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self)))
    def data(self):
        """return logged data in a dictionary (not implemented)"""
        print('method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self)))
class BaseDataLogger(object):
    """"abstract" base class for a data logger that can be used with an `OOOptimizer`

    Subclasses are expected to override `add`, `disp`, `plot` and `data`.
    """
    def add(self, optim=None, more_data=None):
        """abstract method, add a "data point" from the state of `optim` into the
        logger, the argument `optim` can be omitted if it was `register()`-ed before,
        acts like an event handler

        `more_data`: optional sequence of additional values to record.
        FIX: default changed from the mutable ``[]`` to ``None`` (shared
        mutable default pitfall); behavior is unchanged, the base class
        only raises.
        """
        raise NotImplementedError()
    def register(self, optim):
        """abstract method, register an optimizer `optim`, only needed if `add()` is
        called without a value for the `optim` argument"""
        self.optim = optim
    def disp(self):
        """display some data trace (not implemented)"""
        print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self)))
    def plot(self):
        """plot data (not implemented)"""
        print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self)))
    def data(self):
        """return logged data in a dictionary (not implemented)"""
        print('method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self)))
#_____________________________________________________________________
#_____________________________________________________________________
#
class CMADataLogger(BaseDataLogger): # might become a dict at some point
"""data logger for class `CMAEvolutionStrategy`. The logger is
identified by its name prefix and writes or reads according
data files.
Examples
========
::
import cma
es = cma.CMAEvolutionStrategy(...)
data = cma.CMADataLogger().register(es)
while not es.stop():
...
data.add() # add can also take an argument
data.plot() # or a short cut can be used:
cma.plot() # plot data from logger with default name
data2 = cma.CMADataLogger(another_filename_prefix).load()
data2.plot()
data2.disp()
::
import cma
from pylab import *
res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0)
dat = res[-1] # the CMADataLogger
dat.load() # by "default" data are on disk
semilogy(dat.f[:,0], dat.f[:,5]) # plot f versus iteration, see file header
show()
Details
=======
After loading data, the logger has the attributes `xmean`, `xrecent`, `std`, `f`, and `D`,
corresponding to xmean, xrecentbest, stddev, fit, and axlen filename trails.
:See: `disp()`, `plot()`
"""
default_prefix = 'outcmaes'
# names = ('axlen','fit','stddev','xmean','xrecentbest')
# key_names_with_annotation = ('std', 'xmean', 'xrecent')
def __init__(self, name_prefix=default_prefix, modulo=1, append=False):
    """initialize logging of data from a `CMAEvolutionStrategy` instance,
    default modulo expands to 1 == log with each call
    """
    # fixed "schema": file-name trailers and the matching attribute names
    # that load() fills and plot()/disp() read
    self.file_names = ('axlen', 'fit', 'stddev', 'xmean', 'xrecentbest')
    self.key_names = ('D', 'f', 'std', 'xmean', 'xrecent')
    self.key_names_with_annotation = ('std', 'xmean', 'xrecent')
    self.modulo = modulo  # None is also acceptable here
    self.append = append
    self.counter = 0  # number of calls of add, should initial value depend on `append`?
    # resolve the effective prefix: fall back to the class default, and
    # accept a CMAEvolutionStrategy whose options carry the prefix
    if name_prefix:
        prefix = name_prefix
    else:
        prefix = CMADataLogger.default_prefix
    if type(prefix) == CMAEvolutionStrategy:
        prefix = prefix.opts.eval('verb_filenameprefix')
    self.name_prefix = prefix
    self.registered = False
def register(self, es, append=None, modulo=None):
    """register a `CMAEvolutionStrategy` instance for logging,
    ``append=True`` appends to previous data logged under the same name,
    by default previous data are overwritten.

    Returns `self` to allow chaining.
    """
    # FIX: use isinstance instead of an exact type comparison so that
    # subclasses of CMAEvolutionStrategy can be registered as well
    if not isinstance(es, CMAEvolutionStrategy):
        raise TypeError("only class CMAEvolutionStrategy can be registered for logging")
    self.es = es
    if append is not None:
        self.append = append
    if modulo is not None:
        self.modulo = modulo
    if not self.append and self.modulo != 0:
        self.initialize()  # write file headers
    self.registered = True
    return self
def initialize(self, modulo=None):
    """reset logger, overwrite original files, `modulo`: log only every modulo call

    Returns `self`.
    """
    if modulo is not None:
        self.modulo = modulo
    try:
        es = self.es  # must have been registered
    except AttributeError:
        # FIX: removed dead `pass` before the raise
        raise _Error('call register() before initialize()')
    self.counter = 0  # number of calls of add
    strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime())
    # write headers for the five output files; failure to open one file is
    # reported but does not abort initialization of the remaining files.
    # FIX: all writes use `with` now — the previous `finally: f.close()`
    # raised NameError (or closed a previously opened file) when open() failed
    fn = self.name_prefix + 'fit.dat'
    try:
        with open(fn, 'w') as f:
            f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' +
                    'bestever, best, median, worst objective function value, ' +
                    'further objective values of best", ' +
                    strseedtime +
                    # strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do
                    '\n')
    except (IOError, OSError):
        print('could not open file ' + fn)
    fn = self.name_prefix + 'axlen.dat'
    try:
        with open(fn, 'w') as f:
            f.write('%  columns="iteration, evaluation, sigma, max axis length, ' +
                    ' min axis length, all principle axes lengths ' +
                    ' (sorted square roots of eigenvalues of C)", ' +
                    strseedtime +
                    '\n')
    except (IOError, OSError):
        print('could not open file ' + fn)
    fn = self.name_prefix + 'stddev.dat'
    try:
        with open(fn, 'w') as f:
            f.write('% # columns=["iteration, evaluation, sigma, void, void, ' +
                    ' stds==sigma*sqrt(diag(C))", ' +
                    strseedtime +
                    '\n')
    except (IOError, OSError):
        print('could not open file ' + fn)
    fn = self.name_prefix + 'xmean.dat'
    try:
        with open(fn, 'w') as f:
            f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' +
                    strseedtime)
            f.write(' # scaling_of_variables: ')
            if np.size(es.gp.scales) > 1:
                f.write(' '.join(map(str, es.gp.scales)))
            else:
                f.write(str(es.gp.scales))
            f.write(', typical_x: ')
            if np.size(es.gp.typical_x) > 1:
                f.write(' '.join(map(str, es.gp.typical_x)))
            else:
                f.write(str(es.gp.typical_x))
            f.write('\n')
    except (IOError, OSError):
        print('could not open/write file ' + fn)
    fn = self.name_prefix + 'xrecentbest.dat'
    try:
        with open(fn, 'w') as f:
            f.write('% # iter+eval+sigma+0+fitness+xbest, ' +
                    strseedtime +
                    '\n')
    except (IOError, OSError):
        print('could not open/write file ' + fn)
    return self
def load(self, filenameprefix=None):
    """loads data from files written and return a data dictionary, *not*
    a prerequisite for using `plot()` or `disp()`.

    Argument `filenameprefix` is the filename prefix of data to be loaded (five files),
    by default ``'outcmaes'``.

    Return data dictionary with keys `xrecent`, `xmean`, `f`, `D`, `std`
    """
    if not filenameprefix:
        filenameprefix = self.name_prefix
    for file_name, key_name in zip(self.file_names, self.key_names):
        fn = filenameprefix + file_name + '.dat'
        try:
            self.__dict__[key_name] = _fileToMatrix(fn)
        except Exception:  # FIX: was a bare except
            print('WARNING: reading from file "' + fn + '" failed')
            # FIX: skip post-processing of a key that failed to load —
            # previously the code fell through and raised KeyError below
            continue
        if key_name in self.key_names_with_annotation:
            # copy last row to later fill in annotation position for display
            self.__dict__[key_name].append(self.__dict__[key_name][-1])
        self.__dict__[key_name] = array(self.__dict__[key_name], copy=False)
    return self
def add(self, es=None, more_data=(), modulo=None): # TODO: find a different way to communicate current x and f
    """append some logging data from `CMAEvolutionStrategy` class instance `es`,
    if ``number_of_times_called % modulo`` equals to zero, never if ``modulo==0``.

    The sequence ``more_data`` must always have the same length.
    (FIX: default changed from the mutable ``[]`` to ``()``; it is only
    iterated, so behavior is unchanged.)

    When used for a different optimizer class, this function can be
    (easily?) adapted by changing the assignments under INTERFACE
    in the implemention.
    """
    self.counter += 1
    mod = modulo if modulo is not None else self.modulo
    # the first three calls are always logged, afterwards every mod-th call
    if mod == 0 or (self.counter > 3 and self.counter % mod):
        return
    if es is None:
        try:
            es = self.es  # must have been registered
        except AttributeError:
            raise _Error('call `add` with argument `es` or ``register(es)`` before ``add()``')
    elif not self.registered:
        self.register(es) # calls initialize
    # --- INTERFACE, can be changed if necessary ---
    if type(es) is not CMAEvolutionStrategy: # not necessary
        print('WARNING: <type \'CMAEvolutionStrategy\'> expected, found '
              + str(type(es)) + ' in method CMADataLogger.add')
    evals = es.countevals
    iteration = es.countiter
    sigma = es.sigma
    axratio = es.D.max()/es.D.min()
    xmean = es.mean # TODO: should be optionally phenotype?
    fmean_noise_free = es.fmean_noise_free
    fmean = es.fmean
    # FIX: pre-assign so the xrecentbest write below cannot hit an unbound
    # name when the first call happens without f-values available
    besteverf = bestf = medianf = worstf = float('nan')
    try:
        besteverf = es.best.f
        bestf = es.fit.fit[0]
        medianf = es.fit.fit[es.sp.popsize//2]
        worstf = es.fit.fit[-1]
    except Exception:  # FIX: was a bare except
        if self.counter > 1: # first call without f-values is OK
            raise
    try:
        xrecent = es.best.last.x
    except Exception:
        xrecent = None
    maxD = es.D.max()
    minD = es.D.min()
    diagD = es.D
    diagC = es.sigma*es.sigma_vec*sqrt(es.dC)
    more_to_write = es.more_to_write
    es.more_to_write = []
    # --- end interface ---
    try:
        # fit
        if self.counter > 1:
            fn = self.name_prefix + 'fit.dat'
            with open(fn, 'a') as f:
                f.write(str(iteration) + ' '
                        + str(evals) + ' '
                        + str(sigma) + ' '
                        + str(axratio) + ' '
                        + str(besteverf) + ' '
                        + '%.16e' % bestf + ' '
                        + str(medianf) + ' '
                        + str(worstf) + ' '
                        # + str(es.sp.popsize) + ' '
                        # + str(10**es.noiseS) + ' '
                        # + str(es.sp.cmean) + ' '
                        # FIX: separate the two joined sequences with a
                        # space; previously their values could run together
                        + ' '.join(str(i) for i in more_to_write) + ' '
                        + ' '.join(str(i) for i in more_data)
                        + '\n')
        # axlen
        fn = self.name_prefix + 'axlen.dat'
        with open(fn, 'a') as f:  # does not rely on reference counting
            f.write(str(iteration) + ' '
                    + str(evals) + ' '
                    + str(sigma) + ' '
                    + str(maxD) + ' '
                    + str(minD) + ' '
                    + ' '.join(map(str, diagD))
                    + '\n')
        # stddev
        fn = self.name_prefix + 'stddev.dat'
        with open(fn, 'a') as f:
            f.write(str(iteration) + ' '
                    + str(evals) + ' '
                    + str(sigma) + ' '
                    + '0 0 '
                    + ' '.join(map(str, diagC))
                    + '\n')
        # xmean
        fn = self.name_prefix + 'xmean.dat'
        with open(fn, 'a') as f:
            if iteration < 1: # before first iteration
                f.write('0 0 0 0 0 '
                        + ' '.join(map(str, xmean))
                        + '\n')
            else:
                f.write(str(iteration) + ' '
                        + str(evals) + ' '
                        # + str(sigma) + ' '
                        + '0 '
                        + str(fmean_noise_free) + ' '
                        + str(fmean) + ' '  # TODO: this does not make sense
                        # TODO should be optional the phenotyp?
                        + ' '.join(map(str, xmean))
                        + '\n')
        # xrecent
        fn = self.name_prefix + 'xrecentbest.dat'
        if iteration > 0 and xrecent is not None:
            with open(fn, 'a') as f:
                f.write(str(iteration) + ' '
                        + str(evals) + ' '
                        + str(sigma) + ' '
                        + '0 '
                        + str(bestf) + ' '
                        + ' '.join(map(str, xrecent))
                        + '\n')
    except (IOError, OSError):
        # report only early on, to avoid flooding the console
        if iteration <= 1:
            print('could not open/write file')
def closefig(self):
    """close the figure last opened by `plot()` (stored in `self.fighandle`)"""
    pylab.close(self.fighandle)
def save(self, nameprefix, switch=False):
    """saves logger data to a different set of files, for
    ``switch=True`` also the loggers name prefix is switched to
    the new value

    Raises `_Error` for an empty/non-string prefix or the default prefix.
    """
    if not nameprefix or type(nameprefix) is not str:
        # FIX: the error object was constructed but never raised
        raise _Error('filename prefix must be a nonempty string')
    if nameprefix == self.default_prefix:
        # FIX: the error object was constructed but never raised
        raise _Error('cannot save to default name "' + nameprefix + '...", chose another name')
    if nameprefix == self.name_prefix:
        return
    # FIX: `CMADataLogger.names` does not exist (it is commented out at
    # class level); use the instance attribute `file_names` instead,
    # and close both files deterministically via `with`
    for name in self.file_names:
        with open(self.name_prefix + name + '.dat') as fin:
            data = fin.read()
        with open(nameprefix + name + '.dat', 'w') as fout:
            fout.write(data)
    if switch:
        self.name_prefix = nameprefix
def plot(self, fig=None, iabscissa=1, iteridx=None, plot_mean=True, # TODO: plot_mean default should be False
         foffset=1e-19, x_opt = None, fontsize=10):
    """Plot data from a `CMADataLogger` (using the files written by the logger).

    Arguments
    ---------
    `fig`
        figure number, by default 325
    `iabscissa`
        ``0==plot`` versus iteration count,
        ``1==plot`` versus function evaluation number
    `iteridx`
        iteration indices to plot; NOTE: the list is modified in place
        (an artificial last entry is appended)
    `plot_mean`
        plot the `xmean` data; otherwise `xrecent` is used if available
    `foffset`
        offset added to fitness values before log-plotting
    `x_opt`
        if given, plot ``abs(x - x_opt)`` on a log scale instead of x
    `fontsize`
        written into ``pylab.rcParams['font.size']``

    Return `CMADataLogger` itself.

    Examples
    --------
    ::

        import cma
        logger = cma.CMADataLogger()  # with default name
        # try to plot the "default logging" data (e.g. from previous fmin calls)
        logger.plot()  # to continue you might need to close the pop-up window
                       # once and call plot() again; using ipython with the
                       # -pylab option might help
        cma.savefig('fig325.png')  # save current figure
        logger.closefig()

    Dependencies: matplotlib/pylab.
    """
    dat = self.load(self.name_prefix)  # (re-)read the data files from disk
    try:
        # pylab: procedural interface for matplotlib
        from matplotlib.pylab import figure, ioff, ion, subplot, semilogy, hold, plot, grid, \
            axis, title, text, xlabel, isinteractive, draw, gcf
    except ImportError:
        # NOTE(review): the exception is instantiated but not raised; the
        # method just returns None when matplotlib is unavailable
        ImportError('could not find matplotlib.pylab module, function plot() is not available')
        return
    if fontsize and pylab.rcParams['font.size'] != fontsize:
        print('global variable pylab.rcParams[\'font.size\'] set (from ' +
              str(pylab.rcParams['font.size']) + ') to ' + str(fontsize))
        pylab.rcParams['font.size'] = fontsize # subtracted in the end, but return can happen inbetween
    if fig:
        figure(fig)
    else:
        figure(325)
        # show() # should not be necessary
    self.fighandle = gcf() # fighandle.number, used by closefig()
    if iabscissa not in (0,1):
        iabscissa = 1
    interactive_status = isinteractive()
    ioff() # prevents immediate drawing
    dat.x = dat.xmean # this is the genotyp
    if not plot_mean:
        try:
            dat.x = dat.xrecent
        except:
            pass  # fall back to xmean when xrecent data are absent
    if len(dat.x) < 2:
        print('not enough data to plot')
        return {}
    if iteridx is not None:
        # restrict all data to the requested iteration numbers
        dat.f = dat.f[np.where([x in iteridx for x in dat.f[:,0]])[0],:]
        dat.D = dat.D[np.where([x in iteridx for x in dat.D[:,0]])[0],:]
        iteridx.append(dat.x[-1,1]) # last entry is artificial
        dat.x = dat.x[np.where([x in iteridx for x in dat.x[:,0]])[0],:]
        dat.std = dat.std[np.where([x in iteridx for x in dat.std[:,0]])[0],:]
    if iabscissa == 0:
        xlab = 'iterations'
    elif iabscissa == 1:
        xlab = 'function evaluations'
    # use fake last entry in x and std for line extension-annotation
    if dat.x.shape[1] < 100:
        minxend = int(1.06*dat.x[-2, iabscissa])
        # write y-values for individual annotation into dat.x
        dat.x[-1, iabscissa] = minxend # TODO: should be ax[1]
        idx = np.argsort(dat.x[-2,5:])
        idx2 = np.argsort(idx)
        if x_opt is None:
            dat.x[-1,5+idx] = np.linspace(np.min(dat.x[:,5:]),
                                          np.max(dat.x[:,5:]), dat.x.shape[1]-5)
        else:
            dat.x[-1,5+idx] = np.logspace(np.log10(np.min(abs(dat.x[:,5:]))),
                                          np.log10(np.max(abs(dat.x[:,5:]))), dat.x.shape[1]-5)
    else:
        minxend = 0
    if len(dat.f) == 0:
        print('nothing to plot')
        return
    # not in use anymore, see formatter above
    # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
    # dfit(dfit<1e-98) = NaN;
    ioff() # turns update off
    # TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
    # upper left: fitness, sigma, axis ratio
    subplot(2,2,1)
    self.plotdivers(dat, iabscissa, foffset)
    # TODO: modularize also the remaining subplots
    # upper right: object variables
    subplot(2,2,2)
    hold(False)
    if x_opt is not None: # TODO: differentate neg and pos?
        semilogy(dat.x[:, iabscissa], abs(dat.x[:,5:]) - x_opt, '-')
    else:
        plot(dat.x[:, iabscissa], dat.x[:,5:],'-')
    hold(True)
    grid(True)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    ax[1] -= 1e-6
    if dat.x.shape[1] < 100:
        yy = np.linspace(ax[2]+1e-6, ax[3]-1e-6, dat.x.shape[1]-5)
        #yyl = np.sort(dat.x[-1,5:])
        idx = np.argsort(dat.x[-1,5:])
        idx2 = np.argsort(idx)
        if x_opt is not None:
            semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1,5:]), yy[idx2]], 'k-') # line from last data point
            semilogy(np.dot(dat.x[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-')
        else:
            # plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
            plot(np.dot(dat.x[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-')
        # plot(array([dat.x[-1, iabscissa], ax[1]]),
        #      reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
        for i in range(len(idx)):
            # TODOqqq: annotate phenotypic value!?
            # text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
            text(dat.x[-1,iabscissa], dat.x[-1,5+i], 'x(' + str(i) + ')=' + str(dat.x[-2,5+i]))
    i = 2 # find smallest i where iteration count differs (in case the same row appears twice)
    while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
        i += 1
    title('Object Variables (' + ('mean' if plot_mean else 'curr best') +
          ', ' + str(dat.x.shape[1]-5) + '-D, popsize~' +
          (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
           if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
          + ')')
    # pylab.xticks(xticklocs)
    # lower left: Scaling
    subplot(2,2,3)
    hold(False)
    semilogy(dat.D[:, iabscissa], dat.D[:,5:], '-b')
    hold(True)
    grid(True)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    title('Scaling (All Main Axes)')
    # pylab.xticks(xticklocs)
    xlabel(xlab)
    # lower right: standard deviations
    subplot(2,2,4)
    hold(False)
    # remove sigma from stds (graphs become much better readible)
    dat.std[:,5:] = np.transpose(dat.std[:,5:].T / dat.std[:,2].T)
    # ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    # axis(ax)
    if 1 < 2 and dat.std.shape[1] < 100:
        # use fake last entry in x and std for line extension-annotation
        minxend = int(1.06*dat.x[-2, iabscissa])
        dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
        idx = np.argsort(dat.std[-2,5:])
        idx2 = np.argsort(idx)
        dat.std[-1,5+idx] = np.logspace(np.log10(np.min(dat.std[:,5:])),
                                        np.log10(np.max(dat.std[:,5:])), dat.std.shape[1]-5)
        dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
        yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1]-5)
        #yyl = np.sort(dat.std[-1,5:])
        idx = np.argsort(dat.std[-1,5:])
        idx2 = np.argsort(idx)
        # plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator
        # vertical separator
        plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([np.min(dat.std[-2,5:]), np.max(dat.std[-2,5:])]), 'k-')
        hold(True)
        # plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point
        for i in xrange(len(idx)):  # NOTE(review): py2-only xrange -- confirm a compat shim exists
            # text(ax[1], yy[i], ' '+str(idx[i]))
            text(dat.std[-1, iabscissa], dat.std[-1, 5+i], ' '+str(i))
    semilogy(dat.std[:, iabscissa], dat.std[:,5:], '-')
    grid(True)
    title('Standard Deviations in All Coordinates')
    # pylab.xticks(xticklocs)
    xlabel(xlab)
    draw()  # does not suffice
    if interactive_status:
        ion()  # turns interactive mode on (again)
        draw()
        show()  # NOTE(review): `show` is not in the from-import list above -- presumably a module-level import; verify
    return self
#____________________________________________________________
#____________________________________________________________
#
@staticmethod
def plotdivers(dat, iabscissa, foffset):
    """helper function for `plot()` that plots all what is
    in the upper left subplot like fitness, sigma, etc.

    Arguments
    ---------
    `dat`
        loaded logger data (attributes `f` and `std` are used)
    `iabscissa` in ``(0,1)``
        0==versus fevals, 1==versus iteration
    `foffset`
        offset to fitness for log-plot

    :See: `plot()`
    """
    from matplotlib.pylab import semilogy, hold, grid, \
        axis, title, text
    fontsize = pylab.rcParams['font.size']
    hold(False)
    # fitness minus the overall best; tiny values become NaN so the
    # log-plot skips them
    dfit = dat.f[:,5]-min(dat.f[:,5])
    dfit[dfit<1e-98] = np.NaN
    if dat.f.shape[1] > 7:
        # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
        semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7]])+foffset,'-k')
        hold(True)
    # (larger indices): additional fitness data, for example constraints values
    if dat.f.shape[1] > 8:
        # dd = abs(dat.f[:,7:]) + 10*foffset
        # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
        semilogy(dat.f[:, iabscissa], np.abs(dat.f[:,8:]) + 10*foffset, 'm')
        hold(True)
    idx = np.where(dat.f[:,5]>1e-98)[0] # positive values
    semilogy(dat.f[idx, iabscissa], dat.f[idx,5]+foffset, '.b')
    hold(True)
    grid(True)
    idx = np.where(dat.f[:,5] < -1e-98) # negative values
    semilogy(dat.f[idx, iabscissa], abs(dat.f[idx,5])+foffset,'.r')
    semilogy(dat.f[:, iabscissa],abs(dat.f[:,5])+foffset,'-b')
    semilogy(dat.f[:, iabscissa], dfit, '-c')
    if 11 < 3: # delta-fitness as points (disabled debug branch)
        dfit = dat.f[1:, 5] - dat.f[:-1,5] # should be negative usually
        semilogy(dat.f[1:,iabscissa], # abs(fit(g) - fit(g-1))
                 np.abs(dfit)+foffset, '.c')
        i = dfit > 0
        # print(np.sum(i) / float(len(dat.f[1:,iabscissa])))
        semilogy(dat.f[1:,iabscissa][i], # abs(fit(g) - fit(g-1))
                 np.abs(dfit[i])+foffset, '.r')
    # overall minimum, marked with a red diamond
    i = np.argmin(dat.f[:,5])
    semilogy(dat.f[i, iabscissa]*np.ones(2), dat.f[i,5]*np.ones(2), 'rd')
    # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')
    # AR and sigma
    semilogy(dat.f[:, iabscissa], dat.f[:,3], '-r') # AR
    semilogy(dat.f[:, iabscissa], dat.f[:,2],'-g') # sigma
    # envelope of the coordinate-wise standard deviations
    # (the fake last row is excluded via [:-1])
    semilogy(dat.std[:-1, iabscissa], np.vstack([list(map(max, dat.std[:-1,5:])), list(map(min, dat.std[:-1,5:]))]).T,
             '-m', linewidth=2)
    text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std', fontsize=fontsize)
    text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std', fontsize=fontsize)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    text(ax[0]+0.01, ax[2], # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
         '.f_recent=' + repr(dat.f[-1,5]) )
    # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
    title('blue:abs(f), cyan:f-min(f), green:sigma, red:axis ratio', fontsize=fontsize-1)
    # pylab.xticks(xticklocs)
def downsampling(self, factor=10, first=3, switch=True):
    """Write a rudely downsampled copy of each data file, by `factor`,
    keeping also the first `first` entries. This function is a stump and
    subject to future changes.

    Arguments
    ---------
    - `factor` -- keep only every `factor`-th data line
    - `first` -- additionally keep the first `first` entries
    - `switch` -- switch the logger name to ``oldname+'down'``

    Details
    -------
    ``self.name_prefix+'down'`` files are written. Return `self`.

    Example
    -------
    ::

        import cma
        cma.downsampling()  # takes outcmaes* files
        cma.plot('outcmaesdown')
    """
    newprefix = self.name_prefix + 'down'
    for name in CMADataLogger.names:
        # `with` closes both handles even on error; the original never
        # closed the input file at all
        with open(newprefix + name + '.dat', 'w') as fout, \
             open(self.name_prefix + name + '.dat') as fin:
            cwritten = 0
            for iline, line in enumerate(fin):
                if iline < first or iline % factor == 0:
                    fout.write(line)
                    cwritten += 1
        print('%d' % (cwritten) + ' lines written in ' + newprefix + name + '.dat')
    if switch:
        self.name_prefix += 'down'
    return self
#____________________________________________________________
#____________________________________________________________
#
def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):
    """displays selected data from (files written by) the class `CMADataLogger`.

    Arguments
    ---------
    `idx`
        indices corresponding to rows in the data file;
        if idx is a scalar (int), the first two, then every idx-th,
        and the last three rows are displayed. Too large index values
        are removed. ``None`` is treated like the default 100.

    Example
    -------
    >>> import cma, numpy as np
    >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, verb_disp=1e9)  # generate data
    >>> assert res[1] < 1e-9
    >>> assert res[2] < 4400
    >>> l = cma.CMADataLogger()  # == res[-1], logger with default name, "points to" above data
    >>> l.disp([0,-1])  # first and last
    >>> l.disp(20)  # some first/last and every 20-th line
    >>> l.disp(np.r_[0:999999:100, -1])  # every 100-th and last
    >>> l.disp(np.r_[0, -10:0])  # first and ten last
    >>> cma.disp(l.name_prefix, np.r_[0::100, -10:])  # the same as l.disp(...)

    Details
    -------
    The data line with the best f-value is displayed as last line.

    :See: `disp()`
    """
    filenameprefix=self.name_prefix
    def printdatarow(dat, iteration):
        """Print one display row for `iteration`: iteration number,
        evaluations, f-value, axis ratio, max and min coordinate std."""
        i = np.where(dat.f[:, 0] == iteration)[0][0]
        j = np.where(dat.std[:, 0] == iteration)[0][0]
        print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +
              ' %5.1e' % (dat.f[i,3]) +
              ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))
    dat = CMADataLogger(filenameprefix).load()
    ndata = dat.f.shape[0]
    # map index to iteration number, is difficult if not all iteration numbers exist
    # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
    # otherwise:
    if idx is None:
        idx = 100
    if np.isscalar(idx):
        # idx = np.arange(0, ndata, idx)
        if idx:
            idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]
        else:
            idx = np.r_[0, 1, -3:0]
    idx = array(idx)
    idx = idx[idx<ndata]    # drop too-large positive indices
    idx = idx[-idx<=ndata]  # drop too-large negative indices
    iters = dat.f[idx, 0]   # iteration numbers of the selected rows
    idxbest = np.argmin(dat.f[:,5])
    iterbest = dat.f[idxbest, 0]
    if len(iters) == 1:
        printdatarow(dat, iters[0])
    else:
        self.disp_header()
        for i in iters:
            printdatarow(dat, i)
        self.disp_header()
        printdatarow(dat, iterbest)  # the best row is repeated as last line
    sys.stdout.flush()
def disp_header(self):
    """Print the column-header line used by `disp()`."""
    header_line = 'Iterat Nfevals  function value    axis ratio maxstd   minstd'
    print(header_line)
# end class CMADataLogger
#____________________________________________________________
#____________________________________________________________
#
#_____________________________________________________________________
#_____________________________________________________________________
#
class DEAPCMADataLogger(BaseDataLogger): # might become a dict at some point
    """data logger for a DEAP CMA-ES `Strategy`. The logger is
    identified by its name prefix and writes or reads according
    data files.

    Examples
    ========
    ::

        import cma_logger
        es = deap.cma.Strategy(...)
        data = cma_logger.DEAPCMADataLogger().register(es)
        while not es.stop():
            ...
            data.add(fitness_values)  # add can also take `es` as additional argument

        data.plot()  # or a short cut can be used:
        cma.plot()  # plot data from logger with default name

        data2 = cma_logger.DEAPCMADataLogger(another_filename_prefix).load()
        data2.plot()
        data2.disp()

    ::

        import cma
        from pylab import *
        res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0)
        dat = res[-1]  # the CMADataLogger
        dat.load()  # by "default" data are on disk
        semilogy(dat.f[:,0], dat.f[:,5])  # plot f versus iteration, see file header
        show()

    Details
    =======
    After loading data, the logger has the attributes `xmean`, `xrecent`, `std`, `f`, and `D`,
    corresponding to xmean, xrecentbest, stddev, fit, and axlen filename trails.

    :See: `disp()`, `plot()`
    """
    # filename prefix used when none is given
    default_prefix = 'outcmaes'
    # filename trails of the data files this logger writes/reads; note
    # that, unlike `CMADataLogger`, 'xrecentbest' is deliberately excluded
    names = ('axlen','fit','stddev','xmean') # ,'xrecentbest')
    # keys whose loaded data get an extra annotation row appended in `load()`
    key_names_with_annotation = ('std', 'xmean')
def __init__(self, name_prefix=default_prefix, modulo=1, append=False):
    """Initialize logging of data from a `CMAEvolutionStrategy` instance.

    The default ``modulo=1`` expands to logging with each call of `add`;
    ``append=True`` keeps existing data files instead of overwriting.
    """
    # super(CMAData, self).__init__({'iter':[], 'stds':[], 'D':[], 'sig':[], 'fit':[], 'xm':[]})
    self.registered = False
    self.counter = 0  # how often add() has been called
    self.best_fitness = np.inf  # best fitness value seen so far
    self.modulo = modulo  # allows calling with None
    self.append = append
    prefix = name_prefix if name_prefix else CMADataLogger.default_prefix
    if type(prefix) == CMAEvolutionStrategy:
        # a strategy instance was passed instead of a string: use its
        # configured filename prefix
        prefix = prefix.opts.eval('verb_filenameprefix')
    self.name_prefix = prefix
def register(self, es, append=None, modulo=None):
"""register a `CMAEvolutionStrategy` instance for logging,
``append=True`` appends to previous data logged under the same name,
by default previous data are overwritten.
"""
self.es = es
if append is not None:
self.append = append
if modulo is not None:
self.modulo = modulo
if not self.append and self.modulo != 0:
self.initialize() # write file headers
self.registered = True
return self
def initialize(self, modulo=None):
"""reset logger, overwrite original files, `modulo`: log only every modulo call"""
if modulo is not None:
self.modulo = modulo
try:
es = self.es # must have been registered
except AttributeError:
pass # TODO: revise usage of es... that this can pass
raise _Error('call register() before initialize()')
# write headers for output
fn = self.name_prefix + 'fit.dat'
if 11 < 3:
strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime())
else:
strseedtime = 'seed=unkown, %s' % (time.asctime())
try:
with open(fn, 'w') as f:
f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' +
'bestever, best, median, worst objective function value, ' +
'further objective values of best", ' +
strseedtime +
# strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do
'\n')
except (IOError, OSError):
print('could not open file ' + fn)
fn = self.name_prefix + 'axlen.dat'
try:
f = open(fn, 'w')
f.write('% columns="iteration, evaluation, sigma, max axis length, ' +
' min axis length, all principle axes lengths ' +
' (sorted square roots of eigenvalues of C)", ' +
strseedtime +
'\n')
f.close()
except (IOError, OSError):
print('could not open file ' + fn)
finally:
f.close()
fn = self.name_prefix + 'stddev.dat'
try:
f = open(fn, 'w')
f.write('% # columns=["iteration, evaluation, sigma, void, void, ' +
' stds==sigma*sqrt(diag(C))", ' +
strseedtime +
'\n')
f.close()
except (IOError, OSError):
print('could not open file ' + fn)
finally:
f.close()
fn = self.name_prefix + 'xmean.dat'
try:
with open(fn, 'w') as f:
f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' +
strseedtime)
if 11 < 3:
f.write(' # scaling_of_variables: ')
if np.size(es.gp.scales) > 1:
f.write(' '.join(map(str, es.gp.scales)))
else:
f.write(str(es.gp.scales))
f.write(', typical_x: ')
if np.size(es.gp.typical_x) > 1:
f.write(' '.join(map(str, es.gp.typical_x)))
else:
f.write(str(es.gp.typical_x))
f.write('\n')
f.close()
except (IOError, OSError):
print('could not open/write file ' + fn)
if 11 < 3:
fn = self.name_prefix + 'xrecentbest.dat'
try:
with open(fn, 'w') as f:
f.write('% # iter+eval+sigma+0+fitness+xbest, ' +
strseedtime +
'\n')
except (IOError, OSError):
print('could not open/write file ' + fn)
return self
# end def __init__
def load(self, filenameprefix=None):
    """Load data from the files written by this logger.

    Not a prerequisite for using `plot()` or `disp()`. `filenameprefix`
    is the filename prefix of the data to be loaded, by default
    ``'outcmaes'``. Return the logger itself, now carrying the data
    attributes `xmean`, `f`, `D`, and `std`.
    """
    prefix = filenameprefix if filenameprefix else self.name_prefix
    dat = self  # historical: data attributes live on the logger itself
    dat.xmean = _fileToMatrix(prefix + 'xmean.dat')
    dat.std = _fileToMatrix(prefix + 'stddev.dat')
    # duplicate the last row so that plot() can later overwrite it with
    # annotation positions for display
    for key in ('xmean', 'std'):
        rows = dat.__dict__[key]
        rows.append(rows[-1])
        dat.__dict__[key] = array(rows, copy=False)
    dat.f = array(_fileToMatrix(prefix + 'fit.dat'))
    dat.D = array(_fileToMatrix(prefix + 'axlen.dat'))
    return dat
def add(self, fitness_values, es=None, more_data=[], modulo=None): # TODO: find a different way to communicate current x and f
"""append some logging data from `CMAEvolutionStrategy` class instance `es`,
if ``number_of_times_called % modulo`` equals to zero, never if ``modulo==0``.
The sequence ``more_data`` must always have the same length.
"""
self.counter += 1
fitness_values = np.sort(fitness_values)
if fitness_values[0] < self.best_fitness:
self.best_fitness = fitness_values[0]
mod = modulo if modulo is not None else self.modulo
if mod == 0 or (self.counter > 3 and self.counter % mod):
return
if es is None:
try:
es = self.es # must have been registered
except AttributeError :
raise _Error('call register() before add() or add(es)')
elif not self.registered:
self.register(es)
if 11 < 3:
try: # TODO: find a more decent interface to store and pass recent_x
xrecent = es.best.last.x
except:
if self.counter == 2: # by now a recent_x should be available
print('WARNING: es.out[\'recent_x\'] not found in CMADataLogger.add, count='
+ str(self.counter))
try:
# fit
if es.update_count > 0:
# fit = es.fit.fit[0] # TODO: where do we get the fitness from?
fn = self.name_prefix + 'fit.dat'
with open(fn, 'a') as f:
f.write(str(es.update_count) + ' '
+ str(es.update_count * es.lambda_) + ' '
+ str(es.sigma) + ' '
+ str(es.diagD[-1]/es.diagD[0]) + ' '
+ str(self.best_fitness) + ' '
+ '%.16e' % fitness_values[0] + ' '
+ str(fitness_values[es.lambda_//2]) + ' '
+ str(fitness_values[-1]) + ' '
# + str(es.sp.popsize) + ' '
# + str(10**es.noiseS) + ' '
# + str(es.sp.cmean) + ' '
# + ' '.join(str(i) for i in es.more_to_write)
+ ' '.join(str(i) for i in more_data)
+ '\n')
# es.more_to_write = []
# axlen
fn = self.name_prefix + 'axlen.dat'
with open(fn, 'a') as f: # does not rely on reference counting
f.write(str(es.update_count) + ' '
+ str(es.update_count * es.lambda_) + ' '
+ str(es.sigma) + ' '
+ str(es.diagD[-1]) + ' '
+ str(es.diagD[0]) + ' '
+ ' '.join(map(str, es.diagD))
+ '\n')
# stddev
fn = self.name_prefix + 'stddev.dat'
with open(fn, 'a') as f:
f.write(str(es.update_count) + ' '
+ str(es.update_count * es.lambda_) + ' '
+ str(es.sigma) + ' '
+ '0 0 '
+ ' '.join(map(str, es.sigma*np.sqrt([es.C[i][i] for i in xrange(es.dim)])))
+ '\n')
# xmean
fn = self.name_prefix + 'xmean.dat'
with open(fn, 'a') as f:
if es.update_count < 1:
f.write('0 0 0 0 0 '
+ ' '.join(map(str,
# TODO should be optional the phenotyp?
# es.gp.geno(es.x0)
es.mean))
+ '\n')
else:
f.write(str(es.update_count) + ' '
+ str(es.update_count * es.lambda_) + ' '
# + str(es.sigma) + ' '
+ '0 0 0 '
# + str(es.fmean_noise_free) + ' '
# + str(es.fmean) + ' ' # TODO: this does not make sense
# TODO should be optional the phenotyp?
+ ' '.join(map(str, es.centroid))
+ '\n')
# xrecent
if 11 < 3:
fn = self.name_prefix + 'xrecentbest.dat'
if es.countiter > 0 and xrecent is not None:
with open(fn, 'a') as f:
f.write(str(es.countiter) + ' '
+ str(es.countevals) + ' '
+ str(es.sigma) + ' '
+ '0 '
+ str(es.fit.fit[0]) + ' '
+ ' '.join(map(str, xrecent))
+ '\n')
except (IOError, OSError):
if es.countiter == 1:
print('could not open/write file')
def closefig(self):
    """Close the figure opened by `plot()`, using the handle stored there.

    Requires that `plot()` has been called before (sets ``self.fighandle``);
    otherwise an AttributeError is raised.
    """
    pylab.close(self.fighandle)
def save(self, nameprefix, switch=False):
"""saves logger data to a different set of files, for
``switch=True`` also the loggers name prefix is switched to
the new value
"""
if not nameprefix or type(nameprefix) is not str:
_Error('filename prefix must be a nonempty string')
if nameprefix == self.default_prefix:
_Error('cannot save to default name "' + nameprefix + '...", chose another name')
if nameprefix == self.name_prefix:
return
for name in CMADataLogger.names:
open(nameprefix+name+'.dat', 'w').write(open(self.name_prefix+name+'.dat').read())
if switch:
self.name_prefix = nameprefix
def plot(self, fig=None, iabscissa=1, iteridx=None, plot_mean=True, # TODO: plot_mean default should be False
         foffset=1e-19, x_opt = None, fontsize=10):
    """Plot data from a `DEAPCMADataLogger` (using the files written by the logger).

    Arguments
    ---------
    `fig`
        figure number, by default 325
    `iabscissa`
        ``0==plot`` versus iteration count,
        ``1==plot`` versus function evaluation number
    `iteridx`
        iteration indices to plot; NOTE: the list is modified in place
        (an artificial last entry is appended)
    `plot_mean`
        used only for the subplot title; the `xmean` data are always plotted
    `foffset`
        offset added to fitness values before log-plotting
    `x_opt`
        if given, plot ``abs(x - x_opt)`` on a log scale instead of x
    `fontsize`
        written into ``pylab.rcParams['font.size']``

    Return the logger itself.

    Examples
    --------
    ::

        import cma
        logger = cma.CMADataLogger()  # with default name
        # try to plot the "default logging" data (e.g. from previous fmin calls)
        logger.plot()  # to continue you might need to close the pop-up window
                       # once and call plot() again; using ipython with the
                       # -pylab option might help
        cma.savefig('fig325.png')  # save current figure
        logger.closefig()

    Dependencies: matplotlib/pylab.
    """
    dat = self.load(self.name_prefix)  # (re-)read the data files from disk
    try:
        # pylab: procedural interface for matplotlib
        from matplotlib.pylab import figure, ioff, ion, subplot, semilogy, hold, plot, grid, \
            axis, title, text, xlabel, isinteractive, draw, gcf
    except ImportError:
        # NOTE(review): the exception is instantiated but not raised; the
        # method just returns None when matplotlib is unavailable
        ImportError('could not find matplotlib.pylab module, function plot() is not available')
        return
    if fontsize and pylab.rcParams['font.size'] != fontsize:
        print('global variable pylab.rcParams[\'font.size\'] set (from ' +
              str(pylab.rcParams['font.size']) + ') to ' + str(fontsize))
        pylab.rcParams['font.size'] = fontsize # subtracted in the end, but return can happen inbetween
    if fig:
        figure(fig)
    else:
        figure(325)
        # show() # should not be necessary
    self.fighandle = gcf() # fighandle.number, used by closefig()
    if iabscissa not in (0,1):
        iabscissa = 1
    interactive_status = isinteractive()
    ioff() # prevents immediate drawing
    if 11 < 3:  # disabled: xrecent data are not logged by this logger
        dat.x = dat.xrecent
        if len(dat.x) < 2:
            print('not enough data to plot')
            return {}
    # if plot_mean:
    dat.x = dat.xmean # this is the genotyp
    if iteridx is not None:
        # restrict all data to the requested iteration numbers
        dat.f = dat.f[np.where([x in iteridx for x in dat.f[:,0]])[0],:]
        dat.D = dat.D[np.where([x in iteridx for x in dat.D[:,0]])[0],:]
        iteridx.append(dat.x[-1,1]) # last entry is artificial
        dat.x = dat.x[np.where([x in iteridx for x in dat.x[:,0]])[0],:]
        dat.std = dat.std[np.where([x in iteridx for x in dat.std[:,0]])[0],:]
    if iabscissa == 0:
        xlab = 'iterations'
    elif iabscissa == 1:
        xlab = 'function evaluations'
    # use fake last entry in x and std for line extension-annotation
    if dat.x.shape[1] < 100:
        minxend = int(1.06*dat.x[-2, iabscissa])
        # write y-values for individual annotation into dat.x
        dat.x[-1, iabscissa] = minxend # TODO: should be ax[1]
        idx = np.argsort(dat.x[-2,5:])
        idx2 = np.argsort(idx)
        if x_opt is None:
            dat.x[-1,5+idx] = np.linspace(np.min(dat.x[:,5:]),
                                          np.max(dat.x[:,5:]), dat.x.shape[1]-5)
        else:
            dat.x[-1,5+idx] = np.logspace(np.log10(np.min(abs(dat.x[:,5:]))),
                                          np.log10(np.max(abs(dat.x[:,5:]))), dat.x.shape[1]-5)
    else:
        minxend = 0
    if len(dat.f) == 0:
        print('nothing to plot')
        return
    # not in use anymore, see formatter above
    # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
    # dfit(dfit<1e-98) = NaN;
    ioff() # turns update off
    # TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
    # upper left: fitness, sigma, axis ratio
    subplot(2,2,1)
    self.plotdivers(dat, iabscissa, foffset)
    # TODO: modularize also the remaining subplots
    # upper right: object variables
    subplot(2,2,2)
    hold(False)
    if x_opt is not None: # TODO: differentate neg and pos?
        semilogy(dat.x[:, iabscissa], abs(dat.x[:,5:]) - x_opt, '-')
    else:
        plot(dat.x[:, iabscissa], dat.x[:,5:],'-')
    hold(True)
    grid(True)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    ax[1] -= 1e-6
    if dat.x.shape[1] < 100:
        yy = np.linspace(ax[2]+1e-6, ax[3]-1e-6, dat.x.shape[1]-5)
        #yyl = np.sort(dat.x[-1,5:])
        idx = np.argsort(dat.x[-1,5:])
        idx2 = np.argsort(idx)
        if x_opt is not None:
            semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1,5:]), yy[idx2]], 'k-') # line from last data point
            semilogy(np.dot(dat.x[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-')
        else:
            # plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
            plot(np.dot(dat.x[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-')
        # plot(array([dat.x[-1, iabscissa], ax[1]]),
        #      reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
        for i in range(len(idx)):
            # TODOqqq: annotate phenotypic value!?
            # text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
            text(dat.x[-1,iabscissa], dat.x[-1,5+i], 'x(' + str(i) + ')=' + str(dat.x[-2,5+i]))
    i = 2 # find smallest i where iteration count differs (in case the same row appears twice)
    while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
        i += 1
    title('Object Variables (' + ('mean' if plot_mean else 'curr best') +
          ', ' + str(dat.x.shape[1]-5) + '-D, popsize~' +
          (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
           if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
          + ')')
    # pylab.xticks(xticklocs)
    # lower left: Scaling
    subplot(2,2,3)
    hold(False)
    semilogy(dat.D[:, iabscissa], dat.D[:,5:], '-b')
    hold(True)
    grid(True)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    title('Scaling (All Main Axes)')
    # pylab.xticks(xticklocs)
    xlabel(xlab)
    # lower right: standard deviations
    subplot(2,2,4)
    hold(False)
    # remove sigma from stds (graphs become much better readible)
    dat.std[:,5:] = np.transpose(dat.std[:,5:].T / dat.std[:,2].T)
    # ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    # axis(ax)
    if 1 < 2 and dat.std.shape[1] < 100:
        # use fake last entry in x and std for line extension-annotation
        minxend = int(1.06*dat.x[-2, iabscissa])
        dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
        idx = np.argsort(dat.std[-2,5:])
        idx2 = np.argsort(idx)
        dat.std[-1,5+idx] = np.logspace(np.log10(np.min(dat.std[:,5:])),
                                        np.log10(np.max(dat.std[:,5:])), dat.std.shape[1]-5)
        dat.std[-1, iabscissa] = minxend # TODO: should be ax[1]
        yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1]-5)
        #yyl = np.sort(dat.std[-1,5:])
        idx = np.argsort(dat.std[-1,5:])
        idx2 = np.argsort(idx)
        # plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator
        # vertical separator
        plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([np.min(dat.std[-2,5:]), np.max(dat.std[-2,5:])]), 'k-')
        hold(True)
        # plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point
        for i in xrange(len(idx)):  # NOTE(review): py2-only xrange -- confirm a compat shim exists
            # text(ax[1], yy[i], ' '+str(idx[i]))
            text(dat.std[-1, iabscissa], dat.std[-1, 5+i], ' '+str(i))
    semilogy(dat.std[:, iabscissa], dat.std[:,5:], '-')
    grid(True)
    title('Standard Deviations in All Coordinates')
    # pylab.xticks(xticklocs)
    xlabel(xlab)
    draw()  # does not suffice
    if interactive_status:
        ion()  # turns interactive mode on (again)
        draw()
        show()  # NOTE(review): `show` is not in the from-import list above -- presumably a module-level import; verify
    return self
#____________________________________________________________
#____________________________________________________________
#
@staticmethod
def plotdivers(dat, iabscissa, foffset):
    """helper function for `plot()` that plots all what is
    in the upper left subplot like fitness, sigma, etc.

    Arguments
    ---------
    `dat`
        loaded logger data (attributes `f` and `std` are used)
    `iabscissa` in ``(0,1)``
        0==versus fevals, 1==versus iteration
    `foffset`
        offset to fitness for log-plot

    :See: `plot()`
    """
    from matplotlib.pylab import semilogy, hold, grid, \
        axis, title, text
    fontsize = pylab.rcParams['font.size']
    hold(False)
    # fitness minus the overall best; tiny values become NaN so the
    # log-plot skips them
    dfit = dat.f[:,5]-min(dat.f[:,5])
    dfit[dfit<1e-98] = np.NaN
    if dat.f.shape[1] > 7:
        # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
        semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7]])+foffset,'-k')
        hold(True)
    # (larger indices): additional fitness data, for example constraints values
    if dat.f.shape[1] > 8:
        # dd = abs(dat.f[:,7:]) + 10*foffset
        # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
        semilogy(dat.f[:, iabscissa], np.abs(dat.f[:,8:]) + 10*foffset, 'm')
        hold(True)
    idx = np.where(dat.f[:,5]>1e-98)[0] # positive values
    semilogy(dat.f[idx, iabscissa], dat.f[idx,5]+foffset, '.b')
    hold(True)
    grid(True)
    idx = np.where(dat.f[:,5] < -1e-98) # negative values
    semilogy(dat.f[idx, iabscissa], abs(dat.f[idx,5])+foffset,'.r')
    semilogy(dat.f[:, iabscissa],abs(dat.f[:,5])+foffset,'-b')
    semilogy(dat.f[:, iabscissa], dfit, '-c')
    if 11 < 3: # delta-fitness as points (disabled debug branch)
        dfit = dat.f[1:, 5] - dat.f[:-1,5] # should be negative usually
        semilogy(dat.f[1:,iabscissa], # abs(fit(g) - fit(g-1))
                 np.abs(dfit)+foffset, '.c')
        i = dfit > 0
        # print(np.sum(i) / float(len(dat.f[1:,iabscissa])))
        semilogy(dat.f[1:,iabscissa][i], # abs(fit(g) - fit(g-1))
                 np.abs(dfit[i])+foffset, '.r')
    # overall minimum, marked with a red diamond
    i = np.argmin(dat.f[:,5])
    semilogy(dat.f[i, iabscissa]*np.ones(2), dat.f[i,5]*np.ones(2), 'rd')
    # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')
    # AR and sigma
    semilogy(dat.f[:, iabscissa], dat.f[:,3], '-r') # AR
    semilogy(dat.f[:, iabscissa], dat.f[:,2],'-g') # sigma
    # envelope of the coordinate-wise standard deviations
    # (the fake last row is excluded via [:-1])
    semilogy(dat.std[:-1, iabscissa], np.vstack([list(map(max, dat.std[:-1,5:])), list(map(min, dat.std[:-1,5:]))]).T,
             '-m', linewidth=2)
    text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std', fontsize=fontsize)
    text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std', fontsize=fontsize)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    text(ax[0]+0.01, ax[2], # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
         '.f_recent=' + repr(dat.f[-1,5]) )
    # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
    title('blue:abs(f), cyan:f-min(f), green:sigma, red:axis ratio', fontsize=fontsize-1)
    # pylab.xticks(xticklocs)
def downsampling(self, factor=10, first=3, switch=True):
"""
rude downsampling of a `CMADataLogger` data file by `factor`, keeping
also the first `first` entries. This function is a stump and subject
to future changes.
Arguments
---------
- `factor` -- downsampling factor
- `first` -- keep first `first` entries
- `switch` -- switch the new logger name to oldname+'down'
Details
-------
``self.name_prefix+'down'`` files are written
Example
-------
::
import cma
cma.downsampling() # takes outcmaes* files
cma.plot('outcmaesdown')
"""
newprefix = self.name_prefix + 'down'
for name in CMADataLogger.names:
f = open(newprefix+name+'.dat','w')
iline = 0
cwritten = 0
for line in open(self.name_prefix+name+'.dat'):
if iline < first or iline % factor == 0:
f.write(line)
cwritten += 1
iline += 1
f.close()
print('%d' % (cwritten) + ' lines written in ' + newprefix+name+'.dat')
if switch:
self.name_prefix += 'down'
return self
#____________________________________________________________
#____________________________________________________________
#
def disp_header(self):
    """Print the column-header line used by `disp()`."""
    header_line = 'Iterat Nfevals  function value    axis ratio maxstd   minstd'
    print(header_line)
    def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):
        """displays selected data from (files written by) the class `CMADataLogger`.
        Arguments
        ---------
        `idx`
            indices corresponding to rows in the data file;
            if idx is a scalar (int), the first two, then every idx-th,
            and the last three rows are displayed. Too large index values are removed.
            If ``len(idx) == 1``, only a single row is displayed, e.g. the last
            entry when ``idx == [-1]``.
        Example
        -------
        >>> import cma, numpy as np
        >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, verb_disp=1e9)  # generate data
        >>> assert res[1] < 1e-9
        >>> assert res[2] < 4400
        >>> l = cma.CMADataLogger()  # == res[-1], logger with default name, "points to" above data
        >>> l.disp([0,-1])  # first and last
        >>> l.disp(20)  # some first/last and every 20-th line
        >>> l.disp(np.r_[0:999999:100, -1]) # every 100-th and last
        >>> l.disp(np.r_[0, -10:0])  # first and ten last
        >>> cma.disp(l.name_prefix, np.r_[0::100, -10:]) # the same as l.disp(...)
        Details
        -------
        The data line with the best f-value is displayed as last line.
        :See: `disp()`
        """
        filenameprefix=self.name_prefix
        def printdatarow(dat, iteration):
            """print data of iteration i"""
            # column 0 of both files holds the iteration number; map it
            # back to a row index in each file (they may differ per file)
            i = np.where(dat.f[:, 0] == iteration)[0][0]
            j = np.where(dat.std[:, 0] == iteration)[0][0]
            print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +
                  ' %5.1e' % (dat.f[i,3]) +
                  ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))
        dat = CMADataLogger(filenameprefix).load()
        ndata = dat.f.shape[0]
        # map index to iteration number, is difficult if not all iteration numbers exist
        # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
        # otherwise:
        if idx is None:
            idx = 100
        if np.isscalar(idx):
            # idx = np.arange(0, ndata, idx)
            if idx:
                # first two rows, every idx-th row, and the last three rows
                idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]
            else:
                idx = np.r_[0, 1, -3:0]
        idx = array(idx)
        idx = idx[idx<=ndata]  # TODO: shouldn't this be "<"?
        idx = idx[-idx<=ndata]  # drop negative indices that reach before the first row
        iters = dat.f[idx, 0]
        idxbest = np.argmin(dat.f[:,5])  # row with the best (smallest) f-value
        iterbest = dat.f[idxbest, 0]
        if len(iters) == 1:
            printdatarow(dat, iters[0])
        else:
            self.disp_header()
            for i in iters:
                printdatarow(dat, i)
            self.disp_header()
            # best-ever row is repeated as the very last line
            printdatarow(dat, iterbest)
        sys.stdout.flush()
def irg(ar):
    """Return the index range ``xrange(len(ar))`` over sequence `ar`."""
    # NOTE(review): relies on `xrange`, presumably aliased to `range` for
    # Python 3 elsewhere in this file -- confirm before running under py3
    return xrange(len(ar))
class AII(object):
    """unstable experimental code, updates ps, sigma, sigmai, pr, r, sigma_r, mean,
    all from self.
    Depends on that the ordering of solutions has not change upon calling update
    should become a OOOptimizer in far future?
    """
    # Try: ps**2 - 1 instead of (ps**2)**0.5 / chi1 - 1: compare learning rate etc
    # and dito for psr
    def __init__(self, x0, sigma0, randn=np.random.randn):
        """TODO: check scaling of r-learing: seems worse than linear: 9e3 25e3 65e3 (10,20,40-D)"""
        # `x0`: initial mean (sequence), `sigma0`: initial scalar step-size,
        # `randn`: standard-normal sampler with the np.random.randn signature
        self.N = len(x0)
        N = self.N
        # parameters to play with:
        # PROBLEM: smaller eta_r even fails on *axparallel* cigar!! Also dampi needs to be smaller then!
        self.dampi = 4 * N  # two times smaller is
        self.eta_r = 0 / N / 3  # c_r learning rate for direction, cigar: 4/N/3 is optimal in 10-D, 10/N/3 still works (15 in 20-D) but not on the axparallel cigar with recombination
        self.mu = 1
        self.use_abs_sigma = 1  # without it is a problem on 20=D axpar-cigar!!, but why?? Because dampi is just boarderline
        self.use_abs_sigma_r = 1  #
        self.randn = randn
        self.x0 = array(x0, copy=True)
        self.sigma0 = sigma0
        self.cs = 1 / N**0.5  # evolution path for step-size(s)
        self.damps = 1
        self.use_sign = 0
        self.use_scalar_product = 0  # sometimes makes it somewhat worse on Rosenbrock, don't know why
        self.csr = 1 / N**0.5  # cumulation for sigma_r
        self.dampsr = (4 * N)**0.5
        self.chi1 = (2/np.pi)**0.5  # E|N(0,1)|, expectation of a 1-D half-normal
        self.chiN = N**0.5*(1-1./(4.*N)+1./(21.*N**2))  # expectation of norm(randn(N,1))
        self.initialize()
    def initialize(self):
        """alias ``reset``, set all state variables to initial values"""
        N = self.N
        self.mean = array(self.x0, copy=True)
        self.sigma = self.sigma0
        self.sigmai = np.ones(N)  # per-coordinate step-size multipliers
        self.ps = np.zeros(N)  # path for individual and globalstep-size(s)
        self.r = np.zeros(N)  # adapted direction vector
        self.pr = 0  # cumulation for zr = N(0,1)
        self.sigma_r = 0  # step-size along direction r
    def ask(self, popsize):
        """sample and return `popsize` candidate solutions; stores the
        raw normal samples in ``self.Z`` / ``self.zr`` for `tell()`"""
        if popsize == 1:
            raise NotImplementedError()
        self.Z = [self.randn(self.N) for _i in xrange(popsize)]
        self.zr = list(self.randn(popsize))
        pop = [self.mean + self.sigma * (self.sigmai * self.Z[k])
                         + self.zr[k] * self.sigma_r * self.r
                for k in xrange(popsize)]
        if not np.isfinite(pop[0][0]):
            raise ValueError()
        return pop
    def tell(self, X, f):
        """update """
        # select the mu best solutions by f-value; X must be in the same
        # order as returned by the preceding `ask()` call
        # NOTE(review): mu == 1 whenever self.mu is truthy -- looks inverted
        # at first glance, but matches ``self.mu = 1`` in __init__; confirm intent
        mu = 1 if self.mu else int(len(f) / 4)
        idx = np.argsort(f)[:mu]
        zr = [self.zr[i] for i in idx]
        Z = [self.Z[i] for i in idx]
        X = [X[i] for i in idx]
        xmean = np.mean(X, axis=0)
        # cumulate the step-size evolution path and update global sigma
        self.ps *= 1 - self.cs
        self.ps += (self.cs*(2-self.cs))**0.5 * mu**0.5 * np.mean(Z, axis=0)
        self.sigma *= np.exp((self.cs/self.damps) * (sum(self.ps**2)**0.5 / self.chiN - 1))
        if self.use_abs_sigma:
            self.sigmai *= np.exp((1/self.dampi) * (np.abs(self.ps) / self.chi1 - 1))
        else:
            self.sigmai *= np.exp((1.3/self.dampi/2) * (self.ps**2 - 1))
        # cumulate the scalar path for the direction step-size sigma_r
        self.pr *= 1 - self.csr
        self.pr += (self.csr*(2-self.csr))**0.5 * mu**0.5 * np.mean(zr)
        fac = 1
        if self.use_sign:
            fac = np.sign(self.pr)  # produces readaptations on the cigar
        else:
            self.pr = max([0, self.pr])
        if self.use_scalar_product:
            if np.sign(sum(self.r * (xmean - self.mean))) < 0:  # and self.pr > 1:
            # if np.sign(sum(self.r * self.ps)) < 0:
                self.r *= -1
        if self.eta_r:
            # update and renormalize the direction vector r
            self.r *= (1 - self.eta_r) * self.sigma_r
            self.r += fac * self.eta_r * mu**0.5 * (xmean - self.mean)
            self.r /= sum(self.r**2)**0.5
        if self.use_abs_sigma_r:
            self.sigma_r *= np.exp((1/self.dampsr) * ((self.pr**2)**0.5 / self.chi1 - 1))
        else:
            # this is worse on the cigar, where the direction vector(!) behaves strangely
            self.sigma_r *= np.exp((1/self.dampsr) * (self.pr**2 - 1) / 2)
        # keep sigma_r from collapsing below a fraction of the overall scale
        self.sigma_r = max([self.sigma * sum(self.sigmai**2)**0.5 / 3, self.sigma_r])
        # self.sigma_r = 0
        self.mean = xmean
def fmin(func, x0, sigma0=None, args=()
    # the follow string arguments are evaluated, besides the verb_filenameprefix
    , CMA_active='False # exponential negative update, conducted after the original update'
    , CMA_activefac='1 # learning rate multiplier for active update'
    , CMA_cmean='1 # learning rate for the mean value'
    , CMA_const_trace='False # normalize trace, value CMA_const_trace=2 normalizes sum log eigenvalues to zero'
    , CMA_diagonal='0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always' # TODO 4/ccov_separable?
    , CMA_eigenmethod='np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)'
    , CMA_elitist='False # elitism likely impairs global search performance'
    , CMA_mirrors='popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used'
    , CMA_mu='None # parents selection parameter, default is popsize // 2'
    , CMA_on='True # False or 0 for no adaptation of the covariance matrix'
    , CMA_rankmu='True # False or 0 for omitting rank-mu update of covariance matrix'
    , CMA_rankmualpha='0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0'
    , CMA_dampfac='1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere'
    , CMA_dampsvec_fac='np.Inf # tentative and subject to changes, 0.5 would be a "default" damping for sigma vector update'
    , CMA_dampsvec_fade='0.1 # tentative fading out parameter for sigma vector update'
    , CMA_teststds='None # factors for non-isotropic initial distr. mainly for test purpose, see scaling_...'
    , CMA_AII='False # not yet tested'
    , bounds='[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector'
    , eval_parallel='False # when True, func might be called with more than one solution as first argument'
    , eval_initial_x='False # '
    , fixed_variables='None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized'
    , ftarget='-inf #v target function value, minimization'
    , incpopsize='2 # in fmin(): multiplier for increasing popsize before each restart'
    , maxfevals='inf #v maximum number of function evaluations'
    , maxiter='100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations'
    , mindx='0 #v minimal std in any direction, cave interference with tol*'
    , minstd='0 #v minimal std in any coordinate direction, cave interference with tol*'
    , noise_handling='False # maximal number of evaluations for noise treatment, only fmin'
    , noise_reevals=' 1.5 + popsize/20 # number of solution to be reevaluated for noise measurement, only fmin'
    , noise_eps='1e-7 # perturbation factor for noise handling reevaluations, only fmin'
    , noise_change_sigma='True # exponent to default sigma increment'
    , popsize='4+int(3*log(N)) # population size, AKA lambda, number of new solution per iteration'
    , randn='np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)'
    , restarts='0 # in fmin(): number of restarts'
    , restart_from_best='False'
    , scaling_of_variables='None # scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is ones(N)'
    , seed='None # random number seed'
    , termination_callback='None #v a function returning True for termination, called after each iteration step and could be abused for side effects'
    , tolfacupx='1e3 #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0'
    , tolupsigma='1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements'
    , tolfun='1e-11 #v termination criterion: tolerance in function value, quite useful'
    , tolfunhist='1e-12 #v termination criterion: tolerance in function value history'
    , tolstagnation='int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations'
    , tolx='1e-11 #v termination criterion: tolerance in x-changes'
    , transformation='None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno), t1 is the (optional) back transformation, see class GenoPheno'
    , typical_x='None # used with scaling_of_variables'
    , updatecovwait='None #v number of iterations without distribution update, name is subject to future changes' # TODO: rename: iterwaitupdatedistribution?
    , verb_append='0 # initial evaluation counter, if append, do not overwrite output files'
    , verb_disp='100 #v verbosity: display console output every verb_disp iteration'
    , verb_filenameprefix='outcmaes # output filenames prefix'
    , verb_log='1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions'
    , verb_plot='0 #v in fmin(): plot() is called every verb_plot iteration'
    , verb_time='True #v output timings on console'
    , vv='0 #? versatile variable for hacking purposes, value found in self.opts[\'vv\']'
    , availableProceses='1 # number of available processes CMA can use to parallelize each iteration'
    ):
    """functional interface to the stochastic optimizer CMA-ES
    for non-convex function minimization.
    Calling Sequences
    =================
    ``fmin([],[])``
        returns all optional arguments, that is,
        all keyword arguments to fmin with their default values
        in a dictionary.
    ``fmin(func, x0, sigma0)``
        minimizes `func` starting at `x0` and with standard deviation
        `sigma0` (step-size)
    ``fmin(func, x0, sigma0, ftarget=1e-5)``
        minimizes `func` up to target function value 1e-5
    ``fmin(func, x0, sigma0, args=('f',), **options)``
        minimizes `func` called with an additional argument ``'f'``.
        `options` is a dictionary with additional keyword arguments, e.g.
        delivered by `Options()`.
    ``fmin(func, x0, sigma0, **{'ftarget':1e-5, 'popsize':40})``
        the same as ``fmin(func, x0, sigma0, ftarget=1e-5, popsize=40)``
    ``fmin(func, esobj, **{'maxfevals': 1e5})``
        uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
        `func`, similar to `CMAEvolutionStrategy.optimize()`.
    Arguments
    =========
    `func`
        function to be minimized. Called as
        ``func(x,*args)``. `x` is a one-dimensional `numpy.ndarray`. `func`
        can return `numpy.NaN`,
        which is interpreted as outright rejection of solution `x`
        and invokes an immediate resampling and (re-)evaluation
        of a new solution not counting as function evaluation.
    `x0`
        list or `numpy.ndarray`, initial guess of minimum solution
        or `cma.CMAEvolutionStrategy` object instance. In this case
        `sigma0` can be omitted.
    `sigma0`
        scalar, initial standard deviation in each coordinate.
        `sigma0` should be about 1/4 of the search domain width where the
        optimum is to be expected. The variables in `func` should be
        scaled such that they presumably have similar sensitivity.
        See also option `scaling_of_variables`.
    Keyword Arguments
    =================
    All arguments besides `args` and `verb_filenameprefix` are evaluated
    if they are of type `str`, see class `Options` for details. The following
    list might not be fully up-to-date, use ``cma.Options()`` or
    ``cma.fmin([],[])`` to get the actual list.
    ::
        args=() -- additional arguments for func, not in `cma.Options()`
        CMA_active='False # exponential negative update, conducted after the original
            update'
        CMA_activefac='1 # learning rate multiplier for active update'
        CMA_cmean='1 # learning rate for the mean value'
        CMA_dampfac='1 #v positive multiplier for step-size damping, 0.3 is close to
            optimal on the sphere'
        CMA_diagonal='0*100*N/sqrt(popsize) # nb of iterations with diagonal
            covariance matrix, True for always'
        CMA_eigenmethod='np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, alternative: Misc.eig (slower)'
        CMA_elitist='False # elitism likely impairs global search performance'
        CMA_mirrors='0 # values <0.5 are interpreted as fraction, values >1 as numbers
            (rounded), otherwise about 0.16 is used'
        CMA_mu='None # parents selection parameter, default is popsize // 2'
        CMA_on='True # False or 0 for no adaptation of the covariance matrix'
        CMA_rankmu='True # False or 0 for omitting rank-mu update of covariance
            matrix'
        CMA_rankmualpha='0.3 # factor of rank-mu update if mu=1, subject to removal,
            default might change to 0.0'
        CMA_teststds='None # factors for non-isotropic initial distr. mainly for test
            purpose, see scaling_...'
        bounds='[None, None] # lower (=bounds[0]) and upper domain boundaries, each a
            scalar or a list/vector'
        eval_initial_x='False # '
        fixed_variables='None # dictionary with index-value pairs like {0:1.1, 2:0.1}
            that are not optimized'
        ftarget='-inf #v target function value, minimization'
        incpopsize='2 # in fmin(): multiplier for increasing popsize before each
            restart'
        maxfevals='inf #v maximum number of function evaluations'
        maxiter='long(1e3*N**2/sqrt(popsize)) #v maximum number of iterations'
        mindx='0 #v minimal std in any direction, cave interference with tol*'
        minstd='0 #v minimal std in any coordinate direction, cave interference with
            tol*'
        noise_eps='1e-7 # perturbation factor for noise handling reevaluations, only
            fmin'
        noise_handling='False # maximal number of evaluations for noise treatment,
            only fmin'
        noise_reevals=' 1.5 + popsize/20 # number of solution to be reevaluated for
            noise measurement, only fmin'
        popsize='4+int(3*log(N)) # population size, AKA lambda, number of new solution
            per iteration'
        randn='np.random.standard_normal #v randn((lam, N)) must return an np.array of
            shape (lam, N)'
        restarts='0 # in fmin(): number of restarts'
        scaling_of_variables='None # scale for each variable, sigma0 is interpreted
            w.r.t. this scale, in that effective_sigma0 = sigma0*scaling.
            Internally the variables are divided by scaling_of_variables and sigma
            is unchanged, default is ones(N)'
        seed='None # random number seed'
        termination_callback='None #v in fmin(): a function returning True for
            termination, called after each iteration step and could be abused for
            side effects'
        tolfacupx='1e3 #v termination when step-size increases by tolfacupx
            (diverges). That is, the initial step-size was chosen far too small and
            better solutions were found far away from the initial solution x0'
        tolupsigma='1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C)))
            indicates "creeping behavior" with usually minor improvements'
        tolfun='1e-11 #v termination criterion: tolerance in function value, quite
            useful'
        tolfunhist='1e-12 #v termination criterion: tolerance in function value
            history'
        tolstagnation='int(100 * N**1.5 / popsize) #v termination if no improvement
            over tolstagnation iterations'
        tolx='1e-11 #v termination criterion: tolerance in x-changes'
        transformation='None # [t0, t1] are two mappings, t0 transforms solutions from
            CMA-representation to f-representation, t1 is the back transformation,
            see class GenoPheno'
        typical_x='None # used with scaling_of_variables'
        updatecovwait='None #v number of iterations without distribution update, name
            is subject to future changes'
        verb_append='0 # initial evaluation counter, if append, do not overwrite
            output files'
        verb_disp='100 #v verbosity: display console output every verb_disp iteration'
        verb_filenameprefix='outcmaes # output filenames prefix'
        verb_log='1 #v verbosity: write data to files every verb_log iteration,
            writing can be time critical on fast to evaluate functions'
        verb_plot='0 #v in fmin(): plot() is called every verb_plot iteration'
        verb_time='True #v output timings on console'
        vv='0 #? versatile variable for hacking purposes, value found in
            self.opts['vv']'
    Subsets of options can be displayed, for example like ``cma.Options('tol')``,
    see also class `Options`.
    Return
    ======
    Similar to `OOOptimizer.optimize()` and/or `CMAEvolutionStrategy.optimize()`, return the
    list provided by `CMAEvolutionStrategy.result()` appended with an `OOOptimizer` and an
    `BaseDataLogger`::
        res = optim.result() + (optim.stop(), optim, logger)
    where
        - ``res[0]`` (``xopt``) -- best evaluated solution
        - ``res[1]`` (``fopt``) -- respective function value
        - ``res[2]`` (``evalsopt``) -- respective number of function evaluations
        - ``res[4]`` (``evals``) -- number of overall conducted objective function evaluations
        - ``res[5]`` (``iterations``) -- number of overall conducted iterations
        - ``res[6]`` (``xmean``) -- mean of the final sample distribution
        - ``res[7]`` (``stds``) -- effective stds of the final sample distribution
        - ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
        - ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
        - ``res[-1]`` (``logger``) -- class `CMADataLogger` instance
    Details
    =======
    This function is an interface to the class `CMAEvolutionStrategy`. The
    class can be used when full control over the iteration loop of the
    optimizer is desired.
    The noise handling follows closely [Hansen et al 2009, A Method for Handling
    Uncertainty in Evolutionary Optimization...] in the measurement part, but the
    implemented treatment is slightly different: for ``noiseS > 0``, ``evaluations``
    (time) and sigma are increased by ``alpha``. For ``noiseS < 0``, ``evaluations``
    (time) is decreased by ``alpha**(1/4)``. The option ``noise_handling`` switches
    the uncertainty handling on/off, the given value defines the maximal number
    of evaluations for a single fitness computation. If ``noise_handling`` is a list,
    the smallest element defines the minimal number and if the list has three elements,
    the median value is the start value for ``evaluations``. See also class
    `NoiseHandler`.
    Examples
    ========
    The following example calls `fmin` optimizing the Rosenbrock function
    in 10-D with initial solution 0.1 and initial step-size 0.5. The
    options are specified for the usage with the `doctest` module.
    >>> import cma
    >>> # cma.Options()  # returns all possible options
    >>> options = {'CMA_diagonal':10, 'seed':1234, 'verb_time':0}
    >>>
    >>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, **options)
    (5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
    Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
    Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
        1      10 1.264232686260072e+02 1.1e+00 4.40e-01  4e-01  4e-01
        2      20 1.023929748193649e+02 1.1e+00 4.00e-01  4e-01  4e-01
        3      30 1.214724267489674e+02 1.2e+00 3.70e-01  3e-01  4e-01
      100    1000 6.366683525319511e+00 6.2e+00 2.49e-02  9e-03  3e-02
      200    2000 3.347312410388666e+00 1.2e+01 4.52e-02  8e-03  4e-02
      300    3000 1.027509686232270e+00 1.3e+01 2.85e-02  5e-03  2e-02
      400    4000 1.279649321170636e-01 2.3e+01 3.53e-02  3e-03  3e-02
      500    5000 4.302636076186532e-04 4.6e+01 4.78e-03  3e-04  5e-03
      600    6000 6.943669235595049e-11 5.1e+01 5.41e-06  1e-07  4e-06
      650    6500 5.557961334063003e-14 5.4e+01 1.88e-07  4e-09  1e-07
    termination on tolfun : 1e-11
    final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
    mean solution:  [ 1.          1.00000001  1.          1.
        1.          1.00000001  1.00000002  1.00000003 ...]
    std deviation: [ 3.9193387e-09  3.7792732e-09  4.0062285e-09  4.6605925e-09
        5.4966188e-09   7.4377745e-09  1.3797207e-08  2.6020765e-08 ...]
    >>>
    >>> print('best solutions fitness = %f' % (res[1]))
    best solutions fitness = 2.62435631419e-14
    >>> assert res[1] < 1e-12
    The method ::
        cma.plot();
    (based on `matplotlib.pylab`) produces a plot of the run and, if necessary::
        cma.show()
    shows the plot in a window. To continue you might need to
    close the pop-up window. This behavior seems to disappear in
    subsequent calls of `cma.plot()` and is avoided by using
    `ipython` with `-pylab` option. Finally ::
        cma.savefig('myfirstrun')  # savefig from matplotlib.pylab
    will save the figure in a png.
    :See: `CMAEvolutionStrategy`, `OOOptimizer.optimize(), `plot()`, `Options`, `scipy.optimize.fmin()`
    """ # style guides say there should be the above empty line
    try: # pass on KeyboardInterrupt
        opts = locals() # collect all local variables (i.e. arguments) in a dictionary
        del opts['func'] # remove those without a default value
        del opts['args']
        del opts['x0'] # is not optional, no default available
        del opts['sigma0'] # is not optional for the constructor CMAEvolutionStrategy
        if not func: # return available options in a dictionary
            return Options(opts, True) # these opts are by definition valid
        # TODO: this is very ugly:
        incpopsize = Options({'incpopsize':incpopsize}).eval('incpopsize')
        restarts = Options({'restarts':restarts}).eval('restarts')
        del opts['restarts']
        noise_handling = Options({'noise_handling': noise_handling}).eval('noise_handling')
        del opts['noise_handling']# otherwise CMA throws an error
        irun = 0
        best = BestSolution()
        # restart loop: one CMAEvolutionStrategy run per iteration
        while 1:
            # recover from a CMA object
            if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
                es = x0
                x0 = es.inputargs['x0'] # for the next restarts
                if sigma0 is None or not np.isscalar(array(sigma0)):
                    sigma0 = es.inputargs['sigma0'] # for the next restarts
                # ignore further input args and keep original options
            else: # default case
                if irun and opts['restart_from_best']:
                    print('CAVE: restart_from_best is typically not useful')
                    es = CMAEvolutionStrategy(best.x, sigma0, opts)
                else:
                    es = CMAEvolutionStrategy(x0, sigma0, opts)
                if opts['eval_initial_x']:
                    x = es.gp.pheno(es.mean, bounds=es.gp.bounds)
                    es.best.update([x], None, [func(x, *args)], 1)
                    es.countevals += 1
            opts = es.opts # processed options, unambiguous
            append = opts['verb_append'] or es.countiter > 0 or irun > 0
            logger = CMADataLogger(opts['verb_filenameprefix'], opts['verb_log'])
            logger.register(es, append).add() # initial values, not fitness values
            # if es.countiter == 0 and es.opts['verb_log'] > 0 and not es.opts['verb_append']:
            #    logger = CMADataLogger(es.opts['verb_filenameprefix']).register(es)
            #    logger.add()
            # es.writeOutput()  # initial values for sigma etc
            # fixed typo in the console message ("Avaiable" -> "Available");
            # the option key itself keeps its historical spelling for compatibility
            print('Available Processes is: %s' % (str(opts['availableProceses'])))
            # print 'Available Processes is: ' + str(self.opts.availableProceses)
            noisehandler = NoiseHandler(es.N, noise_handling, np.median, opts['noise_reevals'], opts['noise_eps'], opts['eval_parallel'])
            while not es.stop():
                if opts['availableProceses'] == 1:
                    X, fit = es.ask_and_eval(func, args, evaluations=noisehandler.evaluations,
                                         aggregation=np.median) # treats NaN with resampling
                else:
                    X, fit = es.ask_and_eval3(func, args, evaluations=noisehandler.evaluations,
                                         aggregation=np.median, processes=opts['availableProceses']) # treats NaN with resampling
                # TODO: check args and in case use args=(noisehandler.evaluations, )
                if 11 < 3 and opts['vv']:  # inject a solution
                    # use option check_point = [0]
                    if 0 * np.random.randn() >= 0:
                        X[0] = 0 + opts['vv'] * es.sigma**0 * np.random.randn(es.N)
                        fit[0] = func(X[0], *args)
                        # print fit[0]
                es.tell(X, fit)  # prepare for next iteration
                if noise_handling:
                    es.sigma *= noisehandler(X, fit, func, es.ask, args)**opts['noise_change_sigma']
                    es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though
                es.disp()
                logger.add(more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
                           modulo=1 if es.stop() and logger.modulo else None)
                if opts['verb_log'] and opts['verb_plot'] and \
                    (es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop()):
                    logger.plot(324, fontsize=10)
            # end while not es.stop
            # evaluate the final distribution mean once, so `best` can
            # also report the mean if it happens to be the best solution
            mean_pheno = es.gp.pheno(es.mean, bounds=es.gp.bounds)
            fmean = func(mean_pheno, *args)
            es.countevals += 1
            es.best.update([mean_pheno], None, [fmean], es.countevals)
            best.update(es.best)  # in restarted case
            # final message
            if opts['verb_disp']:
                srestarts = (' after %i restart' + ('s' if irun > 1 else '')) % irun if irun else ''
                for k, v in list(es.stop().items()):
                    print('termination on %s=%s%s (%s)' % (k, str(v), srestarts, time.asctime()))
                print('final/bestever f-value = %e %e' % (es.best.last.f, best.f))
                if es.N < 9:
                    print('mean solution: ' + str(es.gp.pheno(es.mean)))
                    print('std deviation: ' + str(es.sigma * sqrt(es.dC) * es.gp.scales))
                else:
                    print('mean solution: %s ...]' % (str(es.gp.pheno(es.mean)[:8])[:-1]))
                    print('std deviations: %s ...]' % (str((es.sigma * sqrt(es.dC) * es.gp.scales)[:8])[:-1]))
            irun += 1
            # NOTE(review): 'maxfunevals' looks inconsistent with the option
            # name 'maxfevals' -- confirm the key used in es.stopdict
            if irun > restarts or 'ftarget' in es.stopdict or 'maxfunevals' in es.stopdict:
                break
            opts['verb_append'] = es.countevals
            opts['popsize'] = incpopsize * es.sp.popsize # TODO: use rather options?
            opts['seed'] += 1
        # while irun
        es.out['best'] = best  # TODO: this is a rather suboptimal type for inspection in the shell
        if 1 < 3:
            return es.result() + (es.stop(), es, logger)
        else: # previously: to be removed
            return (best.x.copy(), best.f, es.countevals,
                    dict((('stopdict', CMAStopDict(es.stopdict))
                          ,('mean', es.gp.pheno(es.mean))
                          ,('std', es.sigma * sqrt(es.dC) * es.gp.scales)
                          ,('out', es.out)
                          ,('opts', es.opts)  # last state of options
                          ,('cma', es)
                          ,('inputargs', es.inputargs)
                          ))
                   )
        # TODO refine output, can #args be flexible?
        # is this well usable as it is now?
    except KeyboardInterrupt:  # Exception, e:
        if opts['verb_disp'] > 0:
            print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')
        raise  # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit
def plot(name=None, fig=None, abscissa=1, iteridx=None, plot_mean=True, # TODO: plot_mean default should be False
         foffset=1e-19, x_opt=None, fontsize=10):
    """Plot data from files written by a `CMADataLogger`.

    ``cma.plot(name, **argsdict)`` is shorthand for
    ``cma.CMADataLogger(name).plot(**argsdict)``.

    Arguments
    ---------
    `name`
        name of the logger (its filename prefix); `None` selects the
        default ``'outcmaes'``
    `fig`
        filename or figure number, or both as a tuple (any order)
    `abscissa`
        0 == plot versus iteration count,
        1 == plot versus function evaluation number
    `iteridx`
        iteration indices to plot

    Returns `None`.

    Examples
    --------
    ::
        cma.plot()  # the optimization might be still
                    # running in a different shell
        cma.show()  # to continue you might need to close the pop-up window
                    # once and call cma.plot() again.
                    # This behavior seems to disappear in subsequent
                    # calls of cma.plot(). Also using ipython with -pylab
                    # option might help.
        cma.savefig('fig325.png')
        cma.close()
        cdl = cma.CMADataLogger().downsampling().plot()

    Details
    -------
    Data files produced by codes in other languages (C, Java, Matlab,
    Scilab) share the same format and can be plotted just the same.

    :See: `CMADataLogger`, `CMADataLogger.plot()`
    """
    logger = CMADataLogger(name)
    logger.plot(fig, abscissa, iteridx, plot_mean, foffset, x_opt, fontsize)
def disp(name=None, idx=None):
    """Display selected rows of the data files written by `CMADataLogger`.

    ``cma.disp(name, idx)`` is shorthand for
    ``cma.CMADataLogger(name).disp(idx)``.

    Arguments
    ---------
    `name`
        name of the logger (its filename prefix); `None` selects the
        default ``'outcmaes'``
    `idx`
        indices of rows in the data file; by default the first five,
        then every 100-th, and the last 10 rows; too large index
        values are removed

    Examples
    --------
    ::
        import cma, numpy
        # assume some data are available from previous runs
        cma.disp(None, numpy.r_[0, -1])          # first and last
        cma.disp(None, numpy.r_[0:1e9:100, -1])  # every 100-th and last
        cma.disp(idx=numpy.r_[0, -10:0])         # first and ten last
        cma.disp(idx=numpy.r_[0:1e9:1e3, -10:0])

    :See: `CMADataLogger.disp()`
    """
    prefix = name if name else 'outcmaes'
    return CMADataLogger(prefix).disp(idx)
#____________________________________________________________
def _fileToMatrix(file_name):
"""rudimentary method to read in data from a file"""
# TODO: np.loadtxt() might be an alternative
# try:
if 1 < 3:
lres = []
for line in open(file_name, 'r').readlines():
if len(line) > 0 and line[0] not in ('%', '#'):
lres.append(list(map(float, line.split())))
res = lres
else:
fil = open(file_name, 'r')
fil.readline() # rudimentary, assume one comment line
lineToRow = lambda line: list(map(float, line.split()))
res = list(map(lineToRow, fil.readlines()))
fil.close() # close file could be omitted, reference counting should do during garbage collection, but...
while res != [] and res[0] == []: # remove further leading empty lines
del res[0]
return res
# except:
print('could not read file ' + file_name)
#____________________________________________________________
#____________________________________________________________
class NoiseHandler(object):
"""Noise handling according to [Hansen et al 2009, A Method for Handling
Uncertainty in Evolutionary Optimization...]
The interface of this class is yet versatile and subject to changes.
The attribute ``evaluations`` serves to control the noise via number of
evaluations, for example with `ask_and_eval()`, see also parameter
``maxevals`` and compare the example.
Example
-------
>>> import cma, numpy as np
>>> func = cma.Fcts.noisysphere
>>> es = cma.CMAEvolutionStrategy(np.ones(10), 1)
>>> logger = cma.CMADataLogger().register(es)
>>> nh = cma.NoiseHandler(es.N, maxevals=[1, 30])
>>> while not es.stop():
... X, fit = es.ask_and_eval(func, evaluations=nh.evaluations)
... es.tell(X, fit) # prepare for next iteration
... es.sigma *= nh(X, fit, func, es.ask) # see method __call__
... es.countevals += nh.evaluations_just_done # this is a hack, not important though
... logger.add(more_data = [nh.evaluations, nh.noiseS]) # add a data point
... es.disp()
... # nh.maxevals = ... it might be useful to start with smaller values and then increase
>>> print(es.stop())
>>> print(es.result()[-2]) # take mean value, the best solution is totally off
>>> assert sum(es.result()[-2]**2) < 1e-9
>>> print(X[np.argmin(fit)]) # not bad, but probably worse than the mean
>>> logger.plot()
The noise options of `fmin()` control a `NoiseHandler` instance similar to this
example. The command ``cma.Options('noise')`` lists in effect the parameters of
`__init__` apart from ``aggregate``.
Details
-------
The parameters reevals, theta, c_s, and alpha_t are set differently
than in the original publication, see method `__init__()`. For a
very small population size, say popsize <= 5, the measurement
technique based on rank changes is likely to fail.
Missing Features
----------------
In case no noise is found, ``self.lam_reeval`` should be adaptive
and get at least as low as 1 (however the possible savings from this
are rather limited). Another option might be to decide during the
first call by a quantitative analysis of fitness values whether
``lam_reeval`` is set to zero. More generally, an automatic noise
mode detection might also set the covariance matrix learning rates
to smaller values.
:See: `fmin()`, `ask_and_eval()`
"""
def __init__(self, N, maxevals=10, aggregate=np.median, reevals=None, epsilon=1e-7, parallel=False):
    """parameters are
    `N`
        dimension
    `maxevals`
        maximal value for ``self.evaluations``, where
        ``self.evaluations`` function calls are aggregated for
        noise treatment. With ``maxevals == 0`` the noise
        handler is (temporarily) "switched off". If `maxevals`
        is a list, min value and (for >2 elements) median are
        used to define minimal and initial value of
        ``self.evaluations``. Choosing ``maxevals > 1`` is only
        reasonable, if also the original ``fit`` values (that
        are passed to `__call__`) are computed by aggregation of
        ``self.evaluations`` values (otherwise the values are
        not comparable), as it is done within `fmin()`.
    `aggregate`
        function to aggregate single f-values to a 'fitness', e.g.
        ``np.median``.
    `reevals`
        number of solutions to be reevaluated for noise measurement,
        can be a float, by default set to ``1.5 + popsize/20``,
        zero switches noise handling off.
    `epsilon`
        multiplier for perturbation of the reevaluated solutions
    `parallel`
        a single f-call with all resampled solutions
    :See: `fmin()`, `Options`, `CMAEvolutionStrategy.ask_and_eval()`
    """
    self.lam_reeval = reevals  # 2 + popsize/20, see method indices(), originally 2 + popsize/10
    self.epsilon = epsilon
    self.parallel = parallel
    self.theta = 0.5  # originally 0.2
    self.cum = 0.3  # originally 1, 0.3 allows one disagreement of current point with resulting noiseS
    # bug guard: use float literals so the adaptation factors exceed 1 even
    # under Python 2 integer division -- without a module-level
    # ``from __future__ import division`` the original ``2 / (N+10)``
    # evaluates to 0 and silently disables sigma/evaluations adaptation
    self.alphasigma = 1 + 2. / (N + 10)
    self.alphaevals = 1 + 2. / (N + 10)  # originally 1.5
    self.alphaevalsdown = self.alphaevals**-0.25  # originally 1/1.5
    self.evaluations = 1  # to aggregate for a single f-evaluation
    self.minevals = 1
    self.maxevals = int(np.max(maxevals))  # scalar upper bound, also when maxevals is a list
    if hasattr(maxevals, '__contains__'):  # i.e. can deal with ``in``
        if len(maxevals) > 1:
            self.minevals = min(maxevals)
            self.evaluations = self.minevals
        if len(maxevals) > 2:
            self.evaluations = np.median(maxevals)
    self.f_aggregate = aggregate
    self.evaluations_just_done = 0  # actually conducted evals, only for documentation
    self.noiseS = 0  # exponentially smoothed noise measurement
def __call__(self, X, fit, func, ask=None, args=()):
    """proceed with noise measurement, set anew attributes ``evaluations``
    (proposed number of evaluations to "treat" noise) and ``evaluations_just_done``
    and return a factor for increasing sigma.
    Parameters
    ----------
    `X`
        a list/sequence/vector of solutions
    `fit`
        the respective list of function values
    `func`
        the objective function, ``fit[i]`` corresponds to ``func(X[i], *args)``
    `ask`
        a method to generate a new, slightly disturbed solution. The argument
        is mandatory if ``epsilon`` is not zero, see `__init__()`.
    `args`
        optional additional arguments to `func`
    Details
    -------
    Calls the methods ``reeval()``, ``update_measure()`` and ``treat()`` in this order.
    ``self.evaluations`` is adapted within the method `treat()`.
    """
    self.evaluations_just_done = 0
    # noise handling is switched off when maxevals == 0 or reevals == 0
    if not self.maxevals or self.lam_reeval == 0:
        return 1.0  # sigma unchanged
    res = self.reeval(X, fit, func, ask, args)
    # reeval() returns the (empty) index set when nothing was reevaluated
    if not len(res):
        return 1.0
    self.update_measure()
    return self.treat()  # sigma multiplier, 1.0 or self.alphasigma
def get_evaluations(self):
    """Return ``self.evaluations``, the number of evaluations aggregated
    into a single fitness measurement."""
    return self.evaluations
def treat(self):
    """adapt ``self.evaluations`` according to the current noise
    measurement ``self.noiseS`` and return a sigma multiplier, which is
    ``self.alphasigma`` when noise was detected and ``1.0`` otherwise.
    """
    if self.noiseS > 0:
        # noise detected: average over more evaluations, capped at maxevals
        self.evaluations = min(self.evaluations * self.alphaevals, self.maxevals)
        return self.alphasigma
    # no noise: relax the averaging again, but never below minevals
    self.evaluations = max(self.evaluations * self.alphaevalsdown, self.minevals)
    return 1.0
def reeval(self, X, fit, func, ask, args=()):
    """store two fitness lists, `fit` and ``fitre`` reevaluating some
    solutions in `X`.
    ``self.evaluations`` evaluations are done for each reevaluated
    fitness value.
    See `__call__()`, where `reeval()` is called.

    Returns the (empty) index array when no solution is selected for
    reevaluation, otherwise the triple ``(self.fit, self.fitre, self.idx)``.
    """
    self.fit = list(fit)      # original fitness values, kept as reference
    self.fitre = list(fit)    # will hold the reevaluated values at self.idx
    self.idx = self.indices(fit)
    if not len(self.idx):
        return self.idx
    # number of evaluations aggregated per reevaluated solution;
    # a falsy f_aggregate means a single evaluation per solution
    evals = int(self.evaluations) if self.f_aggregate else 1
    # fall back to the median when no aggregation function was given
    fagg = np.median if self.f_aggregate is None else self.f_aggregate
    for i in self.idx:
        if self.epsilon:
            # evaluate at a slightly perturbed solution produced by ask()
            if self.parallel:
                # one f-call with all `evals` resampled solutions at once
                self.fitre[i] = fagg(func(ask(evals, X[i], self.epsilon), *args))
            else:
                self.fitre[i] = fagg([func(ask(1, X[i], self.epsilon)[0], *args)
                                      for _k in xrange(evals)])
        else:
            # epsilon == 0: reevaluate the unperturbed solution
            self.fitre[i] = fagg([func(X[i], *args) for _k in xrange(evals)])
    self.evaluations_just_done = evals * len(self.idx)
    return self.fit, self.fitre, self.idx
def update_measure(self):
    """updated noise level measure using two fitness lists ``self.fit`` and
    ``self.fitre``, return ``self.noiseS, all_individual_measures``.
    Assumes that `self.idx` contains the indices where the fitness
    lists differ.
    """
    lam = len(self.fit)
    # rank all 2*lam values (original first, reevaluated second) jointly
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    # rank change of each solution; the np.sign term removes the
    # one-rank shift a solution necessarily has against its own duplicate
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
    # theta-percentile of the possible rank distances around each of the
    # two observed ranks, averaged; serves as significance threshold
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0,i] + 1 - (ranks[0,i] > ranks[1,i]))),
                     self.theta*50) +
                     Mh.prctile(np.abs(r - (ranks[1,i] + 1 - (ranks[1,i] > ranks[0,i]))),
                     self.theta*50))
              for i in self.idx]
    # compute measurement
    # max: 1 rankchange in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    # exponential smoothing with time constant self.cum
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s
def indices(self, fit):
    """return the set of indices to be reevaluated for noise measurement,
    taking the ``lam_reeval`` best from the first ``2 * lam_reeval + 2``
    values.
    Given the first values are the earliest, this is a useful policy also
    with a time changing objective.
    """
    # number of solutions to reevaluate; default depends on population size
    n_reeval = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20
    # stochastic rounding of a fractional n_reeval
    count = int(n_reeval) + ((n_reeval % 1) > np.random.rand())
    candidates = np.asarray(fit)[:2 * (count + 1)]
    return np.argsort(candidates)[:count]
#____________________________________________________________
#____________________________________________________________
class Sections(object):
    """plot sections through an objective function. A first
    rational thing to do, when facing an (expensive) application.
    By default 6 points in each coordinate are evaluated.
    This class is still experimental.
    Examples
    --------
    >>> import cma, numpy as np
    >>> s = cma.Sections(cma.Fcts.rosen, np.zeros(3)).do(plot=False)
    >>> s.do(plot=False)  # evaluate the same points again, i.e. check for noise
    >>> try:
    ...     s.plot()
    ... except:
    ...     print('plotting failed: pylab package is missing?')
    Details
    -------
    Data are saved after each function call during `do()`. The filename is attribute
    ``name`` and by default ``str(func)``, see `__init__()`.
    A random (orthogonal) basis can be generated with ``cma.Rotation()(np.eye(3))``.
    The default name is unique in the function name, but it should be unique in all
    parameters of `__init__()` but `plot_cmd` and `load`.
    ``self.res`` is a dictionary with an entry for each "coordinate" ``i`` and with an
    entry ``'x'``, the middle point. Each entry ``i`` is again a dictionary with keys
    being different dx values and the value being a sequence of f-values.
    For example ``self.res[2][0.1] == [0.01, 0.01]``, which is generated using the
    difference vector ``self.basis[2]`` like
    ``self.res[2][dx] += func(self.res['x'] + dx * self.basis[2])``.
    :See: `__init__()`
    """
    def __init__(self, func, x, args=(), basis=None, name=None,
                 plot_cmd=pylab.plot if pylab else None, load=True):
        """
        Parameters
        ----------
        `func`
            objective function
        `x`
            point in search space, middle point of the sections
        `args`
            arguments passed to `func`
        `basis`
            evaluated points are ``func(x + locations[j] * basis[i]) for i in len(basis) for j in len(locations)``,
            see `do()`
        `name`
            filename where to save the result
        `plot_cmd`
            command used to plot the data, typically matplotlib pylabs `plot` or `semilogy`
        `load`
            load previous data from file ``str(func) + '.pkl'``
        """
        # NOTE(review): the plot_cmd default relies on a module-level
        # ``pylab`` name (guarded import elsewhere in this module)
        self.func = func
        self.args = args
        self.x = x
        # derive a filesystem-safe name from str(func) if none was given
        self.name = name if name else str(func).replace(' ', '_').replace('>', '').replace('<', '')
        self.plot_cmd = plot_cmd  # or semilogy
        self.basis = np.eye(len(x)) if basis is None else basis
        try:
            self.load()
            # discard loaded data when the middle point differs
            if any(self.res['x'] != x):
                self.res = {}
                self.res['x'] = x  # TODO: res['x'] does not look perfect
            else:
                print(self.name + ' loaded')
        except:
            # loading failed (e.g. no file yet): start with fresh results
            self.res = {}
            self.res['x'] = x
    def do(self, repetitions=1, locations=np.arange(-0.5, 0.6, 0.2), plot=True):
        """generates, plots and saves function values ``func(y)``,
        where ``y`` is 'close' to `x` (see `__init__()`). The data are stored in
        the ``res`` attribute and the class instance is saved in a file
        with (the weired) name ``str(func)``.
        Parameters
        ----------
        `repetitions`
            for each point, only for noisy functions is >1 useful. For
            ``repetitions==0`` only already generated data are plotted.
        `locations`
            coordinated wise deviations from the middle point given in `__init__`
        """
        # NOTE: the default `locations` array is never mutated here, so the
        # mutable default argument is harmless
        if not repetitions:
            self.plot()
            return
        res = self.res
        for i in range(len(self.basis)):  # i-th coordinate
            if i not in res:
                res[i] = {}
            # xx = np.array(self.x)
            # TODO: store res[i]['dx'] = self.basis[i] here?
            for dx in locations:
                xx = self.x + dx * self.basis[i]
                xkey = dx  # xx[i] if (self.basis == np.eye(len(self.basis))).all() else dx
                if xkey not in res[i]:
                    res[i][xkey] = []
                n = repetitions
                while n > 0:
                    n -= 1
                    res[i][xkey].append(self.func(xx, *self.args))
                    # plot and save after every single function evaluation
                    if plot:
                        self.plot()
                    self.save()
        return self
    def plot(self, plot_cmd=None, tf=lambda y: y):
        """plot the data we have, return ``self``"""
        if not plot_cmd:
            plot_cmd = self.plot_cmd
        colors = 'bgrcmyk'
        pylab.hold(False)
        res = self.res
        flatx, flatf = self.flattened()
        # find the overall minimum to shift values positive for log plots
        minf = np.inf
        for i in flatf:
            minf = min((minf, min(flatf[i])))
        addf = 1e-9 - minf if minf <= 0 else 0
        for i in sorted(res.keys()):  # we plot not all values here
            if type(i) is int:
                color = colors[i % len(colors)]
                arx = sorted(res[i].keys())
                # median curve per coordinate
                plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
                pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
                pylab.hold(True)
                # raw data points
                plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
        pylab.ylabel('f + ' + str(addf))
        pylab.draw()
        show()
        # raw_input('press return')
        return self
    def flattened(self):
        """return flattened data ``(x, f)`` such that for the sweep through
        coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])``
        """
        flatx = {}
        flatf = {}
        for i in self.res:
            if type(i) is int:
                flatx[i] = []
                flatf[i] = []
                for x in sorted(self.res[i]):
                    for d in sorted(self.res[i][x]):
                        flatx[i].append(x)
                        flatf[i].append(d)
        return flatx, flatf
    def save(self, name=None):
        """save to file"""
        import pickle
        name = name if name else self.name
        fun = self.func
        del self.func  # instance method produces error
        pickle.dump(self, open(name + '.pkl', "wb" ))
        self.func = fun  # restore the attribute removed for pickling
        return self
    def load(self, name=None):
        """load from file"""
        import pickle
        name = name if name else self.name
        s = pickle.load(open(name + '.pkl', 'rb'))
        self.res = s.res  # disregard the class
        return self
#____________________________________________________________
#____________________________________________________________
class _Error(Exception):
    """generic exception raised within the cma module"""
#____________________________________________________________
#____________________________________________________________
#
class ElapsedTime(object):
    """32-bit C overflows after int(2**32/1e6) == 4294s about 72 min

    Measures elapsed process time while compensating for the overflow of
    the underlying 32-bit counter by accumulating ``time_to_add``.
    """
    # NOTE(review): ``time.clock`` was removed in Python 3.8 -- this class
    # only runs on Python 2 / early Python 3; confirm intended interpreter.
    def __init__(self):
        self.tic0 = time.clock()        # first reading, kept for diagnostics
        self.tic = self.tic0            # start of the current (non-overflowed) segment
        self.lasttoc = time.clock()     # most recent reading
        self.lastdiff = time.clock() - self.lasttoc  # last observed increment
        self.time_to_add = 0            # accumulated time from completed segments
        self.messages = 0               # number of overflow warnings printed so far
    def __call__(self):
        toc = time.clock()
        # equivalent to toc >= self.lasttoc: the clock advanced normally
        if toc - self.tic >= self.lasttoc - self.tic:
            self.lastdiff = toc - self.lasttoc
            self.lasttoc = toc
        else:  # overflow, reset self.tic
            # print at most 3 warnings to avoid flooding the output
            if self.messages < 3:
                self.messages += 1
                print('  in cma.ElapsedTime: time measure overflow, last difference estimated from',
                      self.tic0, self.tic, self.lasttoc, toc, toc - self.lasttoc, self.lastdiff)
            # close the old segment: its length plus an estimate of the
            # unobserved increment (self.lastdiff) is banked in time_to_add
            self.time_to_add += self.lastdiff + self.lasttoc - self.tic
            self.tic = toc  # reset
            self.lasttoc = toc
        self.elapsedtime = toc - self.tic + self.time_to_add
        return self.elapsedtime
#____________________________________________________________
#____________________________________________________________
#
class TimeIt(object):
    """unimplemented placeholder for a timing utility"""
    def __init__(self, fct, args=(), seconds=1):
        # intentionally a no-op stub; arguments are accepted but ignored
        pass
class Misc(object):
#____________________________________________________________
#____________________________________________________________
#
@staticmethod
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
    """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)

    When `Cinv` is None the density is isotropic; `detC` may be passed to
    avoid recomputing the determinant of ``Cinv**-1``.
    """
    # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)
    # for i in range(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
    dx = x if m is None else x - m
    n = len(x)
    norm_const = (2 * np.pi)**(n / 2.)
    if Cinv is None:  # isotropic Gaussian
        return exp(-sum(dx**2) / sigma**2 / 2) / norm_const / sigma**n
    if detC is None:
        detC = 1. / np.linalg.det(Cinv)
    mahalanobis_sq = np.dot(dx, np.dot(Cinv, dx))
    return exp(-mahalanobis_sq / sigma**2 / 2) / norm_const / abs(detC)**0.5 / sigma**n
@staticmethod
def loglikelihood(self, x, previous=False):
    """return log-likelihood of `x` regarding the current sample distribution

    NOTE(review): declared ``@staticmethod`` yet takes ``self`` -- it acts
    as an unbound method of a strategy instance; kept unchanged for
    interface compatibility.
    """
    # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)
    # for i in range(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
    # TODO: test this!!
    # c=cma.fmin...
    # c[3]['cma'].loglikelihood(...)
    if previous and hasattr(self, 'lastiter'):
        sigma = self.lastiter.sigma
        Crootinv = self.lastiter._Crootinv
        xmean = self.lastiter.mean
        D = self.lastiter.D
    elif previous and self.countiter > 1:
        raise _Error('no previous distribution parameters stored, check options importance_mixing')
    else:
        sigma = self.sigma
        Crootinv = self._Crootinv
        xmean = self.mean
        D = self.D
    dx = array(x) - xmean  # array(x) - array(m)
    n = self.N
    logs2pi = n * log(2*np.pi) / 2.
    logdetC = 2 * sum(log(D))
    dx = np.dot(Crootinv, dx)
    res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC/2 - n*log(sigma)
    if 1 < 3:  # internal consistency check against the non-log formula
        s2pi = (2*np.pi)**(n/2.)
        detC = np.prod(D)**2
        res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n)
        # bug fix: the original disjunction (``... or ...``) was vacuously
        # true for any res2; both bounds must hold simultaneously
        assert res - 1e-8 < res2 < res + 1e-8
    return res
#____________________________________________________________
#____________________________________________________________
#
# C and B are arrays rather than matrices, because they are
# addressed via B[i][j], matrices can only be addressed via B[i,j]
# tred2(N, B, diagD, offdiag);
# tql2(N, diagD, offdiag, B);
# Symmetric Householder reduction to tridiagonal form, translated from JAMA package.
@staticmethod
def eig(C):
    """eigendecomposition of a symmetric matrix, much slower than
    `numpy.linalg.eigh`, return ``(EVals, Basis)``, the eigenvalues
    and an orthonormal basis of the corresponding eigenvectors, where
        ``Basis[i]``
            the i-th row of ``Basis``
        columns of ``Basis``, ``[Basis[j][i] for j in range(len(Basis))]``
            the i-th eigenvector with eigenvalue ``EVals[i]``

    Two-stage JAMA algorithm: Householder reduction to tridiagonal form
    (``tred2``), then the symmetric tridiagonal QL iteration (``tql2``).
    """
    # class eig(object):
    #     def __call__(self, C):
    # Householder transformation of a symmetric matrix V into tridiagonal form.
    # -> n             : dimension
    # -> V             : symmetric nxn-matrix
    # <- V             : orthogonal transformation matrix:
    #                    tridiag matrix == V * V_in * V^t
    # <- d             : diagonal
    # <- e[0..n-1]     : off diagonal (elements 1..n-1)
    # Symmetric tridiagonal QL algorithm, iterative
    # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations
    # -> n             : Dimension.
    # -> d             : Diagonale of tridiagonal matrix.
    # -> e[1..n-1]     : off-diagonal, output from Householder
    # -> V             : matrix output von Householder
    # <- d             : eigenvalues
    # <- e             : garbage?
    # <- V             : basis of eigenvectors, according to d
    # tred2(N, B, diagD, offdiag); B=C on input
    # tql2(N, diagD, offdiag, B);
    # private void tred2 (int n, double V[][], double d[], double e[]) {
    def tred2 (n, V, d, e):
        # This is derived from the Algol procedures tred2 by
        # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
        # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
        # Fortran subroutine in EISPACK.
        num_opt = False  # factor 1.5 in 30-D; toggles numpy-vectorized branches below
        for j in range(n):
            d[j] = V[n-1][j]  # d is output argument
        # Householder reduction to tridiagonal form.
        for i in range(n-1,0,-1):
            # Scale to avoid under/overflow.
            h = 0.0
            if not num_opt:
                scale = 0.0
                for k in range(i):
                    scale = scale + abs(d[k])
            else:
                scale = sum(abs(d[0:i]))
            if scale == 0.0:
                e[i] = d[i-1]
                for j in range(i):
                    d[j] = V[i-1][j]
                    V[i][j] = 0.0
                    V[j][i] = 0.0
            else:
                # Generate Householder vector.
                if not num_opt:
                    for k in range(i):
                        d[k] /= scale
                        h += d[k] * d[k]
                else:
                    d[:i] /= scale
                    h = np.dot(d[:i],d[:i])
                f = d[i-1]
                g = h**0.5
                if f > 0:
                    g = -g
                e[i] = scale * g
                h = h - f * g
                d[i-1] = f - g
                if not num_opt:
                    for j in range(i):
                        e[j] = 0.0
                else:
                    e[:i] = 0.0
                # Apply similarity transformation to remaining columns.
                for j in range(i):
                    f = d[j]
                    V[j][i] = f
                    g = e[j] + V[j][j] * f
                    if not num_opt:
                        for k in range(j+1, i):
                            g += V[k][j] * d[k]
                            e[k] += V[k][j] * f
                        e[j] = g
                    else:
                        e[j+1:i] += V.T[j][j+1:i] * f
                        e[j] = g + np.dot(V.T[j][j+1:i],d[j+1:i])
                f = 0.0
                if not num_opt:
                    for j in range(i):
                        e[j] /= h
                        f += e[j] * d[j]
                else:
                    e[:i] /= h
                    f += np.dot(e[:i],d[:i])
                hh = f / (h + h)
                if not num_opt:
                    for j in range(i):
                        e[j] -= hh * d[j]
                else:
                    e[:i] -= hh * d[:i]
                for j in range(i):
                    f = d[j]
                    g = e[j]
                    if not num_opt:
                        for k in range(j, i):
                            V[k][j] -= (f * e[k] + g * d[k])
                    else:
                        V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
                    d[j] = V[i-1][j]
                    V[i][j] = 0.0
            d[i] = h
            # end for i--
        # Accumulate transformations.
        for i in range(n-1):
            V[n-1][i] = V[i][i]
            V[i][i] = 1.0
            h = d[i+1]
            if h != 0.0:
                if not num_opt:
                    for k in range(i+1):
                        d[k] = V[k][i+1] / h
                else:
                    d[:i+1] = V.T[i+1][:i+1] / h
                for j in range(i+1):
                    if not num_opt:
                        g = 0.0
                        for k in range(i+1):
                            g += V[k][i+1] * V[k][j]
                        for k in range(i+1):
                            V[k][j] -= g * d[k]
                    else:
                        g = np.dot(V.T[i+1][0:i+1], V.T[j][0:i+1])
                        V.T[j][:i+1] -= g * d[:i+1]
            # zero the processed column regardless of h
            if not num_opt:
                for k in range(i+1):
                    V[k][i+1] = 0.0
            else:
                V.T[i+1][:i+1] = 0.0
        if not num_opt:
            for j in range(n):
                d[j] = V[n-1][j]
                V[n-1][j] = 0.0
        else:
            d[:n] = V[n-1][:n]
            V[n-1][:n] = 0.0
        V[n-1][n-1] = 1.0
        e[0] = 0.0
    # Symmetric tridiagonal QL algorithm, taken from JAMA package.
    # private void tql2 (int n, double d[], double e[], double V[][]) {
    # needs roughly 3N^3 operations
    def tql2 (n, d, e, V):
        # This is derived from the Algol procedures tql2, by
        # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
        # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
        # Fortran subroutine in EISPACK.
        num_opt = False  # using vectors from numpy makes it faster
        if not num_opt:
            for i in range(1,n):  # (int i = 1; i < n; i++):
                e[i-1] = e[i]
        else:
            e[0:n-1] = e[1:n]
        e[n-1] = 0.0
        f = 0.0
        tst1 = 0.0
        eps = 2.0**-52.0  # machine epsilon for double precision
        for l in range(n):  # (int l = 0; l < n; l++) {
            # Find small subdiagonal element
            tst1 = max(tst1, abs(d[l]) + abs(e[l]))
            m = l
            while m < n:
                if abs(e[m]) <= eps*tst1:
                    break
                m += 1
            # If m == l, d[l] is an eigenvalue,
            # otherwise, iterate.
            if m > l:
                iiter = 0
                while 1:  # do {
                    iiter += 1  # (Could check iteration count here.)
                    # Compute implicit shift
                    g = d[l]
                    p = (d[l+1] - g) / (2.0 * e[l])
                    r = (p**2 + 1)**0.5  # hypot(p,1.0)
                    if p < 0:
                        r = -r
                    d[l] = e[l] / (p + r)
                    d[l+1] = e[l] * (p + r)
                    dl1 = d[l+1]
                    h = g - d[l]
                    if not num_opt:
                        for i in range(l+2, n):
                            d[i] -= h
                    else:
                        d[l+2:n] -= h
                    f = f + h
                    # Implicit QL transformation.
                    p = d[m]
                    c = 1.0
                    c2 = c
                    c3 = c
                    el1 = e[l+1]
                    s = 0.0
                    s2 = 0.0
                    # hh = V.T[0].copy()  # only with num_opt
                    for i in range(m-1, l-1, -1):  # (int i = m-1; i >= l; i--) {
                        c3 = c2
                        c2 = c
                        s2 = s
                        g = c * e[i]
                        h = c * p
                        r = (p**2 + e[i]**2)**0.5  # hypot(p,e[i])
                        e[i+1] = s * r
                        s = e[i] / r
                        c = p / r
                        p = c * d[i] - s * g
                        d[i+1] = h + s * (c * g + s * d[i])
                        # Accumulate transformation.
                        if not num_opt:  # overall factor 3 in 30-D
                            for k in range(n):  # (int k = 0; k < n; k++) {
                                h = V[k][i+1]
                                V[k][i+1] = s * V[k][i] + c * h
                                V[k][i] = c * V[k][i] - s * h
                        else:  # about 20% faster in 10-D
                            hh = V.T[i+1].copy()
                            # hh[:] = V.T[i+1][:]
                            V.T[i+1] = s * V.T[i] + c * hh
                            V.T[i] = c * V.T[i] - s * hh
                            # V.T[i] *= c
                            # V.T[i] -= s * hh
                    p = -s * s2 * c3 * el1 * e[l] / dl1
                    e[l] = s * p
                    d[l] = c * p
                    # Check for convergence.
                    if abs(e[l]) <= eps*tst1:
                        break
                # } while (Math.abs(e[l]) > eps*tst1);
            d[l] = d[l] + f
            e[l] = 0.0
        # Sort eigenvalues and corresponding vectors.
        # (disabled: ``11 < 3`` is a deliberate always-false toggle)
        if 11 < 3:
            for i in range(n-1):  # (int i = 0; i < n-1; i++) {
                k = i
                p = d[i]
                for j in range(i+1, n):  # (int j = i+1; j < n; j++) {
                    if d[j] < p:  # NH find smallest k>i
                        k = j
                        p = d[j]
                if k != i:
                    d[k] = d[i]  # swap k and i
                    d[i] = p
                    for j in range(n):  # (int j = 0; j < n; j++) {
                        p = V[j][i]
                        V[j][i] = V[j][k]
                        V[j][k] = p
    # tql2
    N = len(C[0])
    if 11 < 3:  # disabled numpy-array code path
        V = np.array([x[:] for x in C])  # copy each "row"
        N = V[0].size
        d = np.zeros(N)
        e = np.zeros(N)
    else:
        V = [[x[i] for i in xrange(N)] for x in C]  # copy each "row"
        d = N * [0.]
        e = N * [0.]
    tred2(N, V, d, e)
    tql2(N, d, e, V)
    return (array(d), array(V))
class MathHelperFunctions(object):
    """static convenience math helper functions, if the function name
    is preceded with an "a", a numpy array is returned

    All methods are static; the class serves as a namespace and is
    aliased as ``Mh``.
    """
    @staticmethod
    def aclamp(x, upper):
        """clip `x` from above at `upper`, returned as a numpy array"""
        # NOTE(review): resolves through ``Misc`` -- valid only if this class
        # is (also) reachable as ``Misc.MathHelperFunctions``; confirm
        # against the surrounding module structure.
        return -Misc.MathHelperFunctions.apos(-x, -upper)
    @staticmethod
    def expms(A, eig=np.linalg.eigh):
        """matrix exponential for a symmetric matrix"""
        # TODO: check that this works reliably for low rank matrices
        # first: symmetrize A
        D, B = eig(A)
        return np.dot(B, (np.exp(D) * B).T)
    @staticmethod
    def amax(vec, vec_or_scalar):
        """element-wise maximum, returned as a numpy array"""
        return array(Misc.MathHelperFunctions.max(vec, vec_or_scalar))
    @staticmethod
    def max(vec, vec_or_scalar):
        """element-wise maximum of `vec` against a scalar or a second
        vector, returned as a list. The builtin ``max`` is still used
        below: class attributes are not in a method's name-lookup scope.
        """
        b = vec_or_scalar
        if np.isscalar(b):
            m = [max(x, b) for x in vec]
        else:
            m = [max(vec[i], b[i]) for i in xrange(len(vec))]
        return m
    @staticmethod
    def amin(vec_or_scalar, vec_or_scalar2):
        """element-wise minimum, returned as a numpy array"""
        return array(Misc.MathHelperFunctions.min(vec_or_scalar, vec_or_scalar2))
    @staticmethod
    def min(a, b):
        """element-wise minimum for any combination of scalars and vectors"""
        iss = np.isscalar
        if iss(a) and iss(b):
            return min(a, b)
        if iss(a):
            a, b = b, a
        # now only b can be still a scalar
        if iss(b):
            return [min(x, b) for x in a]
        else:  # two non-scalars must have the same length
            return [min(a[i], b[i]) for i in xrange(len(a))]
    @staticmethod
    def norm(vec, expo=2):
        """p-norm of `vec` with exponent `expo` (default: Euclidean norm)"""
        # bug guard: float exponent so that 1/expo is a true division even
        # under Python 2 integer division (``1/2 == 0`` would always
        # return vec-sum**0 == 1)
        return sum(vec**expo)**(1. / expo)
    @staticmethod
    def apos(x, lower=0):
        """clips argument (scalar or array) from below at lower"""
        if lower == 0:
            return (x > 0) * x
        else:
            return lower + (x > lower) * (x - lower)
    @staticmethod
    def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
        """``prctile(data, 50)`` returns the median, but p_vals can
        also be a sequence.
        Provides for small samples better values than matplotlib.mlab.prctile,
        however also slower.
        """
        # NOTE: the default list is never mutated, hence harmless
        ps = [p_vals] if np.isscalar(p_vals) else p_vals
        if not sorted_:
            data = sorted(data)
        n = len(data)
        d = []
        for p in ps:
            # bug guard: float divisor keeps the fractional index under
            # Python 2 integer division as well
            fi = p * n / 100. - 0.5
            if fi <= 0:  # maybe extrapolate?
                d.append(data[0])
            elif fi >= n - 1:
                d.append(data[-1])
            else:
                i = int(fi)
                # linear interpolation between neighboring order statistics
                d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
        return d[0] if np.isscalar(p_vals) else d
    @staticmethod
    def sround(nb):  # TODO: to be vectorized
        """return stochastic round: floor(nb) + (rand()<remainder(nb))"""
        return nb // 1 + (np.random.rand(1)[0] < (nb % 1))
    @staticmethod
    def cauchy_with_variance_one():
        """sample a truncated Cauchy variate scaled to roughly unit variance"""
        n = np.random.randn() / np.random.randn()
        while abs(n) > 1000:  # reject extreme tail samples
            n = np.random.randn() / np.random.randn()
        return n / 25
    @staticmethod
    def standard_finite_cauchy(size=1):
        """array of truncated-Cauchy samples; `size` may be a scalar,
        a 1-tuple or a 2-tuple"""
        try:
            l = len(size)
        except TypeError:
            l = 0
        if l == 0:
            return array([Mh.cauchy_with_variance_one() for _i in xrange(size)])
        elif l == 1:
            return array([Mh.cauchy_with_variance_one() for _i in xrange(size[0])])
        elif l == 2:
            return array([[Mh.cauchy_with_variance_one() for _i in xrange(size[1])]
                          for _j in xrange(size[0])])
        else:
            # bug fix: corrected grammar of the error message ("large" -> "larger")
            raise _Error('len(size) cannot be larger than two')
Mh = MathHelperFunctions
def pprint(to_be_printed):
    """nicely formatted print of an arbitrary object"""
    try:
        import pprint as pp
        # generate an instance PrettyPrinter
        # pp.PrettyPrinter().pprint(to_be_printed)
    except ImportError:
        print('could not use pprint module, will apply regular print')
        print(to_be_printed)
    else:
        pp.pprint(to_be_printed)
class Rotation(object):
    """Rotation class that implements an orthogonal linear transformation,
    one for each dimension. Used to implement non-separable test functions.
    Example:
    >>> import cma, numpy as np
    >>> R = cma.Rotation()
    >>> R2 = cma.Rotation()  # another rotation
    >>> x = np.array((1,2,3))
    >>> print(R(R(x), inverse=1))
    [ 1.  2.  3.]
    """
    dicMatrices = {}  # store matrix if necessary, for each dimension
    def __init__(self):
        # per-instance cache, shadowing the class attribute above
        self.dicMatrices = {}  # otherwise there might be shared bases which is probably not what we want
    def __call__(self, x, inverse=False):  # function when calling an object
        """Rotates the input array `x` with a fixed rotation matrix
        (``self.dicMatrices['str(len(x))']``)
        """
        N = x.shape[0]  # can be an array or matrix, TODO: accept also a list of arrays?
        if str(N) not in self.dicMatrices:  # create new N-basis for once and all
            # Gram-Schmidt orthonormalization of a random Gaussian matrix
            B = np.random.randn(N, N)
            for i in xrange(N):
                for j in xrange(0, i):
                    B[i] -= np.dot(B[i], B[j]) * B[j]
                B[i] /= sum(B[i]**2)**0.5
            self.dicMatrices[str(N)] = B
        if inverse:
            # orthogonal matrix: transpose is the inverse rotation
            return np.dot(self.dicMatrices[str(N)].T, x)  # compute rotation
        else:
            return np.dot(self.dicMatrices[str(N)], x)  # compute rotation
# Use rotate(x) to rotate x
rotate = Rotation()
#____________________________________________________________
#____________________________________________________________
#
class FitnessFunctions(object):
    """ versatile container for test objective functions """
    def __init__(self):
        self.counter = 0  # number of calls or any other practical use
    def rot(self, x, fun, rot=1, args=()):
        """returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument"""
        if len(np.shape(array(x))) > 1:  # parallelized
            res = []
            for x in x:
                res.append(self.rot(x, fun, rot, args))
            return res
        if rot:
            return fun(rotate(x, *args))
        else:
            return fun(x)
    def somenan(self, x, fun, p=0.1):
        """returns sometimes np.NaN, otherwise fun(x)"""
        if np.random.rand(1) < p:
            return np.NaN
        else:
            return fun(x)
    def rand(self, x):
        """Random test objective function"""
        return np.random.random(1)[0]
    def linear(self, x):
        # linear function, minimized by maximizing x[0]
        return -x[0]
    def lineard(self, x):
        # linear function with a NaN-signaled domain boundary
        if 1 < 3 and any(array(x) < 0):
            return np.nan
        if 1 < 3 and sum([ (10 + i) * x[i] for i in xrange(len(x))]) > 50e3:
            return np.nan
        return -sum(x)
    def sphere(self, x):
        """Sphere (squared norm) test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        return sum((x+0)**2)
    def spherewithoneconstraint(self, x):
        # sphere, infeasible (NaN) unless x[0] > 1
        return sum((x+0)**2) if x[0] > 1 else np.nan
    def elliwithoneconstraint(self, x, idx=[-1]):
        # rotated ellipsoid, infeasible unless x[idx] > 1 (default list never mutated)
        return self.ellirot(x) if all(array(x)[idx] > 1) else np.nan
    def spherewithnconstraints(self, x):
        # sphere, infeasible unless all coordinates exceed 1
        return sum((x+0)**2) if all(array(x) > 1) else np.nan
    def noisysphere(self, x, noise=4.0, cond=1.0):
        """noise=10 does not work with default popsize, noise handling does not help """
        return self.elli(x, cond=cond) * (1 + noise * np.random.randn() / len(x))
    def spherew(self, x):
        """Sphere (squared norm) with sum x_i = 1 test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        # s = sum(abs(x))
        # return sum((x/s+0)**2) - 1/len(x)
        # return sum((x/s)**2) - 1/len(x)
        return -0.01*x[0] + abs(x[0])**-2 * sum(x[1:]**2)
    def partsphere(self, x):
        """Sphere (squared norm) test objective function"""
        self.counter += 1
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        dim = len(x)
        x = array([x[i % dim] for i in range(2*dim)])
        N = 8
        i = self.counter % dim
        #f = sum(x[i:i + N]**2)
        f = sum(x[np.random.randint(dim, size=N)]**2)
        return f
    def sectorsphere(self, x):
        """asymmetric Sphere (squared norm) test objective function"""
        return sum(x**2) + (1e6-1) * sum(x[x<0]**2)
    def cornersphere(self, x):
        """Sphere (squared norm) test objective function constraint to the corner"""
        nconstr = len(x) - 0
        if any(x[:nconstr] < 1):
            return np.NaN
        return sum(x**2) - nconstr
    def cornerelli(self, x):
        """ellipsoid constrained to the corner x >= 1"""
        if any(x < 1):
            return np.NaN
        return self.elli(x) - self.elli(np.ones(len(x)))
    def cornerellirot(self, x):
        """rotated ellipsoid constrained to the corner x >= 1"""
        if any(x < 1):
            return np.NaN
        return self.ellirot(x)
    def normalSkew(self, f):
        # skewed random multiplier used by the noise functions
        N = np.random.randn(1)[0]**2
        if N < 1:
            N = f * N  # diminish blow up lower part
        return N
    def noiseC(self, x, func=sphere, fac=10, expon=0.8):
        # default `func` binds the sphere function object defined above
        f = func(self, x)
        N = np.random.randn(1)[0]/np.random.randn(1)[0]
        return max(1e-19, f + (float(fac)/len(x)) * f**expon * N)
    def noise(self, x, func=sphere, fac=10, expon=1):
        f = func(self, x)
        #R = np.random.randn(1)[0]
        R = np.log10(f) + expon * abs(10-np.log10(f)) * np.random.rand(1)[0]
        # sig = float(fac)/float(len(x))
        # R = log(f) + 0.5*log(f) * random.randn(1)[0]
        # return max(1e-19, f + sig * (f**np.log10(f)) * np.exp(R))
        # return max(1e-19, f * np.exp(sig * N / f**expon))
        # return max(1e-19, f * normalSkew(f**expon)**sig)
        return f + 10**R  # == f + f**(1+0.5*RN)
    def cigar(self, x, rot=0, cond=1e6):
        """Cigar test objective function"""
        if rot:
            x = rotate(x)
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [x[0]**2 + cond * sum(x[1:]**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def tablet(self, x, rot=0):
        """Tablet test objective function"""
        if rot:
            x = rotate(x)
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [1e6*x[0]**2 + sum(x[1:]**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def cigtab(self, y):
        """Cigtab test objective function"""
        X = [y] if np.isscalar(y[0]) else y
        f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X]
        return f if len(f) > 1 else f[0]
    def twoaxes(self, y):
        """Cigtab test objective function"""
        X = [y] if np.isscalar(y[0]) else y
        N2 = len(X[0]) // 2
        f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X]
        return f if len(f) > 1 else f[0]
    def ellirot(self, x):
        # ellipsoid applied to a rotated argument
        return fcts.elli(array(x), 1)
    def hyperelli(self, x):
        N = len(x)
        return sum((np.arange(1, N+1) * x)**2)
    def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
        """Ellipsoid test objective function"""
        if not np.isscalar(x[0]):  # parallel evaluation
            return [self.elli(xi, rot) for xi in x]  # could save 20% overall
        if rot:
            x = rotate(x)
        N = len(x)
        if actuator_noise:
            x = x + actuator_noise * np.random.randn(N)
        # condition number `cond` spread geometrically over the coordinates
        ftrue = sum(cond**(np.arange(N)/(N-1.))*(x+xoffset)**2)
        alpha = 0.49 + 1./N
        beta = 1
        felli = np.random.rand(1)[0]**beta * ftrue * \
                max(1, (10.**9 / (ftrue+1e-99))**(alpha*np.random.rand(1)[0]))
        # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
        #                                            np.abs(np.random.randn(1)[0]))**0
        if both:
            return (felli, ftrue)
        else:
            # return felli  # possibly noisy value
            return ftrue  # + np.random.randn()
    def elliconstraint(self, x, cfac = 1e8, tough=True, cond=1e6):
        """ellipsoid test objective function with "constraints" """
        N = len(x)
        f = sum(cond**(np.arange(N)[-1::-1]/(N-1)) * x**2)
        cvals = (x[0] + 1,
                 x[0] + 1 + 100*x[1],
                 x[0] + 1 - 100*x[1])
        if tough:
            # linear penalty on constraint violation
            f += cfac * sum(max(0,c) for c in cvals)
        else:
            # smooth quadratic penalty
            f += cfac * sum(max(0,c+1e-3)**2 for c in cvals)
        return f
    def rosen(self, x, alpha=1e2):
        """Rosenbrock test objective function"""
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [sum(alpha*(x[:-1]**2-x[1:])**2 + (1.-x[:-1])**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def diffpow(self, x, rot=0):
        """Diffpow test objective function"""
        N = len(x)
        if rot:
            x = rotate(x)
        return sum(np.abs(x)**(2.+4.*np.arange(N)/(N-1.)))**0.5
    def rosenelli(self, x):
        # NOTE(review): ``N/2`` is a float in Python 3 and would fail as a
        # slice index -- this line assumes Python 2 integer division
        N = len(x)
        return self.rosen(x[:N/2]) + self.elli(x[N/2:], cond=1)
    def ridge(self, x, expo=2):
        x = [x] if np.isscalar(x[0]) else x  # scalar into list
        f = [x[0] + 100*np.sum(x[1:]**2)**(expo/2.) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def ridgecircle(self, x, expo=0.5):
        """happy cat by HG Beyer"""
        a = len(x)
        s = sum(x**2)
        return ((s - a)**2)**(expo/2) + s/a + sum(x)/a
    def happycat(self, x, alpha=1./8):
        s = sum(x**2)
        return ((s - len(x))**2)**alpha + (s/2 + sum(x)) / len(x) + 0.5
    def flat(self,x):
        # deliberately flat function; the unreachable returns below are
        # alternative variants kept for manual toggling
        return 1
        return 1 if np.random.rand(1) < 0.9 else 1.1
        return np.random.randint(1,30)
    def branin(self, x):
        # in [0,15]**2
        y = x[1]
        x = x[0] + 5
        return (y - 5.1*x**2 / 4 / np.pi**2 + 5 * x / np.pi - 6)**2 + 10 * (1 - 1/8/np.pi) * np.cos(x) + 10 - 0.397887357729738160000
    def goldsteinprice(self, x):
        x1 = x[0]
        x2 = x[1]
        return (1 + (x1 +x2 + 1)**2 * (19 - 14 * x1 + 3 * x1**2 - 14 * x2 + 6 * x1 * x2 + 3 * x2**2)) * (
                30 + (2 * x1 - 3 * x2)**2 * (18 - 32 * x1 + 12 * x1**2 + 48 * x2 - 36 * x1 * x2 + 27 * x2**2)) - 3
    def griewank(self, x):
        # was in [-600 600]
        x = (600./5) * x
        return 1 - np.prod(np.cos(x/sqrt(1.+np.arange(len(x))))) + sum(x**2)/4e3
    def rastrigin(self, x):
        """Rastrigin test objective function"""
        if not np.isscalar(x[0]):
            N = len(x[0])
            return [10*N + sum(xi**2 - 10*np.cos(2*np.pi*xi)) for xi in x]
            # return 10*N + sum(x**2 - 10*np.cos(2*np.pi*x), axis=1)
        N = len(x)
        return 10*N + sum(x**2 - 10*np.cos(2*np.pi*x))
    def schaffer(self, x):
        """ Schaffer function x0 in [-100..100]"""
        N = len(x);
        s = x[0:N-1]**2 + x[1:N]**2;
        return sum(s**0.25 * (np.sin(50*s**0.1)**2 + 1))
    def schwefelelli(self, x):
        # cumulative-sum ("Schwefel") ellipsoid
        s = 0
        f = 0
        for i in xrange(len(x)):
            s += x[i]
            f += s**2
        return f
    def schwefelmult(self, x, pen_fac = 1e4):
        """multimodal Schwefel function with domain -500..500"""
        y = [x] if np.isscalar(x[0]) else x
        N = len(y[0])
        f = array([418.9829*N - 1.27275661e-5*N - sum(x * np.sin(np.abs(x)**0.5))
                   + pen_fac * sum((abs(x) > 500) * (abs(x) - 500)**2) for x in y])
        return f if len(f) > 1 else f[0]
    def optprob(self, x):
        n = np.arange(len(x)) + 1
        f = n * x * (1-x)**(n-1)
        return sum(1-f)
    def lincon(self, x, theta=0.01):
        """ridge like linear function with one linear constraint"""
        if x[0] < 0:
            return np.NaN
        return theta * x[1] + x[0]
    def rosen_nesterov(self, x, rho=100):
        """needs exponential number of steps in a non-increasing f-sequence.
        x_0 = (-1,1,...,1)
        See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function"
        """
        f = 0.25 * (x[0] - 1)**2
        f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2)
        return f
fcts = FitnessFunctions()
Fcts = fcts  # for cross compatibility, as if the functions were static members of class Fcts
def felli(x): # unbound function, needed to test multiprocessor
return sum(1e6**(np.arange(len(x))/(len(x)-1))*(x)**2)
#____________________________________________
#____________________________________________________________
def _test(module=None): # None is fine when called from inside the module
import doctest
print(doctest.testmod(module)) # this is pretty coool!
def process_test(stream=None):
""" """
import fileinput
s1 = ""
s2 = ""
s3 = ""
state = 0
for line in fileinput.input(stream): # takes argv as file or stdin
if 1 < 3:
s3 += line
if state < -1 and line.startswith('***'):
print(s3)
if line.startswith('***'):
s3 = ""
if state == -1: # found a failed example line
s1 += '\n\n*** Failed Example:' + line
s2 += '\n\n\n' # line
# state = 0 # wait for 'Expected:' line
if line.startswith('Expected:'):
state = 1
continue
elif line.startswith('Got:'):
state = 2
continue
elif line.startswith('***'): # marks end of failed example
state = 0
elif line.startswith('Failed example:'):
state = -1
elif line.startswith('Exception raised'):
state = -2
# in effect more else:
if state == 1:
s1 += line + ''
if state == 2:
s2 += line + ''
#____________________________________________________________
#____________________________________________________________
#
def main(argv=None):
"""to install and/or test from the command line use::
python cma.py [options | func dim sig0 [optkey optval][optkey optval]...]
--test (or -t) to run the doctest, ``--test -v`` to get (much) verbosity
and ``--test -q`` to run it quietly with output only in case of errors.
install to install cma.py (uses setup from distutils.core).
--fcts and --doc for more infos or start ipython --pylab.
Examples
--------
First, testing with the local python distribution::
python cma.py --test
If succeeded install (uses setup from distutils.core)::
python cma.py install
A single run on the ellipsoid function::
python cma.py elli 10 1
"""
if argv is None:
argv = sys.argv # should have better been sys.argv[1:]
# uncomment for unit test
# _test()
# handle input arguments, getopt might be helpful ;-)
if len(argv) >= 1: # function and help
if len(argv) == 1 or argv[1].startswith('-h') or argv[1].startswith('--help'):
print(main.__doc__)
fun = None
elif argv[1].startswith('-t') or argv[1].startswith('--test'):
import doctest
if len(argv) > 2 and (argv[2].startswith('--v') or argv[2].startswith('-v')): # verbose
print('doctest for cma.py: due to different platforms and python versions')
print('and in some cases due to a missing unique random seed')
print('many examples will "fail". This is OK, if they give a similar')
print('to the expected result and if no exception occurs. ')
# if argv[1][2] == 'v':
doctest.testmod(report=True) # this is quite cool!
else: # was: if len(argv) > 2 and (argv[2].startswith('--qu') or argv[2].startswith('-q')):
print('doctest for cma.py: launching (it might be necessary to close a few pop up windows to finish)')
fn = '__cma_doctest__.txt'
stdout = sys.stdout
try:
with open(fn, 'w') as f:
sys.stdout = f
doctest.testmod(report=True) # this is quite cool!
finally:
sys.stdout = stdout
process_test(fn)
print('doctest for cma.py: finished (no other output should be seen after launching)')
return
elif argv[1] == '--doc':
print(__doc__)
print(CMAEvolutionStrategy.__doc__)
print(fmin.__doc__)
fun = None
elif argv[1] == '--fcts':
print('List of valid function names:')
print([d for d in dir(fcts) if not d.startswith('_')])
fun = None
elif argv[1] in ('install', '--install'):
from distutils.core import setup
setup(name = "cma",
version = __version__,
author = "Nikolaus Hansen",
# packages = ["cma"],
py_modules = ["cma"],
)
fun = None
elif argv[1] in ('plot',):
plot()
raw_input('press return')
fun = None
elif len(argv) > 3:
fun = eval('fcts.' + argv[1])
else:
print('try -h option')
fun = None
if fun is not None:
if len(argv) > 2: # dimension
x0 = np.ones(eval(argv[2]))
if len(argv) > 3: # sigma
sig0 = eval(argv[3])
opts = {}
for i in xrange(5, len(argv), 2):
opts[argv[i-1]] = eval(argv[i])
# run fmin
if fun is not None:
tic = time.time()
fmin(fun, x0, sig0, **opts) # ftarget=1e-9, tolfacupx=1e9, verb_log=10)
# plot()
# print ' best function value ', res[2]['es'].best[1]
print('elapsed time [s]: + %.2f', round(time.time() - tic, 2))
elif not len(argv):
fmin(fcts.elli, np.ones(6)*0.1, 0.1, ftarget=1e-9)
#____________________________________________________________
#____________________________________________________________
#
# mainly for testing purpose
# executed when called from an OS shell
if __name__ == "__main__":
# for i in range(1000): # how to find the memory leak
# main(["cma.py", "rastrigin", "10", "5", "popsize", "200", "maxfevals", "24999", "verb_log", "0"])
main()
| CG-F16-24-Rutgers/steersuite-rutgers | steerstats/tools/cma/cma.py | Python | gpl-3.0 | 315,091 | [
"exciting"
] | 6d03ce069db3e5f888b2167c38869ca9bf0cdb0a9aec209cc27f6b233a16da86 |
import mdtraj as md
import numpy as np
from mdtraj.testing import get_fn, eq
from msmbuilder.featurizer import SASAFeaturizer
def _test_sasa_featurizer(t, value):
sasa = md.shrake_rupley(t)
rids = np.array([a.residue.index for a in t.top.atoms])
for i, rid in enumerate(np.unique(rids)):
mask = (rids == rid)
eq(value[:, i], np.sum(sasa[:, mask], axis=1))
def test_sasa_featurizer_1():
t = md.load(get_fn('frame0.h5'))
value = SASAFeaturizer(mode='residue').partial_transform(t)
assert value.shape == (t.n_frames, t.n_residues)
_test_sasa_featurizer(t, value)
def test_sasa_featurizer_2():
t = md.load(get_fn('frame0.h5'))
# scramle the order of the atoms, and which residue each is a
# member of
df, bonds = t.top.to_dataframe()
df['resSeq'] = np.random.randint(5, size=(t.n_atoms))
df['resName'] = df['resSeq']
t.top = md.Topology.from_dataframe(df, bonds)
value = SASAFeaturizer(mode='residue').partial_transform(t)
_test_sasa_featurizer(t, value)
| msultan/msmbuilder | msmbuilder/tests/test_sasa_featurizer.py | Python | lgpl-2.1 | 1,041 | [
"MDTraj"
] | 0b97f566ad394a3c89ea1e5a29f871ec7df574248ebed03e5a9b7e206590bfda |
import urllib.request as ul
import re
from bs4 import BeautifulSoup
import time
import random
class HotBlog(object):
url_pattern = "http://www.baidu.com/s?ie=utf-8&pn={pn}&wd={wd}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
def get_page(self, url):
contents = b""
try:
page = ul.urlopen(url, timeout=5)
contents = page.read()
except Exception:
print("Connection timeout!")
return contents.decode("utf-8")
def scan_page(self, n, wd, username):
html = self.get_page(self.url_pattern.format(pn=10 * n, wd=wd))
soup = BeautifulSoup(html, "lxml")
blog_title_pattern = re.compile(".*- {username} - 博客频道 - CSDN.NET$".format(username=username))
print("----- 开始访问第" + str(n + 1) + "页搜索结果-----")
for target in soup.find_all(id=re.compile("tools_[0-9]*_[1-9]")):
data_tools = target.attrs["data-tools"]
parts = data_tools.split('","url":"')
if len(parts) != 2:
continue
title = parts[0][10:]
url = parts[1][:-2]
if re.match(blog_title_pattern, title):
random.seed(time.time())
time.sleep(random.uniform(random.random() * 2, random.random() * 50))
request = ul.Request(url, headers=self.headers)
ul.urlopen(request)
print("visit:" + title)
print("----- 结束访问第" + str(n + 1) + "页搜索结果-----")
def scan_n_pages(self, n, wd, username):
for i in range(n):
self.scan_page(i, wd, username)
def interpret(self):
print("这个脚本可以通过百度搜索引擎来访问CSDN博客,提高博客检索排名。")
username = input("请输入你的CSDN用户名:\n")
key = input("请输入搜索关键词:\n")
page_count = int(input("请输入你想要搜索的页面数:\n"))
self.scan_n_pages(page_count, key, username)
if __name__ == "__main__":
HotBlog().interpret()
| gavinfish/Awesome-Python | HotBlog/HotBlog.py | Python | apache-2.0 | 2,166 | [
"VisIt"
] | 82d0a129ddb1a7980cc81b07772a4458aa1f084feaa9cee42662062f1c479fe5 |
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.sdist import sdist as _sdist
from distutils.errors import CompileError
from warnings import warn
import os.path
from glob import glob
try:
from Cython.Distutils import build_ext as _build_ext
except ImportError:
use_cython = False
else:
use_cython = True
class build_ext(_build_ext):
# see http://stackoverflow.com/q/19919905 for explanation
def finalize_options(self):
_build_ext.finalize_options(self)
__builtins__.__NUMPY_SETUP__ = False
import numpy as np
self.include_dirs.append(np.get_include())
# if extension modules fail to build, keep going anyway
def run(self):
try:
_build_ext.run(self)
except CompileError:
warn('Failed to build extension modules')
class sdist(_sdist):
def run(self):
try:
from Cython.Build import cythonize
cythonize(os.path.join('pylds','**','*.pyx'))
except:
warn('Failed to generate extension files from Cython sources')
finally:
_sdist.run(self)
extension_pathspec = os.path.join('pylds','**','*.pyx')
paths = [os.path.splitext(fp)[0] for fp in glob(extension_pathspec)]
names = ['.'.join(os.path.split(p)) for p in paths]
ext_modules = [
Extension(
name, sources=[path + '.cpp'], include_dirs=[os.path.join('deps')],
extra_compile_args=['-O3','-std=c++11','-w'])
for name, path in zip(names,paths)]
if use_cython:
from Cython.Build import cythonize
try:
ext_modules = cythonize(extension_pathspec)
except:
warn('Failed to generate extension module code from Cython files')
setup(
name='pylds',
version='0.0.2',
description="Learning and inference for Gaussian linear dynamical systems"
"with fast Cython and BLAS/LAPACK implementations",
author='Matthew James Johnson',
author_email='mattjj@csail.mit.edu',
license="MIT",
url='https://github.com/mattjj/pylds',
packages=['pylds'],
install_requires=[
'numpy>=1.9.3', 'scipy>=0.16', 'matplotlib', 'pybasicbayes', 'autoregressive'],
ext_modules=cythonize('pylds/**/*.pyx'),
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: C++',
],
keywords=[
'lds', 'linear dynamical system', 'kalman filter', 'kalman',
'kalman smoother', 'rts smoother'],
platforms="ALL",
cmdclass={'build_ext': build_ext, 'sdist': sdist})
| mackelab/pyRRHDLDS | setup.py | Python | bsd-3-clause | 2,633 | [
"Gaussian"
] | 0bd9c05a5f86499b718aca597139324687238f45ee4909673c1297ac8c538616 |
#! /usr/bin/env python
"""
##############################################################################
##
##
## @name : WeeklyGraphicsWebPageGenerator.py
##
##
## @author: Nicholas Lemay
##
## @license : MetPX Copyright (C) 2004-2006 Environment Canada
## MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
## named COPYING in the root of the source directory tree.
##
## @since : 2006-11-22, last updated on 2008-05-01
##
##
## @summary : Generates a web pages that gives access to user
## to the weekly graphics of the last 5 weeks for all rx sources
## and tx clients.
##
##
##############################################################################
"""
"""
Small function that adds pxlib to the environment path.
"""
import os, time, sys
sys.path.insert(1, os.path.dirname( os.path.abspath(__file__) ) + '/../../')
try:
pxlib = os.path.normpath( os.environ['PXROOT'] ) + '/lib/'
except KeyError:
pxlib = '/apps/px/lib/'
sys.path.append(pxlib)
"""
Imports
PXManager requires pxlib
"""
from PXManager import *
from pxStats.lib.StatsPaths import StatsPaths
from pxStats.lib.StatsDateLib import StatsDateLib
from pxStats.lib.GeneralStatsLibraryMethods import GeneralStatsLibraryMethods
from pxStats.lib.StatsConfigParameters import StatsConfigParameters
from pxStats.lib.WebPageGeneratorInterface import WebPageGeneratorInterface
LOCAL_MACHINE = os.uname()[1]
NB_WEEKS_DISPLAYED = 3
CURRENT_MODULE_ABS_PATH = os.path.abspath(__file__).replace( ".pyc", ".py" )
class WeeklyGraphicsWebPageGenerator( WebPageGeneratorInterface ):
def __init__( self, displayedLanguage = 'en', filesLanguage='en', weeks = None, \
pathsTowardsGraphics = None, pathsTowardsOutputFiles = None ):
"""
@summary : Constructor
@param displayedLanguage: Languages in which to display
the different captions found within
the generated web page.
@param fileLanguages: Language in which the files that
will be referenced within this page
have been generated.
@param weeks : List of weeks that the web page covers.
@note : Will set two global translators to be used throughout this module
_ which translates every caption that is to be printed.
_F which translates every filename that is to be linked.
"""
configParameters = StatsConfigParameters()
configParameters.getGeneralParametersFromStatsConfigurationFile()
global _
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, displayedLanguage )
if weeks == None:
self.setWeeks()
else:
self.weeks = weeks
self.displayedLanguage = displayedLanguage
self.filesLanguage = filesLanguage
self.pathsTowardsGraphics = StatsPaths()
self.pathsTowardsGraphics.setPaths( filesLanguage )
self.pathsTowardsOutputFiles = StatsPaths()
self.pathsTowardsOutputFiles.setPaths( self.displayedLanguage )
StatsDateLib.setLanguage(filesLanguage)
def setWeeks( self ):
"""
Returns the 3 week numbers including current week number.
"""
weeks = []
startTime = (time.time() - ( NB_WEEKS_DISPLAYED*7*24*60*60 ) )
for i in range( 1, ( NB_WEEKS_DISPLAYED + 1 ) ):
weeks.append( startTime + (i*7*24*60*60) )
self.weeks = weeks
def getStartEndOfWebPage():
"""
Returns the time of the first
graphics to be shown on the web
page and the time of the last
graphic to be displayed.
"""
currentTime = StatsDateLib.getIsoFromEpoch( time.time() )
start = StatsDateLib.rewindXDays( currentTime, ( NB_WEEKS_DISPLAYED - 1 ) * 7 )
start = StatsDateLib.getIsoTodaysMidnight( start )
end = StatsDateLib.getIsoTodaysMidnight( currentTime )
return start, end
getStartEndOfWebPage = staticmethod( getStartEndOfWebPage )
def printWebPage( self, rxNames, txNames ):
"""
@summary : Generates a web page based on all the
rxnames and tx names that have run during
the past x weeks.
@param rxNames: List of sources for which to write
links to their weekly graphics.
@param txNames: List of clients for which to write
links to their weekly graphics.
@precondition: global _ translator must be set prior to calling this function.
@notes : Only links to available graphics will be
displayed.
@return : None
"""
global _
rxNamesArray = rxNames.keys()
txNamesArray = txNames.keys()
rxNamesArray.sort()
txNamesArray.sort()
#Redirect output towards html page to generate.
if not os.path.isdir( self.pathsTowardsOutputFiles.STATSWEBPAGESHTML ):
os.makedirs( self.pathsTowardsOutputFiles.STATSWEBPAGESHTML )
fileHandle = open( "%sweeklyGraphs_%s.html" % (self.pathsTowardsOutputFiles.STATSWEBPAGESHTML, self.displayedLanguage) , 'w' )
print "%sweeklyGraphs_%s.html" % (self.pathsTowardsOutputFiles.STATSWEBPAGESHTML, self.displayedLanguage)
fileHandle.write( """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<title> PX Graphics </title>
<link rel="stylesheet" href="../scripts/js_%s/windowfiles/dhtmlwindow.css" type="text/css" />
<script type="text/javascript" src="../scripts/js_%s/windowfiles/dhtmlwindow.js">
This is left here to give credit to the original
creators of the dhtml script used for the group pop ups:
/***********************************************
* DHTML Window Widget- Dynamic Drive (www.dynamicdrive.com)
* This notice must stay intact for legal use.
* Visit http://www.dynamicdrive.com/ for full source code
***********************************************/
</script>
"""%( self.displayedLanguage, self.displayedLanguage ) + """
<script type="text/javascript">
var descriptionWindow=dhtmlwindow.open("description", "inline", "description", "Group description", "width=900px,height=120px,left=150px,top=10px,resize=1,scrolling=0", "recal")
descriptionWindow.hide()
</script>
<STYLE>
<!--
A{text-decoration:none}
-->
</STYLE>
<style type="text/css">
div.left { float: left; }
div.right {float: right; }
</style>
<style type="text/css">
a.blackLinks{
color: #000000;
}
div.tableContainer {
width: 95%%; /* table width will be 99%% of this*/
height: 275px; /* must be greater than tbody*/
overflow: auto;
margin: 0 auto;
}
table.cssTable {
width: 99%%; /*100%% of container produces horiz. scroll in Mozilla*/
border: none;
background-color: #f7f7f7;
table-layout: fixed;
}
table.cssTable>tbody { /* child selector syntax which IE6 and older do not support*/
overflow: auto;
height: 225px;
overflow-x: hidden;
}
thead tr {
position:relative;
}
thead td, thead th {
text-align: center;
font-size: 14px;
background-color:"#006699";
color: steelblue;
font-weight: bold;
border-top: solid 1px #d8d8d8;
}
td.cssTable {
color: #000;
padding-right: 2px;
font-size: 12px;
text-align: left;
border-bottom: solid 1px #d8d8d8;
border-left: solid 1px #d8d8d8;
}
tfoot td {
text-align: center;
font-size: 11px;
font-weight: bold;
background-color: papayawhip;
color: steelblue;
border-top: solid 2px slategray;
}
td:last-child {padding-right: 20px;} /*prevent Mozilla scrollbar from hiding cell content*/
</style>
<script>
counter =0;
function wopen(url, name, w, h){
// This function was taken on www.boutell.com
w += 32;
h += 96;
counter +=1;
var win = window.open(url,
counter,
'width=' + w + ', height=' + h + ', ' +
'location=no, menubar=no, ' +
'status=no, toolbar=no, scrollbars=no, resizable=no');
win.resizeTo(w, h);
win.focus();
}
</script>
<script>
function showSourceHelpPage(){
var sourceHelpPage = dhtmlwindow.open("sourceHelpPage", "iframe", "helpPages/source_%s.html" """ %self.displayedLanguage + """, " """ + _("Definition of 'source'") + """", "width=875px,height=100px,resize=1,scrolling=1,center=1", "recal")
sourceHelpPage.moveTo("middle", "middle");
}
function showBytecountHelpPage(){
var byteCountHelpPage = dhtmlwindow.open("byteCount", "iframe", "helpPages/byteCount_%s.html" """ %self.displayedLanguage + """, " """ +_( "Definition of 'byteCount'") + """", "width=875px,height=150px,resize=1,scrolling=1,center=1", "recal")
byteCountHelpPage.moveTo("middle", "middle");
}
function showClientHelpPage(){
var clientHelpPage = dhtmlwindow.open("client", "iframe", "helpPages/client_%s.html" """ %self.displayedLanguage + """, " """ +_( "Definition of 'client'") + """", "width=875px,height=150px,resize=1,scrolling=1,center=1", "recal")
.moveTo("middle", "middle");
}
function showErrorsHelpPage(){
var errorsHelpPage = dhtmlwindow.open("errors", "iframe", "helpPages/errors_%s.html" """ %self.displayedLanguage + """, " """ +_( "Definition of 'errors'") +"""", "width=875px,height=150px,resize=1,scrolling=1,center=1", "recal")
errorsHelpPage.moveTo("middle", "middle");
}
function showFilecountHelpPage(){
var fileCountHelpPage = dhtmlwindow.open("fileCount", "iframe", "helpPages/fileCount_%s.html" """ %self.displayedLanguage + """, " """ +_("Definition of 'filecount'") + """", "width=875px,height=150px,resize=1,scrolling=1,center=1", "recal")
fileCountHelpPage.moveTo("middle", "middle");
}
function showFilesOverMaxLatencyHelpPage(){
var filesOverMaxLatencyHelpPage = dhtmlwindow.open("filesOverMaxLatency", "iframe", "helpPages/filesOverMaxLatency_%s.html" """ %self.displayedLanguage + """, " """ +_("Definition of 'filesOverMaxLatency'") + """", "width=875px,height=150px,resize=1,scrolling=1,center=1", "recal")
filesOverMaxLatencyHelpPage.moveTo("middle", "middle");
}
function showLatencyHelpPage(){
var latencyHelpPage = dhtmlwindow.open("latency", "iframe", "helpPages/latency_%s.html" """ %self.displayedLanguage + """, " """ + _("Definition of 'latency'") + """" , "width=875px,height=150px,resize=1,scrolling=1,center=1", "recal")
latencyHelpPage.moveTo("middle", "middle");
}
</script>
</head>
<body text="#000000" link="#FFFFFF" vlink="000000" bgcolor="#FFF4E5" >
<br>
<table width = "100%">
<tr width = "100%">
<div class="left"><b><font size="5"> """ + _("Weekly graphics for RX sources from MetPx.") +""" </font><font size = "2">""" + _("*updated hourly") + """</font></b></div>
""")
oneFileFound = False
for weekNumber in self.weeks:
parameters = StatsConfigParameters( )
parameters.getAllParameters()
machinesStr = str( parameters.sourceMachinesTags ).replace( '[','' ).replace( ']','' ).replace(',','').replace("'","").replace('"','').replace(" ","")
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( weekNumber )
currentWeek = time.strftime("%W", time.gmtime(weekNumber))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSCSVFILES + _( "weekly/rx/%s/%s/%s.csv" ) %( machinesStr, currentYear, currentWeek )
webLink = _("csvFiles/weekly/rx/%s/%s/%s.csv") %( machinesStr, currentYear, currentWeek )
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
if oneFileFound == False:
fileHandle.write( "<div class='right'><font size='2' color='black'>" +_("CSV files") + " : " )
oneFileFound = True
fileHandle.write( """<a href="%s" class="blackLinks">%.3s.csv </a>"""%( webLink,currentWeek ) )
if oneFileFound == True :
fileHandle.write( """
</font>
</div>
""" )
fileHandle.write("""
</tr>
</table>
<br>
<br>
<div class="tableContainer">
<table class="cssTable">
<thead>
<tr>
<td bgcolor="#006699" class="cssTable">
<font color = "white">
<center>
""" + _('Sources') + """
<br>
<a target ="popup" href="#" onClick="showSourceHelpPage(); return false;">
?
</a>
<center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title =" """ + _("Display the total of bytes received every day of the week for each sources.") + """">
<font color = "white">
<center>
""" + _('Bytecount') + """
<br>
<a target ="popup" href="#" onClick="showBytecountHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title =" """ + _("Display the total of files received every day of the week for each sources.") + """">
<font color = "white">
<center>
""" + _('Filecount') + """
<br>
<a target ="popup" href="#" onClick="showFilecountHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title =" """+ _("Display the total of errors that occured during the receptions for every day of the week for each sources.") + """">
<font color = "white">
<center>
""" + _('Errors') + """
<br>
<a target ="popup" href="#" onClick="showErrorsHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
</tr>
</thead>
<tbody>
""" )
for rxName in rxNamesArray :
if rxNames[rxName] == "" :
fileHandle.write( """ <tr> <td bgcolor="#99FF99" class="cssTable"> %s </td> """ %(rxName))
fileHandle.write( """<td bgcolor="#66CCFF"> """+ _("Weeks") + """ : """ )
else:
machineName = self.getMachineNameFromDescription( rxNames[rxName] )
fileHandle.write( """ <tr> <td bgcolor="#99FF99" class="cssTable"><div class="left"> %s </div> <div class="right"><a href="#" onClick="descriptionWindow.load('inline', '%s', 'Description');descriptionWindow.show(); return false"><font color="black">?</font></a></div><br>(%s)</td> """ %(rxName, rxNames[rxName].replace("'","").replace('"',''), machineName ) )
fileHandle.write( """<td bgcolor="#66CCFF"> """+ _("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/rx/%s/") %( rxName ) + str(currentYear) + _("/bytecount/%s.png") %str(currentWeek)
webLink = _("archives/weekly/rx/%s/")%( rxName ) + str(currentYear) + _("/bytecount/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;">%s </a>"""%( rxName, webLink , currentWeek ) )
fileHandle.write( "</td>" )
fileHandle.write( """ <td bgcolor="#66CCFF" class="cssTable" >"""+ _("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/rx/%s/")%( rxName ) + str(currentYear) + _("/filecount/%s.png") %str(currentWeek)
webLink = _("archives/weekly/rx/%s/")%( rxName ) + str(currentYear) + _("/filecount/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;">%s </a>"""%( rxName, webLink , currentWeek ) )
fileHandle.write( "</td>" )
fileHandle.write( """ <td bgcolor="#66CCFF" class="cssTable">"""+ _("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/rx/%s/")%( rxName ) + str(currentYear) + _("/errors/%s.png") %str(currentWeek)
webLink = _("archives/weekly/rx/%s/")%( rxName ) + str(currentYear) + _("/errors/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;">%s </a>"""%( rxName, webLink , currentWeek ) )
fileHandle.write( "</td></tr>" )
fileHandle.write( """
</tbody>
</table>
</div>
<br>
<table width = "100%">
<tr width = "100%">
<div class="left"><b><font size="5"> """ + _("Weekly graphics for TX clients from MetPx.") + """ </font><font size = "2">""" +_("*updated hourly") +"""</font></b></div>
""")
oneFileFound = False
for weekNumber in self.weeks:
parameters = StatsConfigParameters( )
parameters.getAllParameters()
machinesStr = str( parameters.sourceMachinesTags ).replace( '[','' ).replace( ']','' ).replace(',','').replace("'","").replace('"','').replace(" ","")
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( weekNumber )
currentWeek = time.strftime("%W", time.gmtime(weekNumber))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSCSVFILES + _("/weekly/tx/%s/%s/%s.csv") %( machinesStr, currentYear, currentWeek )
webLink = _("csvFiles/weekly/tx/%s/%s/%s.csv") %( machinesStr, currentYear, currentWeek )
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
if oneFileFound == False:
fileHandle.write( "<div class='right'><font size='2' color='black'>" + _("CSV files") + " : " )
oneFileFound = True
fileHandle.write( """<a href="%s" class="blackLinks">%.3s.csv </a>"""%( webLink,currentWeek ) )
if oneFileFound == True :
fileHandle.write( """
</font>
</div>
""" )
fileHandle.write("""
</tr>
</table>
<br>
<br>
<div class="tableContainer">
<table class="cssTable" >
<thead>
<tr>
<td bgcolor="#006699" class="cssTable">
<font color = "white">
<center>
""" + _('Clients') + """
<br>
<a target ="popup" href="#" onClick="showClientHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title = " """ + _("Display the average latency of file transfers for every day of the week for each clients.") + """">
<font color = "white">
<center>
""" + _('Latency') + """
<br>
<a target ="popup" href="#" onClick="showLatencyHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title = " """ + _("Display the total number of files for wich the latency was over 15 seconds for every day of the week for each clients.") +"""">
<font color = "white">
<center>
""" + _('Files Over Max. Lat.') + """
<br>
<a target ="popup" href="#" onClick="showFilesOverMaxLatencyHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title = " """ +_("Display the total of bytes transfered every day of the week for each clients.") + """">
<font color = "white">
<center>
""" + _('Bytecount') + """
<br>
<a target ="popup" href="#" onClick="showBytecountHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title = " """ +_("Display the total of files transferred every day of the week for each clients.") + """">
<font color = "white">
<center>
""" + _('Filecount') + """
<br>
<a target ="popup" href="#" onClick="showFilecountHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
<td bgcolor="#006699" class="cssTable" title = " """ + _("Display the total of errors that occured during file transfers every day of the week for each clients.") +"""">
<font color = "white">
<center>
""" + _('Errors') + """
<br>
<a target ="popup" href="#" onClick="showErrorsHelpPage(); return false;">
?
</a>
</center>
</font>
</td>
</tr>
</thead>
<tbody>
""" )
for txName in txNamesArray :
if txNames[txName] == "" :
fileHandle.write( """<tr> <td bgcolor="#99FF99" class="cssTable"> %s </td> """ %(txName))
fileHandle.write( """<td bgcolor="#66CCFF" class="cssTable">""" +_("Weeks") + """ : """ )
else:
machineName = self.getMachineNameFromDescription( txNames[txName] )
fileHandle.write( """<tr> <td bgcolor="#99FF99" class="cssTable"><div class="left"> %s </div><div class="right"><a href="#" onClick="descriptionWindow.load('inline', '%s', 'Description');descriptionWindow.show(); return false"><font color="black">?</font></a></div><br>(%s)</td> """ %(txName, txNames[txName].replace("'","").replace('"',''), machineName ))
fileHandle.write( """<td bgcolor="#66CCFF" class="cssTable">""" +_("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/tx/%s/")%( txName ) + str(currentYear) + _("/latency/%s.png") %str(currentWeek)
webLink = _("archives/weekly/tx/%s/")%( txName ) + str(currentYear) + _("/latency/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;"> %s</a>"""%( txName, webLink , currentWeek ) )
fileHandle.write( "</td>" )
fileHandle.write( """ <td bgcolor="#66CCFF" class="cssTable">""" +_("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/tx/%s/")%( txName ) + str(currentYear) + _("/filesOverMaxLatency/%s.png") %str(currentWeek)
webLink = _("archives/weekly/tx/%s/")%( txName ) + str(currentYear) + _("/filesOverMaxLatency/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;"> %s</a>"""%( txName, webLink, currentWeek ) )
fileHandle.write( "</td>" )
fileHandle.write( """ <td bgcolor="#66CCFF" class="cssTable">""" +_("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/tx/%s/")%( txName ) + str(currentYear) + _("/bytecount/%s.png") %str(currentWeek)
webLink = _("archives/weekly/tx/%s/")%( txName ) + str(currentYear) + _("/bytecount/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;"> %s</a>"""%( txName, webLink, currentWeek ) )
fileHandle.write( "</td>" )
fileHandle.write( """<td bgcolor="#66CCFF" class="cssTable">""" +_("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/tx/%s/")%( txName ) + str(currentYear) + _("/filecount/%s.png") %str(currentWeek)
webLink = _("archives/weekly/tx/%s/")%( txName ) + str(currentYear) + _("/filecount/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;"> %s</a>"""%( txName, webLink, currentWeek ) )
fileHandle.write( "</td>" )
fileHandle.write( """ <td bgcolor="#66CCFF" class="cssTable">""" +_("Weeks") + """ : """ )
for week in self.weeks:
currentYear, currentMonth, currentDay = StatsDateLib.getYearMonthDayInStrfTime( week )
currentWeek = time.strftime("%W", time.gmtime(week))
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.filesLanguage )
file = self.pathsTowardsGraphics.STATSGRAPHSARCHIVES + _("weekly/tx/%s/")%( txName ) + str(currentYear) + _("/errors/%s.png") %str(currentWeek)
webLink = _("archives/weekly/tx/%s/")%( txName ) + str(currentYear) + _("/errors/%s.png") %str(currentWeek)
_ = self.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, self.displayedLanguage )
if os.path.isfile( file ):
fileHandle.write( """<a target ="popup" href="%s" onClick="wopen('%s', 'popup', 875, 240); return false;"> %s</a>"""%( txName, webLink, currentWeek ) )
fileHandle.write( "</td></tr>" )
fileHandle.write( """
</tbody>
</table>
</div>
</body>
</html>
""" )
fileHandle.close()
def generateWebPage(self):
    """
    @summary : Builds and writes out the weekly graphics web page.
    """
    # Determine which weeks the page covers, then its time boundaries.
    self.setWeeks()
    startTime, endTime = self.getStartEndOfWebPage()
    # Fetch the rx/tx client names active in that interval and render the page.
    receiverNames, transmitterNames = GeneralStatsLibraryMethods.getRxTxNamesForWebPages(startTime, endTime)
    self.printWebPage(receiverNames, transmitterNames)
| khosrow/metpx | pxStats/lib/WeeklyGraphicsWebPageGenerator.py | Python | gpl-2.0 | 39,530 | [
"VisIt"
] | 897ff2c36d3a18bf582652afbf623cd1d1edf7333b61e84a04ffa40ea63a36a3 |
# -*- coding: utf-8 -*-
"""Collection of physical constants and conversion factors.
The magnitudes of the defined constants are taken from
:mod:`typhon.constants`.
This module adds units defined with pint's UnitRegistry.
Physical constants
==================
============================ ============================
``g`` Earth standard gravity
``h`` Planck constant
``k`` Boltzmann constant
``c`` Speed of light
``N_A`` Avogadro constant
``R`` Universal gas constant
``molar_mass_dry_air`` Molar mass for dry air
``molar_mass_water`` Molar mass for water vapor
``gas_constant_dry_air`` Gas constant for dry air
``gas_constant_water_vapor`` Gas constant for water vapor
============================ ============================
Mathematical constants
======================
========== ============
``golden`` Golden ratio
========== ============
SI prefixes
===========
========= ================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
========= ================
Non-SI ratios
=============
======= =====================================
``ppm`` :math:`10^{-6}` `parts per million`
``ppb`` :math:`10^{-9}` `parts per billion`
``ppt`` :math:`10^{-12}` `parts per trillion`
======= =====================================
Binary prefixes
===============
================= ==============
``kibi``, ``KiB`` :math:`2^{10}`
``mebi``, ``MiB`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
================= ==============
================= ==============
``KB`` :math:`10^3`
``MB`` :math:`10^6`
================= ==============
Earth characteristics
=====================
================ ===================
``earth_mass`` Earth mass
``earth_radius`` Earth radius
``atm`` Standard atmosphere
================ ===================
"""
import numpy as np
from typhon import constants
from typhon.physics.units.common import ureg
# Physical constants (magnitudes from typhon.constants, wrapped with pint units)
g = earth_standard_gravity = constants.g * ureg('m / s**2')
h = planck = constants.planck * ureg.joule
k = boltzmann = constants.boltzmann * ureg('J / K')
c = speed_of_light = constants.speed_of_light * ureg('m / s')
N_A = avogadro = N = constants.avogadro * ureg('1 / mol')
R = gas_constant = constants.gas_constant * ureg('J * mol**-1 * K**-1')
molar_mass_dry_air = 28.9645e-3 * ureg('kg / mol')
molar_mass_water = 18.01528e-3 * ureg('kg / mol')
# Specific gas constants derived from the universal gas constant above.
gas_constant_dry_air = R / molar_mass_dry_air # J K^-1 kg^-1
gas_constant_water_vapor = R / molar_mass_water # J K^-1 kg^-1
# Mathematical constants
golden = golden_ratio = (1 + np.sqrt(5)) / 2
# SI prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
# Non-SI ratios
ppm = 1e-6 # parts per million
ppb = 1e-9 # parts per billion
ppt = 1e-12 # parts per trillion
# Binary prefixes
kibi = KiB = 2**10
mebi = MiB = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
# Decimal byte multiples (not binary)
KB = 10**3
MB = 10**6
# Earth characteristics
earth_mass = constants.earth_mass * ureg.kg
earth_radius = constants.earth_radius * ureg.m
atm = atmosphere = constants.atm * ureg.pascal
| atmtools/typhon | typhon/physics/units/constants.py | Python | mit | 4,058 | [
"Avogadro"
] | 3842ebff2759fe43d6785b26f14b02dce42dea21abbcecd10b6f75686329eed0 |
"""Store configuration options as a singleton."""
import os
import re
import subprocess
import sys
from argparse import Namespace
from functools import lru_cache
from typing import Any, Dict, List, Optional, Tuple
from packaging.version import Version
from ansiblelint.constants import ANSIBLE_MISSING_RC
# Ordered list of {kind: glob} mappings used to classify files; the list is
# scanned in order, so more specific patterns must come first.
DEFAULT_KINDS = [
# Do not sort this list, order matters.
{"jinja2": "**/*.j2"}, # jinja2 templates are not always parsable as something else
{"jinja2": "**/*.j2.*"},
{"inventory": "**/inventory/**.yml"},
{"requirements": "**/meta/requirements.yml"}, # v1 only
# https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html
{"galaxy": "**/galaxy.yml"}, # Galaxy collection meta
{"reno": "**/releasenotes/*/*.{yaml,yml}"}, # reno release notes
{"playbook": "**/playbooks/*.{yml,yaml}"},
{"playbook": "**/*playbook*.{yml,yaml}"},
{"role": "**/roles/*/"},
{"tasks": "**/tasks/**/*.{yaml,yml}"},
{"handlers": "**/handlers/*.{yaml,yml}"},
{"vars": "**/{host_vars,group_vars,vars,defaults}/**/*.{yaml,yml}"},
{"meta": "**/meta/main.{yaml,yml}"},
{"yaml": ".config/molecule/config.{yaml,yml}"}, # molecule global config
{
"requirements": "**/molecule/*/{collections,requirements}.{yaml,yml}"
}, # molecule old collection requirements (v1), ansible 2.8 only
{"yaml": "**/molecule/*/{base,molecule}.{yaml,yml}"}, # molecule config
{"requirements": "**/requirements.yml"}, # v2 and v1
{"playbook": "**/molecule/*/*.{yaml,yml}"}, # molecule playbooks
{"yaml": "**/{.ansible-lint,.yamllint}"},
{"yaml": "**/*.{yaml,yml}"},
{"yaml": "**/.*.{yaml,yml}"},
]
BASE_KINDS = [
# These assignations are only for internal use and are only inspired by
# MIME/IANA model. Their purpose is to be able to process a file based on
# it type, including generic processing of text files using the prefix.
# NOTE(review): presumably scanned in order like DEFAULT_KINDS, so the
# generic "**/*.{yaml,yml}" entries must stay last — confirm.
{
"text/jinja2": "**/*.j2"
}, # jinja2 templates are not always parsable as something else
{"text/jinja2": "**/*.j2.*"},
{"text": "**/templates/**/*.*"}, # templates are likely not validable
{"text/json": "**/*.json"}, # standardized
{"text/markdown": "**/*.md"}, # https://tools.ietf.org/html/rfc7763
{"text/rst": "**/*.rst"}, # https://en.wikipedia.org/wiki/ReStructuredText
{"text/ini": "**/*.ini"},
# YAML has no official IANA assignation
{"text/yaml": "**/{.ansible-lint,.yamllint}"},
{"text/yaml": "**/*.{yaml,yml}"},
{"text/yaml": "**/.*.{yaml,yml}"},
]
# Default runtime options for the linter; this Namespace is a module-level
# singleton that is mutated in place once CLI/config parsing has run
# (see the `configured` flag).
options = Namespace(
cache_dir=None,
colored=True,
configured=False,
cwd=".",
display_relative_path=True,
exclude_paths=[],
lintables=[],
listrules=False,
listtags=False,
parseable=False,
parseable_severity=False,
quiet=False,
rulesdirs=[],
skip_list=[],
tags=[],
verbosity=False,
warn_list=[],
kinds=DEFAULT_KINDS,
mock_modules=[],
mock_roles=[],
loop_var_prefix=None,
var_naming_pattern=None,
offline=False,
project_dir=".", # default should be valid folder (do not use None here)
extra_vars=None,
enable_list=[],
skip_action_validation=True,
rules=dict(), # Placeholder to set and keep configurations for each rule.
)
# Used to store detected tag deprecations
used_old_tags: Dict[str, str] = {}
# Used to store collection list paths (with mock paths if needed)
collection_list: List[str] = []
def get_rule_config(rule_id: str) -> Dict[str, Any]:
    """Return the per-rule configuration mapping for ``rule_id``.

    Falls back to an empty dict when the rule has no configuration and
    rejects non-mapping values with a RuntimeError.
    """
    config = options.rules.get(rule_id, {})
    if isinstance(config, dict):
        return config
    raise RuntimeError("Invalid rule config for %s: %s" % (rule_id, config))
@lru_cache()
def ansible_collections_path() -> str:
    """Return the name of the collections-path env variable to use."""
    # Mirror Ansible's own behavior: an explicitly exported variable wins,
    # with the legacy plural name checked first.
    for candidate in ("ANSIBLE_COLLECTIONS_PATHS", "ANSIBLE_COLLECTIONS_PATH"):
        if candidate in os.environ:
            return candidate
    # https://github.com/ansible/ansible/pull/70007 renamed the variable to
    # the singular form starting with ansible-core 2.10.
    if ansible_version() >= ansible_version("2.10.0.dev0"):
        return "ANSIBLE_COLLECTIONS_PATH"
    return "ANSIBLE_COLLECTIONS_PATHS"
def parse_ansible_version(stdout: str) -> Tuple[str, Optional[str]]:
    """Extract the version from ``ansible --version`` output.

    Returns a ``(version, error)`` pair: the parsed version string and
    ``None`` on success, or an empty string and a diagnostic message when
    no version line could be found.
    """
    # Debug mode can prepend extra lines, so scan every line (MULTILINE).
    # ansible-core 2.11+ / ansible-base 2.10 print 'ansible [core 2.11.3]';
    # older releases print a bare 'ansible 2.x.y'.
    patterns = (
        r"^ansible \[(?:core|base) ([^\]]+)\]",
        r"^ansible ([^\s]+)",
    )
    for pattern in patterns:
        found = re.search(pattern, stdout, re.MULTILINE)
        if found:
            return found.group(1), None
    return "", "FATAL: Unable parse ansible cli version: %s" % stdout
@lru_cache()
def ansible_version(version: str = "") -> Version:
    """Return a comparable Version object for Ansible.

    Without an argument, the installed ``ansible`` executable is probed and
    its reported version returned; with an argument, the given string is
    simply converted, which makes version comparisons convenient. Exits the
    process with ANSIBLE_MISSING_RC when no usable ansible is found.
    """
    if version:
        return Version(version)
    probe = subprocess.run(
        ["ansible", "--version"],
        universal_newlines=True,
        check=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if probe.returncode != 0:
        print(
            "Unable to find a working copy of ansible executable.",
            probe,
        )
        sys.exit(ANSIBLE_MISSING_RC)
    version, error = parse_ansible_version(probe.stdout)
    if error is not None:
        print(error)
        sys.exit(ANSIBLE_MISSING_RC)
    return Version(version)
# Seed collection_list from the environment at import time when the
# version-appropriate collections-path variable is exported.
if ansible_collections_path() in os.environ:
collection_list = os.environ[ansible_collections_path()].split(':')
| ansible/ansible-lint | src/ansiblelint/config.py | Python | mit | 6,042 | [
"Galaxy"
] | 92dbab92fe56e5b4e0969cff0af7a116a6c97e1214cc6d9da1c43fb7b3ff7cb1 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from datetime import datetime
from bok_choy.web_app_test import WebAppTest
from ...pages.common.logout import LogoutPage
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
    """
    Helper mixin shared by the learner-profile page test suites.
    """
    PRIVACY_PUBLIC = u'all_users'
    PRIVACY_PRIVATE = u'private'
    PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
    PRIVATE_PROFILE_FIELDS = ['username']
    PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
    USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"

    def log_in_as_unique_user(self):
        """
        Create and sign in a unique user; return (username, user_id).
        """
        unique_username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        auth_page = AutoAuthPage(self.browser, username=unique_username).visit()
        return unique_username, auth_page.get_user_id()

    def set_public_profile_fields_data(self, profile_page):
        """
        Populate the publicly visible profile fields with sample data.
        """
        profile_page.value_for_dropdown_field('language_proficiencies', 'English')
        profile_page.value_for_dropdown_field('country', 'United Kingdom')
        profile_page.value_for_textarea_field('bio', 'Nothing Special')

    def visit_profile_page(self, username, privacy=None):
        """
        Visit ``username``'s profile page, optionally forcing its privacy
        setting first.
        """
        profile_page = LearnerProfilePage(self.browser, username)
        if privacy is not None:
            # First visit is only for flipping the privacy dropdown (and,
            # for public profiles, filling the public fields).
            profile_page.visit()
            profile_page.wait_for_page()
            profile_page.privacy = privacy
            if privacy == self.PRIVACY_PUBLIC:
                self.set_public_profile_fields_data(profile_page)
            # Discard setup events so tests only observe events produced by
            # the final page load below.
            self.reset_event_tracking()
        profile_page.visit()
        profile_page.wait_for_page()
        return profile_page

    def set_birth_year(self, birth_year):
        """
        Set the current user's year of birth on the account settings page.
        """
        settings_page = AccountSettingsPage(self.browser)
        settings_page.visit()
        settings_page.wait_for_page()
        expected = str(birth_year)
        self.assertEqual(
            settings_page.value_for_dropdown_field('year_of_birth', expected),
            expected
        )

    def verify_profile_page_is_public(self, profile_page, is_editable=True):
        """
        Assert the profile page currently shows the public field set.
        """
        self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
        if not is_editable:
            self.assertEqual(profile_page.editable_fields, [])
        else:
            self.assertTrue(profile_page.privacy_field_visible)
            self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)

    def verify_profile_page_is_private(self, profile_page, is_editable=True):
        """
        Assert the profile page currently shows only the private field set.
        """
        if is_editable:
            self.assertTrue(profile_page.privacy_field_visible)
        self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)

    def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
        """
        Assert that the expected profile-page view tracking event was captured.
        """
        self.verify_events_of_type(
            requesting_username,
            u"edx.user.settings.viewed",
            [{
                u"user_id": int(profile_user_id),
                u"page": u"profile",
                u"visibility": unicode(visibility),
            }]
        )

    def assert_event_emitted_num_times(self, profile_user_id, setting, num_times):
        """
        Assert a user-settings change event for ``setting`` was emitted
        exactly ``num_times``.
        """
        # pylint: disable=no-member
        super(LearnerProfileTestMixin, self).assert_event_emitted_num_times(
            self.USER_SETTINGS_CHANGED_EVENT_NAME, self.start_time, profile_user_id,
            num_times, setting=setting
        )

    def verify_user_preference_changed_event(self, username, user_id, setting, old_value=None, new_value=None):
        """
        Assert the expected user-preference changed event was recorded.
        """
        self.verify_events_of_type(
            username,
            self.USER_SETTINGS_CHANGED_EVENT_NAME,
            [{
                u"user_id": long(user_id),
                u"table": u"user_api_userpreference",
                u"setting": unicode(setting),
                u"old": old_value,
                u"new": new_value,
                u"truncated": [],
            }],
            expected_referers=["/u/{username}".format(username=username)],
        )
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
    """
    Verify the age-limit / forced-private messaging for a user.

    Sets the account's birth year (clearing it when ``birth_year`` is None),
    visits the profile page, and checks that the expected age-limit message
    is (or is not) displayed.
    """
    self.set_birth_year(birth_year=birth_year if birth_year is not None else "")
    profile_page = self.visit_profile_page(username)
    self.assertTrue(profile_page.privacy_field_visible)
    self.assertEqual(profile_page.age_limit_message_present, message is not None)
    # Only check the message text when one is expected: assertIn(None, ...)
    # would raise TypeError instead of failing cleanly.
    if message is not None:
        self.assertIn(message, profile_page.profile_forced_private_message)
def test_profile_defaults_to_public(self):
    """
    A freshly created user's profile visibility defaults to public.
    """
    username, _user_id = self.log_in_as_unique_user()
    self.verify_profile_page_is_public(self.visit_profile_page(username))
def assert_default_image_has_public_access(self, profile_page):
    """
    Assert the default profile image is shown and publicly accessible.
    """
    self.assertTrue(profile_page.profile_has_default_image)
    self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
    """
    Switching visibility from private to public records a preference-change
    event and persists across a page reload.
    """
    username, user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
    page.privacy = self.PRIVACY_PUBLIC
    self.verify_user_preference_changed_event(
        username, user_id, "account_privacy",
        # Default was public, so setup flipped it to private first.
        old_value=self.PRIVACY_PRIVATE,
        new_value=self.PRIVACY_PUBLIC,
    )
    # A reload must show the profile as public.
    self.browser.refresh()
    page.wait_for_page()
    self.verify_profile_page_is_public(page)
def test_make_profile_private(self):
    """
    Switching visibility from public to private records a preference-change
    event and persists across a page reload.
    """
    username, user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    page.privacy = self.PRIVACY_PRIVATE
    self.verify_user_preference_changed_event(
        username, user_id, "account_privacy",
        # No stored preference beforehand: the default is already public.
        old_value=None,
        new_value=self.PRIVACY_PRIVATE,
    )
    # A reload must show the profile as private.
    self.browser.refresh()
    page.wait_for_page()
    self.verify_profile_page_is_private(page)
def test_dashboard_learner_profile_link(self):
    """
    The dashboard's username dropdown exposes a working "My Profile" link.
    """
    username, _user_id = self.log_in_as_unique_user()
    dashboard = DashboardPage(self.browser)
    dashboard.visit()
    dashboard.click_username_dropdown()
    self.assertTrue('My Profile' in dashboard.username_dropdown_link_text)
    # Following the link must land on this user's profile page.
    dashboard.click_my_profile_link()
    LearnerProfilePage(self.browser, username).wait_for_page()
def test_fields_on_my_private_profile(self):
    """
    One's own private profile shows only the private field set, and the
    page-view event reports private visibility.
    """
    username, user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
    self.verify_profile_page_is_private(page)
    self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_fields_on_my_public_profile(self):
    """
    One's own public profile shows all public fields, with the editable
    subset editable, and the page-view event reports public visibility.
    """
    username, user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.verify_profile_page_is_public(page)
    self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
    """
    Exercise a dropdown field.

    Sets ``field_id`` to ``new_value``, then asserts the displayed value and
    the field's mode, both immediately and after a page reload (to prove the
    change was persisted server-side).
    """
    profile_page.value_for_dropdown_field(field_id, new_value)
    self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
    # Was assertTrue(value, mode), which used `mode` as the failure message
    # and asserted nothing about it; assertEqual matches the equality check
    # the callers' docstrings describe.
    self.assertEqual(profile_page.mode_for_field(field_id), mode)
    self.browser.refresh()
    profile_page.wait_for_page()
    self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
    self.assertEqual(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
    """
    Exercise a textarea field.

    Sets ``field_id`` to ``new_value``, then asserts the displayed value and
    the field's mode, both immediately and after a page reload (to prove the
    change was persisted server-side).
    """
    profile_page.value_for_textarea_field(field_id, new_value)
    self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
    # Was assertTrue(value, mode), which used `mode` as the failure message
    # and asserted nothing about it; assertEqual matches the equality check
    # the callers' docstrings describe.
    self.assertEqual(profile_page.mode_for_field(field_id), mode)
    self.browser.refresh()
    profile_page.wait_for_page()
    self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
    self.assertEqual(profile_page.mode_for_field(field_id), mode)
def test_country_field(self):
    """
    The country dropdown accepts a value, persists it across reloads, and
    becomes editable with its icon shown.
    """
    username, _user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self._test_dropdown_field(page, 'country', 'Pakistan', 'Pakistan', 'display')
    page.make_field_editable('country')
    # NOTE(review): `'edit'` here is assertTrue's msg argument, not an
    # expected value; assertEqual was probably intended — confirm.
    self.assertTrue(page.mode_for_field('country'), 'edit')
    self.assertTrue(page.field_icon_present('country'))
def test_language_field(self):
    """
    The language dropdown accepts a value, can be cleared back to its
    placeholder, persists across reloads, and becomes editable with its
    icon shown.
    """
    username, _user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self._test_dropdown_field(page, 'language_proficiencies', 'Urdu', 'Urdu', 'display')
    self._test_dropdown_field(page, 'language_proficiencies', '', 'Add language', 'placeholder')
    page.make_field_editable('language_proficiencies')
    # NOTE(review): `'edit'` here is assertTrue's msg argument, not an
    # expected value; assertEqual was probably intended — confirm.
    self.assertTrue(page.mode_for_field('language_proficiencies'), 'edit')
    self.assertTrue(page.field_icon_present('language_proficiencies'))
def test_about_me_field(self):
    """
    The "About me" textarea accepts a value, can be cleared back to its
    placeholder text, persists across reloads, and becomes editable.
    """
    placeholder_value = (
        "Tell other learners a little about yourself: where you live, what your interests are, "
        "why you're taking courses, or what you hope to learn."
    )
    username, _user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self._test_textarea_field(page, 'bio', 'Eat Sleep Code', 'Eat Sleep Code', 'display')
    self._test_textarea_field(page, 'bio', '', placeholder_value, 'placeholder')
    page.make_field_editable('bio')
    # NOTE(review): `'edit'` here is assertTrue's msg argument, not an
    # expected value; assertEqual was probably intended — confirm.
    self.assertTrue(page.mode_for_field('bio'), 'edit')
def test_birth_year_not_set(self):
    """
    Without a birth year the profile is forced private with an explanatory
    message, and the view event reports private visibility.
    """
    username, user_id = self.log_in_as_unique_user()
    expected_message = "You must specify your birth year before you can share your full profile."
    self.verify_profile_forced_private_message(username, birth_year=None, message=expected_message)
    self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
    """
    A user younger than 13 gets a forced-private profile with the
    under-age message, and the view event reports private visibility.
    """
    username, user_id = self.log_in_as_unique_user()
    # Pick a birth year that puts the user well under the age limit.
    self.verify_profile_forced_private_message(
        username,
        birth_year=datetime.now().year - 10,
        message='You must be over 13 to share a full profile.'
    )
    self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
    """
    An under-age (forced-private) profile shows the default image, and the
    image cannot be uploaded or removed.
    """
    under_age_year = datetime.now().year - 5
    username, _user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
    self.verify_profile_forced_private_message(
        username,
        under_age_year,
        message='You must be over 13 to share a full profile.'
    )
    self.assertTrue(page.profile_has_default_image)
    self.assertFalse(page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
    """
    A public profile shows the default image with working upload/remove
    controls.
    """
    username, _user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(page)
def test_user_can_upload_the_profile_image_with_success(self):
    """
    Uploading a valid image succeeds, survives a page reload, and emits
    exactly one profile-image-uploaded event.
    """
    username, user_id = self.log_in_as_unique_user()
    page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(page)
    page.upload_file(filename='image.jpg')
    self.assertTrue(page.image_upload_success)
    # The uploaded image must still be there after a fresh load.
    page.visit()
    self.assertTrue(page.image_upload_success)
    self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 1)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
    """
    Scenario: Upload profile image does not work for > 1MB image file.

    Given that I am on my profile page with public access
    And I can see default image
    When I move my cursor to the image
    Then I can see the upload/remove image text
    When I upload new > 1MB image via file uploader
    Then I can see the error message for file size limit
    And I can still see the default image after page reload.
    """
    username, user_id = self.log_in_as_unique_user()
    profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(profile_page)
    profile_page.upload_file(filename='larger_image.jpg')
    self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
    # Reload to verify the oversized upload was rejected and nothing persisted.
    profile_page.visit()
    self.assertTrue(profile_page.profile_has_default_image)
    # No upload event should have been emitted for the rejected file.
    self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 0)
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
    """
    Scenario: Upload profile image does not work for < 100 Bytes image file.

    Given that I am on my profile page with public access
    And I can see default image
    When I move my cursor to the image
    Then I can see the upload/remove image text
    When I upload new < 100 Bytes image via file uploader
    Then I can see the error message for minimum file size limit
    And I can still see the default image after page reload.
    """
    username, user_id = self.log_in_as_unique_user()
    profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(profile_page)
    # This fixture image is known to be under the 100-byte minimum.
    profile_page.upload_file(filename='list-icon-visited.png')
    self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
    # Reload to verify the undersized upload was rejected and nothing persisted.
    profile_page.visit()
    self.assertTrue(profile_page.profile_has_default_image)
    self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 0)
def test_user_can_see_error_for_wrong_file_type(self):
    """
    Scenario: Upload profile image does not work for wrong file types.

    Given that I am on my profile page with public access
    And I can see default image
    When I move my cursor to the image
    Then I can see the upload/remove image text
    When I upload new csv file via file uploader
    Then I can see the error message for wrong/unsupported file type
    And I can still see the default image after page reload.
    """
    username, user_id = self.log_in_as_unique_user()
    profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(profile_page)
    # A .csv file is not one of the accepted image formats.
    profile_page.upload_file(filename='cohort_users_only_username.csv')
    self.assertEqual(
        profile_page.profile_image_message,
        "The file must be one of the following types: .gif, .png, .jpeg, .jpg."
    )
    # Reload to verify the invalid upload was rejected and nothing persisted.
    profile_page.visit()
    self.assertTrue(profile_page.profile_has_default_image)
    self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 0)
def test_user_can_remove_profile_image(self):
    """
    Scenario: Remove profile image works correctly.

    Given that I am on my profile page with public access
    And I can see default image
    When I move my cursor to the image
    Then I can see the upload/remove image text
    When I click on the remove image link
    Then I can see the default image
    And I can still see the default image after page reload.
    """
    username, user_id = self.log_in_as_unique_user()
    profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(profile_page)
    profile_page.upload_file(filename='image.jpg')
    self.assertTrue(profile_page.image_upload_success)
    self.assertTrue(profile_page.remove_profile_image())
    self.assertTrue(profile_page.profile_has_default_image)
    profile_page.visit()
    self.assertTrue(profile_page.profile_has_default_image)
    # NOTE(review): the count of 2 implies removal also updates
    # 'profile_image_uploaded_at' (upload + reset on removal) -- confirm
    # against the eventing implementation.
    self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 2)
def test_user_cannot_remove_default_image(self):
    """
    Scenario: Remove profile image does not work for default images.

    Given that I am on my profile page with public access
    And I can see default image
    When I move my cursor to the image
    Then I can see only the upload image text
    And I cannot see the remove image text
    """
    username, user_id = self.log_in_as_unique_user()
    profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(profile_page)
    # The remove link must be absent while the default image is in place.
    self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
    """
    Scenario: An event is fired when a user with a profile image uploads another image.

    Given that I am on my profile page with public access
    And I upload a new image via file uploader
    When I upload another image via the file uploader
    Then two upload events have been emitted
    """
    username, user_id = self.log_in_as_unique_user()
    profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
    self.assert_default_image_has_public_access(profile_page)
    profile_page.upload_file(filename='image.jpg')
    self.assertTrue(profile_page.image_upload_success)
    # Second upload replaces the first; the upload button is not re-shown
    # between uploads, hence wait_for_upload_button=False.
    profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
    self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 2)
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
    """
    Tests that verify viewing the profile page of a different user.
    """

    def test_different_user_private_profile(self):
        """
        Scenario: Verify that desired fields are shown when looking at a different user's private profile.

        Given that I am a registered user.
        And I visit a different user's private profile page.
        Then I shouldn't see the profile visibility selector dropdown.
        Then I see some of the profile fields are shown.
        """
        different_username, different_user_id = self._initialize_different_user(privacy=self.PRIVACY_PRIVATE)
        username, __ = self.log_in_as_unique_user()
        profile_page = self.visit_profile_page(different_username)
        # Another user's private profile is never editable by the viewer.
        self.verify_profile_page_is_private(profile_page, is_editable=False)
        self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)

    def test_different_user_under_age(self):
        """
        Scenario: Verify that an under age user's profile is private to others.

        Given that I am a registered user.
        And I visit an under age user's profile page.
        Then I shouldn't see the profile visibility selector dropdown.
        Then I see that only the private fields are shown.
        """
        # An under-age profile must appear private even when set to public.
        under_age_birth_year = datetime.now().year - 10
        different_username, different_user_id = self._initialize_different_user(
            privacy=self.PRIVACY_PUBLIC,
            birth_year=under_age_birth_year
        )
        username, __ = self.log_in_as_unique_user()
        profile_page = self.visit_profile_page(different_username)
        self.verify_profile_page_is_private(profile_page, is_editable=False)
        self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)

    def test_different_user_public_profile(self):
        """
        Scenario: Verify that desired fields are shown when looking at a different user's public profile.

        Given that I am a registered user.
        And I visit a different user's public profile page.
        Then I shouldn't see the profile visibility selector dropdown.
        Then all the profile fields are shown.
        Then I shouldn't see the profile visibility selector dropdown.
        Also `location`, `language` and `about me` fields are not editable.
        """
        different_username, different_user_id = self._initialize_different_user(privacy=self.PRIVACY_PUBLIC)
        username, __ = self.log_in_as_unique_user()
        profile_page = self.visit_profile_page(different_username)
        profile_page.wait_for_public_fields()
        self.verify_profile_page_is_public(profile_page, is_editable=False)
        self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)

    def _initialize_different_user(self, privacy=None, birth_year=None):
        """
        Initialize the profile page for a different test user.

        Creates and logs in a fresh user, sets the requested privacy (and
        optionally year of birth), then logs out so the actual test user can
        log in afterwards. Returns (username, user_id) of the created user.
        """
        username, user_id = self.log_in_as_unique_user()
        # Set the privacy for the new user
        if privacy is None:
            privacy = self.PRIVACY_PUBLIC
        self.visit_profile_page(username, privacy=privacy)
        # Set the user's year of birth
        if birth_year:
            self.set_birth_year(birth_year)
        # Log the user out
        LogoutPage(self.browser).visit()
        return username, user_id
| DefyVentures/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | Python | agpl-3.0 | 30,799 | [
"VisIt"
] | 844b1425866bc0365917af1ff362cca1dfa53ba1e2acf0e5730bb261768837c8 |
# -*- coding: utf-8 -*-
__author__ = "Andres Giordano"
__version__ = "1.0"
__maintainer__ = "Andres Giordano"
__email__ = "andresgiordano.unlu@gmail.com"
__status__ = "Produccion"
import matplotlib as mpl
import matplotlib.pyplot as plt
import setuptools
import os.path
from pyart.graph import RadarDisplay, common, GridMapDisplay, RadarMapDisplay
import pyart.map
import pyart.config
import pyart.io
from .RainbowRadar import RainbowRadar
from .Utils import fig2img,PNG,JPEG,gen_ticks
class RainbowRadarProcessor(object):
    """Render and export products (PPI/CAPPI images, NetCDF, GeoTIFF) from a RainbowRadar volume."""

    def __init__(self, rainbowRadar):
        """
        Constructor.

        :param RainbowRadar rainbowRadar: radar volume to process.
        :raises Exception: if ``rainbowRadar`` is not a RainbowRadar instance.
        """
        self.__rainbowRadar = None
        if isinstance(rainbowRadar, RainbowRadar):
            self.__rainbowRadar = rainbowRadar
        else:
            raise Exception("El parametro rainbowRadar no es una instancia de RainbowRadar")
        # Source volume location/name, used to build default output paths.
        self.__volPath = self.__rainbowRadar.getFilePath()
        self.__volFileName = self.__rainbowRadar.getFileName()
        # NOTE(review): the extension says ".grib" although the name suggests
        # a PPI product; this attribute is not used elsewhere in this file -- confirm intent.
        self.__RADAR_FILE_OUT = self.__volPath + self.__volFileName + "_ppi.grib"
def getPPIImage(self, elevation, mask=None, figsize=(25, 25), paddingImg=1, basemapFlag=True, basemapShapeFile=None,
                dpi=200, font=None):
    """
    Return a 2-D image (PIL) of the radar return for the requested sweep (PPI).

    :param elevation: index of the sweep (elevation) to plot.
    :type elevation: int
    :param mask: mask to apply to the raw radar data before extracting the sweep.
    :param figsize: tuple with the size of the generated figure.
    :type figsize: tuple of int
    :param paddingImg: padding added around the radar image extent.
    :type paddingImg: int
    :param basemapFlag: if True, the sweep is drawn over a basemap layer.
    :type basemapFlag: bool
    :param basemapShapeFile: path to the shapefile (.shp) layer for basemap; a bundled default layer is used when None.
    :type basemapShapeFile: str
    :param dpi: resolution of the generated figure.
    :type dpi: int
    :param font: font configuration for the plot --> ``Matplotlib.rc('font', **font)``. Default: ``{'family': 'sans-serif', 'size': 35}``.
    :return: a PIL image of the rendered sweep.
    """
    plt.clf()
    if font is None:
        font = {'family': 'sans-serif', 'size': 35}
    mpl.rc('font', **font)
    # Apply the mask to the raw radar data before extracting the sweep.
    if mask is not None:
        self.__rainbowRadar.setMask(maskString=mask, dst='raw')
    eleN = self.__rainbowRadar.getSweep(elevation)
    display_variable = RadarDisplay(eleN)
    fig = plt.figure(figsize=figsize,dpi=dpi)
    fig.add_subplot(1, 1, 1, aspect=1.0)
    # Range rings: four evenly spaced rings out to the radar's stop range.
    rango_anillos = self.__rainbowRadar.getStopRange() / 4
    anillos = [rango_anillos, rango_anillos * 2, rango_anillos * 3, self.__rainbowRadar.getStopRange()]
    # Bounding box (min/max lat/lon) from the grid, widened by the padding.
    min_lat = self.__rainbowRadar.getMinLat() - paddingImg
    max_lat = self.__rainbowRadar.getMaxLat() + paddingImg
    min_lon = self.__rainbowRadar.getMinLon() - paddingImg
    max_lon = self.__rainbowRadar.getMaxLon() + paddingImg
    titulo = common.generate_title(self.__rainbowRadar.getRadar(), self.__rainbowRadar.getRadarVariable()[1],
                                   elevation, datetime_format='%d-%m-%Y %M:%S')
    if basemapFlag:
        # Re-create the display as a map-aware one (the plain RadarDisplay
        # built above is discarded in this branch).
        display_variable = RadarMapDisplay(eleN)
        # Use the user-supplied shapefile when given, otherwise the bundled default layer.
        if basemapShapeFile is not None:
            display_variable.plot_ppi_map(self.__rainbowRadar.getRadarVariable()[1],
                                          colorbar_label=self.__rainbowRadar.getRadarVariable()[5],
                                          title=titulo,
                                          vmin=self.__rainbowRadar.getRadarVariable()[3],
                                          vmax=self.__rainbowRadar.getRadarVariable()[4],
                                          cmap=self.__rainbowRadar.getRadarVariable()[2],
                                          shapefile=basemapShapeFile,
                                          min_lat=min_lat,
                                          max_lat=max_lat,
                                          min_lon=min_lon,
                                          max_lon=max_lon
                                          )
        else:
            display_variable.plot_ppi_map(self.__rainbowRadar.getRadarVariable()[1],
                                          colorbar_label=self.__rainbowRadar.getRadarVariable()[5],
                                          title=titulo,
                                          vmin=self.__rainbowRadar.getRadarVariable()[3],
                                          vmax=self.__rainbowRadar.getRadarVariable()[4],
                                          cmap=self.__rainbowRadar.getRadarVariable()[2],
                                          shapefile=os.path.dirname(
                                              __file__) + '/departamento/departamento',
                                          min_lat=min_lat,
                                          max_lat=max_lat,
                                          min_lon=min_lon,
                                          max_lon=max_lon
                                          )
        display_variable.basemap.fillcontinents(lake_color='aqua',
                                                alpha=0.2)
        # Add latitude/longitude tick lines around the radar location.
        orig_lat = self.__rainbowRadar.getLatitude()
        orig_lon = self.__rainbowRadar.getLongitude()
        lat_ticks = gen_ticks(orig_lat, min_lat, max_lat)
        lon_ticks = gen_ticks(orig_lon, min_lon, max_lon)
        display_variable.basemap.drawparallels(lat_ticks, labels=[1, 0, 0, 0], labelstyle='+/-',
                                               fmt='%.2f', linewidth=0, rotation=45)
        display_variable.basemap.drawmeridians(lon_ticks, labels=[0, 0, 0, 1], labelstyle='+/-',
                                               fmt='%.2f', linewidth=0, rotation=45)
    else:
        # Plain PPI plot (no basemap): axis limits in km centred on the radar.
        radar_range = [-self.__rainbowRadar.getStopRange() - paddingImg, self.__rainbowRadar.getStopRange() + paddingImg]
        display_variable.plot_ppi(self.__rainbowRadar.getRadarVariable()[1],
                                  colorbar_label=self.__rainbowRadar.getRadarVariable()[5],
                                  title=titulo,
                                  axislabels=('Distancia en X (km)', 'Distancia en Y (km)'),
                                  vmin=self.__rainbowRadar.getRadarVariable()[3],
                                  vmax=self.__rainbowRadar.getRadarVariable()[4],
                                  cmap=self.__rainbowRadar.getRadarVariable()[2])
        # NOTE(review): grouping of the next four calls inside this branch was
        # inferred from the use of radar_range, which is only defined here.
        display_variable.set_limits(radar_range, radar_range)
        display_variable.plot_range_rings(anillos, lw=0.5)
        r_max = self.__rainbowRadar.getStopRange()
        display_variable.plot_cross_hair(r_max)
    # Convert the matplotlib figure to a PIL image and release the figure.
    res = fig2img(plt.gcf())
    plt.close(fig)
    return res
def getCAPPIImage(self, level, mask=None, figsize=(25, 25), paddingImg=1, basemapFlag=True,
                  basemapShapeFile=None, dpi=200, font=None):
    """
    Generate the radar image (CAPPI) from the cartesian grid.

    :param level: grid level to plot. E.g. 3 --> 2 km < altitude <= 3 km.
    :param mask: mask to apply to the selected grid level once generated.
    :param figsize: tuple with the size of the generated figure.
    :param paddingImg: padding added around the radar image extent.
    :param basemapFlag: if True the grid level is drawn over a basemap layer.
    :param basemapShapeFile: path to the shapefile (.shp) layer for basemap; a bundled default layer is used when None.
    :param dpi: resolution of the generated figure.
    :param font: font configuration for the plot --> ``Matplotlib.rc('font', **font)``. Default: ``{'family': 'sans-serif', 'size': 35}``.
    :return: a PIL image of the rendered grid level.
    """
    plt.clf()
    if font is None:
        font = {'family': 'sans-serif', 'size': 35}
    mpl.rc('font', **font)
    grilla = self.__rainbowRadar.getCartesianGrid()
    # Apply the mask to the selected grid level.
    if mask is not None:
        self.__rainbowRadar.setMask(mask, level=level)
    # create the plot
    fig = plt.figure(figsize=figsize,dpi=dpi)
    ax = fig.add_subplot(111)
    ############################################################################
    # Title: "<grid name> <height> km <timestamp>" plus the field name.
    time_str = common.generate_grid_time_begin(grilla).strftime('%d-%m-%Y %M:%S')
    height = grilla.z['data'][level] / 1000.
    l1 = "%s %.1f km %s " % (common.generate_grid_name(grilla), height,
                             time_str)
    field_name = common.generate_field_name(grilla, self.__rainbowRadar.getRadarVariable()[1])
    titulo = l1 + '\n' + field_name
    ############################################################################
    if basemapFlag:
        # Draw the grid level over a basemap.
        grid_plot = GridMapDisplay(grilla)
        min_lat = self.__rainbowRadar.getMinLat() - paddingImg
        max_lat = self.__rainbowRadar.getMaxLat() + paddingImg
        min_lon = self.__rainbowRadar.getMinLon() - paddingImg
        max_lon = self.__rainbowRadar.getMaxLon() + paddingImg
        grid_plot.plot_basemap(min_lon=min_lon,max_lon=max_lon,min_lat=min_lat,max_lat=max_lat,auto_range=False,resolution='h', projection='lcc')
        # Use the user-supplied shapefile when given, otherwise the bundled default layer.
        if basemapShapeFile is not None:
            grid_plot.basemap.readshapefile(basemapShapeFile,
                                            os.path.basename(basemapShapeFile))
        else:
            grid_plot.basemap.readshapefile(os.path.dirname(__file__) + '/departamento/departamento', 'departamento',default_encoding='LATIN1')
        grid_plot.basemap.fillcontinents(lake_color='aqua',
                                         alpha=0.2)
        grid_plot.plot_grid(self.__rainbowRadar.getRadarVariable()[1],
                            colorbar_label=self.__rainbowRadar.getRadarVariable()[5],
                            title=titulo,
                            title_flag=True,
                            colorbar_flag=True,
                            vmin=self.__rainbowRadar.getRadarVariable()[3],
                            vmax=self.__rainbowRadar.getRadarVariable()[4],
                            cmap=self.__rainbowRadar.getRadarVariable()[2],
                            level=level,
                            fig=fig,ax=ax)
        # Add latitude/longitude tick lines around the radar location.
        orig_lat = self.__rainbowRadar.getLatitude()
        orig_lon = self.__rainbowRadar.getLongitude()
        lat_ticks = gen_ticks(orig_lat, min_lat, max_lat)
        lon_ticks = gen_ticks(orig_lon, min_lon, max_lon)
        grid_plot.basemap.drawparallels(lat_ticks, labels=[1, 0, 0, 0], labelstyle='+/-',
                                        fmt='%.2f', linewidth=0, rotation=45)
        grid_plot.basemap.drawmeridians(lon_ticks, labels=[0, 0, 0, 1], labelstyle='+/-',
                                        fmt='%.2f', linewidth=0, rotation=45)
    else:
        # Plain plot with range rings (no basemap).
        # Grid limits in km.
        radar_range = [-self.__rainbowRadar.getStopRange() - paddingImg, self.__rainbowRadar.getStopRange() + paddingImg]
        # Extent shift so the centre of the plot is (0, 0).
        shift = (-self.__rainbowRadar.getStopRange(), self.__rainbowRadar.getStopRange(), -self.__rainbowRadar.getStopRange(), self.__rainbowRadar.getStopRange())
        # BUG FIX: origin was 'origin', which is not a valid matplotlib value
        # ({'upper', 'lower'}). Old matplotlib silently treated any value other
        # than 'upper' as 'lower', so 'lower' preserves the rendered
        # orientation while being valid on current matplotlib.
        im = ax.imshow(grilla.fields[self.__rainbowRadar.getRadarVariable()[1]]['data'][level],
                       origin='lower',
                       vmin=self.__rainbowRadar.getRadarVariable()[3],
                       vmax=self.__rainbowRadar.getRadarVariable()[4],
                       cmap=self.__rainbowRadar.getRadarVariable()[2],
                       extent=shift)
        plt.xlabel('Distancia en X (km)')
        plt.ylabel('Distancia en Y (km)')
        plt.title(titulo)
        # Range rings: four evenly spaced rings out to the stop range.
        rangoAnillos = self.__rainbowRadar.getStopRange() / 4
        anillos = [rangoAnillos, rangoAnillos * 2, rangoAnillos * 3, self.__rainbowRadar.getStopRange()]
        Rmax = self.__rainbowRadar.getStopRange()
        fig.colorbar(im, ax=ax, cax=None)
        # Rings, limits and cross hair via RadarDisplay's helpers.
        # NOTE(review): these are called unbound on the class; this relies on
        # them being static methods in the installed Py-ART version -- confirm.
        for range_ring_location_km in anillos:
            RadarDisplay.plot_range_ring(range_ring_location_km, lw=0.5)
        RadarDisplay.set_limits(radar_range, radar_range)
        RadarDisplay.plot_cross_hair(Rmax)
    # Convert the matplotlib figure to a PIL image and release the figure.
    res = fig2img(plt.gcf())
    plt.close(fig)  # workaround for bug #3
    return res
def showImage(self, elevation):
    """Display the CAPPI image for the given grid level using PIL's viewer.

    NOTE(review): the parameter is named ``elevation`` but it is forwarded to
    :func:`getCAPPIImage`, whose first argument is a grid ``level`` -- confirm
    the intended semantics.
    """
    self.getCAPPIImage(elevation).show()
def saveImageToFile(self, pathOutput=None, fileOutput=None,
                    imageType=PNG, method='grid', image_method_params=None):
    """
    Save the plot of the selected product to an image file.

    :param pathOutput: destination folder. Default: the folder of the .vol file.
    :param fileOutput: destination file name. Default: the .vol file name plus a suffix with the level/elevation.
    :param imageType: format in which the image will be stored (PNG or JPEG).
    :param method: how the image is produced. 'grid' renders from the cartesian grid (:func:`getCAPPIImage`) and 'simple' renders the raw sweep (:func:`getPPIImage`).
    :param image_method_params: extra keyword arguments forwarded to the generation method selected by `method`. Default: level/elevation 0.
    :type image_method_params: dict
    :raises Exception: if `method` is not one of 'grid' or 'simple'.
    :return: None
    """
    # Render the image with the requested method; the metadata suffix records
    # which level/elevation was plotted.
    if method == 'grid':
        method_params = {'level': 0}
        if image_method_params is not None:
            method_params.update(image_method_params)
        elevationImg = self.getCAPPIImage(**method_params)
        m_filename_metadata = "_nivel_" + str(method_params['level'])
    elif method == 'simple':
        method_params = {'elevation': 0}
        if image_method_params is not None:
            method_params.update(image_method_params)
        elevationImg = self.getPPIImage(**method_params)
        m_filename_metadata = "_elevacion_" + str(method_params['elevation'])
    else:
        raise Exception(
            "El metodo " + method + " no es un metodo valido para obtener la imagen. Posibles: [grid,simple] ")
    # Build the destination path (refactored from four copy-pasted branches;
    # behavior is preserved exactly).
    if fileOutput is not None and pathOutput is None:
        # NOTE(review): historical quirk kept for backward compatibility --
        # when only fileOutput is given, the metadata suffix is NOT appended.
        destination = self.__volPath + fileOutput + '.' + imageType
    else:
        basePath = pathOutput if pathOutput is not None else self.__volPath
        baseName = fileOutput if fileOutput is not None else self.__volFileName
        destination = basePath + baseName + m_filename_metadata + '.' + imageType
    if imageType == JPEG:
        # JPEG has no alpha channel, so drop it before saving.
        elevationImg.convert("RGB").save(destination, quality=95)
    else:
        # 'quality' is a JPEG-specific option; PNG ignores it.
        elevationImg.save(destination, quality=95)
def saveToNETCDF(self, outFilePath, outFileName):
    """
    Save the cartesian grid in NetCDF format.

    :param outFilePath: directory where the file will be stored.
    :param outFileName: name of the file to save (".netCDF" is appended).
    """
    pyart.io.write_grid(outFilePath + outFileName + ".netCDF",
                        self.__rainbowRadar.getCartesianGrid(),
                        format='NETCDF3_64BIT',
                        arm_time_variables=True)
def saveToGTiff(self, level, outFilePath, outFileName):
    """
    Save one level of the cartesian grid as a GeoTIFF file.

    :param level: grid level to export.
    :param outFilePath: directory where the file will be stored.
    :param outFileName: base name of the output file ("_level_<n>.tif" is appended).
    """
    pyart.io.write_grid_geotiff(self.__rainbowRadar.getCartesianGrid(),
                                filename=outFilePath + outFileName + "_level_" + str(level) + ".tif",
                                field=self.__rainbowRadar.getRadarVariable()[1],
                                level=level,
                                # RGB rendering options kept for reference but disabled:
                                #rgb=True,
                                #cmap= self.__rainbowRadar.getRadarVariable()[2],
                                #vmin= self.__rainbowRadar.getRadarVariable()[3],
                                #vmax= self.__rainbowRadar.getRadarVariable()[4],
                                warp=True
                                )
| INTA-Radar/radar-cmd | Procesador/RainbowRadarProcessor.py | Python | gpl-3.0 | 18,237 | [
"NetCDF"
] | a7e71f67defd0e63ba7bb9a574d663eddaac0a2fef2b82a1f2971ec3e4464c24 |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .stationary import Stationary
from .psi_comp import PSICOMP_RBF, PSICOMP_RBF_GPU
from ...core import Param
from paramz.caching import Cache_this
from paramz.transformations import Logexp
from .grid_kerns import GridRBF
class RBF(Stationary):
    """
    Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:

    .. math::

       k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg)

    """
    # This kernel has a GPU implementation of its psi statistics.
    _support_GPU = True

    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False, inv_l=False):
        super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
        # Select the GPU or CPU implementation of the psi-statistics computations.
        if self.useGPU:
            self.psicomp = PSICOMP_RBF_GPU()
        else:
            self.psicomp = PSICOMP_RBF()
        # Optional reparameterisation by the inverse squared lengthscale:
        # inv_l = 1 / lengthscale**2, kept positive via the Logexp transform.
        self.use_invLengthscale = inv_l
        if inv_l:
            self.unlink_parameter(self.lengthscale)
            self.inv_l = Param('inv_lengthscale',1./self.lengthscale**2, Logexp())
            self.link_parameter(self.inv_l)
def to_dict(self):
    """
    Convert the object into a json serializable dictionary.

    Note: It uses the private method _save_to_input_dict of the parent.

    :return dict: json serializable dictionary containing the needed information to instantiate the object
    """
    input_dict = super(RBF, self)._save_to_input_dict()
    input_dict["class"] = "GPy.kern.RBF"
    input_dict["inv_l"] = self.use_invLengthscale
    # Idiom fix: test the boolean directly instead of `== True`.
    if self.use_invLengthscale:
        # The parameterisation stores inv_l = 1/lengthscale**2; export the
        # equivalent lengthscale so the dict round-trips through __init__.
        input_dict["lengthscale"] = np.sqrt(1 / float(self.inv_l))
    return input_dict
def K_of_r(self, r):
    """Kernel value as a function of the scaled distance r: sigma^2 * exp(-r^2/2)."""
    return self.variance * np.exp(-0.5 * r**2)
@Cache_this(limit=3, ignore_args=())
def dK_dX(self, X, X2, dimX):
    """Derivative of K(X, X2) w.r.t. dimension ``dimX`` of the first input.

    Returns an (n1, n2) array: -K * (X[:, dimX] - X2[:, dimX]) / lengthscale[dimX]**2.
    NOTE(review): unlike dK2_dXdX2, X2=None is not handled here -- callers
    must pass X2 explicitly.
    """
    r = self._scaled_dist(X, X2)
    K = self.K_of_r(r)
    dist = X[:,None,dimX]-X2[None,:,dimX]
    # Per-dimension inverse squared lengthscale (same value for all dims when not ARD).
    lengthscale2inv = (np.ones((X.shape[1]))/(self.lengthscale**2))[dimX]
    return -1.*K*dist*lengthscale2inv

@Cache_this(limit=3, ignore_args=())
def dK_dX2(self, X, X2, dimX2):
    """Derivative w.r.t. dimension ``dimX2`` of the second input (antisymmetric to dK_dX)."""
    return -self.dK_dX(X,X2, dimX2)
@Cache_this(limit=3, ignore_args=())
def dK2_dXdX2(self, X, X2, dimX, dimX2):
    """Mixed second derivative of K w.r.t. X[:, dimX] and X2[:, dimX2].

    The second term (Kronecker delta on dimX == dimX2) comes from
    differentiating the distance along the same dimension twice.
    """
    r = self._scaled_dist(X, X2)
    K = self.K_of_r(r)
    if X2 is None:
        X2=X
    dist = X[:,None,:]-X2[None,:,:]
    lengthscale2inv = np.ones((X.shape[1]))/(self.lengthscale**2)
    return -1.*K*dist[:,:,dimX]*dist[:,:,dimX2]*lengthscale2inv[dimX]*lengthscale2inv[dimX2] + (dimX==dimX2)*K*lengthscale2inv[dimX]
def dK_dr(self, r):
    """First derivative of k(r) w.r.t. the scaled distance r: -r * k(r)."""
    return -r*self.K_of_r(r)

def dK2_drdr(self, r):
    """Second derivative of k(r) w.r.t. r: (r^2 - 1) * k(r)."""
    return (r**2-1)*self.K_of_r(r)

def dK2_drdr_diag(self):
    """Second derivative on the diagonal, where r = 0: -sigma^2."""
    return -self.variance # as the diagonal of r is always filled with zeros
@Cache_this(limit=3, ignore_args=())
def dK_dvariance(self,X,X2):
    """Derivative of K w.r.t. the variance: K / sigma^2 (K is linear in the variance)."""
    return self.K(X,X2)/self.variance
@Cache_this(limit=3, ignore_args=())
def dK_dlengthscale(self,X,X2):
    """Derivative of the kernel matrix w.r.t. the lengthscale(s).

    Returns a list of (n1, n2) arrays (one per input dimension) when ARD,
    otherwise a single (n1, n2) array summed over dimensions.
    """
    r = self._scaled_dist(X, X2)
    K = self.K_of_r(r)
    if X2 is None:
        X2=X
    dist = X[:,None,:]-X2[None,:,:]
    lengthscaleinv = np.ones((X.shape[1]))/(self.lengthscale)
    if self.ARD:
        g = []
        for diml in range(X.shape[1]):
            # BUG FIX: the original indexed dist with the undefined name
            # `dimX` (a NameError at runtime). Each per-dimension gradient
            # uses the squared distance along `diml`, consistent with the
            # ARD branch of dK2_dlengthscaledX and with the non-ARD branch
            # below (which sums dist**2 over dimensions).
            g += [ (dist[:,:,diml]**2)*(lengthscaleinv[diml]**3)*K]
    else:
        g = np.sum(dist**2, axis=2)*(lengthscaleinv[0]**3)*K
    return g
@Cache_this(limit=3, ignore_args=())
def dK2_dvariancedX(self, X, X2, dim):
    """Mixed derivative w.r.t. variance and X[:, dim]: dK_dX / sigma^2 (linear in variance)."""
    return self.dK_dX(X,X2, dim)/self.variance

@Cache_this(limit=3, ignore_args=())
def dK2_dvariancedX2(self, X, X2, dim):
    """Mixed derivative w.r.t. variance and X2[:, dim]: dK_dX2 / sigma^2."""
    return self.dK_dX2(X,X2, dim)/self.variance

@Cache_this(limit=3, ignore_args=())
def dK3_dvariancedXdX2(self, X, X2, dim, dimX2):
    """Third derivative w.r.t. variance, X[:, dim] and X2[:, dimX2]: dK2_dXdX2 / sigma^2."""
    return self.dK2_dXdX2(X, X2, dim, dimX2)/self.variance
@Cache_this(limit=3, ignore_args=())
def dK2_dlengthscaledX(self, X, X2, dimX):
    """Mixed second derivative of K w.r.t. the lengthscale(s) and X[:, dimX].

    Returns a list of (n1, n2) arrays (one per input dimension) when ARD,
    otherwise a single (n1, n2) array.
    """
    r = self._scaled_dist(X, X2)
    K = self.K_of_r(r)
    if X2 is None:
        X2=X
    dist = X[:,None,:]-X2[None,:,:]
    lengthscaleinv = np.ones((X.shape[1]))/(self.lengthscale)
    if self.ARD:
        g = []
        for diml in range(X.shape[1]):
            # Product-rule term plus an extra term when the differentiated
            # lengthscale dimension coincides with dimX.
            g += [-1.*K*dist[:,:,dimX]*(dist[:,:,diml]**2)*(lengthscaleinv[dimX]**2)*(lengthscaleinv[diml]**3) + 2.*dist[:,:,dimX]*(lengthscaleinv[diml]**3)*K*(dimX == diml)]
    else:
        g = -1.*K*dist[:,:,dimX]*np.sum(dist**2, axis=2)*(lengthscaleinv[dimX]**5) + 2.*dist[:,:,dimX]*(lengthscaleinv[dimX]**3)*K
    return g

@Cache_this(limit=3, ignore_args=())
def dK2_dlengthscaledX2(self, X, X2, dimX2):
    """Mixed second derivative w.r.t. the lengthscale(s) and X2[:, dimX2].

    Sign-flipped dK2_dlengthscaledX, since dist enters with opposite sign.
    """
    tmp = self.dK2_dlengthscaledX(X, X2, dimX2)
    if self.ARD:
        return [-1.*g for g in tmp]
    else:
        return -1*tmp
@Cache_this(limit=3, ignore_args=())
def dK3_dlengthscaledXdX2(self, X, X2, dimX, dimX2):
    """Third derivative of K w.r.t. the lengthscale(s), X[:, dimX] and X2[:, dimX2].

    Returns a list of (n1, n2) arrays (one per input dimension) when ARD,
    otherwise a single (n1, n2) array. The branches accumulate the
    product-rule terms that appear when diml coincides with dimX and/or dimX2.
    """
    r = self._scaled_dist(X, X2)
    K = self.K_of_r(r)
    if X2 is None:
        X2=X
    dist = X[:,None,:]-X2[None,:,:]
    lengthscaleinv = np.ones((X.shape[1]))/(self.lengthscale)
    lengthscale2inv = lengthscaleinv**2
    if self.ARD:
        g = []
        for diml in range(X.shape[1]):
            # Base product-rule term.
            tmp = -1.*K*dist[:,:,dimX]*dist[:,:,dimX2]*(dist[:,:,diml]**2)*lengthscale2inv[dimX]*lengthscale2inv[dimX2]*(lengthscaleinv[diml]**3)
            if dimX == dimX2:
                tmp += K*lengthscale2inv[dimX]*(lengthscaleinv[diml]**3)*(dist[:,:,diml]**2)
            if diml == dimX:
                tmp += 2.*K*dist[:,:,dimX]*dist[:,:,dimX2]*lengthscale2inv[dimX2]*(lengthscaleinv[dimX]**3)
            if diml == dimX2:
                tmp += 2.*K*dist[:,:,dimX]*dist[:,:,dimX2]*lengthscale2inv[dimX]*(lengthscaleinv[dimX2]**3)
            if dimX == dimX2:
                tmp += -2.*K*(lengthscaleinv[dimX]**3)
            g += [tmp]
    else:
        g = -1.*K*dist[:,:,dimX]*dist[:,:,dimX2]*np.sum(dist**2, axis=2)*(lengthscaleinv[dimX]**7) +4*K*dist[:,:,dimX]*dist[:,:,dimX2]*(lengthscaleinv[dimX]**5)
        if dimX == dimX2:
            g += -2.*K*(lengthscaleinv[dimX]**3) + K*(lengthscaleinv[dimX]**5)*np.sum(dist**2, axis=2)
    return g
def __getstate__(self):
    """Pickle support: swap in the CPU psi-computation object, since the GPU one is not picklable."""
    dc = super(RBF, self).__getstate__()
    if self.useGPU:
        dc['psicomp'] = PSICOMP_RBF()
        dc['useGPU'] = False
    return dc

def __setstate__(self, state):
    """Unpickle support: default use_invLengthscale before the parent restores the saved state."""
    self.use_invLengthscale = False
    return super(RBF, self).__setstate__(state)
def spectrum(self, omega):
    """Spectral density S(omega) of the 1-D RBF kernel.

    For the squared-exponential kernel with variance sigma^2 and
    lengthscale l: S(omega) = sigma^2 * sqrt(2*pi) * l * exp(-l^2 * omega^2 / 2).
    """
    assert self.input_dim == 1 #TODO: higher dim spectra?
    # BUG FIX: the exponent used `self.lengthscale*2` (i.e. 2*l) instead of the
    # squared lengthscale `self.lengthscale**2` required by the SE spectral density.
    return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale**2*omega**2/2)
def parameters_changed(self):
    """Keep the lengthscale in sync with inv_l (lengthscale = 1/sqrt(inv_l)) before the parent update."""
    # 1e-200 guards against division by zero when inv_l underflows to 0.
    if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
    super(RBF,self).parameters_changed()
def get_one_dimensional_kernel(self, dim):
    """
    Specially intended for Grid regression.

    Returns a 1-D GridRBF kernel sharing this kernel's variance, tagged with
    the original number of dimensions ``dim``.
    """
    oneDkernel = GridRBF(input_dim=1, variance=self.variance.copy(), originalDimensions=dim)
    return oneDkernel
#---------------------------------------#
#             PSI statistics            #
#---------------------------------------#

def psi0(self, Z, variational_posterior):
    """psi0 statistic: E_q[ k(X, X) ] (delegated to the psi-computation backend)."""
    return self.psicomp.psicomputations(self, Z, variational_posterior)[0]

def psi1(self, Z, variational_posterior):
    """psi1 statistic: E_q[ k(X, Z) ]."""
    return self.psicomp.psicomputations(self, Z, variational_posterior)[1]

def psi2(self, Z, variational_posterior):
    """psi2 statistic summed over data points: E_q[ k(Z, X) k(X, Z) ]."""
    return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]

def psi2n(self, Z, variational_posterior):
    """Per-data-point psi2 statistic (not summed over n)."""
    return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
    """Set variance/lengthscale gradients from the psi-statistics gradients."""
    dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[:2]
    self.variance.gradient = dL_dvar
    self.lengthscale.gradient = dL_dlengscale
    if self.use_invLengthscale:
        # Chain rule for inv_l = 1/l**2: dl/d(inv_l) = -l**3/2.
        self.inv_l.gradient = dL_dlengscale*(self.lengthscale**3/-2.)

def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
    """Return the gradient of the psi statistics w.r.t. the inducing inputs Z."""
    return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2]

def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
    """Return the gradients of the psi statistics w.r.t. the variational posterior over X."""
    return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
def update_gradients_diag(self, dL_dKdiag, X):
    """Gradients from the diagonal of K; also propagates to inv_l via the chain rule."""
    super(RBF,self).update_gradients_diag(dL_dKdiag, X)
    # Chain rule for inv_l = 1/l**2: dl/d(inv_l) = -l**3/2.
    if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)

def update_gradients_full(self, dL_dK, X, X2=None):
    """Gradients from the full kernel matrix; also propagates to inv_l via the chain rule."""
    super(RBF,self).update_gradients_full(dL_dK, X, X2)
    if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
| esiivola/GPYgradients | GPy/kern/src/rbf.py | Python | bsd-3-clause | 9,472 | [
"Gaussian"
] | 5711df051c87baacf8ae6e2f211e8b497e992da11d6fd4dd924514b9cd37a10f |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from ansible import constants as C
ANSIBLE_COLOR=True
if C.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR=False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR=False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR=False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR=False
if C.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR=True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
# Map of color name -> ANSI SGR attribute string (used between "\033[" and "m").
codeCodes = {
    'black': '0;30', 'bright gray': '0;37',
    'blue': '0;34', 'white': '1;37',
    'green': '0;32', 'bright blue': '1;34',
    'cyan': '0;36', 'bright green': '1;32',
    'red': '0;31', 'bright cyan': '1;36',
    'purple': '0;35', 'bright red': '1;31',
    'yellow': '0;33', 'bright purple': '1;35',
    'dark gray': '1;30', 'bright yellow': '1;33',
    'normal': '0'
}
def stringc(text, color):
    """Wrap *text* in the ANSI escape sequence for *color* when color output is enabled.

    When color output is disabled, *text* is returned unchanged.
    """
    if not ANSIBLE_COLOR:
        return text
    return "\033[%sm%s\033[0m" % (codeCodes[color], text)
# --- end "pretty"
def colorize(lead, num, color):
    """Format ``lead=num``, colorized when color output is enabled, *num* is
    nonzero and a *color* is given; otherwise as plain text."""
    if num == 0 or color is None or not ANSIBLE_COLOR:
        return "%s=%-4s" % (lead, str(num))
    return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
def hostcolor(host, stats, color=True):
    """Return *host* padded for column output, colored by its play results:
    red on failures/unreachable, yellow on changes, green otherwise."""
    if not (ANSIBLE_COLOR and color):
        return "%-26s" % host
    if stats['failures'] != 0 or stats['unreachable'] != 0:
        return "%-37s" % stringc(host, 'red')
    if stats['changed'] != 0:
        return "%-37s" % stringc(host, 'yellow')
    return "%-37s" % stringc(host, 'green')
| majidaldo/ansible | v2/ansible/utils/color.py | Python | gpl-3.0 | 3,134 | [
"Brian"
] | 4b458630f3d0d824aaa53f9508fca0e7ad4b098d5d6fad5e821afba7e997e606 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.